#ifndef _LVE_CGROUP_GENERIC_
#define _LVE_CGROUP_GENERIC_

/* generic CPU control */

struct lvp_ve_private;
struct light_ve;
struct c_private;

extern bool lve_kill_on_shrink;

void lve_cgroup_release(struct dentry *cgrp, int flags);

void generic_resource_unlink(uint32_t id, struct c_private *lcontext,
			     char *name);

void generic_lvp_path(char *name, uint32_t lvp_id);
void generic_lve_path(char *name, uint32_t lve_id);

int generic_lvp_init(struct lvp_ve_private *lvp, char *name);
void generic_lvp_fini(struct lvp_ve_private *lvp);
int generic_lve_init(struct light_ve *ve, char *name);
void generic_lve_fini(struct light_ve *ve);

/*
 * A subsystem mask is needed because entering the CPU limit and entering
 * the resource limits are controlled by separate flags.
 */
int generic_cgroup_enter(struct task_struct *task, struct c_private *lcontext,
			 unsigned long subsys_mask);

int generic_cpu_init_lve(struct light_ve *ve, char *name);

int lve_cgroup_cpuw_set(struct c_private *lcontext, int32_t new);
int lve_cgroup_cpus_set(struct c_private *lcontext, lve_limits_t reseller,
			lve_limits_t old, unsigned int ncpus);
int lve_cgroup_cpu_set(struct c_private *lcontext, lve_limits_t reseller,
		       lve_limits_t old, unsigned int ncpu);

int generic_cgroup_disable_swappiness(struct dentry *mem_cgrp);
int lve_cgroup_mem_set(struct c_private *lcontext, int new);

#endif
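
/*
 * Illustrative usage sketch only -- an assumption about how the helpers
 * declared above might be combined, not code from this module.  The
 * LVE_SUBSYS_CPU mask bit, the example_lve_setup() caller and the buffer
 * size are hypothetical; the real subsystem mask flags and name length are
 * defined elsewhere.  It also assumes generic_lve_path() formats the
 * per-LVE cgroup directory name for the given id into the supplied buffer.
 */
#if 0
static int example_lve_setup(struct light_ve *ve, struct c_private *lcontext,
			     lve_limits_t reseller, lve_limits_t old,
			     struct task_struct *task, uint32_t lve_id)
{
	char name[64];	/* hypothetical buffer size */
	int rc;

	/* Build the per-LVE cgroup name and create the cgroup directories. */
	generic_lve_path(name, lve_id);
	rc = generic_lve_init(ve, name);
	if (rc)
		return rc;

	/* Apply CPU limits: number of CPUs, then the CPU weight. */
	rc = lve_cgroup_cpus_set(lcontext, reseller, old, 2);
	if (rc == 0)
		rc = lve_cgroup_cpuw_set(lcontext, 100);

	/* Move the task into the new cgroups; only the CPU controller here. */
	if (rc == 0)
		rc = generic_cgroup_enter(task, lcontext, LVE_SUBSYS_CPU);

	if (rc)
		generic_lve_fini(ve);
	return rc;
}
#endif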