/*
 * ftrace-based function hooks for the LVE kernel module.
 *
 * Each hook registers an ftrace_ops with FTRACE_OPS_FL_IPMODIFY on a single
 * kernel symbol; the shared trampoline then redirects execution of that
 * symbol to an lve_* replacement function.  The replacements call back into
 * the original via LVE_HOOK_RECURSIVE_CALL (recursion guard, defined
 * elsewhere in the project).
 *
 * NOTE(review): the angle-bracket include names below were lost when this
 * copy of the file was produced (each line is a bare "#include").  Restore
 * them from revision control before building; they presumably cover
 * linux/ftrace.h, linux/module.h, linux/sched.h, linux/fsnotify_backend.h,
 * linux/tracepoint.h and friends -- TODO confirm against the original file.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef IMPL_LINK_PROT_EXPERIMENTAL
#include
#endif
#include "light_ve.h"
#include "lve_internal.h"
#include "lve_hooks.h"
#include "lve_debug.h"
#include "resource.h"
#include "tags.h"
#include "lve_global_params.h"
#include "lve_task_locker.h"
#include "lsm/link_protect.h"
#ifdef LVE_DEBUG_CGROUP_BREAK
#include "os/cgroup_lib.h"
#endif

/* All successfully registered hooks, linked through lve_hook::list. */
static LIST_HEAD(lve_hooks);

/* One registered ftrace hook: the ftrace_ops plus the replacement function. */
struct lve_hook {
	struct ftrace_ops ops;	/* must stay first-ish for container_of below */
	void *func;		/* replacement entry point to jump to */
	struct list_head list;	/* membership in lve_hooks */
};

/*
 * Common ftrace callback for every hook.  Unless the recursion guard says we
 * are already inside a replacement (lve_hook_recursive_check_reset()),
 * rewrite the saved program counter so the traced function resumes in
 * hook->func instead of its own body.
 */
static void lve_hook_trampoline(unsigned long ip, unsigned long pip,
				struct ftrace_ops *op,
#ifdef HAVE_FTRACE_REGS
				struct ftrace_regs *fs_regs)
#else
				struct pt_regs *pt_regs)
#endif
{
	struct lve_hook *hook = container_of(op, struct lve_hook, ops);
#ifdef HAVE_FTRACE_REGS
	/* newer kernels hand us ftrace_regs; the pt_regs live inside it */
	struct pt_regs *pt_regs = &fs_regs->regs;
#endif

	if (!lve_hook_recursive_check_reset())
		lve_set_program_counter(pt_regs, (unsigned long long)hook->func);
}

#if FEAT_LINK_PROT == 1
#ifdef IMPL_LINK_PROT_EXPERIMENTAL
/* Sentinel uid meaning "no link uid recorded for this pid". */
static kuid_t __empty_uid = { .val = -1 };
#define LVE_USR_EMPTY __empty_uid

/*
 * Per-pid owner uid recorded while walking a path for link protection,
 * indexed directly by current->pid.
 * NOTE(review): this is a static array of PID_MAX_LIMIT kuid_t entries --
 * potentially very large; presumably acceptable for this experimental
 * build, verify sizing.
 */
static kuid_t link_uids[PID_MAX_LIMIT];

/* Return the link uid recorded for the current task (or LVE_USR_EMPTY). */
kuid_t get_current_link_uid()
{
	return link_uids[current->pid];
}

/* True when no link uid has been recorded for the current task. */
static bool is_current_link_uid_empty(void)
{
	return uid_eq(get_current_link_uid(), LVE_USR_EMPTY);
}
#endif
#endif

#ifdef HAVE_FTRACE_OPS_FL_RECURSION
/* Newer kernels renamed the flag; keep the old spelling working. */
#define FTRACE_OPS_FL_RECURSION_SAFE FTRACE_OPS_FL_RECURSION
#endif

/*
 * Install a hook on kernel symbol @name redirecting it to @func.
 * Returns 0 on success or a negative errno.  On success the hook is queued
 * on lve_hooks for later bulk teardown by lve_unregister_hooks().
 *
 * NOTE(review): if register_ftrace_function() fails, the filter set by
 * ftrace_set_filter() is not released before kfree() -- confirm whether
 * ftrace_free_filter() is needed on this error path for the target kernels.
 */
int lve_register_hook(char *name, void *func)
{
	struct lve_hook *hook;
	int rc;

	hook = kzalloc(sizeof(*hook), GFP_KERNEL);
	if (!hook)
		return -ENOMEM;

	hook->func = func;
	hook->ops.func = lve_hook_trampoline;
	/* SAVE_REGS is required for IPMODIFY (we rewrite the saved PC) */
	hook->ops.flags = FTRACE_OPS_FL_RECURSION_SAFE |
			  FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY;

	rc = ftrace_set_filter(&hook->ops, name, strlen(name), 1);
	if (rc) {
		LVE_ERR("unable to apply hook filter (%s)\n", name);
		goto err_free_hook;
	}

	rc = register_ftrace_function(&hook->ops);
	if (rc) {
		LVE_ERR("unable to register a hook (%s), rc = %d\n", name, rc);
		goto err_free_hook;
	}

	list_add_tail(&hook->list, &lve_hooks);

	return 0;

err_free_hook:
	kfree(hook);
	return rc;
}

/* we don't need specific hook unregister yet */
static void lve_unregister_hooks(void)
{
	struct lve_hook *hook, *hook_next;

	/* _safe variant: each entry is freed while walking the list */
	list_for_each_entry_safe(hook, hook_next, &lve_hooks, list) {
		unregister_ftrace_function(&hook->ops);
		kfree(hook);
	}
}

#if OPENVZ_VERSION == 0
/*
 * Hook for search_binary_handler(): after a successful exec decision, let
 * lve_exec_process() veto execution of this binary for the current euid;
 * a veto is delivered as SIGSEGV to the task.
 */
static int lve_search_binary_handler_trace(struct linux_binprm *bprm)
{
	int rc = 0;

	rc = LVE_HOOK_RECURSIVE_CALL(lve_search_binary_handler(bprm));
	if (rc == 0 && lve_exec_process(bprm->file->f_path.dentry,
			from_kuid(&init_user_ns, current_euid()))) {
#ifdef HAVE_FORCE_SIG
		force_sig(SIGSEGV, current);
#else
		force_sig(SIGSEGV);
#endif
	}

	return rc;
}
#endif

#if defined(HAVE_CGROUP_POST_FORK_WITH_1ARG) || defined(HAVE_CGROUP_POST_FORK_WITH_2ARGS)
/*
 * Hook for cgroup_post_fork(): propagate the parent's LVE tag to the child
 * before the original post-fork processing runs.
 */
static void lve_cgroup_post_fork_trace(struct task_struct *child,
				       void *old_ss_priv)
{
	switch_tag_fork(child);
	LVE_HOOK_RECURSIVE_CALL(lve_cgroup_post_fork(child, old_ss_priv));
}
#else
#error "cgroup_post_fork has unsupported prototype"
#endif

/*
 * Hook for proc_exit_connector(): tear down the exiting task's LVE state
 * (if it carries a tag) before the original exit notification.
 */
static void lve_proc_exit_connector_trace(struct task_struct *tsk)
{
	struct switch_data *sw_data;

	sw_data = LVE_TAG_GET(tsk);
	if (sw_data != NULL) {
		LVE_DBG("sw_data=%px comm=%s\n", sw_data, tsk->comm);
		lve_exit_task(tsk, sw_data);
		/* Be careful here since last put can initiate i/o */
		LVE_TAG_PUT_DELAYED(sw_data);
	}
	LVE_HOOK_RECURSIVE_CALL(lve_proc_exit_connector(tsk));
}

#ifdef FEAT_XFS_QUOTA
/* We should be aware of the constant changes from version to version */
#if RHEL_MAJOR >= 9
#define XFS_QMOPT_FORCE_RES	(1u << 3) /* ignore quota limits */
#else
#define XFS_QMOPT_FORCE_RES	0x0000010 /* ignore quota limits */
#endif

/*
 * Hook for xfs_trans_dqresv(): when the bypass parameter is enabled, let
 * CAP_SYS_RESOURCE holders reserve past their XFS quota by forcing
 * XFS_QMOPT_FORCE_RES into the flags.
 */
int lve_xfs_trans_dqresv_trace(void *tp, void *mp, void *dqp, long nblks,
			       long ninos, uint flags)
{
	if (param_is_enabled(LVE_XFS_QUOTA_CAP_RES_BYPASS) &&
	    cap_raised(current_cap(), CAP_SYS_RESOURCE))
		flags |= XFS_QMOPT_FORCE_RES;

	return LVE_HOOK_RECURSIVE_CALL(lve_xfs_trans_dqresv(tp, mp, dqp,
							    nblks, ninos,
							    flags));
}
#endif

#ifdef FEAT_DC
/*
 * Hook for mnt_want_write(): deny write access (-EROFS) to any user whose
 * fsuid is at or above the lve_read_only_users threshold (threshold < 0
 * disables the check).
 */
static int lve_mnt_want_write_trace(struct vfsmount *m)
{
	if (lve_read_only_users >= 0 &&
	    __kuid_val(current_cred()->fsuid) >= lve_read_only_users)
		return -EROFS;

	return LVE_HOOK_RECURSIVE_CALL(mnt_want_write(m));
}
#endif

#ifdef LVE_DEBUG_CGROUP_BREAK
/* Subsystem mask of a cgroup's hierarchy root (field name varies by kernel). */
static inline unsigned long lve_get_subsys_mask(struct cgroup *cgrp)
{
#ifdef HAVE_CGROUP_ACTUAL_SUBSYS_MASK
	return cgrp->root->actual_subsys_mask;
#else
	return cgrp->root->subsys_mask;
#endif
}

/*
 * True if @leader (or, with @threadgroup, any thread in its group) carries
 * an LVE tag, i.e. currently lives inside an LVE.
 */
static bool proc_uses_lve(struct task_struct *leader, bool threadgroup)
{
	struct task_struct *task;
	bool in_use = false;

	/* Assuming at this point cgroups are locked */
	rcu_read_lock();
	task = leader;
	do {
		in_use |= (LVE_TAG(task) != NULL);
		if (!threadgroup || in_use)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();

	return in_use;
}

/* Debug switch, exported as a module parameter at the bottom of this file. */
static bool lve_debug_cgroups = true;

/*
 * Debug hook for cgroup_attach_task(): when an LVE-tagged task would be
 * moved into a foreign (non-empty) hierarchy from outside the LVE API,
 * log it (rate-limited) and pretend the move succeeded without doing it.
 */
int lve_cgroup_attach_task_trace(struct cgroup *cgrp, struct task_struct *tsk,
				 bool threadgroup)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 600 * HZ, 1);

	if (!(lve_debug_cgroups && proc_uses_lve(tsk, threadgroup) &&
	      /* systemd own empty hierarchy does not have a subsys */
	      cgrp && cgrp->root && lve_get_subsys_mask(cgrp)) ||
	    LVE_API_ENTERED())
		return LVE_HOOK_RECURSIVE_CALL(lve_cgroup_attach_task(cgrp,
						tsk, threadgroup));

	if (__ratelimit(&ratelimit_state)) {
		printk("%s(%d): moving task %s(%d) outside of lve, mask %lu\n",
		       current->comm, current->pid, tsk->comm, tsk->pid,
		       lve_get_subsys_mask(cgrp));
		dump_stack();
	}

	/* just pretend move succeeded but really do nothing */
	return 0;
}
#endif

#if FEAT_LINK_PROT == 1
/*
 * Hook for do_readlinkat(): give the link-protection LSM first say; only
 * its -EACCES verdict short-circuits the original call.
 */
static int lve_readlink_trace(int dfd, const char __user *pathname,
			      char __user *buf, int bufsiz)
{
	if (lve_sys_readlink(dfd, pathname, buf, bufsiz) == -EACCES)
		return -EACCES;

	return LVE_HOOK_RECURSIVE_CALL(lve_do_readlinkat(dfd, pathname, buf,
							 bufsiz));
}

#ifndef IMPL_LINK_PROT_EXPERIMENTAL
#ifndef HAVE_I_RWSEM
/*
 * Hook for lookup_slow(), i_mutex kernels: if the current task already
 * holds the parent's i_mutex, take the already-locked variant to avoid
 * self-deadlock; otherwise run the normal path.
 */
static int lve_lookup_slow_trace(struct nameidata *nd, struct path *path)
{
	struct mutex *i_mutex = &nd->path.dentry->d_inode->i_mutex;
	struct task_struct *owner = READ_ONCE(i_mutex->owner);

	if (likely(owner != current))
		return LVE_HOOK_RECURSIVE_CALL(lve_lookup_slow(nd, path));

	return lve_lookup_slow_locked(nd, path);
}
#else
/*
 * RWSEM code using two low bits of "owner" to store internal state
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 1)
#define RWSEM_BITS_MASK	(RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED)

/*
 * Hook for lookup_slow(), i_rwsem kernels: detect "current already owns the
 * parent's i_rwsem for write" by decoding the owner word (low bits are
 * rwsem-internal state, see masks above) and, if so, call the lock-held
 * lookup variant; otherwise fall through to the default path.
 */
static struct dentry *lve_lookup_slow_trace(const struct qstr *name,
					    struct dentry *dir,
					    unsigned int flags)
{
	struct rw_semaphore *i_rwsem = &dir->d_inode->i_rwsem;
	unsigned long owner;

	if (!rwsem_is_locked(i_rwsem))
		goto out_default;

#ifdef HAVE_OWNER_ATOMIC
	owner = atomic_long_read(&i_rwsem->owner);
#else
	owner = (unsigned long)READ_ONCE(i_rwsem->owner);
#endif
	/* reader-owned or unknown owner: not our write lock */
	if (owner == 0UL || owner & RWSEM_READER_OWNED)
		goto out_default;

	owner &= ~RWSEM_BITS_MASK;
	if (unlikely((struct task_struct *)owner == current))
		return lve__lookup_slow(name, dir, flags);

out_default:
	return LVE_HOOK_RECURSIVE_CALL(lve_lookup_slow(name, dir, flags));
}
#endif
#endif

#ifdef IMPL_LINK_PROT_EXPERIMENTAL
/*
 * Record @uid as the link uid for the current task.  A first call stores
 * it; a later call with a different uid fails with -EACCES.
 */
int set_current_link_uid(kuid_t uid)
{
	if (uid_eq(link_uids[current->pid], LVE_USR_EMPTY))
		link_uids[current->pid] = uid;
	// if the previous link uid doesn't match - fail.
	else if (!uid_eq(uid, link_uids[current->pid]))
		return -EACCES;

	return 0;
}

/* Forget the link uid recorded for the current task. */
void reset_current_link_uid()
{
	link_uids[current->pid] = LVE_USR_EMPTY;
}

/*
 * walk_component() flag values differ between kernel generations; map the
 * "is this the last path element" tests accordingly.
 */
#if RHEL_MAJOR == 9
enum {WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4};
#define LOOKUP_LAST_CHECK(flags) (flags == WALK_TRAILING)
#define LOOKUP_LAST_NESTED(flags) (flags == 0)
#else
enum {WALK_FOLLOW = 1, WALK_MORE = 2};
#define LOOKUP_LAST_CHECK(flags) (flags == 0)
#define LOOKUP_LAST_NESTED(flags) (flags == WALK_FOLLOW)
#endif

/*
 * If a link uid was recorded for this walk, require the final inode to be
 * owned by that uid; return -EACCES otherwise, 0 when allowed.
 */
static int lve_link_check(struct inode *inode)
{
	int ret = 0;
	/* Last non-symlink path element */
	kuid_t link_uid = get_current_link_uid();

	if (!uid_eq(link_uid, LVE_USR_EMPTY)) {
		if (!uid_eq(inode->i_uid, link_uid))
			ret = -EACCES;
	}

	return ret;
}

#ifdef HAVE_OPEN_LAST
/*
 * Hook for do_last()/open_last_lookups() (returns the next symlink name or
 * an ERR_PTR): after a successful original call, apply the link-uid check
 * to the resolved inode.
 */
static char *lve_do_last_trace(struct nameidata *nd, struct file *file,
			       const void *op)
{
	char *ret = LVE_HOOK_RECURSIVE_CALL(lve_do_last(nd, file, op));
	int rc;

	if (ret)
		return ret;

	if ((rc = lve_link_check(nd_get_inode(nd))) != 0)
		ret = ERR_PTR(rc);

	return ret;
}

/*
 * Hook for walk_component() (char* flavour): apply the link-uid check when
 * this component is the last element of the walk (plain or nested).
 */
static char *lve_walk_component_trace(struct nameidata *nd, int flags)
{
	char *ret = LVE_HOOK_RECURSIVE_CALL(lve_walk_component(nd, flags));
	int rc = 0;

	if (ret)
		return ret;

	if (LOOKUP_LAST_CHECK(flags) || LOOKUP_LAST_NESTED(flags))
		rc = lve_link_check(nd_get_inode(nd));

	if (rc)
		ret = ERR_PTR(rc);

	return ret;
}
#else
/* Hook for do_last() (int flavour): same link-uid check, errno-style. */
static int lve_do_last_trace(struct nameidata *nd, struct file *file,
			     const void *op)
{
	int ret = LVE_HOOK_RECURSIVE_CALL(lve_do_last(nd, file, op));

	if (ret)
		return ret;

	return lve_link_check(nd_get_inode(nd));
}

/* Hook for walk_component() (int flavour): same last-element check. */
static int lve_walk_component_trace(struct nameidata *nd, int flags)
{
	int rc = LVE_HOOK_RECURSIVE_CALL(lve_walk_component(nd, flags));

	if (rc)
		return rc;

	if (LOOKUP_LAST_CHECK(flags) || LOOKUP_LAST_NESTED(flags))
		rc = lve_link_check(nd_get_inode(nd));

	return rc;
}
#endif

/*
 * Hook for terminate_walk(): a path walk is ending, so drop any link uid
 * recorded for the current task (fast path when none was recorded).
 */
static void lve_terminate_walk_trace(struct nameidata *nd)
{
	LVE_HOOK_RECURSIVE_CALL(lve_terminate_walk(nd));

	if (likely(is_current_link_uid_empty()))
		return;

	reset_current_link_uid();
}

/* Register the three path-walk hooks used by experimental link protection. */
static int lve_register_lookup_hooks(void)
{
	int rc;

	rc = lve_register_hook("terminate_walk", lve_terminate_walk_trace);
	if (rc < 0)
		return rc;

	rc = lve_register_hook("walk_component", lve_walk_component_trace);
	if (rc < 0)
		return rc;

	return lve_register_hook(LVE_DO_LAST_SYM, lve_do_last_trace);
}

extern int lve_handle_symlink_rename(int olddfd, const char __user *oldname,
				     int newdfd, const char __user *newname,
				     unsigned int flags);

#if RHEL_MAJOR < 9
/*
 * Hook for do_renameat2() (__user-string prototype): run the symlink-rename
 * policy first; only on approval forward to the original.
 */
static int lve_do_renameat2_trace(int olddfd, const char __user *oldname,
				  int newdfd, const char __user *newname,
				  unsigned int flags)
{
	int rc = lve_handle_symlink_rename(olddfd, oldname, newdfd, newname,
					   flags);

	if (rc < 0)
		return rc;

	return LVE_HOOK_RECURSIVE_CALL(lve_do_renameat2(olddfd, oldname,
							newdfd, newname,
							flags));
}
#else
/*
 * Hook for do_renameat2() (struct filename prototype): same policy check;
 * ERR_PTR names are passed straight through to the original for it to
 * report the error.
 */
static int lve_do_renameat2_trace(int olddfd, struct filename *oldname,
				  int newdfd, struct filename *newname,
				  unsigned int flags)
{
	int rc = 0;

	if (!IS_ERR(oldname) && !IS_ERR(newname))
		rc = lve_handle_symlink_rename(olddfd, oldname->name, newdfd,
					       newname->name, flags);

	return rc < 0 ? rc :
		LVE_HOOK_RECURSIVE_CALL(lve_do_renameat2(olddfd, oldname,
							 newdfd, newname,
							 flags));
}
#endif

extern int lve_do_handle_symlink_create(const char __user *oldname, int newdfd,
					const char __user *newname);

#if RHEL_MAJOR < 9
/*
 * Hook for do_symlinkat() (__user-string prototype): run the symlink-create
 * policy first; only on approval forward to the original.
 */
static long lve_do_symlinkat_trace(const char __user *oldname, int newdfd,
				   const char __user *newname)
{
	int rc = lve_do_handle_symlink_create(oldname, newdfd, newname);

	if (rc < 0)
		return rc;

	return LVE_HOOK_RECURSIVE_CALL(lve_do_symlinkat(oldname, newdfd,
							newname));
}
#else
/*
 * Hook for do_symlinkat() (struct filename prototype): same policy check;
 * ERR_PTR names bypass the check and go to the original.
 */
static long lve_do_symlinkat_trace(struct filename *oldname, int newdfd,
				   struct filename *newname)
{
	int rc = 0;

	if (!IS_ERR(oldname) && !IS_ERR(newname))
		rc = lve_do_handle_symlink_create(oldname->name, newdfd,
						  newname->name);

	return rc < 0 ? rc :
		LVE_HOOK_RECURSIVE_CALL(lve_do_symlinkat(oldname, newdfd,
							 newname));
}
#endif
#endif
#endif

#ifdef FEAT_DC
#ifdef HAVE_FSNOTIFY_7ARGS
/* 7-arg fsnotify() always passes the name as a qstr */
#undef HAVE_FSNOTIFY_QSTR_NAME
#define HAVE_FSNOTIFY_QSTR_NAME 1
/*
 * Hook for fsnotify(): classify the event (mkdir/rmdir/create/link/symlink/
 * unlink/rename) and feed it to the directory-cache event processor before
 * delivering the original notification.  Events from uids below
 * lve_dc_min_uid, or when lve_dc is off, are ignored.
 */
static int lve_fsnotify_trace(__u32 mask, const void *datap, int data_type,
			      struct inode *dir, const struct qstr *oname,
			      struct inode *oinode, u32 cookie)
#else
static int lve_fsnotify_trace(struct inode *oinode, __u32 mask, void *datap,
			      int data_type, void *oname, u32 cookie)
#endif
{
	enum dc_event event;
#ifdef HAVE_FSNOTIFY_7ARGS
	struct inode *inode = dir;
#else
	struct inode *inode = oinode;
#endif
#ifdef HAVE_FSNOTIFY_QSTR_NAME
	struct qstr *q;
#endif
	struct dentry *dentry = NULL;
	const char *name = NULL;
	int namelen = 0;

	if (!lve_dc)
		goto out;

	if (from_kuid(&init_user_ns, current_fsuid()) < lve_dc_min_uid)
		goto out;

#ifdef HAVE_FSNOTIFY_QSTR_NAME
	q = (struct qstr *)oname;
	if (q) {
		name = q->name;
		namelen = q->len;
	}
#else
	if (oname) {
		name = oname;
		namelen = strlen(name);
	}
#endif

	if ((mask & (FS_CREATE|FS_ISDIR)) == (FS_CREATE|FS_ISDIR)) {
		/* mkdir */
		event = DC_MKDIR;
	} else if ((mask & (FS_DELETE|FS_ISDIR)) == (FS_DELETE|FS_ISDIR)) {
		/* rmdir */
		event = DC_RMDIR;
	} else if (mask & FS_CREATE) {
		struct inode *sinode;

		/* create, link, symlink */
		if (data_type == FSNOTIFY_EVENT_INODE)
			sinode = (struct inode *)datap;
		else
#ifdef HAVE_FSNOTIFY_7ARGS
		if (data_type == FSNOTIFY_EVENT_DENTRY)
			sinode = d_inode((struct dentry *)datap);
		else
#endif
			WARN_ON(1);
		/*
		 * NOTE(review): if data_type matched neither case above,
		 * sinode is read uninitialized right below after the
		 * WARN_ON -- confirm whether other data_type values can
		 * reach FS_CREATE on the supported kernels.
		 */
		if (S_ISLNK(sinode->i_mode))
			event = DC_SYMLINK;
		else if (sinode->i_nlink > 1)
			event = DC_LINK;
		else
			event = DC_CREATE;
	} else if (mask & FS_DELETE) {
		/* delete */
		event = DC_UNLINK;
	} else if (mask & (FS_MOVED_FROM|FS_MOVED_TO)) {
		/* move */
		event = DC_RENAME;
	} else {
		goto out;
	}

	lve_fsnotify_process_event(event, inode, name, namelen, dentry);
out:
#ifdef HAVE_FSNOTIFY_7ARGS
	return LVE_HOOK_RECURSIVE_CALL(fsnotify(mask, datap, data_type, dir,
						oname, oinode, cookie));
#else
	return LVE_HOOK_RECURSIVE_CALL(fsnotify(oinode, mask, datap,
						data_type, oname, cookie));
#endif
}

#ifdef HAVE_FSNOTIFY_NAMEREMOTE
/*
 * Hook for fsnotify_nameremove(): report unlink/rmdir of @dentry to the
 * directory cache (subject to the same lve_dc / min-uid gating), then run
 * the original.
 */
static void lve_fsnotify_nameremove_trace(struct dentry *dentry, int isdir)
{
	enum dc_event event = DC_UNLINK;

	if (!lve_dc)
		goto out;

	if (from_kuid(&init_user_ns, current_fsuid()) < lve_dc_min_uid)
		goto out;

	if (IS_ROOT(dentry))
		goto out;

	if (isdir)
		event = DC_RMDIR;

	lve_fsnotify_process_event(event, NULL, NULL, 0, dentry);
out:
	LVE_HOOK_RECURSIVE_CALL(fsnotify_nameremove(dentry, isdir));
}
#endif /* HAVE_FSNOTIFY_NAMEREMOTE */

/*
 * Hook for __fsnotify_parent(): classify attribute/close-write (and, on
 * kernels without fsnotify_nameremove, delete) events for the directory
 * cache, then deliver the original parent notification.
 */
static int lve___fsnotify_parent_trace(
#ifndef HAVE_FSNOTIFY_7ARGS
		struct path *path, struct dentry *dentry, __u32 mask)
#else
		struct dentry *dentry, __u32 mask, const void *data,
		int data_type)
#endif
{
	enum dc_event event;

	if (!lve_dc)
		goto out;

	if (from_kuid(&init_user_ns, current_fsuid()) < lve_dc_min_uid)
		goto out;

#ifndef HAVE_FSNOTIFY_7ARGS
	if (!dentry)
		dentry = path->dentry;
#endif

#ifndef HAVE_FSNOTIFY_NAMEREMOTE
	if ((mask & (FS_DELETE|FS_ISDIR)) == (FS_DELETE|FS_ISDIR)) {
		/* rmdir */
		event = DC_RMDIR;
	} else if (mask & FS_DELETE) {
		/* unlink */
		event = DC_UNLINK;
	} else
#endif
	if (mask & (FS_MODIFY|FS_ATTRIB)) {
		/* FIXME: chmod, chown, truncate */
		event = DC_CHMOD;
	} else if ((mask & FS_CLOSE_WRITE) &&
		   S_ISREG(dentry->d_inode->i_mode)) {
		event = DC_CLOSE;
	} else {
		goto out;
	}

	lve_fsnotify_process_event(event, NULL, NULL, 0, dentry);
out:
#ifndef HAVE_FSNOTIFY_7ARGS
	return LVE_HOOK_RECURSIVE_CALL(__fsnotify_parent(path, dentry, mask));
#else
	return LVE_HOOK_RECURSIVE_CALL(__fsnotify_parent(dentry, mask, data,
							 data_type));
#endif
}
#endif /* FEAT_DC */

#ifdef FEAT_IOLIMIT
#ifdef HAVE_IOMAP_DIO_RW
/* NOTE(review): header name lost in this copy (likely linux/iomap.h). */
#include
/*
 * Hook for iomap_dio_rw(): account direct-I/O write volume against the
 * task's I/O limits before forwarding to the original.
 */
ssize_t lve_iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
			 const struct iomap_ops *ops,
			 const struct iomap_dio_ops *dops,
			 unsigned int dio_flags, size_t done_before)
{
	if (iov_iter_rw(iter) == WRITE)
		lve_iolimits_dio_write_account(iov_iter_count(iter));

	return LVE_HOOK_RECURSIVE_CALL(iomap_dio_rw(iocb, iter, ops, dops,
						    dio_flags, done_before));
}
#endif
#endif

#if RHEL_MAJOR == 0
/* This hook is only needed for pid-to-swdata mappings */
static int lve_de_thread_trace(struct task_struct *tsk)
{
	struct switch_data *tag;
	int rc;

	/* Thread cannot go away because it's always current */
	BUG_ON(tsk != current);

	/*
	 * de_thread() can change the task's pid; detach the tag while it
	 * runs and re-attach afterwards so the mapping stays consistent.
	 */
	lve_task_lock(tsk);
	tag = LVE_TAG(tsk);
	LVE_TAG_CLEAR(tsk);

	rc = LVE_HOOK_RECURSIVE_CALL(lve_de_thread(tsk));

	LVE_TAG_SET(tsk, tag);
	/* kill the task if its LVE went away while we were detached */
	if (tag && tag->sw_from->lve_unlinked &&
	    !(tag->sw_flags & LVE_ENTER_NO_KILLABLE))
		lve_kill(tsk);
	lve_task_unlock(tsk);

	return rc;
}
#endif

/*
 * Hook for tty_ioctl(): reject the legacy TIOCSTI input-injection ioctl
 * unless explicitly re-enabled via lve_tty_legacy_tiocsti.
 */
static long lve_tty_ioctl_trace(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == TIOCSTI && !lve_tty_legacy_tiocsti)
		return -EIO;

	return LVE_HOOK_RECURSIVE_CALL(lve_tty_ioctl(file, cmd, arg));
}

/*
 * Register every tracepoint probe and ftrace hook the module uses.
 * Returns 0 on success; on failure everything registered so far is torn
 * down (ftrace hooks in bulk, tracepoints via the label ladder) and a
 * negative errno is returned.
 */
int lve_hooks_init(void)
{
	int rc;

#ifdef FEAT_IOLIMIT
#if OPENVZ_VERSION == 0
	rc = tracepoint_probe_register(lve_iolimits_rq_issue_ptr,
				       lve_iolimits_rq_issue, NULL);
	if (rc < 0)
		goto cleanup_probes;

	rc = tracepoint_probe_register(lve_iolimits_wb_dirty_ptr,
				       lve_iolimits_wb_dirty, NULL);
	if (rc < 0)
		goto cleanup_trace_bio;

#ifndef HAVE_IOMAP_DIO_RW
	rc = tracepoint_probe_register(lve_iolimits_ext4_direct_IO_enter_ptr,
				       lve_iolimits_ext4_direct_IO_enter,
				       NULL);
	if (rc < 0)
		goto cleanup_trace_dirty;

	rc = tracepoint_probe_register(lve_iolimits_xfs_file_direct_write_ptr,
				       lve_iolimits_xfs_file_direct_write,
				       NULL);
	if (rc < 0)
		goto cleanup_trace_ext4;
#endif
#endif
#endif

#ifdef FEAT_IOLIMIT
#ifdef HAVE_IOMAP_DIO_RW
	rc = lve_register_hook("iomap_dio_rw", lve_iomap_dio_rw);
	if (rc)
		goto cleanup_all;
#endif
#endif

	rc = lve_register_hook("cgroup_post_fork", lve_cgroup_post_fork_trace);
	if (rc)
		goto cleanup_all;

#ifdef FEAT_XFS_QUOTA
	rc = lve_register_hook("xfs_trans_dqresv", lve_xfs_trans_dqresv_trace);
	if (rc)
		goto cleanup_all;
#endif

	rc = lve_register_hook("proc_exit_connector",
			       lve_proc_exit_connector_trace);
	if (rc)
		goto cleanup_all;

#ifdef FEAT_DC
	rc = lve_register_hook("fsnotify", lve_fsnotify_trace);
	if (rc)
		goto cleanup_all;

#ifdef HAVE_FSNOTIFY_NAMEREMOTE
	rc = lve_register_hook("fsnotify_nameremove",
			       lve_fsnotify_nameremove_trace);
	if (rc)
		goto cleanup_all;
#endif

	rc = lve_register_hook("__fsnotify_parent",
			       lve___fsnotify_parent_trace);
	if (rc)
		goto cleanup_all;
#endif

#ifdef FEAT_DC
	rc = lve_register_hook("mnt_want_write", lve_mnt_want_write_trace);
	if (rc)
		goto cleanup_all;
#endif

#if OPENVZ_VERSION == 0
	rc = lve_register_hook("search_binary_handler",
			       lve_search_binary_handler_trace);
	if (rc)
		goto cleanup_all;
#endif

#ifdef LVE_DEBUG_CGROUP_BREAK
	rc = lve_register_hook("cgroup_attach_task",
			       lve_cgroup_attach_task_trace);
	if (rc)
		goto cleanup_all;
#endif

	rc = lve_register_hook("get_avenrun", lve_get_avenrun_trace);
	if (rc)
		goto cleanup_all;

#if FEAT_LINK_PROT == 1
	rc = lve_register_hook("do_readlinkat", lve_readlink_trace);
	if (rc)
		goto cleanup_all;

#ifndef IMPL_LINK_PROT_EXPERIMENTAL
	rc = lve_register_hook("lookup_slow", lve_lookup_slow_trace);
	if (rc)
		goto cleanup_all;
#endif

#ifdef IMPL_LINK_PROT_EXPERIMENTAL
	/* fills every kuid with -1 (all-ones bytes), i.e. LVE_USR_EMPTY */
	memset(link_uids, LVE_USR_EMPTY.val, sizeof(link_uids));

	rc = lve_register_hook("do_renameat2", lve_do_renameat2_trace);
	if (rc)
		goto cleanup_all;

	rc = lve_register_hook("do_symlinkat", lve_do_symlinkat_trace);
	if (rc)
		goto cleanup_all;

	rc = lve_register_lookup_hooks();
	if (rc)
		goto cleanup_all;
#endif
#endif

	rc = lve_register_hook("tty_ioctl", lve_tty_ioctl_trace);
	if (rc)
		goto cleanup_all;

#if RHEL_MAJOR == 0
	rc = lve_register_hook("de_thread", lve_de_thread_trace);
	if (rc)
		goto cleanup_all;
#endif

#if OPENVZ_VERSION != 0
	rc = lve_exec_init();
	if (rc)
		goto cleanup_all;
#endif

	return 0;

cleanup_all:
	lve_unregister_hooks();
#ifdef FEAT_IOLIMIT
#if OPENVZ_VERSION == 0
#ifndef HAVE_IOMAP_DIO_RW
	tracepoint_probe_unregister(lve_iolimits_xfs_file_direct_write_ptr,
				    lve_iolimits_xfs_file_direct_write, NULL);
cleanup_trace_ext4:
	tracepoint_probe_unregister(lve_iolimits_ext4_direct_IO_enter_ptr,
				    lve_iolimits_ext4_direct_IO_enter, NULL);
cleanup_trace_dirty:
#endif
	tracepoint_probe_unregister(lve_iolimits_wb_dirty_ptr,
				    lve_iolimits_wb_dirty, NULL);
cleanup_trace_bio:
	tracepoint_probe_unregister(lve_iolimits_rq_issue_ptr,
				    lve_iolimits_rq_issue, NULL);
cleanup_probes:
#endif
#endif
	return rc;
}

/* Undo everything lve_hooks_init() registered. */
void lve_hooks_fini(void)
{
#if OPENVZ_VERSION != 0
	lve_exec_fini();
#endif
	lve_unregister_hooks();

#ifdef FEAT_IOLIMIT
#if OPENVZ_VERSION == 0
#ifndef HAVE_IOMAP_DIO_RW
	tracepoint_probe_unregister(lve_iolimits_xfs_file_direct_write_ptr,
				    lve_iolimits_xfs_file_direct_write, NULL);
	tracepoint_probe_unregister(lve_iolimits_ext4_direct_IO_enter_ptr,
				    lve_iolimits_ext4_direct_IO_enter, NULL);
#endif
	tracepoint_probe_unregister(lve_iolimits_rq_issue_ptr,
				    lve_iolimits_rq_issue, NULL);
	tracepoint_probe_unregister(lve_iolimits_wb_dirty_ptr,
				    lve_iolimits_wb_dirty, NULL);
#endif
#endif
}

#ifdef LVE_DEBUG_CGROUP_BREAK
module_param(lve_debug_cgroups, bool, 0644);
#endif