#ifndef _LVE_OS_COMPAT_H_
#define _LVE_OS_COMPAT_H_

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/capability.h>

#include "lve_kmod_c.h"
#include "kernel_exp.h"

struct proc_dir_entry;
struct lvp_ve_private;

#ifdef CONFIG_VE
#ifdef HAVE_VE_PROC_ROOT
#define lve_procfs_root(lvp)	(current->ve_task_info.owner_env->proc_root)
#else
static inline struct proc_dir_entry *lve_procfs_root(struct lvp_ve_private *lvp)
{
	return NULL;
}
#endif /* HAVE_VE_PROC_ROOT */
#else
#ifndef HAVE_PROC_ROOT
static inline struct proc_dir_entry *lve_procfs_root(struct lvp_ve_private *lvp)
{
	return NULL;
}
#else
#define lve_procfs_root(lvp)	(&glob_proc_root)
#endif
#endif /* CONFIG_VE */

#ifdef LVE_PER_VE
#define CAP_LVE_ADMIN	(CAP_VE_SYS_ADMIN)
#else
#define CAP_LVE_ADMIN	(CAP_SYS_ADMIN)
#endif

#include <linux/rcupdate.h>

/* Look up a task by its virtual pid and take a reference on it. */
static inline struct task_struct *lve_find_task(pid_t pid)
{
	struct task_struct *task;

	rcu_read_lock();
	task = lve_find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

/* Some kernels expose tasklist_lock as a qrwlock. */
#ifdef HAVE_TASKLIST_QRWLOCK
#define task_rlock()	qread_lock(lve_tasklist_lock_ptr)
#define task_runlock()	qread_unlock(lve_tasklist_lock_ptr)
#else
#define task_rlock()	read_lock(lve_tasklist_lock_ptr)
#define task_runlock()	read_unlock(lve_tasklist_lock_ptr)
#endif

/* Kernels that use kuid_t keep the raw euid in .val. */
#ifdef HAVE_PLAIN_CRED_EUID
#define lve_cred_euid(credp)	((credp)->euid)
#else
#define lve_cred_euid(credp)	((credp)->euid.val)
#endif

#ifdef HAVE_DO_EACH_THREAD_ALL
#define lve_do_each_thread	do_each_thread_all
#define lve_while_each_thread	while_each_thread_all
#else
#define lve_do_each_thread	do_each_thread
#define lve_while_each_thread	while_each_thread
#endif

#ifdef HAVE_UB_SYNC_MEMCG
#define lve_sync_ub_usage	lve_ub_sync_memcg
#else
#define lve_sync_ub_usage	ub_update_resources
#endif

/* vfs_rename() has grown extra arguments over time; normalize to four. */
#ifndef HAVE_RENAMEDATA
#ifdef HAVE_VFS_RENAME_WITH_6ARGS
#define lve_vfs_rename(a, b, c, d)	vfs_rename(a, b, c, d, NULL, 0)
#else
#ifdef HAVE_VFS_RENAME_WITH_5ARGS
#define lve_vfs_rename(a, b, c, d)	vfs_rename(a, b, c, d, NULL)
#else
#define lve_vfs_rename	vfs_rename
#endif /* HAVE_VFS_RENAME_WITH_5ARGS */
#endif /* HAVE_VFS_RENAME_WITH_6ARGS */
#else
extern int lve_vfs_rename(struct inode *olddir, struct dentry *olddentry,
			  struct inode *newdir, struct dentry *newdentry);
#endif

struct lve_namespace {
	struct nsproxy		*lve_nsproxy;
	struct fs_struct	*lve_fs;
};

/* Kernels that dropped atomic_{set,clear}_mask(): emulate via atomic_{or,and}(). */
#ifndef HAVE_ATOMIC_SET_MASK
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_and(~mask, v);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}
#endif

#ifdef HAVE_INODE_LOCK
#define LOCK_INODE(i)		inode_lock(i)
#define LOCK_INODE_NESTED(i, n)	inode_lock_nested(i, n)
#define UNLOCK_INODE(i)		inode_unlock(i)
#else
#define LOCK_INODE(i)		mutex_lock(&(i)->i_mutex)
#define LOCK_INODE_NESTED(i, n)	mutex_lock_nested(&(i)->i_mutex, n)
#define UNLOCK_INODE(i)		mutex_unlock(&(i)->i_mutex)
#endif

/* PDE_DATA() was renamed to pde_data() in v5.17. */
#ifndef HAVE_PDE_DATA
#define PDE_DATA(i)	pde_data(i)
#endif

/* Newer kernel_read()/kernel_write() take the file position by pointer, last. */
#ifdef HAVE_KERNEL_READ_LAST_POSP
static inline ssize_t lve_kernel_read(struct file *f, void *buf, size_t count)
{
	loff_t off = 0;

	return kernel_read(f, buf, count, &off);
}
#else
static inline ssize_t lve_kernel_read(struct file *f, void *buf, size_t count)
{
	return kernel_read(f, 0, buf, count);
}
#endif

#ifdef HAVE_KERNEL_WRITE_LAST_POSP
static inline ssize_t lve_kernel_write(struct file *f, const void *buf, size_t count)
{
	loff_t off = 0;

	return kernel_write(f, buf, count, &off);
}
#else
static inline ssize_t lve_kernel_write(struct file *f, const void *buf, size_t count)
{
	return kernel_write(f, buf, count, 0);
}
#endif

#ifdef HAVE_SMP_MB__AFTER_CLEAR_BIT
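/*
 * smp_mb__after_clear_bit() was folded into smp_mb__after_atomic() in
 * v3.16; use whichever spelling this kernel provides.
 */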
#define lve_mb_after_clear_bit	smp_mb__after_clear_bit
#else
#define lve_mb_after_clear_bit	smp_mb__after_atomic
#endif

#ifdef HAVE_GET_USER_PAGES_REMOTE
static inline long lve_get_user_pages(struct task_struct *task,
				      struct mm_struct *mm,
				      unsigned long addr, unsigned long len)
{
#ifdef HAVE_GET_USER_PAGES_REMOTE8
	return get_user_pages_remote(task, mm, addr, len, 0, 0, NULL, NULL);
#else
	/* Newer kernels dropped the task argument. */
	int locked = 0;

	return get_user_pages_remote(current->mm, addr, len, FOLL_TOUCH,
				     NULL, NULL, &locked);
#endif
}
#else
static inline long lve_get_user_pages(struct task_struct *task,
				      struct mm_struct *mm,
				      unsigned long addr, unsigned long len)
{
	/*
	 * Setting write = 0, so we:
	 * 1. Don't force COW to occur
	 * 2. Don't force anonymous allocations
	 *    (for a read fault, the same zeroed page is always used)
	 */
	return get_user_pages(task, mm, addr, len, 0, 0, NULL, NULL);
}
#endif

#ifdef CONFIG_X86_64
#define lve_set_program_counter(regs, pcc)	((regs)->ip = (pcc))
#elif defined(CONFIG_ARM64)
#define lve_set_program_counter(regs, pcc)	((regs)->pc = (pcc))
#endif

#if IS_BUILTIN(CONFIG_CGROUP_PIDS)
#define HAVE_PIDS_CGRP
#endif

/* CALC_LOAD moved to <linux/sched/loadavg.h> and was later replaced by calc_load(). */
#ifndef CALC_LOAD
#include <linux/sched/loadavg.h>
#endif
#ifndef CALC_LOAD
#define CALC_LOAD(load, exp, n)	load = calc_load(load, exp, n)
#endif

/* force_sig() lost its task argument in v5.3. */
#ifdef HAVE_FORCE_SIG
#define lve_kill(p)	force_sig(SIGKILL, p)
#else
#define lve_kill(p)	send_sig_info(SIGKILL, SEND_SIG_PRIV, p)
#endif

#ifndef HAVE_KTIME_GET_SECONDS
#define ktime_get_seconds	get_seconds
#endif

#endif /* _LVE_OS_COMPAT_H_ */