--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -4967,6 +4967,7 @@ int can_nice(const struct task_struct *p
+@@ -4953,6 +4953,7 @@ int can_nice(const struct task_struct *p
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
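
The kernel/sched.c hunk lands immediately after the body of can_nice(), which reports whether a task may move to a given nice value (either the value fits within the task's RLIMIT_NICE or the caller has CAP_SYS_NICE). As a reference, a minimal sketch of how kernel code typically consults it before renicing a task; the example_ wrapper is illustrative only and assumes can_nice() is reachable from the calling code:

#include <linux/sched.h>
#include <linux/errno.h>

/* Sketch: refuse to renice a task unless can_nice() allows it.
 * can_nice() is the helper whose closing brace appears in the hunk
 * above; set_user_nice() is the standard scheduler setter;
 * example_renice() itself is hypothetical. */
static int example_renice(struct task_struct *p, int nice)
{
	if (!can_nice(p, nice))
		return -EPERM;

	set_user_nice(p, nice);
	return 0;
}
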
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -1231,6 +1231,7 @@ void unmap_kernel_range(unsigned long ad
+@@ -1232,6 +1232,7 @@ void unmap_kernel_range(unsigned long ad
vunmap_page_range(addr, end);
flush_tlb_kernel_range(addr, end);
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
-@@ -1346,6 +1347,7 @@ struct vm_struct *get_vm_area(unsigned l
+@@ -1369,6 +1370,7 @@ struct vm_struct *get_vm_area(unsigned l
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
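
Both mm/vmalloc.c hunks end at function boundaries: the first right after unmap_kernel_range(), which clears the page tables for a kernel virtual range and flushes the TLB, the second right after get_vm_area(), which reserves vmalloc address space without populating it. A minimal sketch of how the two are commonly paired with map_vm_area() (the struct page *** form visible in the context above); the example_ helpers are illustrative only:

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Sketch: reserve PAGE_SIZE of vmalloc address space, map one
 * caller-owned page into it, and later tear the mapping down again. */
static struct vm_struct *example_map_one_page(struct page *page)
{
	struct page **pages = &page;
	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_MAP);

	if (!area)
		return NULL;

	/* map_vm_area() returns 0 on success and advances *pages */
	if (map_vm_area(area, PAGE_KERNEL, &pages)) {
		free_vm_area(area);
		return NULL;
	}
	return area;	/* area->addr is the new kernel virtual address */
}

static void example_unmap_one_page(struct vm_struct *area)
{
	/* zap the page tables and flush the TLB for the range ... */
	unmap_kernel_range((unsigned long)area->addr, PAGE_SIZE);
	/* ... then hand the address range back to the vmalloc allocator */
	free_vm_area(area);
}
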
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
-@@ -872,6 +872,7 @@ extern bool skip_free_areas_node(unsigne
+@@ -871,6 +871,7 @@ extern bool skip_free_areas_node(unsigne
int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
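
The include/linux/mm.h hunk falls next to the shmem declarations; shmem_file_setup() creates an unlinked, tmpfs-backed file, which is the usual way to obtain anonymous, swappable storage behind a struct file. A minimal usage sketch, with the name and the zero flags argument chosen purely for illustration:

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/err.h>

/* Sketch: back an anonymous buffer with an unlinked tmpfs file.
 * shmem_file_setup() returns an ERR_PTR-encoded errno on failure;
 * the file is dropped with fput() when no longer needed. */
static struct file *example_shmem_backing(loff_t size)
{
	struct file *filp = shmem_file_setup("example-shm", size, 0);

	if (IS_ERR(filp))
		return filp;

	/* filp->f_mapping now supplies swappable page-cache pages */
	return filp;
}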