--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -4905,6 +4905,7 @@ int can_nice(const struct task_struct *p
+@@ -4923,6 +4923,7 @@ int can_nice(const struct task_struct *p
 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 		capable(CAP_SYS_NICE));
 }
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -1395,6 +1395,7 @@ unsigned long zap_page_range(struct vm_a
+@@ -1396,6 +1396,7 @@ unsigned long zap_page_range(struct vm_a
 	tlb_finish_mmu(&tlb, address, end);
 	return end;
 }
 
 /**
  * zap_vma_ptes - remove ptes mapping the vma
-@@ -3014,6 +3015,7 @@ static inline int check_stack_guard_page
+@@ -3071,6 +3072,7 @@ static inline int check_stack_guard_page
 	}
 	return 0;
 }
 
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -1245,6 +1245,7 @@ void unmap_kernel_range(unsigned long ad
+@@ -1246,6 +1246,7 @@ void unmap_kernel_range(unsigned long ad
 	vunmap_page_range(addr, end);
 	flush_tlb_kernel_range(addr, end);
 }
 
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
-@@ -1360,6 +1361,7 @@ struct vm_struct *get_vm_area(unsigned l
+@@ -1383,6 +1384,7 @@ struct vm_struct *get_vm_area(unsigned l
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				-1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				void *caller)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
-@@ -871,6 +871,7 @@ extern bool skip_free_areas_node(unsigne
+@@ -870,6 +870,7 @@ extern bool skip_free_areas_node(unsigne
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);