+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2480,6 +2480,15 @@ static int shmem_get_sb(struct file_syst
+ 	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
+ }
+
++void shmem_set_file(struct vm_area_struct *vma, struct file *file)
++{
++	if (vma->vm_file)
++		fput(vma->vm_file);
++	vma->vm_file = file;
++	vma->vm_ops = &shmem_vm_ops;
++}
++EXPORT_SYMBOL_GPL(shmem_set_file);
++
+ static struct file_system_type tmpfs_fs_type = {
+ 	.owner		= THIS_MODULE,
+ 	.name		= "tmpfs",
+@@ -2600,9 +2609,6 @@ int shmem_zero_setup(struct vm_area_stru
+ 	if (IS_ERR(file))
+ 		return PTR_ERR(file);
+
+-	if (vma->vm_file)
+-		fput(vma->vm_file);
+-	vma->vm_file = file;
+-	vma->vm_ops = &shmem_vm_ops;
++	shmem_set_file(vma, file);
+ 	return 0;
+ }
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -270,6 +270,7 @@ int expand_files(struct files_struct *fi
+ 	/* All good, so we try */
+ 	return expand_fdtable(files, nr);
+ }
++EXPORT_SYMBOL_GPL(expand_files);
+
+ static int count_open_files(struct fdtable *fdt)
+ {
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -509,6 +509,7 @@ struct files_struct *get_files_struct(st
+
+ 	return files;
+ }
++EXPORT_SYMBOL_GPL(get_files_struct);
+
+ void put_files_struct(struct files_struct *files)
+ {
+@@ -528,6 +529,7 @@ void put_files_struct(struct files_struc
+ 		free_fdtable(fdt);
+ 	}
+ }
++EXPORT_SYMBOL_GPL(put_files_struct);
+
+ void reset_files_struct(struct files_struct *files)
+ {
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -155,6 +155,7 @@ void __put_task_struct(struct task_struc
+ 	if (!profile_handoff_task(tsk))
+ 		free_task(tsk);
+ }
++EXPORT_SYMBOL_GPL(__put_task_struct);
+
+ /*
+ * macro override instead of weak attribute alias, to workaround
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -5015,6 +5015,7 @@ int can_nice(const struct task_struct *p
+ 	return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+ 		capable(CAP_SYS_NICE));
+ }
++EXPORT_SYMBOL_GPL(can_nice);
+
+ #ifdef __ARCH_WANT_SYS_NICE
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -993,6 +993,7 @@ unsigned long zap_page_range(struct vm_a
+ 		tlb_finish_mmu(tlb, address, end);
+ 	return end;
+ }
++EXPORT_SYMBOL_GPL(zap_page_range);
+
+ /**
+ * zap_vma_ptes - remove ptes mapping the vma
+@@ -2271,6 +2272,7 @@ int vmtruncate_range(struct inode *inode
+
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(vmtruncate_range);
+
+ /*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -980,6 +980,7 @@ void unmap_kernel_range(unsigned long ad
+ 	vunmap_page_range(addr, end);
+ 	flush_tlb_kernel_range(addr, end);
+ }
++EXPORT_SYMBOL_GPL(unmap_kernel_range);
+
+ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+ {
+@@ -1085,6 +1086,7 @@ struct vm_struct *get_vm_area(unsigned l
+ 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+ 				-1, GFP_KERNEL, __builtin_return_address(0));
+ }
++EXPORT_SYMBOL_GPL(get_vm_area);
+
+ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+ 				void *caller)
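
For reference, a minimal sketch (not part of the patch above) of how an out-of-tree module's mmap handler might use the newly exported shmem_set_file(). The driver and function names are hypothetical; it assumes a prototype for shmem_set_file() has been made visible to modules (this patch does not touch any header) and that shmem_file_setup() is available to the module.

/*
 * Illustrative only -- a hypothetical driver mmap handler that backs a
 * mapping with an unlinked tmpfs file, in the style enabled by the
 * shmem_set_file() export above.
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/err.h>

/* Assumed declaration; this patch exports the symbol but adds no header. */
extern void shmem_set_file(struct vm_area_struct *vma, struct file *file);

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct file *backing;

	/* Create an unlinked tmpfs file sized to the mapping. */
	backing = shmem_file_setup("example", vma->vm_end - vma->vm_start,
				   vma->vm_flags);
	if (IS_ERR(backing))
		return PTR_ERR(backing);

	/*
	 * shmem_set_file() drops the reference on the old vma->vm_file
	 * (if any) and installs the shmem file and shmem_vm_ops, so the
	 * reference returned by shmem_file_setup() is handed over here.
	 */
	shmem_set_file(vma, backing);
	return 0;
}

The remaining EXPORT_SYMBOL_GPL additions in the patch make the corresponding helpers callable from GPL modules in the same way; no in-tree callers change.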