--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -82,7 +82,7 @@ all-$(CONFIG_BOOT_ELF64)	:= $(vmlinux-64)
 cflags-y			+= -G 0 -mno-abicalls -fno-pic -pipe
 cflags-y			+= -msoft-float
 LDFLAGS_vmlinux			+= -G 0 -static -n -nostdlib
-MODFLAGS			+= -mlong-calls
+MODFLAGS			+= -mno-long-calls
 
 cflags-y += -ffreestanding
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -9,6 +9,11 @@ struct mod_arch_specific {
 	struct list_head dbe_list;
 	const struct exception_table_entry *dbe_start;
 	const struct exception_table_entry *dbe_end;
+	void *plt_tbl;
+	unsigned int core_plt_offset;
+	unsigned int core_plt_size;
+	unsigned int init_plt_offset;
+	unsigned int init_plt_size;
 };
 
 typedef uint8_t Elf64_Byte;	/* Type for a 8-bit quantity.  */
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -43,6 +43,114 @@ static struct mips_hi16 *mips_hi16_list;
 static LIST_HEAD(dbe_list);
 static DEFINE_SPINLOCK(dbe_lock);
 
+/*
+ * Get the potential max trampolines size required of the init and
+ * non-init sections. Only used if we cannot find enough contiguous
+ * physically mapped memory to put the module into.
+ */
+static unsigned int
+get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+	     const char *secstrings, unsigned int symindex, bool is_init)
+{
+	unsigned long ret = 0;
+	unsigned int i, j;
+	Elf_Sym *syms;
+
+	/* Everything marked ALLOC (this includes the exported symbols) */
+	for (i = 1; i < hdr->e_shnum; ++i) {
+		unsigned int info = sechdrs[i].sh_info;
+
+		if (sechdrs[i].sh_type != SHT_REL
+		    && sechdrs[i].sh_type != SHT_RELA)
+			continue;
+
+		/* Not a valid relocation section? */
+		if (info >= hdr->e_shnum)
+			continue;
+
+		/* Don't bother with non-allocated sections */
+		if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+			continue;
+
+		/* If it's called *.init*, and we're not init, we're
+		   not interested */
+		if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
+		    != is_init)
+			continue;
+
+		syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
+		if (sechdrs[i].sh_type == SHT_REL) {
+			Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
+			unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
+
+			for (j = 0; j < size; ++j) {
+				Elf_Sym *sym;
+
+				if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
+					continue;
+
+				sym = syms + ELF_MIPS_R_SYM(rel[j]);
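+				/* Jumps to symbols defined within the
+				 * module stay inside the one contiguous
+				 * core allocation and are always in
+				 * range, so for the core only undefined
+				 * symbols can need a trampoline; init
+				 * sections are a separate allocation
+				 * and may need to reach core code too. */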
+				if (!is_init && sym->st_shndx != SHN_UNDEF)
+					continue;
+
+				ret += 4 * sizeof(int);
+			}
+		} else {
+			Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
+			unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
+
+			for (j = 0; j < size; ++j) {
+				Elf_Sym *sym;
+
+				if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
+					continue;
+
+				sym = syms + ELF_MIPS_R_SYM(rela[j]);
+				if (!is_init && sym->st_shndx != SHN_UNDEF)
+					continue;
+
+				ret += 4 * sizeof(int);
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void *alloc_phys(unsigned long size)
+{
+	unsigned int order;
+	struct page *page;
+	struct page *p;
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
+			__GFP_THISNODE, order);
+	if (!page)
+		return NULL;
+
+	split_page(page, order);
+
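+	/* split_page() has turned the order-N block into independently
+	 * refcounted order-0 pages, so the tail pages beyond the
+	 * page-aligned size can be handed straight back. */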
+	for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
+		__free_page(p);
+
+	return page_address(page);
+}
+
+static void free_phys(void *ptr, unsigned long size)
+{
+	struct page *page;
+	struct page *end;
+
+	page = virt_to_page(ptr);
+	end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
+
+	for (; page < end; ++page)
+		__free_page(page);
+}
+
 void *module_alloc(unsigned long size)
@@ -58,16 +166,41 @@ void *module_alloc(unsigned long size)
 
 	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
 #else
+	void *ptr;
+
 	if (size == 0)
 		return NULL;
-	return vmalloc(size);
+
+	ptr = alloc_phys(size);
+
+	/* If we failed to allocate physically contiguous memory,
+	 * fall back to regular vmalloc. The module loader code will
+	 * create jump tables to handle long jumps */
+	if (!ptr)
+		return vmalloc(size);
+
+	return ptr;
 #endif
 }
+
+static inline bool is_phys_addr(void *ptr)
+{
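+	/* KSEG0 is the unmapped, cached window onto physical memory;
+	 * both alloc_phys() and kmalloc() return addresses in it. */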
+	return (KSEGX(ptr) == KSEG0);
+}
 
 /* Free memory returned from module_alloc */
 void module_free(struct module *mod, void *module_region)
 {
-	vfree(module_region);
+	if (is_phys_addr(module_region)) {
+		if (mod->module_init == module_region)
+			free_phys(module_region, mod->init_size);
+		else if (mod->module_core == module_region)
+			free_phys(module_region, mod->core_size);
+		else
+			BUG();
+	} else {
+		vfree(module_region);
+	}
 	/* FIXME: If module_region == mod->init_region, trim exception
 	   table entries. */
 }
@@ -75,6 +208,24 @@ void module_free(struct module *mod, void *module_region)
 int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
+	unsigned int symindex = 0;
+	unsigned int core_size, init_size;
+	int i;
+
+	for (i = 1; i < hdr->e_shnum; i++)
+		if (sechdrs[i].sh_type == SHT_SYMTAB)
+			symindex = i;
+
+	core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
+	init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
+
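+	/* A single table holds both PLTs: core trampolines occupy
+	 * [0, core_size), init trampolines [core_size, core_size +
+	 * init_size); the offsets below index into this table. */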
+	mod->arch.core_plt_offset = 0;
+	mod->arch.core_plt_size = core_size;
+	mod->arch.init_plt_offset = core_size;
+	mod->arch.init_plt_size = init_size;
+	mod->arch.plt_tbl = kmalloc(core_size + init_size, GFP_KERNEL);
+	if (!mod->arch.plt_tbl)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -97,45 +248,73 @@ static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
 	return 0;
 }
 
-static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
+static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
+				 void *start, unsigned size, Elf_Addr v)
 {
-	if (v % 4) {
-		printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
-		return -ENOEXEC;
-	}
+	unsigned *tramp = start + *plt_offset;
+
-	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
-		printk(KERN_ERR
-		       "module %s: relocation overflow\n",
-		       me->name);
-		return -ENOEXEC;
-	}
+	if (*plt_offset == size)
+		return 0;
+
+	*plt_offset += 4 * sizeof(int);
+
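+	/* addiu sign-extends its 16-bit immediate, so when bit 15 of
+	 * the low half is set the lui value must be biased upward by
+	 * one to compensate. */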
+	/* adjust carry for addiu */
+	if (v & 0x00008000)
+		v += 0x10000;
+
-	*location = (*location & ~0x03ffffff) |
-		    ((*location + (v >> 2)) & 0x03ffffff);
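+	/* The stub clobbers t9, which is call-clobbered in the MIPS
+	 * ABIs (it is the conventional PIC call register), so it is
+	 * safe scratch at a call site. */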
+	tramp[0] = 0x3c190000 | (v >> 16);	/* lui t9, hi16 */
+	tramp[1] = 0x27390000 | (v & 0xffff);	/* addiu t9, t9, lo16 */
+	tramp[2] = 0x03200008;			/* jr t9 */
+	tramp[3] = 0x00000000;			/* nop */
+
+	return (Elf_Addr) tramp;
+}
+
+static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
+{
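+	/* Pick the PLT region matching the call site. Both regions
+	 * live in the shared plt_tbl, and each offset indexes the
+	 * table from its start, so the init bound is the end of the
+	 * whole table. */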
+	if (location >= me->module_core &&
+	    location < me->module_core + me->core_size)
+		return add_plt_entry_to(&me->arch.core_plt_offset,
+				me->arch.plt_tbl,
+				me->arch.core_plt_size, v);
+
+	if (location >= me->module_init &&
+	    location < me->module_init + me->init_size)
+		return add_plt_entry_to(&me->arch.init_plt_offset,
+				me->arch.plt_tbl,
+				me->arch.core_plt_size +
+				me->arch.init_plt_size, v);
+
+	return 0;
+}
+
-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
+static int set_r_mips_26(struct module *me, u32 *location, u32 ofs, Elf_Addr v)
 {
 	if (v % 4) {
 		printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
 		return -ENOEXEC;
 	}
 
-	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
-		printk(KERN_ERR
-		       "module %s: relocation overflow\n",
-		       me->name);
-		return -ENOEXEC;
+	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+		v = add_plt_entry(me, location, v + (ofs << 2));
+		if (v == 0) {
+			printk(KERN_ERR
+			       "module %s: relocation overflow\n",
+			       me->name);
+			return -ENOEXEC;
+		}
+		/* the trampoline already targets v + (ofs << 2), so
+		 * clear the addend to avoid applying it twice */
+		ofs = 0;
 	}
 
-	*location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
+	*location = (*location & ~0x03ffffff) | ((ofs + (v >> 2)) & 0x03ffffff);
 
 	return 0;
 }
+
+static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+	return set_r_mips_26(me, location, *location & 0x03ffffff, v);
+}
+
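+/* REL (used by 32-bit MIPS) keeps the addend in the low 26 bits of
+ * the instruction word, extracted above; RELA carries it in r_addend,
+ * already folded into v, so zero is passed below. */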
+static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+	return set_r_mips_26(me, location, 0, v);
+}
+
 static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
@@ -400,11 +579,23 @@ int module_finalize(const Elf_Ehdr *hdr,
 		list_add(&me->arch.dbe_list, &dbe_list);
 		spin_unlock_irq(&dbe_lock);
 	}
+
+	/* Get rid of the fixup trampoline if we're running the module
+	 * from physically mapped address space */
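+	/* Both offsets still sitting at their initial values means no
+	 * PLT entry was ever written for this module. */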
+	if (me->arch.core_plt_offset == 0 &&
+	    me->arch.init_plt_offset == me->arch.core_plt_size &&
+	    is_phys_addr(me->module_core)) {
+		kfree(me->arch.plt_tbl);
+		me->arch.plt_tbl = NULL;
+	}
 
 	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
+	if (mod->arch.plt_tbl)
+		kfree(mod->arch.plt_tbl);
 	spin_lock_irq(&dbe_lock);
 	list_del(&mod->arch.dbe_list);
 	spin_unlock_irq(&dbe_lock);