From db94c8c3ec831b5fab828487ba05041bc048fccf Mon Sep 17 00:00:00 2001
From: Kurt Mahan <kmahan@freescale.com>
Date: Tue, 15 Jul 2008 17:48:11 -0600
Subject: [PATCH] Move VMALLOC region to a valid area.

LTIBName: mcfv4e-vmalloc-fix
Signed-off-by: Kurt Mahan <kmahan@freescale.com>
---
 arch/m68k/mm/cf-mmu.c      |    7 +------
 include/asm-m68k/pgtable.h |    6 +++++-
 2 files changed, 6 insertions(+), 7 deletions(-)
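
Note (not part of the patch): the ColdFire layout introduced by the hunks
below, restated as a stand-alone C11 sketch for reference. The constant
values are copied verbatim from the diff; the CF_ prefix and the
static_assert() checks are illustrative additions only.

/*
 * Sketch only, not part of the patch: the ColdFire virtual-memory
 * windows established below.  Values are copied from the diff; the
 * CF_ prefix and the static_assert() checks exist purely so this
 * compiles stand-alone.
 */
#include <assert.h>

#define CF_VMALLOC_START 0xc0000000UL
#define CF_VMALLOC_END   0xcfffffffUL
#define CF_KMAP_START    (CF_VMALLOC_END + 1)	/* 0xd0000000 */
#define CF_KMAP_END      0xe0000000UL

/* After this patch KMAPAREA() in cf-mmu.c spans both windows, which is
 * only valid because they are contiguous. */
#define CF_KMAPAREA(x)   ((x) >= CF_VMALLOC_START && (x) < CF_KMAP_END)

static_assert(CF_KMAP_START == 0xd0000000UL,
	      "kmap window must start right after the vmalloc window");
static_assert(CF_VMALLOC_START < CF_VMALLOC_END && CF_KMAP_START < CF_KMAP_END,
	      "each window must be non-empty and ordered");

Because KMAP_START is defined as VMALLOC_END + 1, the vmalloc and kmap
regions form one contiguous range, which is why the updated KMAPAREA()
test in cf-mmu.c only needs VMALLOC_START and KMAP_END.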
--- a/arch/m68k/mm/cf-mmu.c
+++ b/arch/m68k/mm/cf-mmu.c
 #include <asm/coldfire.h>
 #include <asm/tlbflush.h>
 
-#define KMAPAREA(x) ((x >= KMAP_START) && ( x < KMAP_END))
+#define KMAPAREA(x) ((x >= VMALLOC_START) && ( x < KMAP_END))
@@ -62,11 +62,6 @@ void free_initmem(void)
 	unsigned long start = (unsigned long)&__init_begin;
 	unsigned long end = (unsigned long)&__init_end;
 
-/*
- * JKM -- revisit -- the latest round of vmlinux.lds changes has caused
- * a little grief with how init areas are handled. With the new toolchain
- * release I'll fix this.
- */
 	printk(KERN_INFO "free_initmem: __init_begin = 0x%lx __init_end = 0x%lx\n", start, end);
 
 	addr = (unsigned long)&__init_begin;
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
 #define KMAP_START 0x0DC00000
 #define KMAP_END 0x0E000000
 #elif defined(CONFIG_COLDFIRE)
-#define KMAP_START 0xd0000000
+#define VMALLOC_START 0xc0000000
+#define VMALLOC_END 0xcfffffff
+#define KMAP_START (VMALLOC_END + 1)
 #define KMAP_END 0xe0000000
 #else
 #define KMAP_START 0xd0000000
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
+#if !defined(CONFIG_COLDFIRE)
 #define VMALLOC_OFFSET (8*1024*1024)
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
+#endif
 #else
 extern unsigned long vmalloc_end;
 #define VMALLOC_START 0x0f800000
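
Note (not part of the patch): the generic, non-ColdFire branch kept in the
pgtable.h hunk above still derives VMALLOC_START from high_memory. The
stand-alone illustration below walks through that alignment arithmetic;
the high_memory value used here is a hypothetical example, not taken from
the patch or any real board.

/*
 * Illustration of the generic VMALLOC_START formula shown above.  The
 * high_memory value is hypothetical; in the kernel it is the top of the
 * directly mapped low memory.
 */
#include <stdio.h>

#define VMALLOC_OFFSET (8 * 1024 * 1024)	/* 8 MB guard hole */

int main(void)
{
	unsigned long high_memory = 0xc1000000UL;	/* hypothetical */
	unsigned long vmalloc_start =
		(high_memory + VMALLOC_OFFSET) & ~((unsigned long)VMALLOC_OFFSET - 1);

	/* Prints VMALLOC_START = 0xc1800000: 8 MB past high_memory,
	 * rounded down to an 8 MB boundary. */
	printf("VMALLOC_START = 0x%08lx\n", vmalloc_start);
	return 0;
}

On ColdFire this computation is now compiled out by the new
#if !defined(CONFIG_COLDFIRE) guard, and the fixed 0xc0000000-0xcfffffff
window is used instead.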