From 2bef1f8ce148cce9e782f75f9537767c1d8c0eea Mon Sep 17 00:00:00 2001
From: Kurt Mahan <kmahan@freescale.com>
Date: Wed, 31 Oct 2007 16:58:27 -0600
Subject: [PATCH] Core Coldfire/MCF5445x arch/mm changes.

LTIBName: mcfv4e-arch-mm-mods-1
Signed-off-by: Kurt Mahan <kmahan@freescale.com>
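
The new ColdFire paths in cache.c and memory.c below all follow the same
pattern: derive start and end cache-set indices from the address range and
walk the sets with cpushl, splitting the walk into two passes when the range
wraps past the last set.  A minimal user-space sketch of that walk follows;
the _ICACHE_SET_MASK and LAST_ICACHE_ADDR values here are placeholders, not
the ones from <asm/cfcache.h>, and push_set() merely stands in for the
cpushl/addq instruction sequence.

#include <stdio.h>

#define _ICACHE_SET_MASK 0x0ff0UL          /* placeholder set-index mask */
#define LAST_ICACHE_ADDR _ICACHE_SET_MASK  /* placeholder last set index */

/* Stand-in for the inline asm: the patch issues four cpushl instructions,
 * bumping the address by 1 between them to hit the four ways of a set,
 * which is why its loops advance by (0x10 - 3) instead of 0x10. */
static void push_set(unsigned long set)
{
    unsigned long way;

    for (way = 0; way < 4; way++)
        printf("cpushl (set|way = 0x%lx)\n", set + way);
}

/* Walk the set indices covered by [address, endaddr], handling the case
 * where the range wraps around the end of the set-index space. */
static void flush_sets(unsigned long address, unsigned long endaddr)
{
    unsigned long start_set = address & _ICACHE_SET_MASK;
    unsigned long end_set = endaddr & _ICACHE_SET_MASK;
    unsigned long set;

    if (start_set > end_set) {
        /* first pass: from set 0 up to the wrapped end */
        for (set = 0; set <= end_set; set += 0x10)
            push_set(set);
        /* the second pass below runs on to the last set in the cache */
        end_set = LAST_ICACHE_ADDR;
    }
    for (set = start_set; set <= end_set; set += 0x10)
        push_set(set);
}

int main(void)
{
    flush_sets(0x0fe0, 0x1020);    /* wraps: 0x0fe0 > (0x1020 & mask) */
    return 0;
}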
---
 arch/m68k/mm/Makefile |    1 +
 arch/m68k/mm/cache.c  |   41 ++++++++
 arch/m68k/mm/cf-mmu.c |  251 +++++++++++++++++++++++++++++++++++++++++++++++++
 arch/m68k/mm/hwtest.c |    2 +
 arch/m68k/mm/init.c   |    3 +-
 arch/m68k/mm/kmap.c   |   13 +++
 arch/m68k/mm/memory.c |   66 +++++++++++++-
 7 files changed, 373 insertions(+), 4 deletions(-)
 create mode 100644 arch/m68k/mm/cf-mmu.c
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -6,3 +6,4 @@ obj-y := cache.o init.o fault.o hwtest.
 obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
 obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
+obj-$(CONFIG_MMU_CFV4E) += cf-mmu.o kmap.o memory.o
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
 #include <asm/pgalloc.h>
 #include <asm/traps.h>
+#ifdef CONFIG_COLDFIRE
+#include <asm/cfcache.h>
+#endif /* CONFIG_COLDFIRE */
+#ifndef CONFIG_COLDFIRE
 static unsigned long virt_to_phys_slow(unsigned long vaddr)
@@ -69,11 +73,45 @@ static unsigned long virt_to_phys_slow(u
+#endif /* CONFIG_COLDFIRE */
 /* Push n pages at kernel virtual address and clear the icache */
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 void flush_icache_range(unsigned long address, unsigned long endaddr)
+#ifdef CONFIG_COLDFIRE
+ unsigned long start_set;
+ unsigned long end_set;
+ start_set = address & _ICACHE_SET_MASK;
+ end_set = endaddr & _ICACHE_SET_MASK;
+ if (start_set > end_set) {
+ /* from the beginning to the lowest address */
+ for (set = 0; set <= end_set; set += (0x10 - 3))
+ asm volatile ("cpushl %%ic,(%0)\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\tcpushl %%ic,(%0)" : : "a" (set));
+ /* the next loop will finish the cache, i.e. pass the hole */
+ end_set = LAST_ICACHE_ADDR;
+ for (set = start_set; set <= end_set; set += (0x10 - 3))
+ asm volatile ("cpushl %%ic,(%0)\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\tcpushl %%ic,(%0)" : : "a" (set));
+#else /* !CONFIG_COLDFIRE */
 if (CPU_IS_040_OR_060) {
@@ -94,9 +132,11 @@ void flush_icache_range(unsigned long ad
+#endif /* CONFIG_COLDFIRE */
 EXPORT_SYMBOL(flush_icache_range);
+#ifndef CONFIG_COLDFIRE
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 unsigned long addr, int len)
@@ -115,4 +155,5 @@ void flush_icache_user_range(struct vm_a
+#endif /* CONFIG_COLDFIRE */
+++ b/arch/m68k/mm/cf-mmu.c
+ * linux/arch/m68k/mm/cf-mmu.c
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ * Implementations of mm routines specific to the Coldfire MMU.
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blkdev.h>
+#include <linux/bootmem.h>
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/mmu_context.h>
+#include <asm/cf_pgalloc.h>
+#include <asm/coldfire.h>
+#include <asm/tlbflush.h>
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+void steal_context(void);
+const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
+extern unsigned long empty_bad_page_table;
+extern unsigned long empty_bad_page;
+extern unsigned long num_pages;
+extern char __init_begin, __init_end;
+void free_initmem(void)
+ unsigned long addr;
+ unsigned long start = (unsigned long)&__init_begin;
+ unsigned long end = (unsigned long)&__init_end;
+ printk(KERN_INFO "free_initmem: __init_begin = 0x%lx __init_end = 0x%lx\n", start, end);
+ addr = (unsigned long)&__init_begin;
+ for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
+ /* not currently used */
+ virt_to_page(addr)->flags &= ~(1 << PG_reserved);
+ init_page_count(virt_to_page(addr));
+/* Coldfire paging_init derived from sun3 */
+void __init paging_init(void)
+ unsigned long address;
+ unsigned long next_pgtable;
+ unsigned long bootmem_end;
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long size;
+ enum zone_type zone;
+ empty_zero_page = (void *)alloc_bootmem_pages(PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+ pg_dir = swapper_pg_dir;
+ memset(swapper_pg_dir, 0, sizeof (swapper_pg_dir));
+ size = num_pages * sizeof(pte_t);
+ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+ next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+ bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+ pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+ address = PAGE_OFFSET;
+ while (address < (unsigned long)high_memory)
+ pg_table = (pte_t *)next_pgtable;
+ next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
+ pgd_val(*pg_dir) = (unsigned long) pg_table;
+ /* now change pg_table to kernel virtual addresses */
+ for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table)
+ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ if (address >= (unsigned long)high_memory)
+ set_pte (pg_table, pte);
+ address += PAGE_SIZE;
+ current->mm = NULL;
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ zones_size[zone] = 0x0;
+ /* allocate the bottom 32M (0x40x 0x41x) to DMA - head.S marks them NO CACHE */
+ /* JKM - this should be changed to allocate from the TOP (0x4f,0x4e) but the
+ * allocator is being a bit challenging */
+ zones_size[ZONE_DMA] = (32*1024*1024) >> PAGE_SHIFT;
+ /* allocate the rest to NORMAL - head.S marks them CACHE */
+ zones_size[ZONE_NORMAL] = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT) - zones_size[0];
+ free_area_init(zones_size);
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+ struct mm_struct *mm;
+ unsigned long mmuar;
+ local_save_flags(flags);
+ local_irq_disable();
+ mmuar = ( dtlb ) ? regs->mmuar
+ : regs->pc + (extension_word * sizeof(long));
+ mm = (!user_mode(regs) && (mmuar >= PAGE_OFFSET)) ? &init_mm
+ local_irq_restore(flags);
+ pgd = pgd_offset(mm, mmuar);
+ if (pgd_none(*pgd)) {
+ local_irq_restore(flags);
+ pmd = pmd_offset(pgd, mmuar);
+ if (pmd_none(*pmd)) {
+ local_irq_restore(flags);
+ pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
+ : pte_offset_map(pmd, mmuar);
+ if (pte_none(*pte) || !pte_present(*pte)) {
+ local_irq_restore(flags);
+ if (!pte_write(*pte)) {
+ local_irq_restore(flags);
+ set_pte(pte, pte_mkdirty(*pte));
+ set_pte(pte, pte_mkyoung(*pte));
+ asid = mm->context & 0xff;
+ if (!pte_dirty(*pte) && mmuar<=PAGE_OFFSET)
+ set_pte(pte, pte_wrprotect(*pte));
+ *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
+ | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK ) >> CF_PAGE_MMUTR_SHIFT)
+ *MMUDR = (pte_val(*pte) & PAGE_MASK)
+ | ((pte->pte) & CF_PAGE_MMUDR_MASK)
+ | MMUDR_SZ8K | MMUDR_X;
+ *MMUOR = MMUOR_ACC | MMUOR_UAA;
+ *MMUOR = MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA;
+ /*printk("cf_tlb_miss: va=%lx, pa=%lx\n", (mmuar & PAGE_MASK),
+ (pte_val(*pte) & PAGE_MASK));*/
+ local_irq_restore(flags);
+/* The following was taken from arch/ppc/mmu_context.c
+ * Initialize the context management stuff.
+void __init mmu_context_init(void)
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes FIRST_CONTEXT < 32.
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+ atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP. If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+void steal_context(void)
+ struct mm_struct *mm;
+ /* free up context `next_mmu_context' */
+ /* if we shouldn't free context 0, don't... */
+ if (next_mmu_context < FIRST_CONTEXT)
+ next_mmu_context = FIRST_CONTEXT;
+ mm = context_mm[next_mmu_context];
+ destroy_context(mm);
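
The steal_context() comment above, carried over from the ppc port, describes
round-robin context recycling: when the ASID space runs out, whichever
context next_mmu_context points at is taken back from its mm.  A rough
user-space model of that scheme follows; the FIRST_CONTEXT/LAST_CONTEXT
values and the shape of the assign_context() helper are assumptions for
illustration and are not taken from this patch.

#include <stdio.h>

#define FIRST_CONTEXT 1
#define LAST_CONTEXT  255    /* assumed 8-bit ASID space */

struct mm { int id; int context; };    /* stand-in for struct mm_struct */

static struct mm *context_mm[LAST_CONTEXT + 1];
static unsigned long next_mmu_context = FIRST_CONTEXT;
static int nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;

/* Free up whichever context next_mmu_context points at; the kernel code
 * calls destroy_context() on the victim mm instead of clearing a field. */
static void steal_context(void)
{
    struct mm *victim;

    if (next_mmu_context < FIRST_CONTEXT)
        next_mmu_context = FIRST_CONTEXT;
    victim = context_mm[next_mmu_context];
    victim->context = -1;
    context_mm[next_mmu_context] = NULL;
    nr_free_contexts++;
}

/* Hand out contexts in turn, stealing one when none are free. */
static void assign_context(struct mm *mm)
{
    if (mm->context != -1)
        return;
    if (nr_free_contexts == 0)
        steal_context();
    while (context_mm[next_mmu_context] != NULL)
        if (++next_mmu_context > LAST_CONTEXT)
            next_mmu_context = FIRST_CONTEXT;
    mm->context = (int)next_mmu_context;
    context_mm[next_mmu_context] = mm;
    nr_free_contexts--;
}

int main(void)
{
    struct mm a = { 1, -1 }, b = { 2, -1 };

    assign_context(&a);
    assign_context(&b);
    printf("mm %d -> context %d, mm %d -> context %d\n",
           a.id, a.context, b.id, b.context);
    return 0;
}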
--- a/arch/m68k/mm/hwtest.c
+++ b/arch/m68k/mm/hwtest.c
 #include <linux/module.h>
+#ifndef CONFIG_COLDFIRE
 int hwreg_present( volatile void *regp )
@@ -82,4 +83,5 @@ int hwreg_write( volatile void *regp, un
 EXPORT_SYMBOL(hwreg_write);
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -122,7 +122,6 @@ void __init mem_init(void)
 atari_stram_mem_init_hook();
 /* this will put all memory onto the freelists */
 totalram_pages = num_physpages = 0;
 for_each_online_pgdat(pgdat) {
@@ -146,7 +145,7 @@ void __init mem_init(void)
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 /* insert pointer tables allocated so far into the tablelist */
 init_pointer_table((unsigned long)kernel_pg_dir);
 for (i = 0; i < PTRS_PER_PGD; i++) {
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
+#ifndef CONFIG_COLDFIRE
 #define PTRTREESIZE (256*1024)
+#define PTRTREESIZE PAGE_SIZE
 * For 040/060 we can use the virtual memory area like other architectures,
@@ -50,7 +54,11 @@ static inline void free_io_area(void *ad
+#ifdef CONFIG_COLDFIRE
+#define IO_SIZE PAGE_SIZE
 #define IO_SIZE (256*1024)
 static struct vm_struct *iolist;
@@ -170,7 +178,12 @@ void __iomem *__ioremap(unsigned long ph
+#ifndef CONFIG_COLDFIRE
 physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
 case IOMAP_NOCACHE_SER:
 case IOMAP_NOCACHE_NONSER:
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -203,7 +203,38 @@ static inline void pushcl040(unsigned lo
 void cache_clear (unsigned long paddr, int len)
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_CFV4E) {
+ unsigned long start_set;
+ unsigned long end_set;
+ start_set = paddr & _ICACHE_SET_MASK;
+ end_set = (paddr+len-1) & _ICACHE_SET_MASK;
+ if (start_set > end_set) {
+ /* from the beginning to the lowest address */
+ for (set = 0; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)" : : "a" (set));
+ /* the next loop will finish the cache, i.e. pass the hole */
+ end_set = LAST_ICACHE_ADDR;
+ for (set = start_set; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)" : : "a" (set));
+ } else if (CPU_IS_040_OR_060) {
@@ -250,7 +281,38 @@ EXPORT_SYMBOL(cache_clear);
 void cache_push (unsigned long paddr, int len)
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_CFV4E) {
+ unsigned long start_set;
+ unsigned long end_set;
+ start_set = paddr & _ICACHE_SET_MASK;
+ end_set = (paddr+len-1) & _ICACHE_SET_MASK;
+ if (start_set > end_set) {
+ /* from the beginning to the lowest address */
+ for (set = 0; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)" : : "a" (set));
+ /* the next loop will finish the cache, i.e. pass the hole */
+ end_set = LAST_ICACHE_ADDR;
+ for (set = start_set; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)" : : "a" (set));
+ } else if (CPU_IS_040_OR_060) {