/*
 * linux/arch/m68k/mm/cf-mmu.c
 *
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 * Copyright Freescale Semiconductor, Inc. 2008-2009
 *   Jason Jin Jason.Jin@freescale.com
 *   Shrek Wu B16972@freescale.com
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blkdev.h>
#endif
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/cf_pgalloc.h>

#include <asm/coldfire.h>
#include <asm/tlbflush.h>

#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
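
/*
 * KMAPAREA() is true for addresses in the kernel vmalloc/kmap window
 * (VMALLOC_START up to KMAP_END).  cf_tlb_miss() below uses it to decide
 * whether a faulting address should be resolved against init_mm's page
 * tables rather than the current task's.
 */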

mm_context_t next_mmu_context;

unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];

atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
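
/*
 * Context (ASID) bookkeeping, modelled on arch/ppc/mm/mmu_context.c:
 * context_map is a bitmap of contexts currently handed out, context_mm[]
 * remembers which mm owns each context so steal_context() can reclaim one
 * when they run out, and nr_free_contexts counts how many are still free.
 */
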
void steal_context(void);

void m68k_setup_node(int);

const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern unsigned long empty_bad_page_table;
extern unsigned long empty_bad_page;
extern unsigned long num_pages;

extern unsigned long availmem;

extern char __init_begin, __init_end;

/*
 * Free memory used for system initialization.
 */
void free_initmem(void)
{
        unsigned long addr;
        unsigned long start = (unsigned long)&__init_begin;
        unsigned long end = (unsigned long)&__init_end;

        printk(KERN_INFO
                "free_initmem: __init_begin = 0x%lx __init_end = 0x%lx\n",
                start, end);

        addr = (unsigned long)&__init_begin;
        for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
                /* not currently used */
                virt_to_page(addr)->flags &= ~(1 << PG_reserved);
                init_page_count(virt_to_page(addr));
        }
}

/*
 * Initialize the paging system.
 */
void __init paging_init(void)
{
        pgd_t *pg_dir;
        pte_t *pg_table;
        unsigned long address;
        unsigned long next_pgtable;
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long size;
        enum zone_type zone;
        int i;

        /* allocate zero page */
        empty_zero_page = (void *)alloc_bootmem_pages(PAGE_SIZE);
        memset((void *)empty_zero_page, 0, PAGE_SIZE);

        /* zero kernel page directory */
        pg_dir = swapper_pg_dir;
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /*
         * setup page tables for PHYSRAM
         */

        /* starting loc in page directory */
        pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

        /* allocate page tables */
        size = num_pages * sizeof(pte_t);
        size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
        next_pgtable = (unsigned long)alloc_bootmem_pages(size);
        address = PAGE_OFFSET;
        while (address < (unsigned long)high_memory) {
                /* setup page table in page directory */
                pg_table = (pte_t *)next_pgtable;
                next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
                pgd_val(*pg_dir) = (unsigned long)pg_table;
                pg_dir++;

                /* create PTEs in page table */
                for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
                        pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
                        if (address >= (unsigned long)high_memory)
                                pte_val(pte) = 0;

                        set_pte(pg_table, pte);
                        address += PAGE_SIZE;
                }
        }
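
        /*
         * At this point the kernel's linear mapping is in place: each pgd
         * slot from PAGE_OFFSET up to high_memory points at a table of
         * PTRS_PER_PTE entries mapping physical RAM 1:1 (offset by
         * PAGE_OFFSET); entries beyond high_memory are left invalid.
         */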

        /*
         * setup page tables for DMA area
         */

        /* starting loc in page directory */
        pg_dir = swapper_pg_dir;
        pg_dir += CONFIG_DMA_BASE >> PGDIR_SHIFT;

        /* allocate page tables */
        size = (CONFIG_DMA_SIZE >> PAGE_SHIFT) * sizeof(pte_t);
        size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
        next_pgtable = (unsigned long)alloc_bootmem_pages(size);
        address = CONFIG_DMA_BASE;
        while (address < (CONFIG_DMA_BASE + CONFIG_DMA_SIZE)) {
                /* setup page table in page directory */
                pg_table = (pte_t *)next_pgtable;
                next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
                pgd_val(*pg_dir) = (unsigned long)pg_table;
                pg_dir++;

                /* create PTEs in page table */
                for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
                        pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
                        if (address >= (CONFIG_DMA_BASE + CONFIG_DMA_SIZE))
                                pte_val(pte) = 0;

                        set_pte(pg_table, pte);
                        address += PAGE_SIZE;
                }
        }

        /* clear zones, then size ZONE_DMA and ZONE_NORMAL */
        for (zone = 0; zone < MAX_NR_ZONES; zone++)
                zones_size[zone] = 0x0;

        zones_size[ZONE_DMA] = CONFIG_DMA_SIZE >> PAGE_SHIFT;
        zones_size[ZONE_NORMAL] = (((unsigned long)high_memory -
                                PAGE_OFFSET) >> PAGE_SHIFT) -
                                zones_size[ZONE_DMA];
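
        /*
         * Example (illustrative values only, not from any particular
         * config): with a 16 MiB DMA region and 8 KiB pages, ZONE_DMA gets
         * 2048 pages and ZONE_NORMAL gets whatever remains between
         * PAGE_OFFSET and high_memory after those DMA pages are subtracted.
         */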

        free_area_init(zones_size);
}

/*
 * Handle a missed TLB
 */
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long mmuar;
        unsigned long flags;
        int asid;

        local_save_flags(flags);
        local_irq_disable();

        mmuar = (dtlb) ? regs->mmuar
                : regs->pc + (extension_word * sizeof(long));

        mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
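
        /*
         * For a DTLB miss the faulting address is the one held in
         * regs->mmuar; for an ITLB miss it is rebuilt from regs->pc plus
         * the extension-word offset.  Kernel-mode faults inside the
         * vmalloc/kmap window are resolved against init_mm, since those
         * mappings never live in a task's mm.
         */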

        if (!mm) {
                local_irq_restore(flags);
                return -1;
        }

        pgd = pgd_offset(mm, mmuar);
        if (pgd_none(*pgd)) {
                local_irq_restore(flags);
                return -1;
        }

        pmd = pmd_offset(pgd, mmuar);
        if (pmd_none(*pmd)) {
                local_irq_restore(flags);
                return -1;
        }

        pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
                                : pte_offset_map(pmd, mmuar);
        if (pte_none(*pte) || !pte_present(*pte)) {
                local_irq_restore(flags);
                return -1;
        }

        if (write) {
                if (!pte_write(*pte)) {
                        local_irq_restore(flags);
                        return -1;
                }
                set_pte(pte, pte_mkdirty(*pte));
        }

        set_pte(pte, pte_mkyoung(*pte));
        asid = cpu_context(mm) & 0xff;
        if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
                set_pte(pte, pte_wrprotect(*pte));

        *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
                | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
                >> CF_PAGE_MMUTR_SHIFT)
                | MMUTR_V;

        *MMUDR = (pte_val(*pte) & PAGE_MASK)
                | ((pte->pte) & CF_PAGE_MMUDR_MASK)
                | MMUDR_SZ8K | MMUDR_X;

        if (dtlb)
                *MMUOR = MMUOR_ACC | MMUOR_UAA;
        else
                *MMUOR = MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA;
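
        /*
         * The new entry is now staged: MMUTR holds the virtual page, ASID
         * and valid bit, MMUDR holds the physical page plus size and
         * protection bits.  Writing MMUOR with ACC|UAA tells the MMU to
         * load that pair into the TLB at the next allocation slot, with
         * MMUOR_ITLB selecting the instruction TLB for ITLB misses.
         */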

#ifdef DEBUG
        printk("cf_tlb_miss: va=%lx, pa=%lx\n", (mmuar & PAGE_MASK),
                (pte_val(*pte) & PAGE_MASK));
#endif
        local_irq_restore(flags);
        return 0;
}

/*
 * Based on arch/ppc/mmu_context.c
 */

/*
 * Initialize the context management system.
 */
void __init mmu_context_init(void)
{
        /*
         * Some processors have too few contexts to reserve one for
         * init_mm, and require using context 0 for a normal task.
         * Other processors reserve the use of context zero for the kernel.
         * This code assumes FIRST_CONTEXT < 32.
         */
        context_map[0] = (1 << FIRST_CONTEXT) - 1;
        next_mmu_context = FIRST_CONTEXT;
        atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
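
/*
 * Worked example (hypothetical values): with FIRST_CONTEXT == 1 and
 * LAST_CONTEXT == 255, context_map[0] becomes (1 << 1) - 1 = 0x1, which
 * permanently marks context 0 as taken (reserved for the kernel), and
 * nr_free_contexts starts at 255, i.e. contexts 1..255 are available for
 * user address spaces.
 */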

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 */
void steal_context(void)
{
        struct mm_struct *mm;
        /* free up context `next_mmu_context' */
        /* if we shouldn't free context 0, don't... */
        if (next_mmu_context < FIRST_CONTEXT)
                next_mmu_context = FIRST_CONTEXT;
        mm = context_mm[next_mmu_context];