/*
 * arch/m68k/include/asm/cf_548x_cacheflush.h - Coldfire 547x/548x Cache
 *
 * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 * Kurt Mahan kmahan@freescale.com
 * Shrek Wu b16972@freescale.com
 *
 * Based on include/asm-m68k/cacheflush.h
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#ifndef M68K_CF_548x_CACHEFLUSH_H
#define M68K_CF_548x_CACHEFLUSH_H

#include <asm/cfcache.h>

/*
 * Cache handling functions
 */
23 #define flush_icache() \
26 unsigned long start_set; \
27 unsigned long end_set; \
30 end_set = (unsigned long)LAST_DCACHE_ADDR; \
32 for (set = start_set; set <= end_set; set += (0x10 - 3)) {\
33 asm volatile("cpushl %%ic,(%0)\n" \
35 "\tcpushl %%ic,(%0)\n" \
37 "\tcpushl %%ic,(%0)\n" \
39 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
43 #define flush_dcache() \
46 unsigned long start_set; \
47 unsigned long end_set; \
50 end_set = (unsigned long)LAST_DCACHE_ADDR; \
52 for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
53 asm volatile("cpushl %%dc,(%0)\n" \
55 "\tcpushl %%dc,(%0)\n" \
57 "\tcpushl %%dc,(%0)\n" \
59 "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
63 #define flush_bcache() \
66 unsigned long start_set; \
67 unsigned long end_set; \
70 end_set = (unsigned long)LAST_DCACHE_ADDR; \
72 for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
73 asm volatile("cpushl %%bc,(%0)\n" \
75 "\tcpushl %%bc,(%0)\n" \
77 "\tcpushl %%bc,(%0)\n" \
79 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
/*
 * Cache_clear() semantics: invalidate the cache for the specified
 * memory range.  It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * Cache_push() semantics: push any dirty cache in the specified
 * memory range.  It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * address range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
102 /* This is needed whenever the virtual mapping of the current
106 * flush_cache_mm - Flush an mm_struct
107 * @mm: mm_struct to flush
109 static inline void flush_cache_mm(struct mm_struct
*mm
)
111 if (mm
== current
->mm
)
#define flush_cache_dup_mm(mm)	flush_cache_mm(mm)

#define flush_cache_all()	flush_bcache()
120 * flush_cache_range - Flush a cache range
122 * @start: Starting address
123 * @end: Ending address
125 * flush_cache_range must be a macro to avoid a dependency on
126 * linux/mm.h which includes this file.
128 static inline void flush_cache_range(struct vm_area_struct
*vma
,
129 unsigned long start
, unsigned long end
)
131 if (vma
->vm_mm
== current
->mm
)
133 /*cf_cache_flush_range(start, end);*/
137 * flush_cache_page - Flush a page of the cache
142 * flush_cache_page must be a macro to avoid a dependency on
143 * linux/mm.h which includes this file.
145 static inline void flush_cache_page(struct vm_area_struct
*vma
,
146 unsigned long vmaddr
, unsigned long pfn
)
148 if (vma
->vm_mm
== current
->mm
)
150 /*cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);*/
153 /* Push the page at kernel virtual address and clear the icache */
154 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
155 #define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
156 extern inline void __flush_page_to_ram(void *address
)
159 unsigned long start_set
;
160 unsigned long end_set
;
161 unsigned long addr
= (unsigned long) address
;
163 addr
&= ~(PAGE_SIZE
- 1);
164 /* round down to page start address */
166 start_set
= addr
& _ICACHE_SET_MASK
;
167 end_set
= (addr
+ PAGE_SIZE
-1) & _ICACHE_SET_MASK
;
169 if (start_set
> end_set
) {
170 /* from the begining to the lowest address */
171 for (set
= 0; set
<= end_set
; set
+= (0x10 - 3)) {
172 asm volatile("cpushl %%bc,(%0)\n"
174 "\tcpushl %%bc,(%0)\n"
176 "\tcpushl %%bc,(%0)\n"
178 "\tcpushl %%bc,(%0)" : "=a" (set
) : "a" (set
));
180 /* next loop will finish the cache ie pass the hole */
181 end_set
= LAST_ICACHE_ADDR
;
184 for (set
= start_set
; set
<= end_set
; set
+= (0x10 - 3)) {
185 asm volatile("cpushl %%bc,(%0)\n"
187 "\tcpushl %%bc,(%0)\n"
189 "\tcpushl %%bc,(%0)\n"
191 "\tcpushl %%bc,(%0)" : "=a" (set
) : "a" (set
));
195 /* Use __flush_page_to_ram() for flush_dcache_page all values are same - MW */
196 #define flush_dcache_page(page) \
197 __flush_page_to_ram((void *) page_address(page))
198 #define flush_icache_page(vma, pg) \
199 __flush_page_to_ram((void *) page_address(pg))
200 #define flush_icache_user_range(adr, len) \
203 #define flush_icache_user_page(vma, page, addr, len) \
206 /* Push n pages at kernel virtual address and clear the icache */
207 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
208 extern inline void flush_icache_range(unsigned long address
,
209 unsigned long endaddr
)
212 unsigned long start_set
;
213 unsigned long end_set
;
215 start_set
= address
& _ICACHE_SET_MASK
;
216 end_set
= endaddr
& _ICACHE_SET_MASK
;
218 if (start_set
> end_set
) {
219 /* from the begining to the lowest address */
220 for (set
= 0; set
<= end_set
; set
+= (0x10 - 3)) {
221 asm volatile("cpushl %%ic,(%0)\n"
223 "\tcpushl %%ic,(%0)\n"
225 "\tcpushl %%ic,(%0)\n"
227 "\tcpushl %%ic,(%0)" : "=a" (set
) : "a" (set
));
229 /* next loop will finish the cache ie pass the hole */
230 end_set
= LAST_ICACHE_ADDR
;
232 for (set
= start_set
; set
<= end_set
; set
+= (0x10 - 3)) {
233 asm volatile("cpushl %%ic,(%0)\n"
235 "\tcpushl %%ic,(%0)\n"
237 "\tcpushl %%ic,(%0)\n"
239 "\tcpushl %%ic,(%0)" : "=a" (set
) : "a" (set
));
243 static inline void copy_to_user_page(struct vm_area_struct
*vma
,
244 struct page
*page
, unsigned long vaddr
,
245 void *dst
, void *src
, int len
)
247 memcpy(dst
, src
, len
);
248 flush_icache_user_page(vma
, page
, vaddr
, len
);
250 static inline void copy_from_user_page(struct vm_area_struct
*vma
,
251 struct page
*page
, unsigned long vaddr
,
252 void *dst
, void *src
, int len
)
254 memcpy(dst
, src
, len
);
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#endif /* M68K_CF_548x_CACHEFLUSH_H */