/*
 * arch/m68k/include/asm/cf_548x_cacheflush.h - Coldfire 547x/548x Cache
 *
 * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 * Kurt Mahan kmahan@freescale.com
 * Shrek Wu b16972@freescale.com
 *
 * Based on include/asm-m68k/cacheflush.h
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#ifndef M68K_CF_548x_CACHEFLUSH_H
#define M68K_CF_548x_CACHEFLUSH_H

#include <asm/cfcache.h>

/*
 * Cache handling functions.
 *
 * The CPUSHL instruction pushes and invalidates one cache line,
 * selected by the way and set index encoded in the address register:
 * the low bits hold the way, the bits from bit 4 upward the set
 * index.  Each loop iteration below therefore pushes all four ways of
 * one set - the three inline "addq #1" instructions step the way
 * field - and the loop increment of 0x10 - 3 then advances to the
 * next set index (a net step of 0x10 per iteration).
 */
#define flush_icache() \
({ \
	unsigned long set; \
	unsigned long start_set; \
	unsigned long end_set; \
	\
	start_set = 0; \
	end_set = (unsigned long)LAST_DCACHE_ADDR; \
	\
	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
		asm volatile("cpushl %%ic,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%ic,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%ic,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
	} \
})

#define flush_dcache() \
({ \
	unsigned long set; \
	unsigned long start_set; \
	unsigned long end_set; \
	\
	start_set = 0; \
	end_set = (unsigned long)LAST_DCACHE_ADDR; \
	\
	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
		asm volatile("cpushl %%dc,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%dc,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%dc,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
	} \
})

#define flush_bcache() \
({ \
	unsigned long set; \
	unsigned long start_set; \
	unsigned long end_set; \
	\
	start_set = 0; \
	end_set = (unsigned long)LAST_DCACHE_ADDR; \
	\
	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
		asm volatile("cpushl %%bc,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%bc,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%bc,(%0)\n" \
			"\taddq%.l #1,%0\n" \
			"\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
	} \
})

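/*
 * Illustrative sketch only, not part of this header: assuming that
 * LAST_DCACHE_ADDR from <asm/cfcache.h> is the highest set-index
 * value of the 4-way caches (the I- and D-caches on these parts have
 * the same geometry, which is why flush_icache() can reuse the
 * D-cache bound), the traversal above is equivalent to this plain C,
 * with cpushl() standing in for one CPUSHL instruction:
 *
 *	unsigned long set, way;
 *
 *	for (set = 0; set <= LAST_DCACHE_ADDR; set += 0x10)
 *		for (way = 0; way < 4; way++)
 *			cpushl(set + way);	// push+invalidate one line
 */
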
/*
 * Invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * Push any dirty cache in the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);

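/*
 * Hedged usage sketch (hypothetical driver code, not part of this
 * header): on a non-coherent system, streaming DMA brackets its
 * transfers with these helpers:
 *
 *	cache_push(buf_paddr, len);	// write back dirty lines before
 *	dma_start_to_device(...);	// the device reads the buffer
 *
 *	dma_start_from_device(...);
 *	dma_wait_done(...);
 *	cache_clear(buf_paddr, len);	// drop stale lines before the
 *					// CPU reads what the device wrote
 *
 * dma_start_*() and dma_wait_done() are placeholders for a real
 * controller interface.
 */
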
/* This is needed whenever the virtual mapping of the current
   process changes. */

/**
 * flush_cache_mm - Flush an mm_struct
 * @mm: mm_struct to flush
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		flush_bcache();
}

#define flush_cache_dup_mm(mm)	flush_cache_mm(mm)

#define flush_cache_all()	flush_bcache()

/**
 * flush_cache_range - Flush a cache range
 * @vma: vma struct
 * @start: Starting address
 * @end: Ending address
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->mm)
		flush_bcache();
	/*cf_cache_flush_range(start, end);*/
}

/**
 * flush_cache_page - Flush a page of the cache
 * @vma: vma struct
 * @vmaddr: virtual address of the page
 * @pfn: page frame number
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		flush_bcache();
	/*cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);*/
}

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
static inline void __flush_page_to_ram(void *address)
{
	unsigned long set;
	unsigned long start_set;
	unsigned long end_set;
	unsigned long addr = (unsigned long) address;

	addr &= ~(PAGE_SIZE - 1);	/* round down to page start address */

	start_set = addr & _ICACHE_SET_MASK;
	end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;

	if (start_set > end_set) {
		/*
		 * The range wraps around the top of the set-index
		 * space: flush from the beginning of the cache up to
		 * the lowest address first.
		 */
		for (set = 0; set <= end_set; set += (0x10 - 3)) {
			asm volatile("cpushl %%bc,(%0)\n"
				"\taddq%.l #1,%0\n"
				"\tcpushl %%bc,(%0)\n"
				"\taddq%.l #1,%0\n"
				"\tcpushl %%bc,(%0)\n"
				"\taddq%.l #1,%0\n"
				"\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
		}
		/* the next loop will finish the flush, i.e. pass the hole */
		end_set = LAST_ICACHE_ADDR;
	}

	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
		asm volatile("cpushl %%bc,(%0)\n"
			"\taddq%.l #1,%0\n"
			"\tcpushl %%bc,(%0)\n"
			"\taddq%.l #1,%0\n"
			"\tcpushl %%bc,(%0)\n"
			"\taddq%.l #1,%0\n"
			"\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
	}
}

/*
 * __flush_page_to_ram() works for flush_dcache_page() and
 * flush_icache_page() as well, since it pushes and invalidates the
 * whole page through %bc - MW
 */
#define flush_dcache_page(page) \
	__flush_page_to_ram((void *) page_address(page))
#define flush_icache_page(vma, pg) \
	__flush_page_to_ram((void *) page_address(pg))
#define flush_icache_user_range(adr, len) \
	do { } while (0)
#define flush_icache_user_page(vma, page, addr, len) \
	do { } while (0)

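/*
 * Hedged usage sketch (hypothetical, not from this file): code that
 * writes instructions into a page a task may later execute typically
 * does something like:
 *
 *	void *kaddr = page_address(page);	// lowmem page assumed
 *	memcpy(kaddr, insns, len);
 *	flush_dcache_page(page);	// push the new data ...
 *	flush_icache_page(vma, page);	// ... same full-page push here
 */
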
206 /* Push n pages at kernel virtual address and clear the icache */
207 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
208 extern inline void flush_icache_range(unsigned long address,
209 unsigned long endaddr)
210 {
211 unsigned long set;
212 unsigned long start_set;
213 unsigned long end_set;
214
215 start_set = address & _ICACHE_SET_MASK;
216 end_set = endaddr & _ICACHE_SET_MASK;
217
218 if (start_set > end_set) {
219 /* from the begining to the lowest address */
220 for (set = 0; set <= end_set; set += (0x10 - 3)) {
221 asm volatile("cpushl %%ic,(%0)\n"
222 "\taddq%.l #1,%0\n"
223 "\tcpushl %%ic,(%0)\n"
224 "\taddq%.l #1,%0\n"
225 "\tcpushl %%ic,(%0)\n"
226 "\taddq%.l #1,%0\n"
227 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
228 }
229 /* next loop will finish the cache ie pass the hole */
230 end_set = LAST_ICACHE_ADDR;
231 }
232 for (set = start_set; set <= end_set; set += (0x10 - 3)) {
233 asm volatile("cpushl %%ic,(%0)\n"
234 "\taddq%.l #1,%0\n"
235 "\tcpushl %%ic,(%0)\n"
236 "\taddq%.l #1,%0\n"
237 "\tcpushl %%ic,(%0)\n"
238 "\taddq%.l #1,%0\n"
239 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
240 }
241 }
242
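/*
 * Illustrative sketch only (the numbers are hypothetical): because
 * only the set-index bits of the addresses are kept, an arbitrary
 * code range can wrap around the top of the set-index space.  E.g.
 * with a set mask of 0x1ff0, address = 0x41f80 and endaddr = 0x42080
 * give
 *
 *	start_set = 0x41f80 & 0x1ff0 = 0x1f80
 *	end_set   = 0x42080 & 0x1ff0 = 0x0080
 *
 * so start_set > end_set and the range is flushed as the two
 * intervals [0, 0x0080] and [0x1f80, LAST_ICACHE_ADDR].
 */
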
static inline void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr,
	void *dst, void *src, int len)
{
	memcpy(dst, src, len);
	flush_icache_user_page(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr,
	void *dst, void *src, int len)
{
	memcpy(dst, src, len);
}

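/*
 * Context note: copy_to_user_page() is used when the kernel writes
 * into another task's address space (e.g. a debugger planting a
 * breakpoint via ptrace); the API expects it to keep the icache
 * coherent after the memcpy(), although flush_icache_user_page() is
 * a no-op in this port.  copy_from_user_page() only reads, so it
 * never flushes.
 */
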
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#endif /* M68K_CF_548x_CACHEFLUSH_H */