target/linux/xburst/patches-2.6.34/002-xburst-cache-quirks.patch
From 3d317cc06fce61787e4429b98d6073e69a6b6cd7 Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen <lars@metafoo.de>
Date: Sat, 24 Apr 2010 17:34:29 +0200
Subject: [PATCH] JZ4740 cache quirks

---
 arch/mips/include/asm/r4kcache.h | 231 ++++++++++++++++++++++++++++++++++++++
 1 files changed, 231 insertions(+), 0 deletions(-)

diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 387bf59..b500056 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,58 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsmtregs.h>
 
+#ifdef CONFIG_JZRISC
+
+#define K0_TO_K1() \
+do { \
+ unsigned long __k0_addr; \
+ \
+ __asm__ __volatile__( \
+ "la %0, 1f\n\t" \
+ "or %0, %0, %1\n\t" \
+ "jr %0\n\t" \
+ "nop\n\t" \
+ "1: nop\n" \
+ : "=&r"(__k0_addr) \
+ : "r" (0x20000000) ); \
+} while(0)
+
+#define K1_TO_K0() \
+do { \
+ unsigned long __k0_addr; \
+ __asm__ __volatile__( \
+ "nop;nop;nop;nop;nop;nop;nop\n\t" \
+ "la %0, 1f\n\t" \
+ "jr %0\n\t" \
+ "nop\n\t" \
+ "1: nop\n" \
+ : "=&r" (__k0_addr)); \
+} while (0)
+
+#define INVALIDATE_BTB() \
+do { \
+ unsigned long tmp; \
+ __asm__ __volatile__( \
+ ".set mips32\n\t" \
+ "mfc0 %0, $16, 7\n\t" \
+ "nop\n\t" \
+ "ori %0, 2\n\t" \
+ "mtc0 %0, $16, 7\n\t" \
+ "nop\n\t" \
+ : "=&r" (tmp)); \
+} while (0)
+
+#define SYNC_WB() __asm__ __volatile__ ("sync")
+
+#else /* CONFIG_JZRISC */
+
+#define K0_TO_K1() do { } while (0)
+#define K1_TO_K0() do { } while (0)
+#define INVALIDATE_BTB() do { } while (0)
+#define SYNC_WB() do { } while (0)
+
+#endif /* CONFIG_JZRISC */
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations. Two issues here:
@@ -144,6 +196,7 @@ static inline void flush_icache_line_indexed(unsigned long addr)
 {
 __iflush_prologue
 cache_op(Index_Invalidate_I, addr);
+ INVALIDATE_BTB();
 __iflush_epilogue
 }
 
@@ -151,6 +204,7 @@ static inline void flush_dcache_line_indexed(unsigned long addr)
 {
 __dflush_prologue
 cache_op(Index_Writeback_Inv_D, addr);
+ SYNC_WB();
 __dflush_epilogue
 }
 
@@ -163,6 +217,7 @@ static inline void flush_icache_line(unsigned long addr)
 {
 __iflush_prologue
 cache_op(Hit_Invalidate_I, addr);
+ INVALIDATE_BTB();
 __iflush_epilogue
 }
 
@@ -170,6 +225,7 @@ static inline void flush_dcache_line(unsigned long addr)
 {
 __dflush_prologue
 cache_op(Hit_Writeback_Inv_D, addr);
+ SYNC_WB();
 __dflush_epilogue
 }
 
@@ -177,6 +233,7 @@ static inline void invalidate_dcache_line(unsigned long addr)
 {
 __dflush_prologue
 cache_op(Hit_Invalidate_D, addr);
+ SYNC_WB();
 __dflush_epilogue
 }
 
@@ -209,6 +266,7 @@ static inline void flush_scache_line(unsigned long addr)
 static inline void protected_flush_icache_line(unsigned long addr)
 {
 protected_cache_op(Hit_Invalidate_I, addr);
+ INVALIDATE_BTB();
 }
 
 /*
@@ -220,6 +278,7 @@ static inline void protected_flush_icache_line(unsigned long addr)
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
 protected_cache_op(Hit_Writeback_Inv_D, addr);
+ SYNC_WB();
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
@@ -396,8 +455,10 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+#endif
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
@@ -405,12 +466,122 @@ __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
 
 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
+#endif
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
 
+#ifdef CONFIG_JZRISC
+
+static inline void blast_dcache32(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + current_cpu_data.dcache.waysize;
+ unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+ unsigned long ws_end = current_cpu_data.dcache.ways <<
+ current_cpu_data.dcache.waybit;
+ unsigned long ws, addr;
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+ SYNC_WB();
+}
+
+static inline void blast_dcache32_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ do {
+ cache32_unroll32(start,Hit_Writeback_Inv_D);
+ start += 0x400;
+ } while (start < end);
+
+ SYNC_WB();
+}
+
+static inline void blast_dcache32_page_indexed(unsigned long page)
+{
+ unsigned long indexmask = current_cpu_data.dcache.waysize - 1;
+ unsigned long start = INDEX_BASE + (page & indexmask);
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+ unsigned long ws_end = current_cpu_data.dcache.ways <<
+ current_cpu_data.dcache.waybit;
+ unsigned long ws, addr;
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+ SYNC_WB();
+}
+
+static inline void blast_icache32(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + current_cpu_data.icache.waysize;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ K0_TO_K1();
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+static inline void blast_icache32_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ K0_TO_K1();
+
+ do {
+ cache32_unroll32(start,Hit_Invalidate_I);
+ start += 0x400;
+ } while (start < end);
+
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+static inline void blast_icache32_page_indexed(unsigned long page)
+{
+ unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+ unsigned long start = INDEX_BASE + (page & indexmask);
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ K0_TO_K1();
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+#endif /* CONFIG_JZRISC */
+
 /* build blast_xxx_range, protected_blast_xxx_range */
 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
 static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
@@ -432,13 +603,73 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
 __##pfx##flush_epilogue \
 }
 
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
+#endif
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
+#endif
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
 /* blast_inv_dcache_range */
 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
 __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
 
+#ifdef CONFIG_JZRISC
+
+static inline void protected_blast_dcache_range(unsigned long start,
+ unsigned long end)
+{
+ unsigned long lsize = cpu_dcache_line_size();
+ unsigned long addr = start & ~(lsize - 1);
+ unsigned long aend = (end - 1) & ~(lsize - 1);
+
+ while (1) {
+ protected_cache_op(Hit_Writeback_Inv_D, addr);
+ if (addr == aend)
+ break;
+ addr += lsize;
+ }
+ SYNC_WB();
+}
+
+static inline void protected_blast_icache_range(unsigned long start,
+ unsigned long end)
+{
+ unsigned long lsize = cpu_icache_line_size();
+ unsigned long addr = start & ~(lsize - 1);
+ unsigned long aend = (end - 1) & ~(lsize - 1);
+
+ K0_TO_K1();
+
+ while (1) {
+ protected_cache_op(Hit_Invalidate_I, addr);
+ if (addr == aend)
+ break;
+ addr += lsize;
+ }
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+static inline void blast_dcache_range(unsigned long start,
+ unsigned long end)
+{
+ unsigned long lsize = cpu_dcache_line_size();
+ unsigned long addr = start & ~(lsize - 1);
+ unsigned long aend = (end - 1) & ~(lsize - 1);
+
+ while (1) {
+ cache_op(Hit_Writeback_Inv_D, addr);
+ if (addr == aend)
+ break;
+ addr += lsize;
+ }
+ SYNC_WB();
+}
+
+#endif /* CONFIG_JZRISC */
+
 #endif /* _ASM_R4KCACHE_H */
--
1.5.6.5
