openwrt.git: target/linux/brcm-2.4/patches/003-bcm47xx_cache_fixes.patch
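BCM47xx cache workarounds for linux-2.4. On BCM4710 rev A0 cores the primary cache appears to misbehave unless each D-cache operation is preceded by a dummy uncached read of the Silicon Backplane and unless the address touched by a Hit_* cache op already has a TLB entry. The diff below therefore sets a runtime `bcm4710` flag, adds BCM4710-safe blast_dcache*() helpers, issues BCM4710_DUMMY_RREG() before D-cache line ops, pre-touches addresses with BCM4710_FILL_TLB()/BCM4710_PROTECTED_FILL_TLB(), skips the R4000/R4400 secondary-cache paths, pads the exception vectors with nops, and (per the comments in the diff) enables the I- and D-caches through the CP0 diagnostic register on BCM330x-based parts.

For orientation only, the two helpers reduce to something like the sketch below. The register offset, the simplified KSEG1ADDR(), and the function names are illustrative assumptions, not code from this patch (the patch itself reads sbimstate through the sbconfig_t layout from Broadcom's sbconfig.h and uses macros, not functions):

    #include <stdint.h>

    #define SB_ENUM_BASE   0x18000000UL  /* backplane enumeration space (assumed, from sbconfig.h) */
    #define SBCONFIGOFF    0xf00UL       /* per-core config register block (assumed) */
    #define SBIMSTATE_OFF  0x90UL        /* sbimstate offset within that block (assumed) */
    /* Simplified uncached (KSEG1) mapping of a physical address. */
    #define KSEG1ADDR(a)   (((unsigned long)(a) & 0x1fffffffUL) | 0xa0000000UL)

    /* Dummy uncached read of sbimstate; the patch issues this before each
     * D-cache op so the core waits for the backplane to settle first. */
    static inline void bcm4710_dummy_rreg(void)
    {
            (void)*(volatile uint32_t *)KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF + SBIMSTATE_OFF);
    }

    /* Touch an address so a TLB entry exists before a Hit_* cache op uses it. */
    static inline void bcm4710_fill_tlb(unsigned long addr)
    {
            (void)*(volatile unsigned long *)addr;
    }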
diff -urN linux.old/arch/mips/kernel/entry.S linux.dev/arch/mips/kernel/entry.S
--- linux.old/arch/mips/kernel/entry.S 2005-07-05 16:46:49.000000000 +0200
+++ linux.dev/arch/mips/kernel/entry.S 2005-07-06 11:23:55.000000000 +0200
@@ -100,6 +100,10 @@
  * and R4400 SC and MC versions.
  */
 NESTED(except_vec3_generic, 0, sp)
+#ifdef CONFIG_BCM4710
+        nop
+        nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
         mfc0 k0, CP0_INDEX
 #endif
diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
--- linux.old/arch/mips/mm/c-r4k.c 2005-07-05 16:46:49.000000000 +0200
+++ linux.dev/arch/mips/mm/c-r4k.c 2005-07-06 11:23:55.000000000 +0200
@@ -14,6 +14,12 @@
 #include <linux/mm.h>
 #include <linux/bitops.h>
 
+#ifdef CONFIG_BCM4710
+#include "../bcm947xx/include/typedefs.h"
+#include "../bcm947xx/include/sbconfig.h"
+#include <asm/paccess.h>
+#endif
+
 #include <asm/bcache.h>
 #include <asm/bootinfo.h>
 #include <asm/cacheops.h>
@@ -40,6 +46,8 @@
         .bc_inv = (void *)no_sc_noop
 };
 
+int bcm4710 = 0;
+EXPORT_SYMBOL(bcm4710);
 struct bcache_ops *bcops = &no_sc_ops;
 
 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
@@ -64,8 +72,10 @@
 static inline void r4k_blast_dcache_page_setup(void)
 {
         unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-
-        if (dc_lsize == 16)
+
+        if (bcm4710)
+                r4k_blast_dcache_page = blast_dcache_page;
+        else if (dc_lsize == 16)
                 r4k_blast_dcache_page = blast_dcache16_page;
         else if (dc_lsize == 32)
                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ -77,7 +87,9 @@
 {
         unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
-        if (dc_lsize == 16)
+        if (bcm4710)
+                r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+        else if (dc_lsize == 16)
                 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
         else if (dc_lsize == 32)
                 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ -89,7 +101,9 @@
 {
         unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
-        if (dc_lsize == 16)
+        if (bcm4710)
+                r4k_blast_dcache = blast_dcache;
+        else if (dc_lsize == 16)
                 r4k_blast_dcache = blast_dcache16;
         else if (dc_lsize == 32)
                 r4k_blast_dcache = blast_dcache32;
@@ -266,6 +280,7 @@
         r4k_blast_dcache();
         r4k_blast_icache();
 
+        if (!bcm4710)
         switch (current_cpu_data.cputype) {
         case CPU_R4000SC:
         case CPU_R4000MC:
@@ -304,10 +319,10 @@
          * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
          * only flush the primary caches but R10000 and R12000 behave sane ...
          */
-        if (current_cpu_data.cputype == CPU_R4000SC ||
+        if (!bcm4710 && (current_cpu_data.cputype == CPU_R4000SC ||
             current_cpu_data.cputype == CPU_R4000MC ||
             current_cpu_data.cputype == CPU_R4400SC ||
-            current_cpu_data.cputype == CPU_R4400MC)
+            current_cpu_data.cputype == CPU_R4400MC))
                 r4k_blast_scache();
 }
 
@@ -383,12 +398,15 @@
         unsigned long ic_lsize = current_cpu_data.icache.linesz;
         unsigned long addr, aend;
 
+        addr = start & ~(dc_lsize - 1);
+        aend = (end - 1) & ~(dc_lsize - 1);
+
         if (!cpu_has_ic_fills_f_dc) {
                 if (end - start > dcache_size)
                         r4k_blast_dcache();
                 else {
-                        addr = start & ~(dc_lsize - 1);
-                        aend = (end - 1) & ~(dc_lsize - 1);
+                        BCM4710_PROTECTED_FILL_TLB(addr);
+                        BCM4710_PROTECTED_FILL_TLB(aend);
 
                         while (1) {
                                 /* Hit_Writeback_Inv_D */
@@ -403,8 +421,6 @@
         if (end - start > icache_size)
                 r4k_blast_icache();
         else {
-                addr = start & ~(ic_lsize - 1);
-                aend = (end - 1) & ~(ic_lsize - 1);
                 while (1) {
                         /* Hit_Invalidate_I */
                         protected_flush_icache_line(addr);
@@ -413,6 +429,9 @@
                         addr += ic_lsize;
                 }
         }
+
+        if (bcm4710)
+                flush_cache_all();
 }
 
 /*
@@ -443,7 +462,8 @@
         if (cpu_has_subset_pcaches) {
                 unsigned long addr = (unsigned long) page_address(page);
 
-                r4k_blast_scache_page(addr);
+                if (!bcm4710)
+                        r4k_blast_scache_page(addr);
                 ClearPageDcacheDirty(page);
 
                 return;
@@ -451,6 +471,7 @@
 
         if (!cpu_has_ic_fills_f_dc) {
                 unsigned long addr = (unsigned long) page_address(page);
+
                 r4k_blast_dcache_page(addr);
                 ClearPageDcacheDirty(page);
         }
@@ -477,7 +498,7 @@
         /* Catch bad driver code */
         BUG_ON(size == 0);
 
-        if (cpu_has_subset_pcaches) {
+        if (!bcm4710 && cpu_has_subset_pcaches) {
                 unsigned long sc_lsize = current_cpu_data.scache.linesz;
 
                 if (size >= scache_size) {
@@ -509,6 +530,8 @@
         R4600_HIT_CACHEOP_WAR_IMPL;
         a = addr & ~(dc_lsize - 1);
         end = (addr + size - 1) & ~(dc_lsize - 1);
+        BCM4710_FILL_TLB(a);
+        BCM4710_FILL_TLB(end);
         while (1) {
                 flush_dcache_line(a); /* Hit_Writeback_Inv_D */
                 if (a == end)
@@ -527,7 +550,7 @@
         /* Catch bad driver code */
         BUG_ON(size == 0);
 
-        if (cpu_has_subset_pcaches) {
+        if (!bcm4710 && (cpu_has_subset_pcaches)) {
                 unsigned long sc_lsize = current_cpu_data.scache.linesz;
 
                 if (size >= scache_size) {
@@ -554,6 +577,8 @@
         R4600_HIT_CACHEOP_WAR_IMPL;
         a = addr & ~(dc_lsize - 1);
         end = (addr + size - 1) & ~(dc_lsize - 1);
+        BCM4710_FILL_TLB(a);
+        BCM4710_FILL_TLB(end);
         while (1) {
                 flush_dcache_line(a); /* Hit_Writeback_Inv_D */
                 if (a == end)
@@ -577,6 +602,8 @@
         unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
         R4600_HIT_CACHEOP_WAR_IMPL;
+        BCM4710_PROTECTED_FILL_TLB(addr);
+        BCM4710_PROTECTED_FILL_TLB(addr + 4);
         protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
         protected_flush_icache_line(addr & ~(ic_lsize - 1));
         if (MIPS4K_ICACHE_REFILL_WAR) {
@@ -986,10 +1013,12 @@
         case CPU_R4000MC:
         case CPU_R4400SC:
         case CPU_R4400MC:
-                probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
-                sc_present = probe_scache_kseg1(config);
-                if (sc_present)
-                        c->options |= MIPS_CPU_CACHE_CDEX_S;
+                if (!bcm4710) {
+                        probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
+                        sc_present = probe_scache_kseg1(config);
+                        if (sc_present)
+                                c->options |= MIPS_CPU_CACHE_CDEX_S;
+                }
                 break;
 
         case CPU_R10000:
@@ -1041,6 +1070,19 @@
 static inline void coherency_setup(void)
 {
         change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
+
+#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) || defined(CONFIG_BCM5365)
+        if (BCM330X(current_cpu_data.processor_id)) {
+                uint32 cm;
+
+                cm = read_c0_diag();
+                /* Enable icache */
+                cm |= (1 << 31);
+                /* Enable dcache */
+                cm |= (1 << 30);
+                write_c0_diag(cm);
+        }
+#endif
 
         /*
          * c0_status.cu=0 specifies that updates by the sc instruction use
@@ -1073,6 +1115,12 @@
         memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
         memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80);
 
+        if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & PRID_REV_MASK) == 0) {
+                printk("Enabling BCM4710A0 cache workarounds.\n");
+                bcm4710 = 1;
+        } else
+                bcm4710 = 0;
+
         probe_pcache();
         setup_scache();
 
diff -urN linux.old/arch/mips/mm/tlbex-mips32.S linux.dev/arch/mips/mm/tlbex-mips32.S
--- linux.old/arch/mips/mm/tlbex-mips32.S 2005-07-05 16:46:49.000000000 +0200
+++ linux.dev/arch/mips/mm/tlbex-mips32.S 2005-07-06 11:23:56.000000000 +0200
@@ -90,6 +90,9 @@
         .set noat
         LEAF(except_vec0_r4000)
         .set mips3
+#ifdef CONFIG_BCM4704
+        nop
+#endif
 #ifdef CONFIG_SMP
         mfc0 k1, CP0_CONTEXT
         la k0, pgd_current
diff -urN linux.old/include/asm-mips/r4kcache.h linux.dev/include/asm-mips/r4kcache.h
--- linux.old/include/asm-mips/r4kcache.h 2005-07-05 16:46:49.000000000 +0200
+++ linux.dev/include/asm-mips/r4kcache.h 2005-07-06 12:52:57.000000000 +0200
@@ -15,6 +15,18 @@
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 
+#ifdef CONFIG_BCM4710
+#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)
+
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 #define cache_op(op,addr) \
         __asm__ __volatile__( \
                 " .set noreorder \n" \
@@ -27,12 +39,25 @@
 
 static inline void flush_icache_line_indexed(unsigned long addr)
 {
-        cache_op(Index_Invalidate_I, addr);
+        unsigned int way;
+        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+
+        for (way = 0; way < current_cpu_data.dcache.ways; way++) {
+                cache_op(Index_Invalidate_I, addr);
+                addr += ws_inc;
+        }
 }
 
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
-        cache_op(Index_Writeback_Inv_D, addr);
+        unsigned int way;
+        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+
+        for (way = 0; way < current_cpu_data.dcache.ways; way++) {
+                BCM4710_DUMMY_RREG();
+                cache_op(Index_Writeback_Inv_D, addr);
+                addr += ws_inc;
+        }
 }
 
 static inline void flush_scache_line_indexed(unsigned long addr)
@@ -47,6 +72,7 @@
 
 static inline void flush_dcache_line(unsigned long addr)
 {
+        BCM4710_DUMMY_RREG();
         cache_op(Hit_Writeback_Inv_D, addr);
 }
 
@@ -91,6 +117,7 @@
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+        BCM4710_DUMMY_RREG();
         __asm__ __volatile__(
                 ".set noreorder\n\t"
                 ".set mips3\n"
@@ -138,6 +165,62 @@
                 : "r" (base), \
                   "i" (op));
 
+#define cache_unroll(base,op) \
+        __asm__ __volatile__(" \
+                .set noreorder; \
+                .set mips3; \
+                cache %1, (%0); \
+                .set mips0; \
+                .set reorder" \
+                : \
+                : "r" (base), \
+                  "i" (op));
+
+
+static inline void blast_dcache(void)
+{
+        unsigned long start = KSEG0;
+        unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+        unsigned long end = (start + dcache_size);
+
+        while(start < end) {
+                BCM4710_DUMMY_RREG();
+                cache_unroll(start,Index_Writeback_Inv_D);
+                start += current_cpu_data.dcache.linesz;
+        }
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+        unsigned long start = page;
+        unsigned long end = start + PAGE_SIZE;
+
+        BCM4710_FILL_TLB(start);
+        do {
+                BCM4710_DUMMY_RREG();
+                cache_unroll(start,Hit_Writeback_Inv_D);
+                start += current_cpu_data.dcache.linesz;
+        } while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+        unsigned long start = page;
+        unsigned long end = start + PAGE_SIZE;
+        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+        unsigned long ws_end = current_cpu_data.dcache.ways <<
+                               current_cpu_data.dcache.waybit;
+        unsigned long ws, addr;
+
+        for (ws = 0; ws < ws_end; ws += ws_inc) {
+                start = page + ws;
+                for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+                        BCM4710_DUMMY_RREG();
+                        cache_unroll(addr,Index_Writeback_Inv_D);
+                }
+        }
+}
+
 static inline void blast_dcache16(void)
 {
         unsigned long start = KSEG0;
@@ -148,8 +231,9 @@
         unsigned long ws, addr;
 
         for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
+                for (addr = start; addr < end; addr += 0x200) {
                         cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
+                }
 }
 
 static inline void blast_dcache16_page(unsigned long page)
@@ -173,8 +257,9 @@
         unsigned long ws, addr;
 
         for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
+                for (addr = start; addr < end; addr += 0x200) {
                         cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
+                }
 }
 
 static inline void blast_icache16(void)
@@ -196,6 +281,7 @@
         unsigned long start = page;
         unsigned long end = start + PAGE_SIZE;
 
+        BCM4710_FILL_TLB(start);
         do {
                 cache16_unroll32(start,Hit_Invalidate_I);
                 start += 0x200;
@@ -281,6 +367,7 @@
                 : "r" (base), \
                   "i" (op));
 
+
 static inline void blast_dcache32(void)
 {
         unsigned long start = KSEG0;
@@ -291,8 +378,9 @@
         unsigned long ws, addr;
 
         for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
+                for (addr = start; addr < end; addr += 0x400) {
                         cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+                }
 }
 
 static inline void blast_dcache32_page(unsigned long page)
@@ -316,8 +404,9 @@
         unsigned long ws, addr;
 
         for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
+                for (addr = start; addr < end; addr += 0x400) {
                         cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+                }
 }
 
 static inline void blast_icache32(void)
@@ -339,6 +428,7 @@
         unsigned long start = page;
         unsigned long end = start + PAGE_SIZE;
 
+        BCM4710_FILL_TLB(start);
         do {
                 cache32_unroll32(start,Hit_Invalidate_I);
                 start += 0x400;
@@ -443,6 +533,7 @@
         unsigned long start = page;
         unsigned long end = start + PAGE_SIZE;
 
+        BCM4710_FILL_TLB(start);
         do {
                 cache64_unroll32(start,Hit_Invalidate_I);
                 start += 0x800;
diff -urN linux.old/include/asm-mips/stackframe.h linux.dev/include/asm-mips/stackframe.h
--- linux.old/include/asm-mips/stackframe.h 2005-07-05 16:46:49.000000000 +0200
+++ linux.dev/include/asm-mips/stackframe.h 2005-07-06 11:23:56.000000000 +0200
@@ -209,6 +209,20 @@
 
 #endif
 
+#if defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704)
+
+#undef RESTORE_SP_AND_RET
+#define RESTORE_SP_AND_RET \
+        lw sp, PT_R29(sp); \
+        .set mips3; \
+        nop; \
+        nop; \
+        eret; \
+        .set mips0
+
+#endif
+
+
 #define RESTORE_SP \
         lw sp, PT_R29(sp); \
 
diff -urN linux.old/mm/memory.c linux.dev/mm/memory.c
--- linux.old/mm/memory.c 2005-04-04 03:42:20.000000000 +0200
+++ linux.dev/mm/memory.c 2005-07-06 11:23:56.000000000 +0200
@@ -925,6 +925,7 @@
         flush_page_to_ram(new_page);
         flush_cache_page(vma, address);
         establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+        flush_icache_page(vma, new_page);
 }
 
 /*