[ar71xx] enable yaffs on 2.6.28
[openwrt.git] / target / linux / brcm47xx / patches-2.6.25 / 150-cpu_fixes.patch
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -51,6 +51,10 @@ NESTED(except_vec1_generic, 0, sp)
 NESTED(except_vec3_generic, 0, sp)
 	.set	push
 	.set	noat
+#ifdef CONFIG_BCM47XX
+	nop
+	nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
 	mfc0	k0, CP0_INDEX
 #endif
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -33,6 +33,9 @@
 #include <asm/cacheflush.h> /* for run_uncached() */
 
 
+/* For enabling BCM4710 cache workarounds */
+int bcm4710 = 0;
+
 /*
  * Special Variant of smp_call_function for use by cache functions:
  *
@@ -97,6 +100,9 @@ static void __cpuinit r4k_blast_dcache_p
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page = blast_dcache_page;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -111,6 +117,9 @@ static void __cpuinit r4k_blast_dcache_p
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page_indexed = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -125,6 +134,9 @@ static void __cpuinit r4k_blast_dcache_s
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache = blast_dcache;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -630,6 +642,8 @@ static void local_r4k_flush_cache_sigtra
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
+	BCM4710_PROTECTED_FILL_TLB(addr);
+	BCM4710_PROTECTED_FILL_TLB(addr + 4);
 	if (dc_lsize)
 		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	if (!cpu_icache_snoops_remote_store && scache_size)
@@ -1215,6 +1229,17 @@ static void __cpuinit coherency_setup(vo
 	 * silly idea of putting something else there ...
 	 */
 	switch (current_cpu_type()) {
+	case CPU_BCM3302:
+		{
+			u32 cm;
+			cm = read_c0_diag();
+			/* Enable icache (1 << 31 would shift into the sign bit of
+			 * a signed int, which is undefined behavior; use 1U) */
+			cm |= (1U << 31);
+			/* Enable dcache */
+			cm |= (1U << 30);
+			write_c0_diag(cm);
+		}
+		break;
 	case CPU_R4000PC:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -1254,6 +1279,15 @@ void __cpuinit r4k_cache_init(void)
 		break;
 	}
 
+	/* Check if special workarounds are required */
+#ifdef CONFIG_BCM47XX
+	if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & 0xff) == 0) {
+		printk("Enabling BCM4710A0 cache workarounds.\n");
+		bcm4710 = 1;
+	} else
+#endif
+		bcm4710 = 0;
+
 	probe_pcache();
 	setup_scache();
 
@@ -1303,5 +1337,13 @@ void __cpuinit r4k_cache_init(void)
 	build_clear_page();
 	build_copy_page();
 	local_r4k___flush_cache_all(NULL);
+#ifdef CONFIG_BCM47XX
+	{
+		static void (*_coherency_setup)(void);
+		_coherency_setup = (void (*)(void)) KSEG1ADDR(coherency_setup);
+		_coherency_setup();
+	}
+#else
 	coherency_setup();
+#endif
 }
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -677,6 +677,9 @@ static void __cpuinit build_r4000_tlb_re
 		/* No need for uasm_i_nop */
 	}
 
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(&p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
@@ -1084,6 +1087,9 @@ build_r4000_tlbchange_handler_head(u32 *
 				   struct uasm_reloc **r, unsigned int pte,
 				   unsigned int ptr)
 {
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
 #else
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -17,6 +17,20 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsmtregs.h>
 
+#ifdef CONFIG_BCM47XX
+#include <asm/paccess.h>
+#include <linux/ssb/ssb.h>
+#define BCM4710_DUMMY_RREG() ((void) *((u8 *) KSEG1ADDR(SSB_ENUM_BASE + SSB_IMSTATE)))
+
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations.  Two issues here:
@@ -150,6 +164,7 @@ static inline void flush_icache_line_ind
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Index_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -169,6 +184,7 @@ static inline void flush_icache_line(uns
 static inline void flush_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -176,6 +192,7 @@ static inline void flush_dcache_line(uns
 static inline void invalidate_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Invalidate_D, addr);
 	__dflush_epilogue
 }
@@ -208,6 +225,7 @@ static inline void flush_scache_line(uns
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	protected_cache_op(Hit_Invalidate_I, addr);
 }
 
@@ -219,6 +237,7 @@ static inline void protected_flush_icach
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	protected_cache_op(Hit_Writeback_Inv_D, addr);
 }
 
@@ -339,8 +358,52 @@ static inline void invalidate_tcache_pag
 		: "r" (base),						\
 		  "i" (op));
 
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+	unsigned long end = (start + dcache_size);
+
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Index_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while(start < end);
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+
+	BCM4710_FILL_TLB(start);
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Hit_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while(start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+	                       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+	for (ws = 0; ws < ws_end; ws += ws_inc) {
+		start = page + ws;
+		for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+			BCM4710_DUMMY_RREG();
+			cache_op(Index_Writeback_Inv_D, addr);
+		}
+	}
+}
+
+
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, war) \
 static inline void blast_##pfx##cache##lsize(void)			\
 {									\
 	unsigned long start = INDEX_BASE;				\
@@ -352,6 +415,7 @@ static inline void blast_##pfx##cache##l
 									\
 	__##pfx##flush_prologue						\
 									\
+	war								\
 	for (ws = 0; ws < ws_end; ws += ws_inc)			\
 		for (addr = start; addr < end; addr += lsize * 32)	\
 			cache##lsize##_unroll32(addr|ws, indexop);	\
@@ -366,6 +430,7 @@ static inline void blast_##pfx##cache##l
 									\
 	__##pfx##flush_prologue						\
 									\
+	war								\
 	do {								\
 		cache##lsize##_unroll32(start, hitop);			\
 		start += lsize * 32;					\
@@ -384,6 +449,8 @@ static inline void blast_##pfx##cache##l
 			current_cpu_data.desc.waybit;			\
 	unsigned long ws, addr;						\
 									\
+	war								\
+									\
 	__##pfx##flush_prologue						\
 									\
 	for (ws = 0; ws < ws_end; ws += ws_inc)			\
@@ -393,35 +460,37 @@ static inline void blast_##pfx##cache##l
 	__##pfx##flush_epilogue						\
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, war, war2) \
 static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
 						    unsigned long end) \
 {									\
 	unsigned long lsize = cpu_##desc##_line_size();			\
 	unsigned long addr = start & ~(lsize - 1);			\
 	unsigned long aend = (end - 1) & ~(lsize - 1);			\
+	war								\
 									\
 	__##pfx##flush_prologue						\
 									\
 	while (1) {							\
+		war2							\
 		prot##cache_op(hitop, addr);				\
 		if (addr == aend)					\
 			break;						\
@@ -431,13 +500,13 @@ static inline void prot##blast_##pfx##ca
 	__##pfx##flush_epilogue						\
 }
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, BCM4710_PROTECTED_FILL_TLB(addr); BCM4710_PROTECTED_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_,, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_,, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D,, BCM4710_FILL_TLB(addr); BCM4710_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD,,, )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D,,,BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD,,, )
 
 #endif /* _ASM_R4KCACHE_H */
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -359,6 +359,10 @@
 	.macro	RESTORE_SP_AND_RET
 	LONG_L	sp, PT_R29(sp)
 	.set	mips3
+#ifdef CONFIG_BCM47XX
+	nop
+	nop
+#endif
 	eret
 	.set	mips0
 	.endm
This page took 0.064123 seconds and 5 git commands to generate.