avr32: fix kernel panic when using ondemand cpufreq governor
openwrt.git: target/linux/brcm47xx/patches-2.6.25/150-cpu_fixes.patch
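Cache workarounds for the BCM4710 (BCM47xx) core, enabled at run time on
rev A0 silicon (see the "Enabling BCM4710A0 cache workarounds" printk):

- pad the generic exception vector and the eret return path with nops,
- issue a dummy uncached read of an SSB register (BCM4710_DUMMY_RREG)
  before hit-type D-cache operations,
- pre-fill the TLB (BCM4710_FILL_TLB / BCM4710_PROTECTED_FILL_TLB) before
  cache operations that take a virtual address,
- install non-unrolled blast_dcache* variants when the workaround is active,
- run coherency_setup() through its uncached KSEG1 address, and enable the
  I- and D-caches on BCM3302 via the diagnostic register.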
Index: linux-2.6.25.4/arch/mips/kernel/genex.S
===================================================================
--- linux-2.6.25.4.orig/arch/mips/kernel/genex.S
+++ linux-2.6.25.4/arch/mips/kernel/genex.S
@@ -51,6 +51,10 @@ NESTED(except_vec1_generic, 0, sp)
 NESTED(except_vec3_generic, 0, sp)
 	.set	push
 	.set	noat
+#ifdef CONFIG_BCM47XX
+	nop
+	nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
 	mfc0	k0, CP0_INDEX
 #endif
Index: linux-2.6.25.4/arch/mips/mm/c-r4k.c
===================================================================
--- linux-2.6.25.4.orig/arch/mips/mm/c-r4k.c
+++ linux-2.6.25.4/arch/mips/mm/c-r4k.c
@@ -33,6 +33,9 @@
 #include <asm/cacheflush.h> /* for run_uncached() */
 
 
+/* For enabling BCM4710 cache workarounds */
+int bcm4710 = 0;
+
 /*
  * Special Variant of smp_call_function for use by cache functions:
  *
@@ -97,6 +100,9 @@ static void __cpuinit r4k_blast_dcache_p
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page = blast_dcache_page;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -111,6 +117,9 @@ static void __cpuinit r4k_blast_dcache_p
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page_indexed = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -125,6 +134,9 @@ static void __cpuinit r4k_blast_dcache_s
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache = blast_dcache;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -630,6 +642,8 @@ static void local_r4k_flush_cache_sigtra
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
+	BCM4710_PROTECTED_FILL_TLB(addr);
+	BCM4710_PROTECTED_FILL_TLB(addr + 4);
 	if (dc_lsize)
 		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	if (!cpu_icache_snoops_remote_store && scache_size)
@@ -1215,6 +1229,17 @@ static void __cpuinit coherency_setup(vo
 	 * silly idea of putting something else there ...
 	 */
 	switch (current_cpu_type()) {
+	case CPU_BCM3302:
+		{
+			u32 cm;
+			cm = read_c0_diag();
+			/* Enable icache */
+			cm |= (1 << 31);
+			/* Enable dcache */
+			cm |= (1 << 30);
+			write_c0_diag(cm);
+		}
+		break;
 	case CPU_R4000PC:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -1254,6 +1279,15 @@ void __cpuinit r4k_cache_init(void)
 		break;
 	}
 
+	/* Check if special workarounds are required */
+#ifdef CONFIG_BCM47XX
+	if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & 0xff) == 0) {
+		printk("Enabling BCM4710A0 cache workarounds.\n");
+		bcm4710 = 1;
+	} else
+#endif
+	bcm4710 = 0;
+
 	probe_pcache();
 	setup_scache();
 
@@ -1303,5 +1337,13 @@ void __cpuinit r4k_cache_init(void)
 	build_clear_page();
 	build_copy_page();
 	local_r4k___flush_cache_all(NULL);
+#ifdef CONFIG_BCM47XX
+	{
+		static void (*_coherency_setup)(void);
+		_coherency_setup = (void (*)(void)) KSEG1ADDR(coherency_setup);
+		_coherency_setup();
+	}
+#else
 	coherency_setup();
+#endif
 }
Index: linux-2.6.25.4/arch/mips/mm/tlbex.c
===================================================================
--- linux-2.6.25.4.orig/arch/mips/mm/tlbex.c
+++ linux-2.6.25.4/arch/mips/mm/tlbex.c
@@ -677,6 +677,9 @@ static void __cpuinit build_r4000_tlb_re
 		/* No need for uasm_i_nop */
 	}
 
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(&p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
@@ -1084,6 +1087,9 @@ build_r4000_tlbchange_handler_head(u32 *
 					struct uasm_reloc **r, unsigned int pte,
 					unsigned int ptr)
 {
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
 #else
Index: linux-2.6.25.4/include/asm-mips/r4kcache.h
===================================================================
--- linux-2.6.25.4.orig/include/asm-mips/r4kcache.h
+++ linux-2.6.25.4/include/asm-mips/r4kcache.h
@@ -17,6 +17,20 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsmtregs.h>
 
+#ifdef CONFIG_BCM47XX
+#include <asm/paccess.h>
+#include <linux/ssb/ssb.h>
+#define BCM4710_DUMMY_RREG() ((void) *((u8 *) KSEG1ADDR(SSB_ENUM_BASE + SSB_IMSTATE)))
+
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations.  Two issues here:
@@ -150,6 +164,7 @@ static inline void flush_icache_line_ind
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Index_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -169,6 +184,7 @@ static inline void flush_icache_line(uns
 static inline void flush_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -176,6 +192,7 @@ static inline void flush_dcache_line(uns
 static inline void invalidate_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Invalidate_D, addr);
 	__dflush_epilogue
 }
@@ -208,6 +225,7 @@ static inline void flush_scache_line(uns
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	protected_cache_op(Hit_Invalidate_I, addr);
 }
 
@@ -219,6 +237,7 @@ static inline void protected_flush_icach
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	protected_cache_op(Hit_Writeback_Inv_D, addr);
 }
 
@@ -339,8 +358,52 @@ static inline void invalidate_tcache_pag
 		: "r" (base),						\
 		  "i" (op));
 
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+	unsigned long end = (start + dcache_size);
+
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Index_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+
+	BCM4710_FILL_TLB(start);
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Hit_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+		current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+	for (ws = 0; ws < ws_end; ws += ws_inc) {
+		start = page + ws;
+		for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+			BCM4710_DUMMY_RREG();
+			cache_op(Index_Writeback_Inv_D, addr);
+		}
+	}
+}
+
+
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, war) \
 static inline void blast_##pfx##cache##lsize(void)			\
 {									\
 	unsigned long start = INDEX_BASE;				\
@@ -352,6 +415,7 @@ static inline void blast_##pfx##cache##l
 									\
 	__##pfx##flush_prologue						\
 									\
+	war								\
 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
 		for (addr = start; addr < end; addr += lsize * 32)	\
 			cache##lsize##_unroll32(addr|ws, indexop);	\
@@ -366,6 +430,7 @@ static inline void blast_##pfx##cache##l
 									\
 	__##pfx##flush_prologue						\
 									\
+	war								\
 	do {								\
 		cache##lsize##_unroll32(start, hitop);			\
 		start += lsize * 32;					\
@@ -384,6 +449,8 @@ static inline void blast_##pfx##cache##l
 			current_cpu_data.desc.waybit;			\
 	unsigned long ws, addr;						\
 									\
+	war								\
+									\
 	__##pfx##flush_prologue						\
 									\
 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
@@ -393,35 +460,37 @@ static inline void blast_##pfx##cache##l
 	__##pfx##flush_epilogue						\
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, war, war2) \
 static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
 						    unsigned long end)	\
 {									\
 	unsigned long lsize = cpu_##desc##_line_size();			\
 	unsigned long addr = start & ~(lsize - 1);			\
 	unsigned long aend = (end - 1) & ~(lsize - 1);			\
+	war								\
 									\
 	__##pfx##flush_prologue						\
 									\
 	while (1) {							\
+		war2							\
 		prot##cache_op(hitop, addr);				\
 		if (addr == aend)					\
 			break;						\
@@ -431,13 +500,13 @@ static inline void prot##blast_##pfx##ca
 	__##pfx##flush_epilogue						\
 }
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, BCM4710_PROTECTED_FILL_TLB(addr); BCM4710_PROTECTED_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_,, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_,, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D,, BCM4710_FILL_TLB(addr); BCM4710_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD,,, )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D,,,BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD,,, )
 
 #endif /* _ASM_R4KCACHE_H */
Index: linux-2.6.25.4/include/asm-mips/stackframe.h
===================================================================
--- linux-2.6.25.4.orig/include/asm-mips/stackframe.h
+++ linux-2.6.25.4/include/asm-mips/stackframe.h
@@ -359,6 +359,10 @@
 		.macro	RESTORE_SP_AND_RET
 		LONG_L	sp, PT_R29(sp)
 		.set	mips3
+#ifdef CONFIG_BCM47XX
+		nop
+		nop
+#endif
 		eret
 		.set	mips0
 		.endm