diff -urN linux.old/arch/mips/kernel/entry.S linux.dev/arch/mips/kernel/entry.S
--- linux.old/arch/mips/kernel/entry.S 2005-06-26 16:27:01.000000000 +0200
+++ linux.dev/arch/mips/kernel/entry.S 2005-06-29 20:24:54.000000000 +0200
* and R4400 SC and MC versions.
NESTED(except_vec3_generic, 0, sp)
#if R5432_CP0_INTERRUPT_WAR
diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
--- linux.old/arch/mips/mm/c-r4k.c 2005-06-26 16:27:01.000000000 +0200
+++ linux.dev/arch/mips/mm/c-r4k.c 2005-06-30 22:24:29.000000000 +0200
#include <linux/bitops.h>
+#ifdef CONFIG_BCM4710
+#include "../bcm947xx/include/typedefs.h"
+#include "../bcm947xx/include/sbconfig.h"
+#include <asm/paccess.h>
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
.bc_inv = (void *)no_sc_noop
+static int bcm4710 = 0;
struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
switch (current_cpu_data.cputype) {
* Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
* only flush the primary caches but R10000 and R12000 behave sane ...
- if (current_cpu_data.cputype == CPU_R4000SC ||
+ if (!bcm4710 && (current_cpu_data.cputype == CPU_R4000SC ||
current_cpu_data.cputype == CPU_R4000MC ||
current_cpu_data.cputype == CPU_R4400SC ||
- current_cpu_data.cputype == CPU_R4400MC)
+ current_cpu_data.cputype == CPU_R4400MC))
unsigned long ic_lsize = current_cpu_data.icache.linesz;
unsigned long addr, aend;
+ addr = start & ~(dc_lsize - 1);
+ aend = (end - 1) & ~(dc_lsize - 1);
if (!cpu_has_ic_fills_f_dc) {
if (end - start > dcache_size)
- addr = start & ~(dc_lsize - 1);
- aend = (end - 1) & ~(dc_lsize - 1);
+ BCM4710_PROTECTED_FILL_TLB(addr);
+ BCM4710_PROTECTED_FILL_TLB(aend);
/* Hit_Writeback_Inv_D */
if (end - start > icache_size)
- addr = start & ~(ic_lsize - 1);
- aend = (end - 1) & ~(ic_lsize - 1);
/* Hit_Invalidate_I */
protected_flush_icache_line(addr);
if (cpu_has_subset_pcaches) {
unsigned long addr = (unsigned long) page_address(page);
- r4k_blast_scache_page(addr);
+ r4k_blast_scache_page(addr);
ClearPageDcacheDirty(page);
if (!cpu_has_ic_fills_f_dc) {
unsigned long addr = (unsigned long) page_address(page);
r4k_blast_dcache_page(addr);
ClearPageDcacheDirty(page);
/* Catch bad driver code */
- if (cpu_has_subset_pcaches) {
+ if (!bcm4710 && cpu_has_subset_pcaches) {
unsigned long sc_lsize = current_cpu_data.scache.linesz;
if (size >= scache_size) {
R4600_HIT_CACHEOP_WAR_IMPL;
a = addr & ~(dc_lsize - 1);
end = (addr + size - 1) & ~(dc_lsize - 1);
+ BCM4710_FILL_TLB(a);
+ BCM4710_FILL_TLB(end);
flush_dcache_line(a); /* Hit_Writeback_Inv_D */
/* Catch bad driver code */
- if (cpu_has_subset_pcaches) {
+ if (!bcm4710 && (cpu_has_subset_pcaches)) {
unsigned long sc_lsize = current_cpu_data.scache.linesz;
if (size >= scache_size) {
R4600_HIT_CACHEOP_WAR_IMPL;
a = addr & ~(dc_lsize - 1);
end = (addr + size - 1) & ~(dc_lsize - 1);
+ BCM4710_FILL_TLB(a);
+ BCM4710_FILL_TLB(end);
flush_dcache_line(a); /* Hit_Writeback_Inv_D */
unsigned long dc_lsize = current_cpu_data.dcache.linesz;
R4600_HIT_CACHEOP_WAR_IMPL;
+ BCM4710_PROTECTED_FILL_TLB(addr);
+ BCM4710_PROTECTED_FILL_TLB(addr + 4);
protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
protected_flush_icache_line(addr & ~(ic_lsize - 1));
if (MIPS4K_ICACHE_REFILL_WAR) {
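
The hunks above all follow one pattern: before issuing a Hit-type cache operation, the patch touches the first and last affected addresses so their TLB entries are resident when the cacheop runs (BCM4710_PROTECTED_FILL_TLB uses a bus-error-protected load for addresses that may not be mapped). A minimal sketch of that pattern, using the macros and flush_dcache_line() from the r4kcache.h hunk later in this patch; the function name flush_one_range() is hypothetical:

#include <asm/r4kcache.h>

static void flush_one_range(unsigned long start, unsigned long end,
			    unsigned long dc_lsize)
{
	unsigned long addr = start & ~(dc_lsize - 1);
	unsigned long aend = (end - 1) & ~(dc_lsize - 1);

	/* Touch both ends first so the mappings are in the TLB. */
	BCM4710_FILL_TLB(addr);
	BCM4710_FILL_TLB(aend);

	while (1) {
		flush_dcache_line(addr);	/* Hit_Writeback_Inv_D */
		if (addr == aend)
			break;
		addr += dc_lsize;
	}
}
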
@@ -986,10 +1003,12 @@
- probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
- sc_present = probe_scache_kseg1(config);
- c->options |= MIPS_CPU_CACHE_CDEX_S;
+ probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
+ sc_present = probe_scache_kseg1(config);
+ c->options |= MIPS_CPU_CACHE_CDEX_S;
@@ -1041,6 +1060,19 @@
static inline void coherency_setup(void)
change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
+#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) || defined(CONFIG_BCM5365)
+ if (BCM330X(current_cpu_data.processor_id)) {
+ cm = read_c0_diag();
+ /* Enable icache */
+ /* Enable dcache */
* c0_status.cu=0 specifies that updates by the sc instruction use
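
The coherency_setup() addition reads the Broadcom-specific CP0 diagnostic register and turns the instruction and data caches on for BCM330x cores; the actual enable bits are not visible in the lines shown. A hedged sketch of the read-modify-write sequence, where BRCM_IC_ENABLE and BRCM_DC_ENABLE are hypothetical placeholders for those bits and write_c0_diag() is assumed to exist alongside the read_c0_diag() used above:

static inline void bcm330x_cache_enable(void)
{
	u32 cm = read_c0_diag();

	cm |= BRCM_IC_ENABLE;	/* Enable icache (bit position assumed) */
	cm |= BRCM_DC_ENABLE;	/* Enable dcache (bit position assumed) */
	write_c0_diag(cm);	/* assumed counterpart of read_c0_diag() */
}
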
@@ -1062,6 +1094,42 @@
+#ifdef CONFIG_BCM4704
+static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
+ unsigned long ic_lsize = current_cpu_data.icache.linesz;
+ for (i = 0; i < nbytes; i += ic_lsize)
+ fill_icache_line((addr + i));
+ * This must be run from the cache on 4704A0
+ * so there are no mips core BIU ops in progress
+ * when the PFC is enabled.
+#define PFC_CR0 0xff400000 /* control reg 0 */
+#define PFC_CR1 0xff400004 /* control reg 1 */
+static void __init enable_pfc(u32 mode)
+ *(volatile u32 *)PFC_CR1 = 0xffff0000;
+ *(volatile u32 *)PFC_CR0 = mode;
+void check_enable_mips_pfc(int val)
+ /* enable prefetch cache */
+ if (BCM330X(current_cpu_data.processor_id)
+ && (read_c0_diag() & (1 << 29))) {
+ mips32_icache_fill((unsigned long) &enable_pfc, 64);
void __init ld_mmu_r4xx0(void)
extern void build_clear_page(void);
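
check_enable_mips_pfc() only enables the prefetch cache when a BCM330x core advertises it in c0_diag bit 29, and it first pulls enable_pfc() into the I-cache so that no instruction fetches hit the bus while the PFC control registers are written. A sketch of that call sequence, assuming 64 bytes cover enable_pfc() (as the patch itself assumes); pfc_enable_sketch() and its pfc_mode argument are hypothetical, and ld_mmu_r4xx0() passes 0x15 further down:

static void __init pfc_enable_sketch(u32 pfc_mode)
{
	/* Prefill the I-cache lines holding enable_pfc() ... */
	mips32_icache_fill((unsigned long) &enable_pfc, 64);
	/* ... then run it from cache, keeping the core's BIU quiet
	 * while the PFC control registers are being written. */
	enable_pfc(pfc_mode);
}
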
@@ -1073,6 +1141,11 @@
memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80);
+ if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & PRID_REV_MASK) == 0)
@@ -1117,47 +1190,9 @@
-#ifdef CONFIG_BCM4704
-static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
- unsigned long ic_lsize = current_cpu_data.icache.linesz;
- for (i = 0; i < nbytes; i += ic_lsize)
- fill_icache_line((addr + i));
- * This must be run from the cache on 4704A0
- * so there are no mips core BIU ops in progress
- * when the PFC is enabled.
-#define PFC_CR0 0xff400000 /* control reg 0 */
-#define PFC_CR1 0xff400004 /* control reg 1 */
-static void __init enable_pfc(u32 mode)
- *(volatile u32 *)PFC_CR1 = 0xffff0000;
- *(volatile u32 *)PFC_CR0 = mode;
-void check_enable_mips_pfc(int val)
#ifdef CONFIG_BCM4704
- struct cpuinfo_mips *c = &current_cpu_data;
- /* enable prefetch cache */
- if (((c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) == PRID_IMP_BCM3302)
- && (read_c0_diag() & (1 << 29))) {
- mips32_icache_fill((unsigned long) &enable_pfc, 64);
+ check_enable_mips_pfc(0x15);
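
Taken together, the c-r4k.c changes introduce the bcm4710 flag and use it to keep secondary-cache paths away from the BCM4710, which has no usable L2: the R4000SC/R4400SC whole-cache kludge and the cpu_has_subset_pcaches branches in the DMA cache routines are gated on !bcm4710, and the PFC setup formerly duplicated at the end of ld_mmu_r4xx0() is replaced by a single check_enable_mips_pfc(0x15) call. A minimal sketch of the guard shape, with dma_cache_sketch() as a hypothetical stand-in for the real DMA cache routines:

static void dma_cache_sketch(unsigned long addr, unsigned long size)
{
	if (!bcm4710 && cpu_has_subset_pcaches) {
		/* L2 present and inclusive: secondary-cache ops go here. */
		return;
	}
	/* Otherwise the routine falls through to D-cache line ops,
	 * preceded by BCM4710_FILL_TLB() on both ends of the range. */
}
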
diff -urN linux.old/arch/mips/mm/tlb-r4k.c linux.dev/arch/mips/mm/tlb-r4k.c
--- linux.old/arch/mips/mm/tlb-r4k.c 2005-06-26 16:24:26.000000000 +0200
+++ linux.dev/arch/mips/mm/tlb-r4k.c 2005-06-29 20:29:16.000000000 +0200
old_ctx = read_c0_entryhi();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
entry = read_c0_wired();
write_c0_index(entry);
write_c0_entryhi(KSEG0 + idx*0x2000);
write_c0_entryhi(oldpid);
write_c0_entryhi(oldpid);
local_irq_restore(flags);
pmdp = pmd_offset(pgdp, address);
idx = read_c0_index();
ptep = pte_offset(pmdp, address);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
write_c0_entrylo0(ptep->pte_high);
write_c0_entryhi(pid);
local_irq_restore(flags);
write_c0_index(temp_tlb_entry);
write_c0_pagemask(pagemask);
write_c0_entryhi(entryhi);
write_c0_entrylo0(entrylo0);
diff -urN linux.old/arch/mips/mm/tlbex-mips32.S linux.dev/arch/mips/mm/tlbex-mips32.S
--- linux.old/arch/mips/mm/tlbex-mips32.S 2005-06-26 16:27:01.000000000 +0200
+++ linux.dev/arch/mips/mm/tlbex-mips32.S 2005-06-29 20:24:54.000000000 +0200
LEAF(except_vec0_r4000)
+#ifdef CONFIG_BCM4704
diff -urN linux.old/include/asm-mips/r4kcache.h linux.dev/include/asm-mips/r4kcache.h
--- linux.old/include/asm-mips/r4kcache.h 2005-06-26 16:27:01.000000000 +0200
+++ linux.dev/include/asm-mips/r4kcache.h 2005-06-30 22:39:42.000000000 +0200
#include <asm/cacheops.h>
+#ifdef CONFIG_BCM4710
+#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#define BCM4710_DUMMY_RREG()
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
#define cache_op(op,addr) \
__asm__ __volatile__( \
" .set noreorder \n" \
static inline void flush_dcache_line_indexed(unsigned long addr)
+ BCM4710_DUMMY_RREG();
cache_op(Index_Writeback_Inv_D, addr);
static inline void flush_dcache_line(unsigned long addr)
+ BCM4710_DUMMY_RREG();
cache_op(Hit_Writeback_Inv_D, addr);
static inline void protected_writeback_dcache_line(unsigned long addr)
+ BCM4710_DUMMY_RREG();
__asm__ __volatile__(
unsigned long ws, addr;
for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
+ for (addr = start; addr < end; addr += 0x200) {
+ BCM4710_DUMMY_RREG();
cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
static inline void blast_dcache16_page(unsigned long page)
unsigned long start = page;
unsigned long end = start + PAGE_SIZE;
+ BCM4710_FILL_TLB(start);
+ BCM4710_DUMMY_RREG();
cache16_unroll32(start,Hit_Writeback_Inv_D);
} while (start < end);
unsigned long ws, addr;
for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
+ for (addr = start; addr < end; addr += 0x200) {
+ BCM4710_DUMMY_RREG();
cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
static inline void blast_icache16(void)
unsigned long start = page;
unsigned long end = start + PAGE_SIZE;
+ BCM4710_FILL_TLB(start);
cache16_unroll32(start,Hit_Invalidate_I);
static inline void blast_dcache32(void)
unsigned long start = KSEG0;
unsigned long ws, addr;
for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
+ for (addr = start; addr < end; addr += 0x400) {
+ BCM4710_DUMMY_RREG();
cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
static inline void blast_dcache32_page(unsigned long page)
unsigned long start = page;
unsigned long end = start + PAGE_SIZE;
+ BCM4710_FILL_TLB(start);
+ BCM4710_DUMMY_RREG();
cache32_unroll32(start,Hit_Writeback_Inv_D);
} while (start < end);
unsigned long ws, addr;
for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
+ for (addr = start; addr < end; addr += 0x400) {
+ BCM4710_DUMMY_RREG();
cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
static inline void blast_icache32(void)
unsigned long start = page;
unsigned long end = start + PAGE_SIZE;
+ BCM4710_FILL_TLB(start);
cache32_unroll32(start,Hit_Invalidate_I);
unsigned long start = page;
unsigned long end = start + PAGE_SIZE;
+ BCM4710_FILL_TLB(start);
cache64_unroll32(start,Hit_Invalidate_I);
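
The r4kcache.h hunk is the core of the workaround: on CONFIG_BCM4710 builds, every data-cache operation is preceded by BCM4710_DUMMY_RREG(), an uncached read of the sbimstate register in SiliconBackplane config space (the KSEG1 mapping of SB_ENUM_BASE + SBCONFIGOFF), and the page/range loops touch the target address first with BCM4710_FILL_TLB(); on other platforms the macros compile away to nothing. The patch does not document the exact erratum, but the dummy backplane read presumably drains outstanding bus activity before the cacheop is issued. A sketch of what one wrapped operation looks like after preprocessing, with bcm4710_safe_dcache_flush() as a hypothetical name:

static inline void bcm4710_safe_dcache_flush(unsigned long addr)
{
	/* Uncached read of sbimstate via KSEG1; result is discarded,
	 * the side effect of the bus access is what matters here. */
	BCM4710_DUMMY_RREG();
	cache_op(Hit_Writeback_Inv_D, addr);
}
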
diff -urN linux.old/include/asm-mips/stackframe.h linux.dev/include/asm-mips/stackframe.h
--- linux.old/include/asm-mips/stackframe.h 2005-06-26 16:27:01.000000000 +0200
+++ linux.dev/include/asm-mips/stackframe.h 2005-06-30 19:04:46.000000000 +0200
+#elif defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704)
+#define RESTORE_SOME \
+ mfc0 t0, CP0_STATUS; \
+ mtc0 t0, CP0_STATUS; \
+ lw v0, PT_STATUS(sp); \
+ ori v1, v0, ST0_IE; \
+ xori v1, v1, ST0_IE; \
+ mtc0 v1, CP0_STATUS; \
+ mtc0 v0, CP0_STATUS; \
+ lw v1, PT_EPC(sp); \
+ mtc0 v1, CP0_EPC; \
+ lw $31, PT_R31(sp); \
+ lw $28, PT_R28(sp); \
+ lw $25, PT_R25(sp); \
+ lw $7, PT_R7(sp); \
+ lw $6, PT_R6(sp); \
+ lw $5, PT_R5(sp); \
+ lw $4, PT_R4(sp); \
+ lw $3, PT_R3(sp); \
+#define RESTORE_SP_AND_RET \
+ lw sp, PT_R29(sp); \
#define RESTORE_SOME \
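
The stackframe.h hunk gives BCM4710/BCM4704 their own restore sequence. The notable part is the ori/xori pair on the saved status word: CP0_STATUS is written twice, first with ST0_IE masked off and then with the full saved value, so the interrupt-enable bit is flipped in a write of its own rather than together with the other status fields, presumably to dodge a status-update hazard on these cores. The same ordering rendered as a C sketch for clarity (restore_status_sketch() is a hypothetical helper; the real code is the assembler macro above, and write_c0_status()/ST0_IE come from <asm/mipsregs.h>):

static inline void restore_status_sketch(unsigned long saved_status)
{
	unsigned long masked = (saved_status | ST0_IE) ^ ST0_IE;

	write_c0_status(masked);	/* same value, but with IE cleared */
	write_c0_status(saved_status);	/* then the real saved status */
}
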