1 diff -urN linux.old/arch/mips/kernel/entry.S linux.dev/arch/mips/kernel/entry.S
2 --- linux.old/arch/mips/kernel/entry.S 2005-07-04 23:39:26.000000000 +0200
3 +++ linux.dev/arch/mips/kernel/entry.S 2005-07-05 14:33:14.000000000 +0200
5 * and R4400 SC and MC versions.
7 NESTED(except_vec3_generic, 0, sp)
12 #if R5432_CP0_INTERRUPT_WAR
15 diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
16 --- linux.old/arch/mips/mm/c-r4k.c 2005-07-04 23:39:26.000000000 +0200
17 +++ linux.dev/arch/mips/mm/c-r4k.c 2005-07-05 15:11:49.000000000 +0200
20 #include <linux/bitops.h>
22 +#ifdef CONFIG_BCM4710
23 +#include "../bcm947xx/include/typedefs.h"
24 +#include "../bcm947xx/include/sbconfig.h"
25 +#include <asm/paccess.h>
28 #include <asm/bcache.h>
29 #include <asm/bootinfo.h>
30 #include <asm/cacheops.h>
32 .bc_inv = (void *)no_sc_noop
35 +static int bcm4710 = 0;
36 struct bcache_ops *bcops = &no_sc_ops;
38 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
40 static inline void r4k_blast_dcache_page_setup(void)
42 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
47 + r4k_blast_dcache_page = blast_dcache_page;
48 + else if (dc_lsize == 16)
49 r4k_blast_dcache_page = blast_dcache16_page;
50 else if (dc_lsize == 32)
51 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
54 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
58 + r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
59 + else if (dc_lsize == 16)
60 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
61 else if (dc_lsize == 32)
62 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
65 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
69 + r4k_blast_dcache = blast_dcache;
70 + else if (dc_lsize == 16)
71 r4k_blast_dcache = blast_dcache16;
72 else if (dc_lsize == 32)
73 r4k_blast_dcache = blast_dcache32;
79 switch (current_cpu_data.cputype) {
83 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
84 * only flush the primary caches but R10000 and R12000 behave sane ...
86 - if (current_cpu_data.cputype == CPU_R4000SC ||
87 + if (!bcm4710 && (current_cpu_data.cputype == CPU_R4000SC ||
88 current_cpu_data.cputype == CPU_R4000MC ||
89 current_cpu_data.cputype == CPU_R4400SC ||
90 - current_cpu_data.cputype == CPU_R4400MC)
91 + current_cpu_data.cputype == CPU_R4400MC))
96 unsigned long ic_lsize = current_cpu_data.icache.linesz;
97 unsigned long addr, aend;
99 + addr = start & ~(dc_lsize - 1);
100 + aend = (end - 1) & ~(dc_lsize - 1);
102 if (!cpu_has_ic_fills_f_dc) {
103 if (end - start > dcache_size)
106 - addr = start & ~(dc_lsize - 1);
107 - aend = (end - 1) & ~(dc_lsize - 1);
108 + BCM4710_PROTECTED_FILL_TLB(addr);
109 + BCM4710_PROTECTED_FILL_TLB(aend);
112 /* Hit_Writeback_Inv_D */
114 if (end - start > icache_size)
117 - addr = start & ~(ic_lsize - 1);
118 - aend = (end - 1) & ~(ic_lsize - 1);
120 /* Hit_Invalidate_I */
121 protected_flush_icache_line(addr);
123 if (cpu_has_subset_pcaches) {
124 unsigned long addr = (unsigned long) page_address(page);
126 - r4k_blast_scache_page(addr);
128 + r4k_blast_scache_page(addr);
129 ClearPageDcacheDirty(page);
134 if (!cpu_has_ic_fills_f_dc) {
135 unsigned long addr = (unsigned long) page_address(page);
137 r4k_blast_dcache_page(addr);
138 ClearPageDcacheDirty(page);
141 /* Catch bad driver code */
144 - if (cpu_has_subset_pcaches) {
145 + if (!bcm4710 && cpu_has_subset_pcaches) {
146 unsigned long sc_lsize = current_cpu_data.scache.linesz;
148 if (size >= scache_size) {
150 R4600_HIT_CACHEOP_WAR_IMPL;
151 a = addr & ~(dc_lsize - 1);
152 end = (addr + size - 1) & ~(dc_lsize - 1);
153 + BCM4710_FILL_TLB(a);
154 + BCM4710_FILL_TLB(end);
156 flush_dcache_line(a); /* Hit_Writeback_Inv_D */
159 /* Catch bad driver code */
162 - if (cpu_has_subset_pcaches) {
163 + if (!bcm4710 && (cpu_has_subset_pcaches)) {
164 unsigned long sc_lsize = current_cpu_data.scache.linesz;
166 if (size >= scache_size) {
168 R4600_HIT_CACHEOP_WAR_IMPL;
169 a = addr & ~(dc_lsize - 1);
170 end = (addr + size - 1) & ~(dc_lsize - 1);
171 + BCM4710_FILL_TLB(a);
172 + BCM4710_FILL_TLB(end);
174 flush_dcache_line(a); /* Hit_Writeback_Inv_D */
177 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
179 R4600_HIT_CACHEOP_WAR_IMPL;
180 + BCM4710_PROTECTED_FILL_TLB(addr);
181 + BCM4710_PROTECTED_FILL_TLB(addr + 4);
182 protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
183 protected_flush_icache_line(addr & ~(ic_lsize - 1));
184 if (MIPS4K_ICACHE_REFILL_WAR) {
185 @@ -986,10 +1009,12 @@
189 - probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
190 - sc_present = probe_scache_kseg1(config);
192 - c->options |= MIPS_CPU_CACHE_CDEX_S;
194 + probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
195 + sc_present = probe_scache_kseg1(config);
197 + c->options |= MIPS_CPU_CACHE_CDEX_S;
202 @@ -1041,6 +1066,19 @@
203 static inline void coherency_setup(void)
205 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
207 +#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) || defined(CONFIG_BCM5365)
208 + if (BCM330X(current_cpu_data.processor_id)) {
211 + cm = read_c0_diag();
212 + /* Enable icache */
214 + /* Enable dcache */
221 * c0_status.cu=0 specifies that updates by the sc instruction use
222 @@ -1062,6 +1100,42 @@
226 +#ifdef CONFIG_BCM4704
227 +static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
229 + unsigned long ic_lsize = current_cpu_data.icache.linesz;
231 + for (i = 0; i < nbytes; i += ic_lsize)
232 + fill_icache_line((addr + i));
236 + * This must be run from the cache on 4704A0
237 + * so there are no mips core BIU ops in progress
238 + * when the PFC is enabled.
240 +#define PFC_CR0 0xff400000 /* control reg 0 */
241 +#define PFC_CR1 0xff400004 /* control reg 1 */
242 +static void __init enable_pfc(u32 mode)
245 + *(volatile u32 *)PFC_CR1 = 0xffff0000;
248 + *(volatile u32 *)PFC_CR0 = mode;
251 +void check_enable_mips_pfc(int val)
253 + /* enable prefetch cache */
254 + if (BCM330X(current_cpu_data.processor_id)
255 + && (read_c0_diag() & (1 << 29))) {
256 + mips32_icache_fill((unsigned long) &enable_pfc, 64);
262 void __init ld_mmu_r4xx0(void)
264 extern void build_clear_page(void);
265 @@ -1073,6 +1147,11 @@
266 memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
267 memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80);
269 + if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & PRID_REV_MASK) == 0)
277 @@ -1117,47 +1196,9 @@
283 -#ifdef CONFIG_BCM4704
284 -static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
286 - unsigned long ic_lsize = current_cpu_data.icache.linesz;
288 - for (i = 0; i < nbytes; i += ic_lsize)
289 - fill_icache_line((addr + i));
293 - * This must be run from the cache on 4704A0
294 - * so there are no mips core BIU ops in progress
295 - * when the PFC is enabled.
297 -#define PFC_CR0 0xff400000 /* control reg 0 */
298 -#define PFC_CR1 0xff400004 /* control reg 1 */
299 -static void __init enable_pfc(u32 mode)
302 - *(volatile u32 *)PFC_CR1 = 0xffff0000;
305 - *(volatile u32 *)PFC_CR0 = mode;
310 -void check_enable_mips_pfc(int val)
314 #ifdef CONFIG_BCM4704
315 - struct cpuinfo_mips *c = &current_cpu_data;
317 - /* enable prefetch cache */
318 - if (((c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) == PRID_IMP_BCM3302)
319 - && (read_c0_diag() & (1 << 29))) {
320 - mips32_icache_fill((unsigned long) &enable_pfc, 64);
323 + check_enable_mips_pfc(0x15);
327 diff -urN linux.old/arch/mips/mm/tlb-r4k.c linux.dev/arch/mips/mm/tlb-r4k.c
328 --- linux.old/arch/mips/mm/tlb-r4k.c 2005-07-04 23:39:26.000000000 +0200
329 +++ linux.dev/arch/mips/mm/tlb-r4k.c 2005-07-05 14:33:14.000000000 +0200
331 old_ctx = read_c0_entryhi();
332 write_c0_entrylo0(0);
333 write_c0_entrylo1(0);
336 entry = read_c0_wired();
339 write_c0_index(entry);
347 write_c0_entryhi(KSEG0 + idx*0x2000);
353 write_c0_entryhi(oldpid);
359 write_c0_entryhi(oldpid);
360 local_irq_restore(flags);
363 pmdp = pmd_offset(pgdp, address);
364 idx = read_c0_index();
365 ptep = pte_offset(pmdp, address);
367 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
368 write_c0_entrylo0(ptep->pte_high);
373 write_c0_entryhi(pid);
375 local_irq_restore(flags);
381 write_c0_index(temp_tlb_entry);
383 write_c0_pagemask(pagemask);
384 write_c0_entryhi(entryhi);
385 write_c0_entrylo0(entrylo0);
386 diff -urN linux.old/arch/mips/mm/tlbex-mips32.S linux.dev/arch/mips/mm/tlbex-mips32.S
387 --- linux.old/arch/mips/mm/tlbex-mips32.S 2005-07-04 23:39:26.000000000 +0200
388 +++ linux.dev/arch/mips/mm/tlbex-mips32.S 2005-07-05 14:33:14.000000000 +0200
391 LEAF(except_vec0_r4000)
393 +#ifdef CONFIG_BCM4704
399 diff -urN linux.old/include/asm-mips/r4kcache.h linux.dev/include/asm-mips/r4kcache.h
400 --- linux.old/include/asm-mips/r4kcache.h 2005-07-04 23:39:26.000000000 +0200
401 +++ linux.dev/include/asm-mips/r4kcache.h 2005-07-05 15:13:56.000000000 +0200
404 #include <asm/cacheops.h>
406 +#ifdef CONFIG_BCM4710
407 +#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)
409 +#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
410 +#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
412 +#define BCM4710_DUMMY_RREG()
414 +#define BCM4710_FILL_TLB(addr)
415 +#define BCM4710_PROTECTED_FILL_TLB(addr)
418 #define cache_op(op,addr) \
419 __asm__ __volatile__( \
420 " .set noreorder \n" \
423 static inline void flush_dcache_line_indexed(unsigned long addr)
425 + BCM4710_DUMMY_RREG();
426 cache_op(Index_Writeback_Inv_D, addr);
431 static inline void flush_dcache_line(unsigned long addr)
433 + BCM4710_DUMMY_RREG();
434 cache_op(Hit_Writeback_Inv_D, addr);
439 static inline void protected_writeback_dcache_line(unsigned long addr)
441 + BCM4710_DUMMY_RREG();
442 __asm__ __volatile__(
449 +#define cache_unroll(base,op) \
450 + __asm__ __volatile__(" \
461 +static inline void blast_dcache(void)
463 + unsigned long start = KSEG0;
464 + unsigned long end = start + current_cpu_data.dcache.waysize;
466 + while(start < end) {
467 + BCM4710_DUMMY_RREG();
468 + cache_unroll(start,Index_Writeback_Inv_D);
469 + start += current_cpu_data.dcache.linesz;
473 +static inline void blast_dcache_page(unsigned long page)
475 + unsigned long start = page;
476 + unsigned long end = start + PAGE_SIZE;
478 + BCM4710_FILL_TLB(start);
480 + BCM4710_DUMMY_RREG();
481 + cache_unroll(start,Hit_Writeback_Inv_D);
482 + start += current_cpu_data.dcache.linesz;
483 + } while (start < end);
486 +static inline void blast_dcache_page_indexed(unsigned long page)
488 + unsigned long start = page;
489 + unsigned long end = start + PAGE_SIZE;
490 + unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
491 + unsigned long ws_end = current_cpu_data.dcache.ways <<
492 + current_cpu_data.dcache.waybit;
493 + unsigned long ws, addr;
495 + for (ws = 0; ws < ws_end; ws += ws_inc)
496 + for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
497 + BCM4710_DUMMY_RREG();
498 + cache_unroll(addr,Index_Writeback_Inv_D);
502 static inline void blast_dcache16(void)
504 unsigned long start = KSEG0;
506 unsigned long ws, addr;
508 for (ws = 0; ws < ws_end; ws += ws_inc)
509 - for (addr = start; addr < end; addr += 0x200)
510 + for (addr = start; addr < end; addr += 0x200) {
511 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
515 static inline void blast_dcache16_page(unsigned long page)
517 unsigned long ws, addr;
519 for (ws = 0; ws < ws_end; ws += ws_inc)
520 - for (addr = start; addr < end; addr += 0x200)
521 + for (addr = start; addr < end; addr += 0x200) {
522 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
526 static inline void blast_icache16(void)
528 unsigned long start = page;
529 unsigned long end = start + PAGE_SIZE;
531 + BCM4710_FILL_TLB(start);
533 cache16_unroll32(start,Hit_Invalidate_I);
540 static inline void blast_dcache32(void)
542 unsigned long start = KSEG0;
544 unsigned long ws, addr;
546 for (ws = 0; ws < ws_end; ws += ws_inc)
547 - for (addr = start; addr < end; addr += 0x400)
548 + for (addr = start; addr < end; addr += 0x400) {
549 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
553 static inline void blast_dcache32_page(unsigned long page)
555 unsigned long ws, addr;
557 for (ws = 0; ws < ws_end; ws += ws_inc)
558 - for (addr = start; addr < end; addr += 0x400)
559 + for (addr = start; addr < end; addr += 0x400) {
560 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
564 static inline void blast_icache32(void)
566 unsigned long start = page;
567 unsigned long end = start + PAGE_SIZE;
569 + BCM4710_FILL_TLB(start);
571 cache32_unroll32(start,Hit_Invalidate_I);
574 unsigned long start = page;
575 unsigned long end = start + PAGE_SIZE;
577 + BCM4710_FILL_TLB(start);
579 cache64_unroll32(start,Hit_Invalidate_I);
581 diff -urN linux.old/include/asm-mips/stackframe.h linux.dev/include/asm-mips/stackframe.h
582 --- linux.old/include/asm-mips/stackframe.h 2005-07-04 23:39:26.000000000 +0200
583 +++ linux.dev/include/asm-mips/stackframe.h 2005-07-05 14:33:14.000000000 +0200
588 +#elif defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704)
590 +#define RESTORE_SOME \
593 + mfc0 t0, CP0_STATUS; \
597 + mtc0 t0, CP0_STATUS; \
600 + lw v0, PT_STATUS(sp); \
604 + ori v1, v0, ST0_IE; \
605 + xori v1, v1, ST0_IE; \
606 + mtc0 v1, CP0_STATUS; \
607 + mtc0 v0, CP0_STATUS; \
608 + lw v1, PT_EPC(sp); \
609 + mtc0 v1, CP0_EPC; \
610 + lw $31, PT_R31(sp); \
611 + lw $28, PT_R28(sp); \
612 + lw $25, PT_R25(sp); \
613 + lw $7, PT_R7(sp); \
614 + lw $6, PT_R6(sp); \
615 + lw $5, PT_R5(sp); \
616 + lw $4, PT_R4(sp); \
617 + lw $3, PT_R3(sp); \
620 +#define RESTORE_SP_AND_RET \
621 + lw sp, PT_R29(sp); \
630 #define RESTORE_SOME \