+diff -urN linux.old/arch/mips/mm/tlbex-mips32.S linux.dev/arch/mips/mm/tlbex-mips32.S
+--- linux.old/arch/mips/mm/tlbex-mips32.S 2005-05-28 17:42:03.000000000 +0200
++++ linux.dev/arch/mips/mm/tlbex-mips32.S 2005-05-28 21:48:55.000000000 +0200
+@@ -90,6 +90,9 @@
+ .set noat
+ LEAF(except_vec0_r4000)
+ .set mips3
++#ifdef CONFIG_BCM4704
++ nop
++#endif
+ #ifdef CONFIG_SMP
+ mfc0 k1, CP0_CONTEXT
+ la k0, pgd_current
+diff -urN linux.old/arch/mips/mm/pg-r4k.c linux.dev/arch/mips/mm/pg-r4k.c
+--- linux.old/arch/mips/mm/pg-r4k.c 2005-01-19 15:09:29.000000000 +0100
++++ linux.dev/arch/mips/mm/pg-r4k.c 2005-05-28 21:57:52.000000000 +0200
+@@ -180,6 +180,7 @@
+
+ static inline void build_cdex_s(void)
+ {
++#if !defined(CONFIG_BCM4704) && !defined(CONFIG_BCM4710)
+ union mips_instruction mi;
+
+ if ((store_offset & (cpu_scache_line_size() - 1)))
+@@ -192,10 +193,12 @@
+ mi.c_format.simmediate = store_offset;
+
+ emit_instruction(mi);
++#endif
+ }
+
+ static inline void build_cdex_p(void)
+ {
++#if !defined(CONFIG_BCM4704) && !defined(CONFIG_BCM4710)
+ union mips_instruction mi;
+
+ if (store_offset & (cpu_dcache_line_size() - 1))
+@@ -218,6 +221,7 @@
+ mi.c_format.simmediate = store_offset;
+
+ emit_instruction(mi);
++#endif
+ }
+
+ static void __build_store_reg(int reg)
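(The pg-r4k.c hunk above compiles out build_cdex_s() and build_cdex_p() on BCM4704/BCM4710, so the generated clear_page/copy_page code no longer primes destination lines with Create_Dirty_Exclusive cacheops on these SoCs, presumably because those cores do not handle that cacheop reliably. A minimal sketch of what the fallback amounts to, illustrative C only and not the kernel's generated code: each line is allocated by the normal write-allocate path instead.)

    static void clear_page_plain(void *page, unsigned long page_size)
    {
        unsigned long *p = page;
        unsigned long *end = p + page_size / sizeof(*p);

        /* Plain stores; no Create_Dirty_Exclusive_D line priming. */
        while (p < end)
            *p++ = 0;
    }
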
+diff -urN linux.old/include/asm-mips/stackframe.h linux.dev/include/asm-mips/stackframe.h
+--- linux.old/include/asm-mips/stackframe.h 2002-11-29 00:53:15.000000000 +0100
++++ linux.dev/include/asm-mips/stackframe.h 2005-05-28 21:53:03.000000000 +0200
+@@ -172,6 +172,46 @@
+ rfe; \
+ .set pop
+
++#elif defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704)
++
++#define RESTORE_SOME \
++ .set push; \
++ .set reorder; \
++ mfc0 t0, CP0_STATUS; \
++ .set pop; \
++ ori t0, 0x1f; \
++ xori t0, 0x1f; \
++ mtc0 t0, CP0_STATUS; \
++ li v1, 0xff00; \
++ and t0, v1; \
++ lw v0, PT_STATUS(sp); \
++ nor v1, $0, v1; \
++ and v0, v1; \
++ or v0, t0; \
++ ori v1, v0, ST0_IE; \
++ xori v1, v1, ST0_IE; \
++ mtc0 v1, CP0_STATUS; \
++ mtc0 v0, CP0_STATUS; \
++ lw v1, PT_EPC(sp); \
++ mtc0 v1, CP0_EPC; \
++ lw $31, PT_R31(sp); \
++ lw $28, PT_R28(sp); \
++ lw $25, PT_R25(sp); \
++ lw $7, PT_R7(sp); \
++ lw $6, PT_R6(sp); \
++ lw $5, PT_R5(sp); \
++ lw $4, PT_R4(sp); \
++ lw $3, PT_R3(sp); \
++ lw $2, PT_R2(sp)
++
++#define RESTORE_SP_AND_RET \
++ lw sp, PT_R29(sp); \
++ nop; \
++ nop; \
++ .set mips3; \
++ eret; \
++ .set mips0
++
+ #else
+
+ #define RESTORE_SOME \
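(The BCM4710/BCM4704 RESTORE_SOME added above restores CP0_STATUS in two writes: the saved value is first written with ST0_IE forced clear, then rewritten in full, so no interrupt can be accepted while the rest of the frame is still being unwound; RESTORE_SP_AND_RET additionally pads with two nops before eret. A rough C rendering of just the two-step status restore, for illustration only since the real code is the asm macro above, assuming the usual write_c0_status()/ST0_IE definitions from asm/mipsregs.h:)

    static inline void restore_status_two_step(unsigned long saved_status)
    {
        /* Step 1: write the saved value with the interrupt-enable bit forced off. */
        write_c0_status(saved_status & ~ST0_IE);
        /* Step 2: write the full saved value; IE only comes back on this write. */
        write_c0_status(saved_status);
    }
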
+diff -urN linux.old/arch/mips/mm/tlbex-r4k.S linux.dev/arch/mips/mm/tlbex-r4k.S
+--- linux.old/arch/mips/mm/tlbex-r4k.S 2005-05-28 17:42:03.000000000 +0200
++++ linux.dev/arch/mips/mm/tlbex-r4k.S 2005-05-29 15:04:43.000000000 +0200
+@@ -168,6 +168,9 @@
+ .set noat
+ LEAF(except_vec0_r4000)
+ .set mips3
++#ifdef CONFIG_BCM4704
++ nop
++#endif
+ GET_PGD(k0, k1) # get pgd pointer
+ mfc0 k0, CP0_BADVADDR # Get faulting address
+ srl k0, k0, _PGDIR_SHIFT # get pgd only bits
+diff -urN linux.old/arch/mips/kernel/entry.S linux.dev/arch/mips/kernel/entry.S
+--- linux.old/arch/mips/kernel/entry.S 2003-08-25 13:44:40.000000000 +0200
++++ linux.dev/arch/mips/kernel/entry.S 2005-06-01 20:10:36.000000000 +0200
+@@ -100,6 +100,10 @@
+ * and R4400 SC and MC versions.
+ */
+ NESTED(except_vec3_generic, 0, sp)
++#ifdef CONFIG_BCM4710
++ nop
++ nop
++#endif
+ #if R5432_CP0_INTERRUPT_WAR
+ mfc0 k0, CP0_INDEX
+ #endif
+diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
+--- linux.old/arch/mips/mm/c-r4k.c 2005-06-01 18:42:44.000000000 +0200
++++ linux.dev/arch/mips/mm/c-r4k.c 2005-06-01 18:49:07.000000000 +0200
+@@ -14,6 +14,12 @@
+ #include <linux/mm.h>
+ #include <linux/bitops.h>
+
++#ifdef CONFIG_BCM4710
++#include "../bcm947xx/include/typedefs.h"
++#include "../bcm947xx/include/sbconfig.h"
++#include <asm/paccess.h>
++#endif
++
+ #include <asm/bcache.h>
+ #include <asm/bootinfo.h>
+ #include <asm/cacheops.h>
+@@ -390,6 +396,11 @@
+ addr = start & ~(dc_lsize - 1);
+ aend = (end - 1) & ~(dc_lsize - 1);
+
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(addr);
++ BCM4710_FILL_TLB(aend);
++#endif
++
+ while (1) {
+ /* Hit_Writeback_Inv_D */
+ protected_writeback_dcache_line(addr);
+@@ -405,6 +416,10 @@
+ else {
+ addr = start & ~(ic_lsize - 1);
+ aend = (end - 1) & ~(ic_lsize - 1);
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(addr);
++ BCM4710_FILL_TLB(aend);
++#endif
+ while (1) {
+ /* Hit_Invalidate_I */
+ protected_flush_icache_line(addr);
+@@ -487,6 +502,10 @@
+
+ a = addr & ~(sc_lsize - 1);
+ end = (addr + size - 1) & ~(sc_lsize - 1);
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(a);
++ BCM4710_FILL_TLB(end);
++#endif
+ while (1) {
+ flush_scache_line(a); /* Hit_Writeback_Inv_SD */
+ if (a == end)
+@@ -509,6 +528,10 @@
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ a = addr & ~(dc_lsize - 1);
+ end = (addr + size - 1) & ~(dc_lsize - 1);
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(a);
++ BCM4710_FILL_TLB(end);
++#endif
+ while (1) {
+ flush_dcache_line(a); /* Hit_Writeback_Inv_D */
+ if (a == end)
+@@ -537,6 +560,10 @@
+
+ a = addr & ~(sc_lsize - 1);
+ end = (addr + size - 1) & ~(sc_lsize - 1);
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(a);
++ BCM4710_FILL_TLB(end);
++#endif
+ while (1) {
+ flush_scache_line(a); /* Hit_Writeback_Inv_SD */
+ if (a == end)
+@@ -576,6 +603,10 @@
+ unsigned long ic_lsize = current_cpu_data.icache.linesz;
+ unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+
++#ifdef CONFIG_BCM4710
++ BCM4710_PROTECTED_FILL_TLB(addr);
++ BCM4710_PROTECTED_FILL_TLB(addr + 4);
++#endif
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+ protected_flush_icache_line(addr & ~(ic_lsize - 1));
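(The c-r4k.c hunks above touch the first and last line address of each range via BCM4710_FILL_TLB / BCM4710_PROTECTED_FILL_TLB before the hit-type cacheop loops run, apparently so the relevant TLB entries are already resident when the cache instructions execute. A minimal sketch of the same idea as a standalone helper; the name is hypothetical and not part of the patch, and PAGE_MASK/PAGE_SIZE are the usual kernel macros:)

    static inline void prefill_tlb_range(unsigned long start, unsigned long end)
    {
        unsigned long addr;

        /* One throwaway load per page keeps the mapping resident in the TLB. */
        for (addr = start & PAGE_MASK; addr <= end; addr += PAGE_SIZE)
            (void)*(volatile unsigned long *)addr;
    }
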
+diff -urN linux.old/include/asm-mips/r4kcache.h linux.dev/include/asm-mips/r4kcache.h
+--- linux.old/include/asm-mips/r4kcache.h 2005-06-01 18:42:43.000000000 +0200
++++ linux.dev/include/asm-mips/r4kcache.h 2005-06-01 19:07:11.000000000 +0200
+@@ -15,6 +15,25 @@
+ #include <asm/asm.h>
+ #include <asm/cacheops.h>
+
++#ifdef CONFIG_BCM4710
++#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)
++
++#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
++#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
++
++#define cache_op(op,addr) \
++ BCM4710_DUMMY_RREG(); \
++ __asm__ __volatile__( \
++ " .set noreorder \n" \
++ " .set mips3\n\t \n" \
++ " cache %0, %1 \n" \
++ " .set mips0 \n" \
++ " .set reorder" \
++ : \
++ : "i" (op), "m" (*(unsigned char *)(addr)))
++
++#else
++
+ #define cache_op(op,addr) \
+ __asm__ __volatile__( \
+ " .set noreorder \n" \
+@@ -24,6 +43,8 @@
+ " .set reorder" \
+ : \
+ : "i" (op), "m" (*(unsigned char *)(addr)))
++#endif
++
+
+ static inline void flush_icache_line_indexed(unsigned long addr)
+ {
+@@ -32,6 +53,9 @@
+
+ static inline void flush_dcache_line_indexed(unsigned long addr)
+ {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache_op(Index_Writeback_Inv_D, addr);
+ }
+
+@@ -47,6 +71,10 @@
+
+ static inline void flush_dcache_line(unsigned long addr)
+ {
++
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache_op(Hit_Writeback_Inv_D, addr);
+ }
+
+@@ -91,6 +119,9 @@
+ */
+ static inline void protected_writeback_dcache_line(unsigned long addr)
+ {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n"
+@@ -148,8 +179,12 @@
+ unsigned long ws, addr;
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+- for (addr = start; addr < end; addr += 0x200)
++ for (addr = start; addr < end; addr += 0x200) {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
++ }
+ }
+
+ static inline void blast_dcache16_page(unsigned long page)
+@@ -158,6 +193,9 @@
+ unsigned long end = start + PAGE_SIZE;
+
+ do {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache16_unroll32(start,Hit_Writeback_Inv_D);
+ start += 0x200;
+ } while (start < end);
+@@ -173,8 +211,12 @@
+ unsigned long ws, addr;
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+- for (addr = start; addr < end; addr += 0x200)
++ for (addr = start; addr < end; addr += 0x200) {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
++ }
+ }
+
+ static inline void blast_icache16(void)
+@@ -196,7 +238,13 @@
+ unsigned long start = page;
+ unsigned long end = start + PAGE_SIZE;
+
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(start);
++#endif
+ do {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache16_unroll32(start,Hit_Invalidate_I);
+ start += 0x200;
+ } while (start < end);
+@@ -291,8 +339,12 @@
+ unsigned long ws, addr;
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+- for (addr = start; addr < end; addr += 0x400)
++ for (addr = start; addr < end; addr += 0x400) {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
++ }
+ }
+
+ static inline void blast_dcache32_page(unsigned long page)
+@@ -300,7 +352,13 @@
+ unsigned long start = page;
+ unsigned long end = start + PAGE_SIZE;
+
++#ifdef CONFIG_BCM4710
++ __asm__ __volatile__("nop;nop;nop;nop");
++#endif
+ do {
++#ifdef CONFIG_BCM4710
++ BCM4710_DUMMY_RREG();
++#endif
+ cache32_unroll32(start,Hit_Writeback_Inv_D);
+ start += 0x400;
+ } while (start < end);
+@@ -339,6 +397,9 @@
+ unsigned long start = page;
+ unsigned long end = start + PAGE_SIZE;
+
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(start);
++#endif
+ do {
+ cache32_unroll32(start,Hit_Invalidate_I);
+ start += 0x400;
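(In the r4kcache.h hunk above, BCM4710_DUMMY_RREG() performs an uncached KSEG1 read of the Silicon Backplane sbimstate register before each cache operation; an uncached load cannot retire until earlier buffered writes have drained, which appears to be the intent of issuing it immediately before hit- and index-type cacheops. The idiom reduces to a dummy volatile readback, sketched here with a hypothetical register address rather than the sbconfig definitions the patch pulls in:)

    #define EXAMPLE_UNCACHED_REG 0xb8000000UL   /* hypothetical KSEG1 address */

    static inline void drain_writes_by_uncached_readback(void)
    {
        /* The result is discarded; only the ordering side effect of the load matters. */
        (void)*(volatile unsigned long *)EXAMPLE_UNCACHED_REG;
    }
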
+diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
+--- linux.old/arch/mips/mm/c-r4k.c 2005-06-01 18:49:07.000000000 +0200
++++ linux.dev/arch/mips/mm/c-r4k.c 2005-06-03 12:11:13.000000000 +0200
+@@ -51,6 +51,7 @@
+ #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
+ #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x2020)
+
++#ifndef CONFIG_BCM4710
+ #define R4600_HIT_CACHEOP_WAR_IMPL \
+ do { \
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
+@@ -58,11 +59,17 @@
+ if (R4600_V1_HIT_CACHEOP_WAR) \
+ __asm__ __volatile__("nop;nop;nop;nop"); \
+ } while (0)
++#else
++#define R4600_HIT_CACHEOP_WAR_IMPL
++#endif
+
+ static void (* r4k_blast_dcache_page)(unsigned long addr);
+
+ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
+ {
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(addr);
++#endif
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache32_page(addr);
+ }
+@@ -581,6 +588,10 @@
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ a = addr & ~(dc_lsize - 1);
+ end = (addr + size - 1) & ~(dc_lsize - 1);
++#ifdef CONFIG_BCM4710
++ BCM4710_FILL_TLB(a);
++ BCM4710_FILL_TLB(end);
++#endif
+ while (1) {
+ flush_dcache_line(a); /* Hit_Writeback_Inv_D */
+ if (a == end)
+diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
+--- linux.old/arch/mips/mm/c-r4k.c 2005-06-11 19:39:17.000000000 +0200
++++ linux.dev/arch/mips/mm/c-r4k.c 2005-06-11 19:54:48.000000000 +0200
+@@ -1083,6 +1083,19 @@
+ static inline void coherency_setup(void)
+ {
+ change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
++
++#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) || defined(CONFIG_BCM5365)
++ if (BCM330X(current_cpu_data.processor_id)) {
++ uint32 cm;
++
++ cm = read_c0_diag();
++ /* Enable icache */
++ cm |= (1 << 31);
++ /* Enable dcache */
++ cm |= (1 << 30);
++ write_c0_diag(cm);
++ }
++#endif
+
+ /*
+ * c0_status.cu=0 specifies that updates by the sc instruction use
+@@ -1104,6 +1117,42 @@
+
+ }
+
++#ifdef CONFIG_BCM4704
++static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
++{
++ unsigned long ic_lsize = current_cpu_data.icache.linesz;
++ int i;
++ for (i = 0; i < nbytes; i += ic_lsize)
++ fill_icache_line((addr + i));
++}
++
++/*
++ * This must be run from the cache on 4704A0
++ * so there are no mips core BIU ops in progress
++ * when the PFC is enabled.
++ */
++#define PFC_CR0 0xff400000 /* control reg 0 */
++#define PFC_CR1 0xff400004 /* control reg 1 */
++static void __init enable_pfc(u32 mode)
++{
++ /* write range */
++ *(volatile u32 *)PFC_CR1 = 0xffff0000;
++
++ /* enable */
++ *(volatile u32 *)PFC_CR0 = mode;
++}
++
++void check_enable_mips_pfc(int val)
++{
++ /* enable prefetch cache */
++ if (BCM330X(current_cpu_data.processor_id)
++ && (read_c0_diag() & (1 << 29))) {
++ mips32_icache_fill((unsigned long) &enable_pfc, 64);
++ enable_pfc(val);
++ }
++}
++#endif
++
+ void __init ld_mmu_r4xx0(void)
+ {
+ extern void build_clear_page(void);
+@@ -1159,47 +1208,9 @@
+
+ build_clear_page();
+ build_copy_page();
+-}
+-
+-#ifdef CONFIG_BCM4704
+-static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
+-{
+- unsigned long ic_lsize = current_cpu_data.icache.linesz;
+- int i;
+- for (i = 0; i < nbytes; i += ic_lsize)
+- fill_icache_line((addr + i));
+-}
+-
+-/*
+- * This must be run from the cache on 4704A0
+- * so there are no mips core BIU ops in progress
+- * when the PFC is enabled.
+- */
+-#define PFC_CR0 0xff400000 /* control reg 0 */
+-#define PFC_CR1 0xff400004 /* control reg 1 */
+-static void __init enable_pfc(u32 mode)
+-{
+- /* write range */
+- *(volatile u32 *)PFC_CR1 = 0xffff0000;
+-
+- /* enable */
+- *(volatile u32 *)PFC_CR0 = mode;
+-}
+-#endif
+-
+-
+-void check_enable_mips_pfc(int val)
+-{
+-
++
+ #ifdef CONFIG_BCM4704
+- struct cpuinfo_mips *c = &current_cpu_data;
+-
+- /* enable prefetch cache */
+- if (((c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) == PRID_IMP_BCM3302)
+- && (read_c0_diag() & (1 << 29))) {
+- mips32_icache_fill((unsigned long) &enable_pfc, 64);
+- enable_pfc(val);
+- }
++ check_enable_mips_pfc(0x15);
+ #endif
+ }
+
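(The final c-r4k.c diff moves the 4704 prefetch-cache setup so that check_enable_mips_pfc(0x15) is invoked from ld_mmu_r4xx0() after the cache handlers are built. As the comment in the patch notes, enable_pfc() must execute from the I-cache so no MIPS core BIU operations are in flight when the PFC control registers at 0xff400000/0xff400004 are written; mips32_icache_fill() preloads the 64 bytes covering enable_pfc() first. A sketch of that run-from-I-cache idiom follows; the wrapper is hypothetical and assumes the fill_icache_line() helper from asm-mips/r4kcache.h:)

    static void run_from_icache(void (*fn)(u32), u32 arg, unsigned long code_bytes)
    {
        unsigned long addr = (unsigned long)fn;
        unsigned long step = current_cpu_data.icache.linesz;
        unsigned long i;

        /* Preload every I-cache line covering fn, then call it. */
        for (i = 0; i < code_bytes; i += step)
            fill_icache_line(addr + i);

        fn(arg);
    }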