@@ -203,8 +219,12 @@
 
 static void (* r4k_blast_icache_page)(unsigned long addr);
 
+static void r4k_flush_cache_all(void);
 static void __init r4k_blast_icache_page_setup(void)
 {
+#ifdef CONFIG_BCM947XX
+	r4k_blast_icache_page = (void *)r4k_flush_cache_all;
+#else
 	unsigned long ic_lsize = cpu_icache_line_size();
 
 	if (ic_lsize == 0)
@@ -215,6 +235,7 @@
 		r4k_blast_icache_page = blast_icache32_page;
 	else if (ic_lsize == 64)
 		r4k_blast_icache_page = blast_icache64_page;
+#endif
 }
 
 
@@ -222,6 +243,9 @@
 
 static void __init r4k_blast_icache_page_indexed_setup(void)
 {
+#ifdef CONFIG_BCM947XX
+	r4k_blast_icache_page_indexed = (void *)r4k_flush_cache_all;
+#else
 	unsigned long ic_lsize = cpu_icache_line_size();
 
 	if (ic_lsize == 0)
@@ -240,6 +264,7 @@
 				blast_icache32_page_indexed;
 	} else if (ic_lsize == 64)
 		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
+#endif
 }
 
 static void (* r4k_blast_icache)(void);
@@ -323,12 +348,17 @@
  */
 static inline void local_r4k_flush_cache_all(void * args)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	r4k_blast_dcache();
+	r4k_blast_icache();
+	local_irq_restore(flags);
 }
 
 static void r4k_flush_cache_all(void)
 {
-	if (!cpu_has_dc_aliases)
+	if (!cpu_has_dc_aliases && cpu_use_kmap_coherent)
 		return;
 
 	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
@@ -336,6 +366,9 @@
 
 static inline void local_r4k___flush_cache_all(void * args)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 #if defined(CONFIG_CPU_LOONGSON2)
 	r4k_blast_scache();
 	return;
@@ -353,6 +386,7 @@
 	case CPU_R14000:
 		r4k_blast_scache();
 	}
+	local_irq_restore(flags);
 }
 
 static void r4k___flush_cache_all(void)
@@ -363,17 +397,21 @@
 static inline void local_r4k_flush_cache_range(void * args)
 {
 	struct vm_area_struct *vma = args;
+	unsigned long flags;
 
 	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
 		return;
 
+	local_irq_save(flags);
 	r4k_blast_dcache();
+	r4k_blast_icache();
+	local_irq_restore(flags);
 }
 
 static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
-	if (!cpu_has_dc_aliases)
+	if (!cpu_has_dc_aliases && cpu_use_kmap_coherent)
 		return;
 
 	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
@@ -382,6 +420,7 @@
 static inline void local_r4k_flush_cache_mm(void * args)
 {
 	struct mm_struct *mm = args;
+	unsigned long flags;
 
 	if (!cpu_context(smp_processor_id(), mm))
 		return;
@@ -400,12 +439,15 @@
 		return;
 	}
 
+	local_irq_save(flags);
 	r4k_blast_dcache();
+	r4k_blast_icache();
+	local_irq_restore(flags);
 }
 
 static void r4k_flush_cache_mm(struct mm_struct *mm)
 {
-	if (!cpu_has_dc_aliases)
+	if (!cpu_has_dc_aliases && cpu_use_kmap_coherent)
 		return;
 
 	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
@@ -425,6 +467,7 @@
 	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned long flags;
 	pgd_t *pgdp;
 	pud_t *pudp;
 	pmd_t *pmdp;
@@ -456,8 +499,9 @@
 	 * for every cache flush operation. So we do indexed flushes
 	 * in that case, which doesn't overly flush the cache too much.
 	 */
+	local_irq_save(flags);
 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
-		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+		if (!cpu_use_kmap_coherent || cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
 			r4k_blast_dcache_page(addr);
 			if (exec && !cpu_icache_snoops_remote_store)
 				r4k_blast_scache_page(addr);
@@ -465,14 +509,14 @@
 		if (exec)
 			r4k_blast_icache_page(addr);
 
-		return;
+		goto done;
 	}
 
 	/*
 	 * Do indexed flush, too much work to get the (possible) TLB refills
 	 * to work correctly.
 	 */
-	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+	if (!cpu_use_kmap_coherent || cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
 		if (exec && !cpu_icache_snoops_remote_store) {
@@ -488,6 +532,8 @@
 		} else
 			r4k_blast_icache_page_indexed(addr);
 	}
+done:
+	local_irq_restore(flags);
 }
 
 static void r4k_flush_cache_page(struct vm_area_struct *vma,
@@ -504,7 +550,11 @@
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	r4k_blast_dcache_page((unsigned long) addr);
+	local_irq_restore(flags);
 }
 
 static void r4k_flush_data_cache_page(unsigned long addr)
@@ -547,6 +597,9 @@
 
 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
+#ifdef CONFIG_BCM947XX
+	r4k_flush_cache_all();
+#else
 	struct flush_icache_range_args args;
 
 	args.start = start;
@@ -554,12 +607,15 @@
 
 	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
 	instruction_hazard();
+#endif
 }
 
 #ifdef CONFIG_DMA_NONCOHERENT
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
+	unsigned long flags;
+
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
@@ -576,18 +632,21 @@
 	 * subset property so we have to flush the primary caches
 	 * explicitly
 	 */
+	local_irq_save(flags);
 	if (size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		blast_dcache_range(addr, addr + size);
 	}
-
 	bc_wback_inv(addr, size);
+	local_irq_restore(flags);
 }
 
 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 {
+	unsigned long flags;
+
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
@@ -599,6 +658,7 @@
 		return;
 	}
 
+	local_irq_save(flags);
 	if (size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
@@ -607,6 +667,7 @@
 	}
 
 	bc_inv(addr, size);
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_DMA_NONCOHERENT */
 
@@ -621,8 +682,12 @@
 	unsigned long dc_lsize = cpu_dcache_line_size();
 	unsigned long sc_lsize = cpu_scache_line_size();
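
Taken together, the hunks above make two changes: every local_* flush routine now runs its blast sequence with interrupts disabled, and under CONFIG_BCM947XX the per-page icache hooks and r4k_flush_icache_range() are redirected to r4k_flush_cache_all(), presumably because per-page icache operations are not reliable on those parts. The following is a minimal userspace sketch of both patterns, not kernel code: the cache primitives and local_irq_save()/local_irq_restore() are printf/variable stand-ins, and only the control flow mirrors the patch.

/* Compile with: cc -o sketch sketch.c */
#include <stdio.h>

#define CONFIG_BCM947XX 1		/* assume a BCM947xx build */

static unsigned long fake_irq_state = 1;	/* mock: 1 = IRQs enabled */

/* Stand-ins for the kernel's local_irq_save()/local_irq_restore(). */
#define local_irq_save(flags)		do { (flags) = fake_irq_state; fake_irq_state = 0; } while (0)
#define local_irq_restore(flags)	do { fake_irq_state = (flags); } while (0)

static void r4k_blast_dcache(void) { puts("writeback+invalidate D-cache"); }
static void r4k_blast_icache(void) { puts("invalidate I-cache"); }

/*
 * With interrupts masked, the D-cache and I-cache blasts cannot be torn
 * apart by an interrupt handler that itself issues cache ops -- the
 * property the patch adds to every local_* flush routine.
 */
static void r4k_flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	r4k_blast_dcache();
	r4k_blast_icache();
	local_irq_restore(flags);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void blast_icache32_page(unsigned long addr)
{
	printf("invalidate 32-byte I-cache lines for page %#lx\n", addr);
}

static void r4k_blast_icache_page_setup(void)
{
#ifdef CONFIG_BCM947XX
	/*
	 * Fall back to a full flush. The cast discards the addr argument;
	 * like the patch's (void *) cast, this leans on the calling
	 * convention simply ignoring the extra register argument.
	 */
	r4k_blast_icache_page = (void (*)(unsigned long))r4k_flush_cache_all;
#else
	r4k_blast_icache_page = blast_icache32_page;	/* 32-byte lines assumed */
#endif
}

int main(void)
{
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page(0x1000);	/* on BCM947xx this flushes everything */
	return 0;
}

The trade-off is the same one the patch makes: the fallback path is much more expensive per call (it blasts both whole caches instead of a handful of lines), but it removes any dependence on per-page cache operations behaving correctly.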