1 From 940b4fea5ebfde3abe03c6469a57c01ee961497a Mon Sep 17 00:00:00 2001
2 From: Kurt Mahan <kmahan@freescale.com>
3 Date: Wed, 18 Jun 2008 15:20:21 -0600
4 Subject: [PATCH] Split 547x/548x and 5445x cache routines into separate files.
6 LTIBName: mcfv4e-cache-split
7 Signed-off-by: Kurt Mahan <kmahan@freescale.com>
9 include/asm-m68k/cf_5445x_cacheflush.h | 447 ++++++++++++++++++++++++++++++++
10 include/asm-m68k/cf_548x_cacheflush.h | 259 ++++++++++++++++++
11 include/asm-m68k/cf_cacheflush.h | 244 +-----------------
12 3 files changed, 711 insertions(+), 239 deletions(-)
13 create mode 100644 include/asm-m68k/cf_5445x_cacheflush.h
14 create mode 100644 include/asm-m68k/cf_548x_cacheflush.h
17 +++ b/include/asm-m68k/cf_5445x_cacheflush.h
20 + * include/asm-m68k/cf_5445x_cacheflush.h - Coldfire 5445x Cache
22 + * Based on include/asm-m68k/cacheflush.h
24 + * Coldfire pieces by:
25 + * Kurt Mahan kmahan@freescale.com
27 + * Copyright Freescale Semiconductor, Inc. 2007, 2008
29 + * This program is free software; you can redistribute it and/or modify it
30 + * under the terms of the GNU General Public License as published by the
31 + * Free Software Foundation; either version 2 of the License, or (at your
32 + * option) any later version.
34 +#ifndef M68K_CF_5445x_CACHEFLUSH_H
35 +#define M68K_CF_5445x_CACHEFLUSH_H
37 +#include <asm/cfcache.h>
40 + * Coldfire Cache Model
42 + * The Coldfire processors use a Harvard architecture cache configured
43 + * as four-way set associative. The cache does not implement bus snooping
44 + * so cache coherency with other masters must be maintained in software.
46 + * The cache is managed via the CPUSHL instruction in conjunction with
47 + * bits set in the CACR (cache control register). Currently the code
48 + * uses the CPUSHL enhancement which adds the ability to
49 + * invalidate/clear/push a cacheline by physical address. This feature
50 + * is designated in the Hardware Configuration Register [D1-CPES].
53 + * DPI[28] cpushl invalidate disable for d-cache
54 + * IDPI[12] cpushl invalidate disable for i-cache
55 + * SPA[14] cpushl search by physical address
56 + * IVO[20] cpushl invalidate only
58 + * Random Terminology:
59 + * * invalidate = reset the cache line's valid bit
60 + * * push = generate a line-sized store of the data if its contents are marked
61 + * as modified (the modified flag is cleared after the store)
62 + * * clear = push + invalidate
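/*
 * Illustrative sketch (for exposition only): the cf_cache_clear(),
 * cf_cache_push() and cf_cache_flush() routines below all follow the same
 * CPUSHL-by-physical-address pattern and differ only in which CF_CACR_*
 * bits they temporarily set on top of shadow_cacr. The helper name below is
 * made up; shadow_cacr, CACHE_LINE_SIZE and the CF_CACR_* constants are
 * assumed to come from <asm/cfcache.h>.
 */
static inline void cf_cpushl_phys_sketch(unsigned long paddr, int lines,
					 unsigned long cacr_bits)
{
	unsigned long tmp = shadow_cacr + cacr_bits;	/* e.g. CF_CACR_SPA + CF_CACR_IVO */

	asm volatile("movec %0,%%cacr" : : "r" (tmp));		/* select CPUSHL behaviour */
	while (lines-- > 0) {
		asm volatile("cpushl %%bc,(%0)" : : "a" (paddr));
		paddr += CACHE_LINE_SIZE;			/* next cache line */
	}
	asm volatile("movec %0,%%cacr" : : "r" (shadow_cacr));	/* restore previous CACR */
}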
66 + * flush_icache - Flush all of the instruction cache
68 +static inline void flush_icache(void)
70 + asm volatile("nop\n"
71 + "moveq%.l #0,%%d0\n"
72 + "moveq%.l #0,%%d1\n"
73 + "move%.l %%d0,%%a0\n"
75 + "cpushl %%ic,(%%a0)\n"
76 + "add%.l #0x0010,%%a0\n"
80 + "moveq%.l #0,%%d1\n"
82 + "move%.l %%d0,%%a0\n"
85 + : : "i" (CACHE_SETS)
86 + : "a0", "d0", "d1");
90 + * flush_dcache - Flush all of the data cache
92 +static inline void flush_dcache(void)
94 + asm volatile("nop\n"
95 + "moveq%.l #0,%%d0\n"
96 + "moveq%.l #0,%%d1\n"
97 + "move%.l %%d0,%%a0\n"
99 + "cpushl %%dc,(%%a0)\n"
100 + "add%.l #0x0010,%%a0\n"
101 + "addq%.l #1,%%d1\n"
102 + "cmpi%.l %0,%%d1\n"
104 + "moveq%.l #0,%%d1\n"
105 + "addq%.l #1,%%d0\n"
106 + "move%.l %%d0,%%a0\n"
107 + "cmpi%.l #4,%%d0\n"
109 + : : "i" (CACHE_SETS)
110 + : "a0", "d0", "d1");
114 + * flush_bcache - Flush all of both caches
116 +static inline void flush_bcache(void)
118 + asm volatile("nop\n"
119 + "moveq%.l #0,%%d0\n"
120 + "moveq%.l #0,%%d1\n"
121 + "move%.l %%d0,%%a0\n"
123 + "cpushl %%bc,(%%a0)\n"
124 + "add%.l #0x0010,%%a0\n"
125 + "addq%.l #1,%%d1\n"
126 + "cmpi%.l %0,%%d1\n"
128 + "moveq%.l #0,%%d1\n"
129 + "addq%.l #1,%%d0\n"
130 + "move%.l %%d0,%%a0\n"
131 + "cmpi%.l #4,%%d0\n"
133 + : : "i" (CACHE_SETS)
134 + : "a0", "d0", "d1");
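/*
 * Rough C equivalent of the three whole-cache flush loops above (a sketch
 * only, assuming CACHE_SETS from <asm/cfcache.h>): in set/way mode the
 * address handed to CPUSHL carries the way number in its low bits and the
 * set index from bit 4 upwards, and all four ways are walked for every set.
 */
static inline void cf_cpushl_all_sketch(void)
{
	unsigned long way, set;

	for (way = 0; way < 4; way++)			/* four-way set associative */
		for (set = 0; set < CACHE_SETS; set++)
			asm volatile("cpushl %%bc,(%0)"	/* %ic/%dc/%bc as required */
				     : : "a" ((set << 4) + way));
}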
138 + * cf_cache_clear - invalidate cache
139 + * @paddr: starting physical address
140 + * @len: number of bytes
142 + * Invalidate cache lines starting at paddr for len bytes.
143 + * Those lines are not pushed.
145 +static inline void cf_cache_clear(unsigned long paddr, int len)
147 + /* number of lines */
148 + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
152 + /* align on set boundary */
153 + paddr &= 0xfffffff0;
155 + asm volatile("nop\n"
156 + "move%.l %2,%%d0\n"
158 + "movec %%d0,%%cacr\n"
159 + "move%.l %0,%%a0\n"
160 + "move%.l %1,%%d0\n"
162 + "cpushl %%bc,(%%a0)\n"
163 + "lea 0x10(%%a0),%%a0\n"
164 + "subq%.l #1,%%d0\n"
166 + "movec %2,%%cacr\n"
167 + : : "a" (paddr), "r" (len),
169 + "i" (CF_CACR_SPA+CF_CACR_IVO)
174 + * cf_cache_push - Push dirty cache out with no invalidate
175 + * @paddr: starting physical address
176 + * @len: number of bytes
178 + * Push any dirty lines starting at paddr for len bytes.
179 + * Those lines are not invalidated.
181 +static inline void cf_cache_push(unsigned long paddr, int len)
183 + /* number of lines */
184 + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
188 + /* align on set boundary */
189 + paddr &= 0xfffffff0;
191 + asm volatile("nop\n"
192 + "move%.l %2,%%d0\n"
194 + "movec %%d0,%%cacr\n"
195 + "move%.l %0,%%a0\n"
196 + "move%.l %1,%%d0\n"
198 + "cpushl %%bc,(%%a0)\n"
199 + "lea 0x10(%%a0),%%a0\n"
200 + "subq%.l #1,%%d0\n"
202 + "movec %2,%%cacr\n"
203 + : : "a" (paddr), "r" (len),
205 + "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
210 + * cf_cache_flush - Push dirty cache out and invalidate
211 + * @paddr: starting physical address
212 + * @len: number of bytes
214 + * Push any dirty lines starting at paddr for len bytes and
215 + * invalidate those lines.
217 +static inline void cf_cache_flush(unsigned long paddr, int len)
219 + /* number of lines */
220 + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
224 + /* align on set boundary */
225 + paddr &= 0xfffffff0;
227 + asm volatile("nop\n"
228 + "move%.l %2,%%d0\n"
230 + "movec %%d0,%%cacr\n"
231 + "move%.l %0,%%a0\n"
232 + "move%.l %1,%%d0\n"
234 + "cpushl %%bc,(%%a0)\n"
235 + "lea 0x10(%%a0),%%a0\n"
236 + "subq%.l #1,%%d0\n"
238 + "movec %2,%%cacr\n"
239 + : : "a" (paddr), "r" (len),
246 + * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
247 + * @vstart: starting virtual address
248 + * @vend: ending virtual address
250 + * Push any dirty data/instruction lines in the given virtual address range and
251 + * invalidate those lines.
253 +static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
257 + /* align on set boundary */
258 + vstart &= 0xfffffff0;
259 + vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
260 + len = vend - vstart;
263 + vstart = __pa(vstart);
264 + vend = vstart + len;
266 + asm volatile("nop\n"
267 + "move%.l %2,%%d0\n"
269 + "movec %%d0,%%cacr\n"
270 + "move%.l %0,%%a0\n"
271 + "move%.l %1,%%a1\n"
273 + "cpushl %%bc,(%%a0)\n"
274 + "lea 0x10(%%a0),%%a0\n"
275 + "cmpa%.l %%a0,%%a1\n"
277 + "movec %2,%%cacr\n"
279 + : "a" (vstart), "a" (vend),
282 + : "a0", "a1", "d0");
286 + * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
287 + * @vstart: starting virtual address
288 + * @vend: ending virtual address
290 + * Push any dirty data lines in the given virtual address range and
291 + * invalidate those lines.
293 +static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
295 + /* align on set boundary */
296 + vstart &= 0xfffffff0;
297 + vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
299 + asm volatile("nop\n"
300 + "move%.l %2,%%d0\n"
302 + "movec %%d0,%%cacr\n"
303 + "move%.l %0,%%a0\n"
304 + "move%.l %1,%%a1\n"
306 + "cpushl %%dc,(%%a0)\n"
307 + "lea 0x10(%%a0),%%a0\n"
308 + "cmpa%.l %%a0,%%a1\n"
310 + "movec %2,%%cacr\n"
312 + : "a" (__pa(vstart)), "a" (__pa(vend)),
315 + : "a0", "a1", "d0");
319 + * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
320 + * @vstart: starting virtual address
321 + * @vend: ending virtual address
323 + * Push any dirty instruction lines in the given virtual address range and
324 + * invalidate those lines. In practice this is just an invalidate, since
325 + * instruction cache lines should never be dirty.
327 +static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
329 + /* align on set boundary */
330 + vstart &= 0xfffffff0;
331 + vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
333 + asm volatile("nop\n"
334 + "move%.l %2,%%d0\n"
336 + "movec %%d0,%%cacr\n"
337 + "move%.l %0,%%a0\n"
338 + "move%.l %1,%%a1\n"
340 + "cpushl %%ic,(%%a0)\n"
341 + "lea 0x10(%%a0),%%a0\n"
342 + "cmpa%.l %%a0,%%a1\n"
344 + "movec %2,%%cacr\n"
346 + : "a" (__pa(vstart)), "a" (__pa(vend)),
349 + : "a0", "a1", "d0");
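/*
 * Hypothetical usage sketch (wrapper name made up for the example): the
 * *_range helpers above take kernel virtual addresses and translate them
 * with __pa(), while cf_cache_clear()/cf_cache_push()/cf_cache_flush() take
 * a physical address and a byte count directly, align the start down to a
 * line boundary and round the count up to whole 16-byte lines, e.g. a
 * 100-byte buffer covers (100 + 15) / 16 = 7 lines.
 */
static inline void cf_cache_flush_kernel_buf(void *buf, int len)
{
	cf_cache_flush(__pa(buf), len);	/* push and invalidate the buffer's lines */
}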
353 + * flush_cache_mm - Flush an mm_struct
354 + * @mm: mm_struct to flush
356 +static inline void flush_cache_mm(struct mm_struct *mm)
358 + if (mm == current->mm)
362 +#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
365 + * flush_cache_range - Flush a cache range
367 + * @start: Starting address
368 + * @end: Ending address
370 + * flush_cache_range must be a macro to avoid a dependency on
371 + * linux/mm.h which includes this file.
373 +static inline void flush_cache_range(struct vm_area_struct *vma,
374 + unsigned long start, unsigned long end)
376 + if (vma->vm_mm == current->mm)
377 + cf_cache_flush_range(start, end);
381 + * flush_cache_page - Flush a page of the cache
386 + * flush_cache_page must be a macro to avoid a dependency on
387 + * linux/mm.h which includes this file.
389 +static inline void flush_cache_page(struct vm_area_struct *vma,
390 + unsigned long vmaddr, unsigned long pfn)
392 + if (vma->vm_mm == current->mm)
393 + cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
397 + * __flush_page_to_ram - Push a page out of the cache
398 + * @vaddr: Virtual address at start of page
400 + * Push the page at kernel virtual address *vaddr* and clear
403 +static inline void __flush_page_to_ram(void *vaddr)
405 + asm volatile("nop\n"
406 + "move%.l %2,%%d0\n"
408 + "movec %%d0,%%cacr\n"
409 + "move%.l %0,%%d0\n"
410 + "and%.l #0xfffffff0,%%d0\n"
411 + "move%.l %%d0,%%a0\n"
412 + "move%.l %1,%%d0\n"
414 + "cpushl %%bc,(%%a0)\n"
415 + "lea 0x10(%%a0),%%a0\n"
416 + "subq%.l #1,%%d0\n"
418 + "movec %2,%%cacr\n"
419 + : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
420 + "r" (shadow_cacr), "i" (CF_CACR_SPA)
425 + * Various defines for the kernel.
428 +extern void cache_clear(unsigned long paddr, int len);
429 +extern void cache_push(unsigned long paddr, int len);
430 +extern void flush_icache_range(unsigned long address, unsigned long endaddr);
432 +#define flush_cache_all() flush_bcache()
433 +#define flush_cache_vmap(start, end) flush_bcache()
434 +#define flush_cache_vunmap(start, end) flush_bcache()
436 +#define flush_dcache_range(vstart, vend) cf_dcache_flush_range(vstart, vend)
437 +#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
438 +#define flush_dcache_mmap_lock(mapping) do { } while (0)
439 +#define flush_dcache_mmap_unlock(mapping) do { } while (0)
441 +#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
444 + * copy_to_user_page - Copy memory to user page
446 +static inline void copy_to_user_page(struct vm_area_struct *vma,
447 + struct page *page, unsigned long vaddr,
448 + void *dst, void *src, int len)
450 + memcpy(dst, src, len);
451 + cf_cache_flush(page_to_phys(page), PAGE_SIZE);
455 + * copy_from_user_page - Copy memory from user page
457 +static inline void copy_from_user_page(struct vm_area_struct *vma,
458 + struct page *page, unsigned long vaddr,
459 + void *dst, void *src, int len)
461 + cf_cache_flush(page_to_phys(page), PAGE_SIZE);
462 + memcpy(dst, src, len);
465 +#endif /* M68K_CF_5445x_CACHEFLUSH_H */
467 +++ b/include/asm-m68k/cf_548x_cacheflush.h
470 + * include/asm-m68k/cf_548x_cacheflush.h - Coldfire 547x/548x Cache
472 + * Based on include/asm-m68k/cacheflush.h
474 + * Coldfire pieces by:
475 + * Kurt Mahan kmahan@freescale.com
477 + * Copyright Freescale Semiconductor, Inc. 2007, 2008
479 + * This program is free software; you can redistribute it and/or modify it
480 + * under the terms of the GNU General Public License as published by the
481 + * Free Software Foundation; either version 2 of the License, or (at your
482 + * option) any later version.
484 +#ifndef M68K_CF_548x_CACHEFLUSH_H
485 +#define M68K_CF_548x_CACHEFLUSH_H
487 +#include <asm/cfcache.h>
489 + * Cache handling functions
492 +#define flush_icache() \
494 + unsigned long set; \
495 + unsigned long start_set; \
496 + unsigned long end_set; \
499 + end_set = (unsigned long)LAST_DCACHE_ADDR; \
501 + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
502 + asm volatile("cpushl %%ic,(%0)\n" \
503 + "\taddq%.l #1,%0\n" \
504 + "\tcpushl %%ic,(%0)\n" \
505 + "\taddq%.l #1,%0\n" \
506 + "\tcpushl %%ic,(%0)\n" \
507 + "\taddq%.l #1,%0\n" \
508 + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
512 +#define flush_dcache() \
514 + unsigned long set; \
515 + unsigned long start_set; \
516 + unsigned long end_set; \
519 + end_set = (unsigned long)LAST_DCACHE_ADDR; \
521 + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
522 + asm volatile("cpushl %%dc,(%0)\n" \
523 + "\taddq%.l #1,%0\n" \
524 + "\tcpushl %%dc,(%0)\n" \
525 + "\taddq%.l #1,%0\n" \
526 + "\tcpushl %%dc,(%0)\n" \
527 + "\taddq%.l #1,%0\n" \
528 + "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
532 +#define flush_bcache() \
534 + unsigned long set; \
535 + unsigned long start_set; \
536 + unsigned long end_set; \
539 + end_set = (unsigned long)LAST_DCACHE_ADDR; \
541 + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
542 + asm volatile("cpushl %%bc,(%0)\n" \
543 + "\taddq%.l #1,%0\n" \
544 + "\tcpushl %%bc,(%0)\n" \
545 + "\taddq%.l #1,%0\n" \
546 + "\tcpushl %%bc,(%0)\n" \
547 + "\taddq%.l #1,%0\n" \
548 + "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
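/*
 * Rough C equivalent of the flush_icache()/flush_dcache()/flush_bcache()
 * macros above (a sketch only, assuming LAST_DCACHE_ADDR from
 * <asm/cfcache.h>; the macros compute their own start_set/end_set bounds).
 * Each set address is visited once and the low two bits select the way, so
 * four CPUSHLs cover all ways of a set; the "set += (0x10 - 3)" stride in
 * the macros just compensates for the three addq #1 increments done inside
 * the asm block.
 */
static inline void cf548x_flush_bcache_sketch(void)
{
	unsigned long set, way;

	for (set = 0; set <= (unsigned long)LAST_DCACHE_ADDR; set += 0x10)
		for (way = 0; way < 4; way++)
			asm volatile("cpushl %%bc,(%0)" : : "a" (set + way));
}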
553 + * invalidate the cache for the specified memory range.
554 + * It starts at the physical address specified for
555 + * the given number of bytes.
557 +extern void cache_clear(unsigned long paddr, int len);
559 + * push any dirty cache in the specified memory range.
560 + * It starts at the physical address specified for
561 + * the given number of bytes.
563 +extern void cache_push(unsigned long paddr, int len);
566 + * push and invalidate pages in the specified user virtual
569 +extern void cache_push_v(unsigned long vaddr, int len);
571 +/* This is needed whenever the virtual mapping of the current
572 + process changes. */
575 + * flush_cache_mm - Flush an mm_struct
576 + * @mm: mm_struct to flush
578 +static inline void flush_cache_mm(struct mm_struct *mm)
580 + if (mm == current->mm)
584 +#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
586 +#define flush_cache_all() flush_bcache()
589 + * flush_cache_range - Flush a cache range
591 + * @start: Starting address
592 + * @end: Ending address
594 + * flush_cache_range must be a macro to avoid a dependency on
595 + * linux/mm.h which includes this file.
597 +static inline void flush_cache_range(struct vm_area_struct *vma,
598 + unsigned long start, unsigned long end)
600 + if (vma->vm_mm == current->mm)
602 +// cf_cache_flush_range(start, end);
606 + * flush_cache_page - Flush a page of the cache
611 + * flush_cache_page must be a macro to avoid a dependency on
612 + * linux/mm.h which includes this file.
614 +static inline void flush_cache_page(struct vm_area_struct *vma,
615 + unsigned long vmaddr, unsigned long pfn)
617 + if (vma->vm_mm == current->mm)
619 +// cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
622 +/* Push the page at kernel virtual address and clear the icache */
623 +/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
624 +#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
625 +extern inline void __flush_page_to_ram(void *address)
628 + unsigned long start_set;
629 + unsigned long end_set;
630 + unsigned long addr = (unsigned long) address;
632 + addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
634 + start_set = addr & _ICACHE_SET_MASK;
635 + end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
637 + if (start_set > end_set) {
638 + /* from the beginning to the lowest address */
639 + for (set = 0; set <= end_set; set += (0x10 - 3)) {
640 + asm volatile("cpushl %%bc,(%0)\n"
641 + "\taddq%.l #1,%0\n"
642 + "\tcpushl %%bc,(%0)\n"
643 + "\taddq%.l #1,%0\n"
644 + "\tcpushl %%bc,(%0)\n"
645 + "\taddq%.l #1,%0\n"
646 + "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
648 + /* next loop will finish the cache, i.e. pass the hole */
649 + end_set = LAST_ICACHE_ADDR;
651 + for (set = start_set; set <= end_set; set += (0x10 - 3)) {
652 + asm volatile("cpushl %%bc,(%0)\n"
653 + "\taddq%.l #1,%0\n"
654 + "\tcpushl %%bc,(%0)\n"
655 + "\taddq%.l #1,%0\n"
656 + "\tcpushl %%bc,(%0)\n"
657 + "\taddq%.l #1,%0\n"
658 + "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
662 +/* Use __flush_page_to_ram() for flush_dcache_page; all values are the same - MW */
663 +#define flush_dcache_page(page) \
664 + __flush_page_to_ram((void *) page_address(page))
665 +#define flush_icache_page(vma,pg) \
666 + __flush_page_to_ram((void *) page_address(pg))
667 +#define flush_icache_user_range(adr,len) do { } while (0)
669 +#define flush_icache_user_page(vma,page,addr,len) do { } while (0)
671 +/* Push n pages at kernel virtual address and clear the icache */
672 +/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
673 +extern inline void flush_icache_range (unsigned long address,
674 + unsigned long endaddr)
677 + unsigned long start_set;
678 + unsigned long end_set;
680 + start_set = address & _ICACHE_SET_MASK;
681 + end_set = endaddr & _ICACHE_SET_MASK;
683 + if (start_set > end_set) {
684 + /* from the beginning to the lowest address */
685 + for (set = 0; set <= end_set; set += (0x10 - 3)) {
686 + asm volatile("cpushl %%ic,(%0)\n"
687 + "\taddq%.l #1,%0\n"
688 + "\tcpushl %%ic,(%0)\n"
689 + "\taddq%.l #1,%0\n"
690 + "\tcpushl %%ic,(%0)\n"
691 + "\taddq%.l #1,%0\n"
692 + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
694 + /* next loop will finish the cache, i.e. pass the hole */
695 + end_set = LAST_ICACHE_ADDR;
697 + for (set = start_set; set <= end_set; set += (0x10 - 3)) {
698 + asm volatile("cpushl %%ic,(%0)\n"
699 + "\taddq%.l #1,%0\n"
700 + "\tcpushl %%ic,(%0)\n"
701 + "\taddq%.l #1,%0\n"
702 + "\tcpushl %%ic,(%0)\n"
703 + "\taddq%.l #1,%0\n"
704 + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
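/*
 * Worked example of the wrap-around case above (illustrative numbers only,
 * assuming a set-index mask of 0x1ff0): a range whose start maps to set
 * 0x1fe0 and whose end maps to set 0x0020 gives start_set > end_set. The
 * first loop then pushes sets 0x0000..0x0020, and the second loop, with
 * end_set raised to LAST_ICACHE_ADDR, pushes 0x1fe0..LAST_ICACHE_ADDR, so
 * every set the range touches is covered.
 */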
708 +static inline void copy_to_user_page(struct vm_area_struct *vma,
709 + struct page *page, unsigned long vaddr,
710 + void *dst, void *src, int len)
712 + memcpy(dst, src, len);
713 + flush_icache_user_page(vma, page, vaddr, len);
715 +static inline void copy_from_user_page(struct vm_area_struct *vma,
716 + struct page *page, unsigned long vaddr,
717 + void *dst, void *src, int len)
719 + memcpy(dst, src, len);
722 +#define flush_cache_vmap(start, end) flush_cache_all()
723 +#define flush_cache_vunmap(start, end) flush_cache_all()
724 +#define flush_dcache_mmap_lock(mapping) do { } while (0)
725 +#define flush_dcache_mmap_unlock(mapping) do { } while (0)
727 +#endif /* M68K_CF_548x_CACHEFLUSH_H */
728 --- a/include/asm-m68k/cf_cacheflush.h
729 +++ b/include/asm-m68k/cf_cacheflush.h
731 #ifndef M68K_CF_CACHEFLUSH_H
732 #define M68K_CF_CACHEFLUSH_H
734 -#include <asm/cfcache.h>
736 - * Cache handling functions
739 -#define flush_icache() \
741 - unsigned long set; \
742 - unsigned long start_set; \
743 - unsigned long end_set; \
746 - end_set = (unsigned long)LAST_DCACHE_ADDR; \
748 - for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
749 - asm volatile("cpushl %%ic,(%0)\n" \
750 - "\taddq%.l #1,%0\n" \
751 - "\tcpushl %%ic,(%0)\n" \
752 - "\taddq%.l #1,%0\n" \
753 - "\tcpushl %%ic,(%0)\n" \
754 - "\taddq%.l #1,%0\n" \
755 - "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
759 -#define flush_dcache() \
761 - unsigned long set; \
762 - unsigned long start_set; \
763 - unsigned long end_set; \
766 - end_set = (unsigned long)LAST_DCACHE_ADDR; \
768 - for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
769 - asm volatile("cpushl %%dc,(%0)\n" \
770 - "\taddq%.l #1,%0\n" \
771 - "\tcpushl %%dc,(%0)\n" \
772 - "\taddq%.l #1,%0\n" \
773 - "\tcpushl %%dc,(%0)\n" \
774 - "\taddq%.l #1,%0\n" \
775 - "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
779 -#define flush_bcache() \
781 - unsigned long set; \
782 - unsigned long start_set; \
783 - unsigned long end_set; \
786 - end_set = (unsigned long)LAST_DCACHE_ADDR; \
788 - for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
789 - asm volatile("cpushl %%bc,(%0)\n" \
790 - "\taddq%.l #1,%0\n" \
791 - "\tcpushl %%bc,(%0)\n" \
792 - "\taddq%.l #1,%0\n" \
793 - "\tcpushl %%bc,(%0)\n" \
794 - "\taddq%.l #1,%0\n" \
795 - "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
800 - * invalidate the cache for the specified memory range.
801 - * It starts at the physical address specified for
802 - * the given number of bytes.
804 -extern void cache_clear(unsigned long paddr, int len);
806 - * push any dirty cache in the specified memory range.
807 - * It starts at the physical address specified for
808 - * the given number of bytes.
810 -extern void cache_push(unsigned long paddr, int len);
813 - * push and invalidate pages in the specified user virtual
816 -extern void cache_push_v(unsigned long vaddr, int len);
818 -/* This is needed whenever the virtual mapping of the current
819 - process changes. */
822 - * flush_cache_mm - Flush an mm_struct
823 - * @mm: mm_struct to flush
825 -static inline void flush_cache_mm(struct mm_struct *mm)
827 - if (mm == current->mm)
831 -#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
833 -#define flush_cache_all() flush_bcache()
836 - * flush_cache_range - Flush a cache range
838 - * @start: Starting address
839 - * @end: Ending address
841 - * flush_cache_range must be a macro to avoid a dependency on
842 - * linux/mm.h which includes this file.
844 -static inline void flush_cache_range(struct vm_area_struct *vma,
845 - unsigned long start, unsigned long end)
847 - if (vma->vm_mm == current->mm)
849 -// cf_cache_flush_range(start, end);
853 - * flush_cache_page - Flush a page of the cache
858 - * flush_cache_page must be a macro to avoid a dependency on
859 - * linux/mm.h which includes this file.
861 -static inline void flush_cache_page(struct vm_area_struct *vma,
862 - unsigned long vmaddr, unsigned long pfn)
864 - if (vma->vm_mm == current->mm)
866 -// cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
869 -/* Push the page at kernel virtual address and clear the icache */
870 -/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
871 -#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
872 -extern inline void __flush_page_to_ram(void *address)
875 - unsigned long start_set;
876 - unsigned long end_set;
877 - unsigned long addr = (unsigned long) address;
879 - addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
881 - start_set = addr & _ICACHE_SET_MASK;
882 - end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
884 - if (start_set > end_set) {
885 - /* from the begining to the lowest address */
886 - for (set = 0; set <= end_set; set += (0x10 - 3)) {
887 - asm volatile("cpushl %%bc,(%0)\n"
888 - "\taddq%.l #1,%0\n"
889 - "\tcpushl %%bc,(%0)\n"
890 - "\taddq%.l #1,%0\n"
891 - "\tcpushl %%bc,(%0)\n"
892 - "\taddq%.l #1,%0\n"
893 - "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
895 - /* next loop will finish the cache ie pass the hole */
896 - end_set = LAST_ICACHE_ADDR;
898 - for (set = start_set; set <= end_set; set += (0x10 - 3)) {
899 - asm volatile("cpushl %%bc,(%0)\n"
900 - "\taddq%.l #1,%0\n"
901 - "\tcpushl %%bc,(%0)\n"
902 - "\taddq%.l #1,%0\n"
903 - "\tcpushl %%bc,(%0)\n"
904 - "\taddq%.l #1,%0\n"
905 - "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
909 -/* Use __flush_page_to_ram() for flush_dcache_page all values are same - MW */
910 -#define flush_dcache_page(page) \
911 - __flush_page_to_ram((void *) page_address(page))
912 -#define flush_icache_page(vma,pg) \
913 - __flush_page_to_ram((void *) page_address(pg))
914 -#define flush_icache_user_range(adr,len) do { } while (0)
916 -#define flush_icache_user_page(vma,page,addr,len) do { } while (0)
918 -/* Push n pages at kernel virtual address and clear the icache */
919 -/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
920 -extern inline void flush_icache_range (unsigned long address,
921 - unsigned long endaddr)
924 - unsigned long start_set;
925 - unsigned long end_set;
927 - start_set = address & _ICACHE_SET_MASK;
928 - end_set = endaddr & _ICACHE_SET_MASK;
930 - if (start_set > end_set) {
931 - /* from the begining to the lowest address */
932 - for (set = 0; set <= end_set; set += (0x10 - 3)) {
933 - asm volatile("cpushl %%ic,(%0)\n"
934 - "\taddq%.l #1,%0\n"
935 - "\tcpushl %%ic,(%0)\n"
936 - "\taddq%.l #1,%0\n"
937 - "\tcpushl %%ic,(%0)\n"
938 - "\taddq%.l #1,%0\n"
939 - "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
941 - /* next loop will finish the cache ie pass the hole */
942 - end_set = LAST_ICACHE_ADDR;
944 - for (set = start_set; set <= end_set; set += (0x10 - 3)) {
945 - asm volatile("cpushl %%ic,(%0)\n"
946 - "\taddq%.l #1,%0\n"
947 - "\tcpushl %%ic,(%0)\n"
948 - "\taddq%.l #1,%0\n"
949 - "\tcpushl %%ic,(%0)\n"
950 - "\taddq%.l #1,%0\n"
951 - "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
955 -static inline void copy_to_user_page(struct vm_area_struct *vma,
956 - struct page *page, unsigned long vaddr,
957 - void *dst, void *src, int len)
959 - memcpy(dst, src, len);
960 - flush_icache_user_page(vma, page, vaddr, len);
962 -static inline void copy_from_user_page(struct vm_area_struct *vma,
963 - struct page *page, unsigned long vaddr,
964 - void *dst, void *src, int len)
966 - memcpy(dst, src, len);
969 -#define flush_cache_vmap(start, end) flush_cache_all()
970 -#define flush_cache_vunmap(start, end) flush_cache_all()
971 -#define flush_dcache_mmap_lock(mapping) do { } while (0)
972 -#define flush_dcache_mmap_unlock(mapping) do { } while (0)
973 +#ifdef CONFIG_M5445X
974 +#include "cf_5445x_cacheflush.h"
975 +#else
976 +#include "cf_548x_cacheflush.h"
977 +#endif
979 #endif /* M68K_CF_CACHEFLUSH_H */