1 Platforms will now have to supply a function plat_device_is_coherent
2 which returns whether a particular device participates in the coherence
3 domain. For most platforms this function will always return a constant 0 or 1.
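
As an illustration (a sketch, not part of the patch itself): on a fully
coherent platform without an IOMMU the whole hook set degenerates into
identity mappings, along the lines of

	static inline dma_addr_t plat_map_dma_mem(struct device *dev,
		void *addr, size_t size)
	{
		return virt_to_phys(addr);	/* no IOMMU: DMA address == physical */
	}

	static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
		struct page *page)
	{
		return page_to_phys(page);
	}

	static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
	{
		return dma_addr;
	}

	static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
	{
		/* nothing to tear down without an IOMMU */
	}

	static inline int plat_device_is_coherent(struct device *dev)
	{
		return 1;	/* all devices participate in the coherence domain */
	}

whereas a non-coherent platform returns 0 there, and a platform with a real
DMA translation table such as Jazz implements the map/unmap hooks with
vdma_alloc()/vdma_free() as in the patch below.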
5 Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
6 Signed-off-by: Felix Fietkau <nbd@openwrt.org>
8 diff -urN linux.old/arch/mips/Kconfig linux.dev/arch/mips/Kconfig
9 --- linux.old/arch/mips/Kconfig 2007-01-10 20:10:37.000000000 +0100
10 +++ linux.dev/arch/mips/Kconfig 2007-02-09 20:26:45.367388152 +0100
17 select DMA_NONCOHERENT
19 select R5000_CPU_SCACHE
21 config DMA_NEED_PCI_MAP_STATE
30 diff -urN linux.old/arch/mips/mm/dma-coherent.c linux.dev/arch/mips/mm/dma-coherent.c
31 --- linux.old/arch/mips/mm/dma-coherent.c 2007-01-10 20:10:37.000000000 +0100
32 +++ linux.dev/arch/mips/mm/dma-coherent.c 1970-01-01 01:00:00.000000000 +0100
35 - * This file is subject to the terms and conditions of the GNU General Public
36 - * License. See the file "COPYING" in the main directory of this archive
39 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
40 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
41 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
43 -#include <linux/types.h>
44 -#include <linux/dma-mapping.h>
45 -#include <linux/mm.h>
46 -#include <linux/module.h>
47 -#include <linux/string.h>
49 -#include <asm/cache.h>
52 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
53 - dma_addr_t * dma_handle, gfp_t gfp)
56 - /* ignore region specifiers */
57 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
59 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
61 - ret = (void *) __get_free_pages(gfp, get_order(size));
64 - memset(ret, 0, size);
65 - *dma_handle = virt_to_phys(ret);
71 -EXPORT_SYMBOL(dma_alloc_noncoherent);
73 -void *dma_alloc_coherent(struct device *dev, size_t size,
74 - dma_addr_t * dma_handle, gfp_t gfp)
75 - __attribute__((alias("dma_alloc_noncoherent")));
77 -EXPORT_SYMBOL(dma_alloc_coherent);
79 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
80 - dma_addr_t dma_handle)
82 - unsigned long addr = (unsigned long) vaddr;
84 - free_pages(addr, get_order(size));
87 -EXPORT_SYMBOL(dma_free_noncoherent);
89 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
90 - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
92 -EXPORT_SYMBOL(dma_free_coherent);
94 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
95 - enum dma_data_direction direction)
97 - BUG_ON(direction == DMA_NONE);
102 -EXPORT_SYMBOL(dma_map_single);
104 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
105 - enum dma_data_direction direction)
107 - BUG_ON(direction == DMA_NONE);
110 -EXPORT_SYMBOL(dma_unmap_single);
112 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
113 - enum dma_data_direction direction)
117 - BUG_ON(direction == DMA_NONE);
119 - for (i = 0; i < nents; i++, sg++) {
120 - sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset;
126 -EXPORT_SYMBOL(dma_map_sg);
128 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
129 - unsigned long offset, size_t size, enum dma_data_direction direction)
131 - BUG_ON(direction == DMA_NONE);
133 - return page_to_phys(page) + offset;
136 -EXPORT_SYMBOL(dma_map_page);
138 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
139 - enum dma_data_direction direction)
141 - BUG_ON(direction == DMA_NONE);
144 -EXPORT_SYMBOL(dma_unmap_page);
146 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
147 - enum dma_data_direction direction)
149 - BUG_ON(direction == DMA_NONE);
152 -EXPORT_SYMBOL(dma_unmap_sg);
154 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
155 - size_t size, enum dma_data_direction direction)
157 - BUG_ON(direction == DMA_NONE);
160 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
162 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
163 - size_t size, enum dma_data_direction direction)
165 - BUG_ON(direction == DMA_NONE);
168 -EXPORT_SYMBOL(dma_sync_single_for_device);
170 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
171 - unsigned long offset, size_t size,
172 - enum dma_data_direction direction)
174 - BUG_ON(direction == DMA_NONE);
177 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
179 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
180 - unsigned long offset, size_t size,
181 - enum dma_data_direction direction)
183 - BUG_ON(direction == DMA_NONE);
186 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
188 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
189 - enum dma_data_direction direction)
191 - BUG_ON(direction == DMA_NONE);
194 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
196 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
197 - enum dma_data_direction direction)
199 - BUG_ON(direction == DMA_NONE);
202 -EXPORT_SYMBOL(dma_sync_sg_for_device);
204 -int dma_mapping_error(dma_addr_t dma_addr)
209 -EXPORT_SYMBOL(dma_mapping_error);
211 -int dma_supported(struct device *dev, u64 mask)
214 - * we fall back to GFP_DMA when the mask isn't all 1s,
215 - * so we can't guarantee allocations that must be
216 - * within a tighter range than GFP_DMA..
218 - if (mask < 0x00ffffff)
224 -EXPORT_SYMBOL(dma_supported);
226 -int dma_is_consistent(dma_addr_t dma_addr)
231 -EXPORT_SYMBOL(dma_is_consistent);
233 -void dma_cache_sync(void *vaddr, size_t size,
234 - enum dma_data_direction direction)
236 - BUG_ON(direction == DMA_NONE);
239 -EXPORT_SYMBOL(dma_cache_sync);
241 -/* The DAC routines are a PCIism.. */
245 -#include <linux/pci.h>
247 -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
248 - struct page *page, unsigned long offset, int direction)
250 - return (dma64_addr_t)page_to_phys(page) + offset;
253 -EXPORT_SYMBOL(pci_dac_page_to_dma);
255 -struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
256 - dma64_addr_t dma_addr)
258 - return mem_map + (dma_addr >> PAGE_SHIFT);
261 -EXPORT_SYMBOL(pci_dac_dma_to_page);
263 -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
264 - dma64_addr_t dma_addr)
266 - return dma_addr & ~PAGE_MASK;
269 -EXPORT_SYMBOL(pci_dac_dma_to_offset);
271 -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
272 - dma64_addr_t dma_addr, size_t len, int direction)
274 - BUG_ON(direction == PCI_DMA_NONE);
277 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
279 -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
280 - dma64_addr_t dma_addr, size_t len, int direction)
282 - BUG_ON(direction == PCI_DMA_NONE);
285 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
287 -#endif /* CONFIG_PCI */
288 diff -urN linux.old/arch/mips/mm/dma-default.c linux.dev/arch/mips/mm/dma-default.c
289 --- linux.old/arch/mips/mm/dma-default.c 1970-01-01 01:00:00.000000000 +0100
290 +++ linux.dev/arch/mips/mm/dma-default.c 2007-02-09 20:26:48.671885792 +0100
293 + * This file is subject to the terms and conditions of the GNU General Public
294 + * License. See the file "COPYING" in the main directory of this archive
295 + * for more details.
297 + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
298 + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
299 + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
302 +#include <linux/types.h>
303 +#include <linux/dma-mapping.h>
304 +#include <linux/mm.h>
305 +#include <linux/module.h>
306 +#include <linux/string.h>
308 +#include <asm/cache.h>
311 +#include <dma-coherence.h>
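+
+/*
+ * <dma-coherence.h> resolves through the machine-specific include paths
+ * (mach-<platform>/ first, mach-generic/ as the fallback), so each
+ * platform supplies its own implementation of the plat_* hooks used below.
+ */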
314 + * Warning on the terminology - Linux calls an uncached area coherent;
315 + * MIPS terminology calls memory areas with hardware maintained coherency
319 +static inline int cpu_is_noncoherent_r10000(struct device *dev)
321 + return !plat_device_is_coherent(dev) &&
322 + (current_cpu_data.cputype == CPU_R10000 ||
323 + current_cpu_data.cputype == CPU_R12000);
326 +void *dma_alloc_noncoherent(struct device *dev, size_t size,
327 + dma_addr_t * dma_handle, gfp_t gfp)
331 + /* ignore region specifiers */
332 + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
334 + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
336 + ret = (void *) __get_free_pages(gfp, get_order(size));
339 + memset(ret, 0, size);
340 + *dma_handle = plat_map_dma_mem(dev, ret, size);
346 +EXPORT_SYMBOL(dma_alloc_noncoherent);
348 +void *dma_alloc_coherent(struct device *dev, size_t size,
349 + dma_addr_t * dma_handle, gfp_t gfp)
353 + /* ignore region specifiers */
354 + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
356 + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
358 + ret = (void *) __get_free_pages(gfp, get_order(size));
361 + memset(ret, 0, size);
362 + *dma_handle = plat_map_dma_mem(dev, ret, size);
364 + if (!plat_device_is_coherent(dev)) {
365 + dma_cache_wback_inv((unsigned long) ret, size);
366 + ret = UNCAC_ADDR(ret);
373 +EXPORT_SYMBOL(dma_alloc_coherent);
375 +void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
376 + dma_addr_t dma_handle)
378 + free_pages((unsigned long) vaddr, get_order(size));
381 +EXPORT_SYMBOL(dma_free_noncoherent);
383 +void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
384 + dma_addr_t dma_handle)
386 + unsigned long addr = (unsigned long) vaddr;
388 + if (!plat_device_is_coherent(dev))
389 + addr = CAC_ADDR(addr);
391 + free_pages(addr, get_order(size));
394 +EXPORT_SYMBOL(dma_free_coherent);
396 +static inline void __dma_sync(unsigned long addr, size_t size,
397 + enum dma_data_direction direction)
399 + switch (direction) {
400 + case DMA_TO_DEVICE:
401 + dma_cache_wback(addr, size);
404 + case DMA_FROM_DEVICE:
405 + dma_cache_inv(addr, size);
408 + case DMA_BIDIRECTIONAL:
409 + dma_cache_wback_inv(addr, size);
417 +dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
418 + enum dma_data_direction direction)
420 + unsigned long addr = (unsigned long) ptr;
422 + if (!plat_device_is_coherent(dev))
423 + __dma_sync(addr, size, direction);
425 + return plat_map_dma_mem(dev, ptr, size);
428 +EXPORT_SYMBOL(dma_map_single);
430 +void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
431 + enum dma_data_direction direction)
433 + if (cpu_is_noncoherent_r10000(dev))
434 + __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
437 + plat_unmap_dma_mem(dma_addr);
440 +EXPORT_SYMBOL(dma_unmap_single);
442 +int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
443 + enum dma_data_direction direction)
447 + BUG_ON(direction == DMA_NONE);
449 + for (i = 0; i < nents; i++, sg++) {
450 + unsigned long addr;
452 + addr = (unsigned long) page_address(sg->page);
453 + if (!plat_device_is_coherent(dev) && addr)
454 + __dma_sync(addr + sg->offset, sg->length, direction);
455 + sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
462 +EXPORT_SYMBOL(dma_map_sg);
464 +dma_addr_t dma_map_page(struct device *dev, struct page *page,
465 + unsigned long offset, size_t size, enum dma_data_direction direction)
467 + BUG_ON(direction == DMA_NONE);
469 + if (!plat_device_is_coherent(dev)) {
470 + unsigned long addr;
472 + addr = (unsigned long) page_address(page) + offset;
473 + dma_cache_wback_inv(addr, size);
476 + return plat_map_dma_mem_page(dev, page) + offset;
479 +EXPORT_SYMBOL(dma_map_page);
481 +void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
482 + enum dma_data_direction direction)
484 + BUG_ON(direction == DMA_NONE);
486 + if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
487 + unsigned long addr;
489 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_address);
490 + dma_cache_wback_inv(addr, size);
493 + plat_unmap_dma_mem(dma_address);
496 +EXPORT_SYMBOL(dma_unmap_page);
498 +void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
499 + enum dma_data_direction direction)
501 + unsigned long addr;
504 + BUG_ON(direction == DMA_NONE);
506 + for (i = 0; i < nhwentries; i++, sg++) {
507 + if (!plat_device_is_coherent(dev) &&
508 + direction != DMA_TO_DEVICE) {
509 + addr = (unsigned long) page_address(sg->page);
511 + __dma_sync(addr + sg->offset, sg->length,
514 + plat_unmap_dma_mem(sg->dma_address);
518 +EXPORT_SYMBOL(dma_unmap_sg);
520 +void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
521 + size_t size, enum dma_data_direction direction)
523 + BUG_ON(direction == DMA_NONE);
525 + if (cpu_is_noncoherent_r10000(dev)) {
526 + unsigned long addr;
528 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
529 + __dma_sync(addr, size, direction);
533 +EXPORT_SYMBOL(dma_sync_single_for_cpu);
535 +void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
536 + size_t size, enum dma_data_direction direction)
538 + BUG_ON(direction == DMA_NONE);
540 + if (cpu_is_noncoherent_r10000(dev)) {
541 + unsigned long addr;
543 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
544 + __dma_sync(addr, size, direction);
548 +EXPORT_SYMBOL(dma_sync_single_for_device);
550 +void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
551 + unsigned long offset, size_t size, enum dma_data_direction direction)
553 + BUG_ON(direction == DMA_NONE);
555 + if (cpu_is_noncoherent_r10000(dev)) {
556 + unsigned long addr;
558 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
559 + __dma_sync(addr + offset, size, direction);
563 +EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
565 +void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
566 + unsigned long offset, size_t size, enum dma_data_direction direction)
568 + BUG_ON(direction == DMA_NONE);
570 + if (cpu_is_noncoherent_r10000(dev)) {
571 + unsigned long addr;
573 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
574 + __dma_sync(addr + offset, size, direction);
578 +EXPORT_SYMBOL(dma_sync_single_range_for_device);
580 +void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
581 + enum dma_data_direction direction)
585 + BUG_ON(direction == DMA_NONE);
587 + /* Make sure that gcc doesn't leave the empty loop body. */
588 + for (i = 0; i < nelems; i++, sg++) {
589 + if (!plat_device_is_coherent(dev))
590 + __dma_sync((unsigned long)page_address(sg->page),
591 + sg->length, direction);
596 +EXPORT_SYMBOL(dma_sync_sg_for_cpu);
598 +void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
599 + enum dma_data_direction direction)
603 + BUG_ON(direction == DMA_NONE);
605 + /* Make sure that gcc doesn't leave the empty loop body. */
606 + for (i = 0; i < nelems; i++, sg++) {
607 + if (!plat_device_is_coherent(dev))
608 + __dma_sync((unsigned long)page_address(sg->page),
609 + sg->length, direction);
614 +EXPORT_SYMBOL(dma_sync_sg_for_device);
616 +int dma_mapping_error(dma_addr_t dma_addr)
621 +EXPORT_SYMBOL(dma_mapping_error);
623 +int dma_supported(struct device *dev, u64 mask)
626 + * we fall back to GFP_DMA when the mask isn't all 1s,
627 + * so we can't guarantee allocations that must be
628 + * within a tighter range than GFP_DMA..
630 + if (mask < 0x00ffffff)
636 +EXPORT_SYMBOL(dma_supported);
638 +int dma_is_consistent(dma_addr_t dma_addr)
640 + return plat_device_is_coherent(NULL);
643 +EXPORT_SYMBOL(dma_is_consistent);
645 +void dma_cache_sync(void *vaddr, size_t size,
646 + enum dma_data_direction direction)
648 + BUG_ON(direction == DMA_NONE);
650 + if (!plat_device_is_coherent(NULL))
651 + dma_cache_wback_inv((unsigned long)vaddr, size);
654 +EXPORT_SYMBOL(dma_cache_sync);
655 diff -urN linux.old/arch/mips/mm/dma-ip27.c linux.dev/arch/mips/mm/dma-ip27.c
656 --- linux.old/arch/mips/mm/dma-ip27.c 2007-01-10 20:10:37.000000000 +0100
657 +++ linux.dev/arch/mips/mm/dma-ip27.c 1970-01-01 01:00:00.000000000 +0100
660 - * This file is subject to the terms and conditions of the GNU General Public
661 - * License. See the file "COPYING" in the main directory of this archive
662 - * for more details.
664 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
665 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
666 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
668 -#include <linux/types.h>
669 -#include <linux/mm.h>
670 -#include <linux/module.h>
671 -#include <linux/string.h>
672 -#include <linux/pci.h>
674 -#include <asm/cache.h>
675 -#include <asm/pci/bridge.h>
677 -#define pdev_to_baddr(pdev, addr) \
678 - (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
679 -#define dev_to_baddr(dev, addr) \
680 - pdev_to_baddr(to_pci_dev(dev), (addr))
682 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
683 - dma_addr_t * dma_handle, gfp_t gfp)
687 - /* ignore region specifiers */
688 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
690 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
692 - ret = (void *) __get_free_pages(gfp, get_order(size));
695 - memset(ret, 0, size);
696 - *dma_handle = dev_to_baddr(dev, virt_to_phys(ret));
702 -EXPORT_SYMBOL(dma_alloc_noncoherent);
704 -void *dma_alloc_coherent(struct device *dev, size_t size,
705 - dma_addr_t * dma_handle, gfp_t gfp)
706 - __attribute__((alias("dma_alloc_noncoherent")));
708 -EXPORT_SYMBOL(dma_alloc_coherent);
710 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
711 - dma_addr_t dma_handle)
713 - unsigned long addr = (unsigned long) vaddr;
715 - free_pages(addr, get_order(size));
718 -EXPORT_SYMBOL(dma_free_noncoherent);
720 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
721 - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
723 -EXPORT_SYMBOL(dma_free_coherent);
725 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
726 - enum dma_data_direction direction)
728 - BUG_ON(direction == DMA_NONE);
730 - return dev_to_baddr(dev, __pa(ptr));
733 -EXPORT_SYMBOL(dma_map_single);
735 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
736 - enum dma_data_direction direction)
738 - BUG_ON(direction == DMA_NONE);
741 -EXPORT_SYMBOL(dma_unmap_single);
743 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
744 - enum dma_data_direction direction)
748 - BUG_ON(direction == DMA_NONE);
750 - for (i = 0; i < nents; i++, sg++) {
751 - sg->dma_address = (dma_addr_t) dev_to_baddr(dev,
752 - page_to_phys(sg->page) + sg->offset);
758 -EXPORT_SYMBOL(dma_map_sg);
760 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
761 - unsigned long offset, size_t size, enum dma_data_direction direction)
763 - BUG_ON(direction == DMA_NONE);
765 - return dev_to_baddr(dev, page_to_phys(page) + offset);
768 -EXPORT_SYMBOL(dma_map_page);
770 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
771 - enum dma_data_direction direction)
773 - BUG_ON(direction == DMA_NONE);
776 -EXPORT_SYMBOL(dma_unmap_page);
778 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
779 - enum dma_data_direction direction)
781 - BUG_ON(direction == DMA_NONE);
784 -EXPORT_SYMBOL(dma_unmap_sg);
786 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
787 - enum dma_data_direction direction)
789 - BUG_ON(direction == DMA_NONE);
792 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
794 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
795 - enum dma_data_direction direction)
797 - BUG_ON(direction == DMA_NONE);
800 -EXPORT_SYMBOL(dma_sync_single_for_device);
802 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
803 - unsigned long offset, size_t size,
804 - enum dma_data_direction direction)
806 - BUG_ON(direction == DMA_NONE);
809 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
811 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
812 - unsigned long offset, size_t size,
813 - enum dma_data_direction direction)
815 - BUG_ON(direction == DMA_NONE);
818 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
820 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
821 - enum dma_data_direction direction)
823 - BUG_ON(direction == DMA_NONE);
826 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
828 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
829 - enum dma_data_direction direction)
831 - BUG_ON(direction == DMA_NONE);
834 -EXPORT_SYMBOL(dma_sync_sg_for_device);
836 -int dma_mapping_error(dma_addr_t dma_addr)
841 -EXPORT_SYMBOL(dma_mapping_error);
843 -int dma_supported(struct device *dev, u64 mask)
846 - * we fall back to GFP_DMA when the mask isn't all 1s,
847 - * so we can't guarantee allocations that must be
848 - * within a tighter range than GFP_DMA..
850 - if (mask < 0x00ffffff)
856 -EXPORT_SYMBOL(dma_supported);
858 -int dma_is_consistent(dma_addr_t dma_addr)
863 -EXPORT_SYMBOL(dma_is_consistent);
865 -void dma_cache_sync(void *vaddr, size_t size,
866 - enum dma_data_direction direction)
868 - BUG_ON(direction == DMA_NONE);
871 -EXPORT_SYMBOL(dma_cache_sync);
873 -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
874 - struct page *page, unsigned long offset, int direction)
876 - dma64_addr_t addr = page_to_phys(page) + offset;
878 - return (dma64_addr_t) pdev_to_baddr(pdev, addr);
881 -EXPORT_SYMBOL(pci_dac_page_to_dma);
883 -struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
884 - dma64_addr_t dma_addr)
886 - struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
888 - return pfn_to_page((dma_addr - bc->baddr) >> PAGE_SHIFT);
891 -EXPORT_SYMBOL(pci_dac_dma_to_page);
893 -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
894 - dma64_addr_t dma_addr)
896 - return dma_addr & ~PAGE_MASK;
899 -EXPORT_SYMBOL(pci_dac_dma_to_offset);
901 -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
902 - dma64_addr_t dma_addr, size_t len, int direction)
904 - BUG_ON(direction == PCI_DMA_NONE);
907 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
909 -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
910 - dma64_addr_t dma_addr, size_t len, int direction)
912 - BUG_ON(direction == PCI_DMA_NONE);
915 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
916 diff -urN linux.old/arch/mips/mm/dma-ip32.c linux.dev/arch/mips/mm/dma-ip32.c
917 --- linux.old/arch/mips/mm/dma-ip32.c 2007-01-10 20:10:37.000000000 +0100
918 +++ linux.dev/arch/mips/mm/dma-ip32.c 1970-01-01 01:00:00.000000000 +0100
921 - * This file is subject to the terms and conditions of the GNU General Public
922 - * License. See the file "COPYING" in the main directory of this archive
923 - * for more details.
925 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
926 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
927 - * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
928 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
929 - * IP32 changes by Ilya.
931 -#include <linux/types.h>
932 -#include <linux/mm.h>
933 -#include <linux/module.h>
934 -#include <linux/string.h>
935 -#include <linux/dma-mapping.h>
937 -#include <asm/cache.h>
939 -#include <asm/ip32/crime.h>
942 - * Warning on the terminology - Linux calls an uncached area coherent;
943 - * MIPS terminology calls memory areas with hardware maintained coherency
949 - * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
950 - * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for native-endian)
951 - * 3. All other devices see memory as one big chunk at 0x40000000
952 - * 4. Non-PCI devices will pass NULL as struct device*
953 - * Thus we translate differently, depending on device.
956 -#define RAM_OFFSET_MASK 0x3fffffff
958 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
959 - dma_addr_t * dma_handle, gfp_t gfp)
962 - /* ignore region specifiers */
963 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
965 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
967 - ret = (void *) __get_free_pages(gfp, get_order(size));
970 - unsigned long addr = virt_to_phys(ret)&RAM_OFFSET_MASK;
971 - memset(ret, 0, size);
973 - addr+= CRIME_HI_MEM_BASE;
974 - *dma_handle = addr;
980 -EXPORT_SYMBOL(dma_alloc_noncoherent);
982 -void *dma_alloc_coherent(struct device *dev, size_t size,
983 - dma_addr_t * dma_handle, gfp_t gfp)
987 - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
989 - dma_cache_wback_inv((unsigned long) ret, size);
990 - ret = UNCAC_ADDR(ret);
996 -EXPORT_SYMBOL(dma_alloc_coherent);
998 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
999 - dma_addr_t dma_handle)
1001 - free_pages((unsigned long) vaddr, get_order(size));
1004 -EXPORT_SYMBOL(dma_free_noncoherent);
1006 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
1007 - dma_addr_t dma_handle)
1009 - unsigned long addr = (unsigned long) vaddr;
1011 - addr = CAC_ADDR(addr);
1012 - free_pages(addr, get_order(size));
1015 -EXPORT_SYMBOL(dma_free_coherent);
1017 -static inline void __dma_sync(unsigned long addr, size_t size,
1018 - enum dma_data_direction direction)
1020 - switch (direction) {
1021 - case DMA_TO_DEVICE:
1022 - dma_cache_wback(addr, size);
1025 - case DMA_FROM_DEVICE:
1026 - dma_cache_inv(addr, size);
1029 - case DMA_BIDIRECTIONAL:
1030 - dma_cache_wback_inv(addr, size);
1038 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
1039 - enum dma_data_direction direction)
1041 - unsigned long addr = (unsigned long) ptr;
1043 - switch (direction) {
1044 - case DMA_TO_DEVICE:
1045 - dma_cache_wback(addr, size);
1048 - case DMA_FROM_DEVICE:
1049 - dma_cache_inv(addr, size);
1052 - case DMA_BIDIRECTIONAL:
1053 - dma_cache_wback_inv(addr, size);
1060 - addr = virt_to_phys(ptr)&RAM_OFFSET_MASK;
1062 - addr+=CRIME_HI_MEM_BASE;
1063 - return (dma_addr_t)addr;
1066 -EXPORT_SYMBOL(dma_map_single);
1068 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
1069 - enum dma_data_direction direction)
1071 - switch (direction) {
1072 - case DMA_TO_DEVICE:
1075 - case DMA_FROM_DEVICE:
1078 - case DMA_BIDIRECTIONAL:
1086 -EXPORT_SYMBOL(dma_unmap_single);
1088 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1089 - enum dma_data_direction direction)
1093 - BUG_ON(direction == DMA_NONE);
1095 - for (i = 0; i < nents; i++, sg++) {
1096 - unsigned long addr;
1098 - addr = (unsigned long) page_address(sg->page)+sg->offset;
1100 - __dma_sync(addr, sg->length, direction);
1101 - addr = __pa(addr)&RAM_OFFSET_MASK;
1103 - addr += CRIME_HI_MEM_BASE;
1104 - sg->dma_address = (dma_addr_t)addr;
1110 -EXPORT_SYMBOL(dma_map_sg);
1112 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
1113 - unsigned long offset, size_t size, enum dma_data_direction direction)
1115 - unsigned long addr;
1117 - BUG_ON(direction == DMA_NONE);
1119 - addr = (unsigned long) page_address(page) + offset;
1120 - dma_cache_wback_inv(addr, size);
1121 - addr = __pa(addr)&RAM_OFFSET_MASK;
1123 - addr += CRIME_HI_MEM_BASE;
1125 - return (dma_addr_t)addr;
1128 -EXPORT_SYMBOL(dma_map_page);
1130 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
1131 - enum dma_data_direction direction)
1133 - BUG_ON(direction == DMA_NONE);
1135 - if (direction != DMA_TO_DEVICE) {
1136 - unsigned long addr;
1138 - dma_address&=RAM_OFFSET_MASK;
1139 - addr = dma_address + PAGE_OFFSET;
1140 - if(dma_address>=256*1024*1024)
1141 - addr+=CRIME_HI_MEM_BASE;
1142 - dma_cache_wback_inv(addr, size);
1146 -EXPORT_SYMBOL(dma_unmap_page);
1148 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
1149 - enum dma_data_direction direction)
1151 - unsigned long addr;
1154 - BUG_ON(direction == DMA_NONE);
1156 - if (direction == DMA_TO_DEVICE)
1159 - for (i = 0; i < nhwentries; i++, sg++) {
1160 - addr = (unsigned long) page_address(sg->page);
1163 - dma_cache_wback_inv(addr + sg->offset, sg->length);
1167 -EXPORT_SYMBOL(dma_unmap_sg);
1169 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1170 - size_t size, enum dma_data_direction direction)
1172 - unsigned long addr;
1174 - BUG_ON(direction == DMA_NONE);
1176 - dma_handle&=RAM_OFFSET_MASK;
1177 - addr = dma_handle + PAGE_OFFSET;
1178 - if(dma_handle>=256*1024*1024)
1179 - addr+=CRIME_HI_MEM_BASE;
1180 - __dma_sync(addr, size, direction);
1183 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
1185 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
1186 - size_t size, enum dma_data_direction direction)
1188 - unsigned long addr;
1190 - BUG_ON(direction == DMA_NONE);
1192 - dma_handle&=RAM_OFFSET_MASK;
1193 - addr = dma_handle + PAGE_OFFSET;
1194 - if(dma_handle>=256*1024*1024)
1195 - addr+=CRIME_HI_MEM_BASE;
1196 - __dma_sync(addr, size, direction);
1199 -EXPORT_SYMBOL(dma_sync_single_for_device);
1201 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
1202 - unsigned long offset, size_t size, enum dma_data_direction direction)
1204 - unsigned long addr;
1206 - BUG_ON(direction == DMA_NONE);
1208 - dma_handle&=RAM_OFFSET_MASK;
1209 - addr = dma_handle + offset + PAGE_OFFSET;
1210 - if(dma_handle>=256*1024*1024)
1211 - addr+=CRIME_HI_MEM_BASE;
1212 - __dma_sync(addr, size, direction);
1215 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
1217 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
1218 - unsigned long offset, size_t size, enum dma_data_direction direction)
1220 - unsigned long addr;
1222 - BUG_ON(direction == DMA_NONE);
1224 - dma_handle&=RAM_OFFSET_MASK;
1225 - addr = dma_handle + offset + PAGE_OFFSET;
1226 - if(dma_handle>=256*1024*1024)
1227 - addr+=CRIME_HI_MEM_BASE;
1228 - __dma_sync(addr, size, direction);
1231 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
1233 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
1234 - enum dma_data_direction direction)
1238 - BUG_ON(direction == DMA_NONE);
1240 - /* Make sure that gcc doesn't leave the empty loop body. */
1241 - for (i = 0; i < nelems; i++, sg++)
1242 - __dma_sync((unsigned long)page_address(sg->page),
1243 - sg->length, direction);
1246 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
1248 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
1249 - enum dma_data_direction direction)
1253 - BUG_ON(direction == DMA_NONE);
1255 - /* Make sure that gcc doesn't leave the empty loop body. */
1256 - for (i = 0; i < nelems; i++, sg++)
1257 - __dma_sync((unsigned long)page_address(sg->page),
1258 - sg->length, direction);
1261 -EXPORT_SYMBOL(dma_sync_sg_for_device);
1263 -int dma_mapping_error(dma_addr_t dma_addr)
1268 -EXPORT_SYMBOL(dma_mapping_error);
1270 -int dma_supported(struct device *dev, u64 mask)
1273 - * we fall back to GFP_DMA when the mask isn't all 1s,
1274 - * so we can't guarantee allocations that must be
1275 - * within a tighter range than GFP_DMA..
1277 - if (mask < 0x00ffffff)
1283 -EXPORT_SYMBOL(dma_supported);
1285 -int dma_is_consistent(dma_addr_t dma_addr)
1290 -EXPORT_SYMBOL(dma_is_consistent);
1292 -void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
1294 - if (direction == DMA_NONE)
1297 - dma_cache_wback_inv((unsigned long)vaddr, size);
1300 -EXPORT_SYMBOL(dma_cache_sync);
1302 diff -urN linux.old/arch/mips/mm/dma-noncoherent.c linux.dev/arch/mips/mm/dma-noncoherent.c
1303 --- linux.old/arch/mips/mm/dma-noncoherent.c 2007-01-10 20:10:37.000000000 +0100
1304 +++ linux.dev/arch/mips/mm/dma-noncoherent.c 1970-01-01 01:00:00.000000000 +0100
1307 - * This file is subject to the terms and conditions of the GNU General Public
1308 - * License. See the file "COPYING" in the main directory of this archive
1309 - * for more details.
1311 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
1312 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
1313 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
1315 -#include <linux/types.h>
1316 -#include <linux/mm.h>
1317 -#include <linux/module.h>
1318 -#include <linux/string.h>
1319 -#include <linux/dma-mapping.h>
1321 -#include <asm/cache.h>
1322 -#include <asm/io.h>
1325 - * Warning on the terminology - Linux calls an uncached area coherent;
1326 - * MIPS terminology calls memory areas with hardware maintained coherency
1330 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
1331 - dma_addr_t * dma_handle, gfp_t gfp)
1334 - /* ignore region specifiers */
1335 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
1337 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
1339 - ret = (void *) __get_free_pages(gfp, get_order(size));
1341 - if (ret != NULL) {
1342 - memset(ret, 0, size);
1343 - *dma_handle = virt_to_phys(ret);
1349 -EXPORT_SYMBOL(dma_alloc_noncoherent);
1351 -void *dma_alloc_coherent(struct device *dev, size_t size,
1352 - dma_addr_t * dma_handle, gfp_t gfp)
1356 - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
1358 - dma_cache_wback_inv((unsigned long) ret, size);
1359 - ret = UNCAC_ADDR(ret);
1365 -EXPORT_SYMBOL(dma_alloc_coherent);
1367 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
1368 - dma_addr_t dma_handle)
1370 - free_pages((unsigned long) vaddr, get_order(size));
1373 -EXPORT_SYMBOL(dma_free_noncoherent);
1375 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
1376 - dma_addr_t dma_handle)
1378 - unsigned long addr = (unsigned long) vaddr;
1380 - addr = CAC_ADDR(addr);
1381 - free_pages(addr, get_order(size));
1384 -EXPORT_SYMBOL(dma_free_coherent);
1386 -static inline void __dma_sync(unsigned long addr, size_t size,
1387 - enum dma_data_direction direction)
1389 - switch (direction) {
1390 - case DMA_TO_DEVICE:
1391 - dma_cache_wback(addr, size);
1394 - case DMA_FROM_DEVICE:
1395 - dma_cache_inv(addr, size);
1398 - case DMA_BIDIRECTIONAL:
1399 - dma_cache_wback_inv(addr, size);
1407 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
1408 - enum dma_data_direction direction)
1410 - unsigned long addr = (unsigned long) ptr;
1412 - __dma_sync(addr, size, direction);
1414 - return virt_to_phys(ptr);
1417 -EXPORT_SYMBOL(dma_map_single);
1419 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
1420 - enum dma_data_direction direction)
1422 - unsigned long addr;
1423 - addr = dma_addr + PAGE_OFFSET;
1425 - //__dma_sync(addr, size, direction);
1428 -EXPORT_SYMBOL(dma_unmap_single);
1430 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1431 - enum dma_data_direction direction)
1435 - BUG_ON(direction == DMA_NONE);
1437 - for (i = 0; i < nents; i++, sg++) {
1438 - unsigned long addr;
1440 - addr = (unsigned long) page_address(sg->page);
1442 - __dma_sync(addr + sg->offset, sg->length, direction);
1443 - sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
1451 -EXPORT_SYMBOL(dma_map_sg);
1453 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
1454 - unsigned long offset, size_t size, enum dma_data_direction direction)
1456 - unsigned long addr;
1458 - BUG_ON(direction == DMA_NONE);
1460 - addr = (unsigned long) page_address(page) + offset;
1461 - dma_cache_wback_inv(addr, size);
1463 - return page_to_phys(page) + offset;
1466 -EXPORT_SYMBOL(dma_map_page);
1468 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
1469 - enum dma_data_direction direction)
1471 - BUG_ON(direction == DMA_NONE);
1473 - if (direction != DMA_TO_DEVICE) {
1474 - unsigned long addr;
1476 - addr = dma_address + PAGE_OFFSET;
1477 - dma_cache_wback_inv(addr, size);
1481 -EXPORT_SYMBOL(dma_unmap_page);
1483 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
1484 - enum dma_data_direction direction)
1486 - unsigned long addr;
1489 - BUG_ON(direction == DMA_NONE);
1491 - if (direction == DMA_TO_DEVICE)
1494 - for (i = 0; i < nhwentries; i++, sg++) {
1495 - addr = (unsigned long) page_address(sg->page);
1497 - __dma_sync(addr + sg->offset, sg->length, direction);
1501 -EXPORT_SYMBOL(dma_unmap_sg);
1503 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1504 - size_t size, enum dma_data_direction direction)
1506 - unsigned long addr;
1508 - BUG_ON(direction == DMA_NONE);
1510 - addr = dma_handle + PAGE_OFFSET;
1511 - __dma_sync(addr, size, direction);
1514 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
1516 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
1517 - size_t size, enum dma_data_direction direction)
1519 - unsigned long addr;
1521 - BUG_ON(direction == DMA_NONE);
1523 - addr = dma_handle + PAGE_OFFSET;
1524 - __dma_sync(addr, size, direction);
1527 -EXPORT_SYMBOL(dma_sync_single_for_device);
1529 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
1530 - unsigned long offset, size_t size, enum dma_data_direction direction)
1532 - unsigned long addr;
1534 - BUG_ON(direction == DMA_NONE);
1536 - addr = dma_handle + offset + PAGE_OFFSET;
1537 - __dma_sync(addr, size, direction);
1540 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
1542 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
1543 - unsigned long offset, size_t size, enum dma_data_direction direction)
1545 - unsigned long addr;
1547 - BUG_ON(direction == DMA_NONE);
1549 - addr = dma_handle + offset + PAGE_OFFSET;
1550 - __dma_sync(addr, size, direction);
1553 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
1555 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
1556 - enum dma_data_direction direction)
1560 - BUG_ON(direction == DMA_NONE);
1562 - /* Make sure that gcc doesn't leave the empty loop body. */
1563 - for (i = 0; i < nelems; i++, sg++)
1564 - __dma_sync((unsigned long)page_address(sg->page),
1565 - sg->length, direction);
1568 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
1570 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
1571 - enum dma_data_direction direction)
1575 - BUG_ON(direction == DMA_NONE);
1577 - /* Make sure that gcc doesn't leave the empty loop body. */
1578 - for (i = 0; i < nelems; i++, sg++)
1579 - __dma_sync((unsigned long)page_address(sg->page),
1580 - sg->length, direction);
1583 -EXPORT_SYMBOL(dma_sync_sg_for_device);
1585 -int dma_mapping_error(dma_addr_t dma_addr)
1590 -EXPORT_SYMBOL(dma_mapping_error);
1592 -int dma_supported(struct device *dev, u64 mask)
1595 - * we fall back to GFP_DMA when the mask isn't all 1s,
1596 - * so we can't guarantee allocations that must be
1597 - * within a tighter range than GFP_DMA..
1599 - if (mask < 0x00ffffff)
1605 -EXPORT_SYMBOL(dma_supported);
1607 -int dma_is_consistent(dma_addr_t dma_addr)
1612 -EXPORT_SYMBOL(dma_is_consistent);
1614 -void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
1616 - if (direction == DMA_NONE)
1619 - dma_cache_wback_inv((unsigned long)vaddr, size);
1622 -EXPORT_SYMBOL(dma_cache_sync);
1624 -/* The DAC routines are a PCIism.. */
1628 -#include <linux/pci.h>
1630 -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
1631 - struct page *page, unsigned long offset, int direction)
1633 - return (dma64_addr_t)page_to_phys(page) + offset;
1636 -EXPORT_SYMBOL(pci_dac_page_to_dma);
1638 -struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
1639 - dma64_addr_t dma_addr)
1641 - return mem_map + (dma_addr >> PAGE_SHIFT);
1644 -EXPORT_SYMBOL(pci_dac_dma_to_page);
1646 -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
1647 - dma64_addr_t dma_addr)
1649 - return dma_addr & ~PAGE_MASK;
1652 -EXPORT_SYMBOL(pci_dac_dma_to_offset);
1654 -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
1655 - dma64_addr_t dma_addr, size_t len, int direction)
1657 - BUG_ON(direction == PCI_DMA_NONE);
1659 - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1662 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
1664 -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
1665 - dma64_addr_t dma_addr, size_t len, int direction)
1667 - BUG_ON(direction == PCI_DMA_NONE);
1669 - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1672 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
1674 -#endif /* CONFIG_PCI */
1675 diff -urN linux.old/arch/mips/mm/Makefile linux.dev/arch/mips/mm/Makefile
1676 --- linux.old/arch/mips/mm/Makefile 2007-01-10 20:10:37.000000000 +0100
1677 +++ linux.dev/arch/mips/mm/Makefile 2007-02-09 20:26:45.376386784 +0100
1679 # Makefile for the Linux/MIPS-specific parts of the memory manager.
1682 -obj-y += cache.o extable.o fault.o init.o pgtable.o \
1683 - tlbex.o tlbex-fault.o
1684 +obj-y += cache.o dma-default.o extable.o fault.o \
1685 + init.o pgtable.o tlbex.o tlbex-fault.o
1687 obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
1688 obj-$(CONFIG_64BIT) += pgtable-64.o
1690 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
1691 obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
1694 -# Choose one DMA coherency model
1696 -ifndef CONFIG_OWN_DMA
1697 -obj-$(CONFIG_DMA_COHERENT) += dma-coherent.o
1698 -obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
1700 -obj-$(CONFIG_DMA_IP27) += dma-ip27.o
1701 -obj-$(CONFIG_DMA_IP32) += dma-ip32.o
1703 EXTRA_AFLAGS := $(CFLAGS)
1704 diff -urN linux.old/arch/mips/pci/Makefile linux.dev/arch/mips/pci/Makefile
1705 --- linux.old/arch/mips/pci/Makefile 2007-01-10 20:10:37.000000000 +0100
1706 +++ linux.dev/arch/mips/pci/Makefile 2007-02-09 20:26:50.961537712 +0100
1708 # Makefile for the PCI specific kernel interface routines under Linux.
1712 +obj-y += pci.o pci-dac.o
1715 # PCI bus host bridge specific code
1716 diff -urN linux.old/arch/mips/pci/pci-dac.c linux.dev/arch/mips/pci/pci-dac.c
1717 --- linux.old/arch/mips/pci/pci-dac.c 1970-01-01 01:00:00.000000000 +0100
1718 +++ linux.dev/arch/mips/pci/pci-dac.c 2007-02-09 20:26:50.961537712 +0100
1721 + * This file is subject to the terms and conditions of the GNU General Public
1722 + * License. See the file "COPYING" in the main directory of this archive
1723 + * for more details.
1725 + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
1726 + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
1727 + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
1730 +#include <linux/types.h>
1731 +#include <linux/dma-mapping.h>
1732 +#include <linux/mm.h>
1733 +#include <linux/module.h>
1734 +#include <linux/string.h>
1736 +#include <asm/cache.h>
1737 +#include <asm/io.h>
1739 +#include <dma-coherence.h>
1741 +#include <linux/pci.h>
1743 +dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
1744 + struct page *page, unsigned long offset, int direction)
1746 + struct device *dev = &pdev->dev;
1748 + BUG_ON(direction == DMA_NONE);
1750 + if (!plat_device_is_coherent(dev)) {
1751 + unsigned long addr;
1753 + addr = (unsigned long) page_address(page) + offset;
1754 + dma_cache_wback_inv(addr, PAGE_SIZE);
1757 + return plat_map_dma_mem_page(dev, page) + offset;
1760 +EXPORT_SYMBOL(pci_dac_page_to_dma);
1762 +struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
1763 + dma64_addr_t dma_addr)
1765 + return pfn_to_page(plat_dma_addr_to_phys(dma_addr) >> PAGE_SHIFT);
1768 +EXPORT_SYMBOL(pci_dac_dma_to_page);
1770 +unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
1771 + dma64_addr_t dma_addr)
1773 + return dma_addr & ~PAGE_MASK;
1776 +EXPORT_SYMBOL(pci_dac_dma_to_offset);
1778 +void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
1779 + dma64_addr_t dma_addr, size_t len, int direction)
1781 + BUG_ON(direction == PCI_DMA_NONE);
1783 + if (!plat_device_is_coherent(&pdev->dev))
1784 + dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1787 +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
1789 +void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
1790 + dma64_addr_t dma_addr, size_t len, int direction)
1792 + BUG_ON(direction == PCI_DMA_NONE);
1794 + if (!plat_device_is_coherent(&pdev->dev))
1795 + dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1798 +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
1799 diff -urN linux.old/include/asm-mips/mach-generic/dma-coherence.h linux.dev/include/asm-mips/mach-generic/dma-coherence.h
1800 --- linux.old/include/asm-mips/mach-generic/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1801 +++ linux.dev/include/asm-mips/mach-generic/dma-coherence.h 2007-02-09 20:26:50.962537560 +0100
1804 + * This file is subject to the terms and conditions of the GNU General Public
1805 + * License. See the file "COPYING" in the main directory of this archive
1806 + * for more details.
1808 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1811 +#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H
1812 +#define __ASM_MACH_GENERIC_DMA_COHERENCE_H
1816 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
1818 + return virt_to_phys(addr);
1821 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
1823 + return page_to_phys(page);
1826 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
1831 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
1835 +static inline int plat_device_is_coherent(struct device *dev)
1837 +#ifdef CONFIG_DMA_COHERENT
1840 +#ifdef CONFIG_DMA_NONCOHERENT
1845 +#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
1846 diff -urN linux.old/include/asm-mips/mach-generic/kmalloc.h linux.dev/include/asm-mips/mach-generic/kmalloc.h
1847 --- linux.old/include/asm-mips/mach-generic/kmalloc.h 2007-01-10 20:10:37.000000000 +0100
1848 +++ linux.dev/include/asm-mips/mach-generic/kmalloc.h 2007-02-09 20:26:50.962537560 +0100
1850 #ifndef CONFIG_DMA_COHERENT
1852 * Total overkill for most systems but need as a safe default.
1853 + * Set this one if any device in the system might do non-coherent DMA.
1855 #define ARCH_KMALLOC_MINALIGN 128
1857 diff -urN linux.old/include/asm-mips/mach-ip27/dma-coherence.h linux.dev/include/asm-mips/mach-ip27/dma-coherence.h
1858 --- linux.old/include/asm-mips/mach-ip27/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1859 +++ linux.dev/include/asm-mips/mach-ip27/dma-coherence.h 2007-02-09 20:26:50.962537560 +0100
1862 + * This file is subject to the terms and conditions of the GNU General Public
1863 + * License. See the file "COPYING" in the main directory of this archive
1864 + * for more details.
1866 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1869 +#ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
1870 +#define __ASM_MACH_IP27_DMA_COHERENCE_H
1872 +#include <asm/pci/bridge.h>
1874 +#define pdev_to_baddr(pdev, addr) \
1875 + (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
1876 +#define dev_to_baddr(dev, addr) \
1877 + pdev_to_baddr(to_pci_dev(dev), (addr))
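+
+/*
+ * On IP27 a DMA address is the physical address offset by the per-bus
+ * Bridge base address (baddr), hence the dev_to_baddr() translation.
+ */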
1881 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
1883 + dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
1888 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
1890 + dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
1895 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
1897 + return dma_addr & ~(0xffUL << 56);
1900 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
1904 +static inline int plat_device_is_coherent(struct device *dev)
1906 + return 1; /* IP27 non-coherent mode is unsupported */
1909 +#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
1910 diff -urN linux.old/include/asm-mips/mach-ip32/dma-coherence.h linux.dev/include/asm-mips/mach-ip32/dma-coherence.h
1911 --- linux.old/include/asm-mips/mach-ip32/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1912 +++ linux.dev/include/asm-mips/mach-ip32/dma-coherence.h 2007-02-09 20:26:50.963537408 +0100
1915 + * This file is subject to the terms and conditions of the GNU General Public
1916 + * License. See the file "COPYING" in the main directory of this archive
1917 + * for more details.
1919 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1922 +#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
1923 +#define __ASM_MACH_IP32_DMA_COHERENCE_H
1925 +#include <asm/ip32/crime.h>
1931 + * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
1932 + * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
1934 + * 3. All other devices see memory as one big chunk at 0x40000000
1935 + * 4. Non-PCI devices will pass NULL as struct device*
1937 + * Thus we translate differently, depending on device.
1940 +#define RAM_OFFSET_MASK 0x3fffffffUL
1942 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
1944 + dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
1947 + pa += CRIME_HI_MEM_BASE;
1952 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
1956 + pa = page_to_phys(page) & RAM_OFFSET_MASK;
1959 + pa += CRIME_HI_MEM_BASE;
1964 +/* This is almost certainly wrong but it's what dma-ip32.c used to use */
1965 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
1967 + unsigned long addr = dma_addr & RAM_OFFSET_MASK;
1969 + if (dma_addr >= 256*1024*1024)
1970 + addr += CRIME_HI_MEM_BASE;
1975 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
1979 +static inline int plat_device_is_coherent(struct device *dev)
1981 + return 0; /* IP32 is non-coherent */
1984 +#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
1985 diff -urN linux.old/include/asm-mips/mach-jazz/dma-coherence.h linux.dev/include/asm-mips/mach-jazz/dma-coherence.h
1986 --- linux.old/include/asm-mips/mach-jazz/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1987 +++ linux.dev/include/asm-mips/mach-jazz/dma-coherence.h 2007-02-09 20:26:50.963537408 +0100
1990 + * This file is subject to the terms and conditions of the GNU General Public
1991 + * License. See the file "COPYING" in the main directory of this archive
1992 + * for more details.
1994 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1996 +#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H
1997 +#define __ASM_MACH_JAZZ_DMA_COHERENCE_H
1999 +#include <asm/jazzdma.h>
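+
+/*
+ * Jazz has a real DMA translation table, so mappings are allocated and
+ * released through vdma_alloc()/vdma_free() instead of being computed
+ * from the physical address.
+ */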
2003 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
2005 + return vdma_alloc(virt_to_phys(addr), size);
2008 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
2010 + return vdma_alloc(page_to_phys(page), PAGE_SIZE);
2013 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
2015 + return vdma_log2phys(dma_addr);
2018 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
2020 + vdma_free(dma_addr);
2023 +static inline int plat_device_is_coherent(struct device *dev)
2028 +#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */