target/linux/atheros-2.6/patches/150-mips_cache_cleanup.patch
1 Platforms will now have to supply a function plat_device_is_coherent() which
2 returns whether a particular device participates in the coherence domain.
3 On most platforms this function is a constant, returning either 0 or 1 for all devices.
4
5 Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
6 Signed-off-by: Felix Fietkau <nbd@openwrt.org>
7
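
For illustration, here is a minimal sketch of what the new contract asks of a
platform. The board name is hypothetical; the mach-generic header added below
is the authoritative template, and a fully non-coherent platform reduces to
identity address translation plus a constant coherence predicate:

/* Hypothetical mach-myboard/dma-coherence.h - sketch only, not part of this patch. */
#ifndef __ASM_MACH_MYBOARD_DMA_COHERENCE_H
#define __ASM_MACH_MYBOARD_DMA_COHERENCE_H

struct device;

static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
	size_t size)
{
	return virt_to_phys(addr);	/* DMA address == physical address */
}

static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
	struct page *page)
{
	return page_to_phys(page);
}

static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
{
	return dma_addr;
}

static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
	/* No IOMMU on this board, nothing to release. */
}

static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;	/* every device needs explicit cache maintenance */
}

#endif /* __ASM_MACH_MYBOARD_DMA_COHERENCE_H */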
8 diff -urN linux.old/arch/mips/Kconfig linux.dev/arch/mips/Kconfig
9 --- linux.old/arch/mips/Kconfig 2007-01-10 20:10:37.000000000 +0100
10 +++ linux.dev/arch/mips/Kconfig 2007-02-09 20:26:45.367388152 +0100
11 @@ -571,8 +571,6 @@
12 select ARC
13 select ARC32
14 select BOOT_ELF32
15 - select OWN_DMA
16 - select DMA_IP32
17 select DMA_NONCOHERENT
18 select HW_HAS_PCI
19 select R5000_CPU_SCACHE
20 @@ -835,9 +833,6 @@
21 config DMA_NEED_PCI_MAP_STATE
22 bool
23
24 -config OWN_DMA
25 - bool
26 -
27 config EARLY_PRINTK
28 bool
29
30 diff -urN linux.old/arch/mips/mm/dma-coherent.c linux.dev/arch/mips/mm/dma-coherent.c
31 --- linux.old/arch/mips/mm/dma-coherent.c 2007-01-10 20:10:37.000000000 +0100
32 +++ linux.dev/arch/mips/mm/dma-coherent.c 1970-01-01 01:00:00.000000000 +0100
33 @@ -1,254 +0,0 @@
34 -/*
35 - * This file is subject to the terms and conditions of the GNU General Public
36 - * License. See the file "COPYING" in the main directory of this archive
37 - * for more details.
38 - *
39 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
40 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
41 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
42 - */
43 -#include <linux/types.h>
44 -#include <linux/dma-mapping.h>
45 -#include <linux/mm.h>
46 -#include <linux/module.h>
47 -#include <linux/string.h>
48 -
49 -#include <asm/cache.h>
50 -#include <asm/io.h>
51 -
52 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
53 - dma_addr_t * dma_handle, gfp_t gfp)
54 -{
55 - void *ret;
56 - /* ignore region specifiers */
57 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
58 -
59 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
60 - gfp |= GFP_DMA;
61 - ret = (void *) __get_free_pages(gfp, get_order(size));
62 -
63 - if (ret != NULL) {
64 - memset(ret, 0, size);
65 - *dma_handle = virt_to_phys(ret);
66 - }
67 -
68 - return ret;
69 -}
70 -
71 -EXPORT_SYMBOL(dma_alloc_noncoherent);
72 -
73 -void *dma_alloc_coherent(struct device *dev, size_t size,
74 - dma_addr_t * dma_handle, gfp_t gfp)
75 - __attribute__((alias("dma_alloc_noncoherent")));
76 -
77 -EXPORT_SYMBOL(dma_alloc_coherent);
78 -
79 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
80 - dma_addr_t dma_handle)
81 -{
82 - unsigned long addr = (unsigned long) vaddr;
83 -
84 - free_pages(addr, get_order(size));
85 -}
86 -
87 -EXPORT_SYMBOL(dma_free_noncoherent);
88 -
89 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
90 - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
91 -
92 -EXPORT_SYMBOL(dma_free_coherent);
93 -
94 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
95 - enum dma_data_direction direction)
96 -{
97 - BUG_ON(direction == DMA_NONE);
98 -
99 - return __pa(ptr);
100 -}
101 -
102 -EXPORT_SYMBOL(dma_map_single);
103 -
104 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
105 - enum dma_data_direction direction)
106 -{
107 - BUG_ON(direction == DMA_NONE);
108 -}
109 -
110 -EXPORT_SYMBOL(dma_unmap_single);
111 -
112 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
113 - enum dma_data_direction direction)
114 -{
115 - int i;
116 -
117 - BUG_ON(direction == DMA_NONE);
118 -
119 - for (i = 0; i < nents; i++, sg++) {
120 - sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset;
121 - }
122 -
123 - return nents;
124 -}
125 -
126 -EXPORT_SYMBOL(dma_map_sg);
127 -
128 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
129 - unsigned long offset, size_t size, enum dma_data_direction direction)
130 -{
131 - BUG_ON(direction == DMA_NONE);
132 -
133 - return page_to_phys(page) + offset;
134 -}
135 -
136 -EXPORT_SYMBOL(dma_map_page);
137 -
138 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
139 - enum dma_data_direction direction)
140 -{
141 - BUG_ON(direction == DMA_NONE);
142 -}
143 -
144 -EXPORT_SYMBOL(dma_unmap_page);
145 -
146 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
147 - enum dma_data_direction direction)
148 -{
149 - BUG_ON(direction == DMA_NONE);
150 -}
151 -
152 -EXPORT_SYMBOL(dma_unmap_sg);
153 -
154 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
155 - size_t size, enum dma_data_direction direction)
156 -{
157 - BUG_ON(direction == DMA_NONE);
158 -}
159 -
160 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
161 -
162 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
163 - size_t size, enum dma_data_direction direction)
164 -{
165 - BUG_ON(direction == DMA_NONE);
166 -}
167 -
168 -EXPORT_SYMBOL(dma_sync_single_for_device);
169 -
170 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
171 - unsigned long offset, size_t size,
172 - enum dma_data_direction direction)
173 -{
174 - BUG_ON(direction == DMA_NONE);
175 -}
176 -
177 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
178 -
179 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
180 - unsigned long offset, size_t size,
181 - enum dma_data_direction direction)
182 -{
183 - BUG_ON(direction == DMA_NONE);
184 -}
185 -
186 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
187 -
188 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
189 - enum dma_data_direction direction)
190 -{
191 - BUG_ON(direction == DMA_NONE);
192 -}
193 -
194 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
195 -
196 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
197 - enum dma_data_direction direction)
198 -{
199 - BUG_ON(direction == DMA_NONE);
200 -}
201 -
202 -EXPORT_SYMBOL(dma_sync_sg_for_device);
203 -
204 -int dma_mapping_error(dma_addr_t dma_addr)
205 -{
206 - return 0;
207 -}
208 -
209 -EXPORT_SYMBOL(dma_mapping_error);
210 -
211 -int dma_supported(struct device *dev, u64 mask)
212 -{
213 - /*
214 - * we fall back to GFP_DMA when the mask isn't all 1s,
215 - * so we can't guarantee allocations that must be
216 - * within a tighter range than GFP_DMA..
217 - */
218 - if (mask < 0x00ffffff)
219 - return 0;
220 -
221 - return 1;
222 -}
223 -
224 -EXPORT_SYMBOL(dma_supported);
225 -
226 -int dma_is_consistent(dma_addr_t dma_addr)
227 -{
228 - return 1;
229 -}
230 -
231 -EXPORT_SYMBOL(dma_is_consistent);
232 -
233 -void dma_cache_sync(void *vaddr, size_t size,
234 - enum dma_data_direction direction)
235 -{
236 - BUG_ON(direction == DMA_NONE);
237 -}
238 -
239 -EXPORT_SYMBOL(dma_cache_sync);
240 -
241 -/* The DAC routines are a PCIism.. */
242 -
243 -#ifdef CONFIG_PCI
244 -
245 -#include <linux/pci.h>
246 -
247 -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
248 - struct page *page, unsigned long offset, int direction)
249 -{
250 - return (dma64_addr_t)page_to_phys(page) + offset;
251 -}
252 -
253 -EXPORT_SYMBOL(pci_dac_page_to_dma);
254 -
255 -struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
256 - dma64_addr_t dma_addr)
257 -{
258 - return mem_map + (dma_addr >> PAGE_SHIFT);
259 -}
260 -
261 -EXPORT_SYMBOL(pci_dac_dma_to_page);
262 -
263 -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
264 - dma64_addr_t dma_addr)
265 -{
266 - return dma_addr & ~PAGE_MASK;
267 -}
268 -
269 -EXPORT_SYMBOL(pci_dac_dma_to_offset);
270 -
271 -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
272 - dma64_addr_t dma_addr, size_t len, int direction)
273 -{
274 - BUG_ON(direction == PCI_DMA_NONE);
275 -}
276 -
277 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
278 -
279 -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
280 - dma64_addr_t dma_addr, size_t len, int direction)
281 -{
282 - BUG_ON(direction == PCI_DMA_NONE);
283 -}
284 -
285 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
286 -
287 -#endif /* CONFIG_PCI */
288 diff -urN linux.old/arch/mips/mm/dma-default.c linux.dev/arch/mips/mm/dma-default.c
289 --- linux.old/arch/mips/mm/dma-default.c 1970-01-01 01:00:00.000000000 +0100
290 +++ linux.dev/arch/mips/mm/dma-default.c 2007-02-09 20:26:48.671885792 +0100
291 @@ -0,0 +1,363 @@
292 +/*
293 + * This file is subject to the terms and conditions of the GNU General Public
294 + * License. See the file "COPYING" in the main directory of this archive
295 + * for more details.
296 + *
297 + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
298 + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
299 + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
300 + */
301 +
302 +#include <linux/types.h>
303 +#include <linux/dma-mapping.h>
304 +#include <linux/mm.h>
305 +#include <linux/module.h>
306 +#include <linux/string.h>
307 +
308 +#include <asm/cache.h>
309 +#include <asm/io.h>
310 +
311 +#include <dma-coherence.h>
312 +
313 +/*
314 + * Warning on the terminology - Linux calls an uncached area coherent;
315 + * MIPS terminology calls memory areas with hardware maintained coherency
316 + * coherent.
317 + */
318 +
319 +static inline int cpu_is_noncoherent_r10000(struct device *dev)
320 +{
321 + return !plat_device_is_coherent(dev) &&
322 + (current_cpu_data.cputype == CPU_R10000 ||
323 + current_cpu_data.cputype == CPU_R12000);
324 +}
325 +
326 +void *dma_alloc_noncoherent(struct device *dev, size_t size,
327 + dma_addr_t * dma_handle, gfp_t gfp)
328 +{
329 + void *ret;
330 +
331 + /* ignore region specifiers */
332 + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
333 +
334 + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
335 + gfp |= GFP_DMA;
336 + ret = (void *) __get_free_pages(gfp, get_order(size));
337 +
338 + if (ret != NULL) {
339 + memset(ret, 0, size);
340 + *dma_handle = plat_map_dma_mem(dev, ret, size);
341 + }
342 +
343 + return ret;
344 +}
345 +
346 +EXPORT_SYMBOL(dma_alloc_noncoherent);
347 +
348 +void *dma_alloc_coherent(struct device *dev, size_t size,
349 + dma_addr_t * dma_handle, gfp_t gfp)
350 +{
351 + void *ret;
352 +
353 + /* ignore region specifiers */
354 + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
355 +
356 + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
357 + gfp |= GFP_DMA;
358 + ret = (void *) __get_free_pages(gfp, get_order(size));
359 +
360 + if (ret) {
361 + memset(ret, 0, size);
362 + *dma_handle = plat_map_dma_mem(dev, ret, size);
363 +
364 + if (!plat_device_is_coherent(dev)) {
365 + dma_cache_wback_inv((unsigned long) ret, size);
366 + ret = UNCAC_ADDR(ret);
367 + }
368 + }
369 +
370 + return ret;
371 +}
372 +
373 +EXPORT_SYMBOL(dma_alloc_coherent);
374 +
375 +void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
376 + dma_addr_t dma_handle)
377 +{
378 + free_pages((unsigned long) vaddr, get_order(size));
379 +}
380 +
381 +EXPORT_SYMBOL(dma_free_noncoherent);
382 +
383 +void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
384 + dma_addr_t dma_handle)
385 +{
386 + unsigned long addr = (unsigned long) vaddr;
387 +
388 + if (!plat_device_is_coherent(dev))
389 + addr = CAC_ADDR(addr);
390 +
391 + free_pages(addr, get_order(size));
392 +}
393 +
394 +EXPORT_SYMBOL(dma_free_coherent);
395 +
396 +static inline void __dma_sync(unsigned long addr, size_t size,
397 + enum dma_data_direction direction)
398 +{
399 + switch (direction) {
400 + case DMA_TO_DEVICE:
401 + dma_cache_wback(addr, size);
402 + break;
403 +
404 + case DMA_FROM_DEVICE:
405 + dma_cache_inv(addr, size);
406 + break;
407 +
408 + case DMA_BIDIRECTIONAL:
409 + dma_cache_wback_inv(addr, size);
410 + break;
411 +
412 + default:
413 + BUG();
414 + }
415 +}
416 +
417 +dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
418 + enum dma_data_direction direction)
419 +{
420 + unsigned long addr = (unsigned long) ptr;
421 +
422 + if (!plat_device_is_coherent(dev))
423 + __dma_sync(addr, size, direction);
424 +
425 + return plat_map_dma_mem(dev, ptr, size);
426 +}
427 +
428 +EXPORT_SYMBOL(dma_map_single);
429 +
430 +void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
431 + enum dma_data_direction direction)
432 +{
433 + if (cpu_is_noncoherent_r10000(dev))
434 + __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
435 + direction);
436 +
437 + plat_unmap_dma_mem(dma_addr);
438 +}
439 +
440 +EXPORT_SYMBOL(dma_unmap_single);
441 +
442 +int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
443 + enum dma_data_direction direction)
444 +{
445 + int i;
446 +
447 + BUG_ON(direction == DMA_NONE);
448 +
449 + for (i = 0; i < nents; i++, sg++) {
450 + unsigned long addr;
451 +
452 + addr = (unsigned long) page_address(sg->page);
453 + if (!plat_device_is_coherent(dev) && addr)
454 + __dma_sync(addr + sg->offset, sg->length, direction);
455 + sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
456 + sg->offset;
457 + }
458 +
459 + return nents;
460 +}
461 +
462 +EXPORT_SYMBOL(dma_map_sg);
463 +
464 +dma_addr_t dma_map_page(struct device *dev, struct page *page,
465 + unsigned long offset, size_t size, enum dma_data_direction direction)
466 +{
467 + BUG_ON(direction == DMA_NONE);
468 +
469 + if (!plat_device_is_coherent(dev)) {
470 + unsigned long addr;
471 +
472 + addr = (unsigned long) page_address(page) + offset;
473 + dma_cache_wback_inv(addr, size);
474 + }
475 +
476 + return plat_map_dma_mem_page(dev, page) + offset;
477 +}
478 +
479 +EXPORT_SYMBOL(dma_map_page);
480 +
481 +void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
482 + enum dma_data_direction direction)
483 +{
484 + BUG_ON(direction == DMA_NONE);
485 +
486 + if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
487 + unsigned long addr;
488 +
489 + addr = plat_dma_addr_to_phys(dma_address);
490 + dma_cache_wback_inv(addr, size);
491 + }
492 +
493 + plat_unmap_dma_mem(dma_address);
494 +}
495 +
496 +EXPORT_SYMBOL(dma_unmap_page);
497 +
498 +void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
499 + enum dma_data_direction direction)
500 +{
501 + unsigned long addr;
502 + int i;
503 +
504 + BUG_ON(direction == DMA_NONE);
505 +
506 + for (i = 0; i < nhwentries; i++, sg++) {
507 + if (!plat_device_is_coherent(dev) &&
508 + direction != DMA_TO_DEVICE) {
509 + addr = (unsigned long) page_address(sg->page);
510 + if (addr)
511 + __dma_sync(addr + sg->offset, sg->length,
512 + direction);
513 + }
514 + plat_unmap_dma_mem(sg->dma_address);
515 + }
516 +}
517 +
518 +EXPORT_SYMBOL(dma_unmap_sg);
519 +
520 +void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
521 + size_t size, enum dma_data_direction direction)
522 +{
523 + BUG_ON(direction == DMA_NONE);
524 +
525 + if (cpu_is_noncoherent_r10000(dev)) {
526 + unsigned long addr;
527 +
528 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
529 + __dma_sync(addr, size, direction);
530 + }
531 +}
532 +
533 +EXPORT_SYMBOL(dma_sync_single_for_cpu);
534 +
535 +void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
536 + size_t size, enum dma_data_direction direction)
537 +{
538 + BUG_ON(direction == DMA_NONE);
539 +
540 + if (cpu_is_noncoherent_r10000(dev)) {
541 + unsigned long addr;
542 +
543 + addr = plat_dma_addr_to_phys(dma_handle);
544 + __dma_sync(addr, size, direction);
545 + }
546 +}
547 +
548 +EXPORT_SYMBOL(dma_sync_single_for_device);
549 +
550 +void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
551 + unsigned long offset, size_t size, enum dma_data_direction direction)
552 +{
553 + BUG_ON(direction == DMA_NONE);
554 +
555 + if (cpu_is_noncoherent_r10000(dev)) {
556 + unsigned long addr;
557 +
558 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
559 + __dma_sync(addr + offset, size, direction);
560 + }
561 +}
562 +
563 +EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
564 +
565 +void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
566 + unsigned long offset, size_t size, enum dma_data_direction direction)
567 +{
568 + BUG_ON(direction == DMA_NONE);
569 +
570 + if (cpu_is_noncoherent_r10000(dev)) {
571 + unsigned long addr;
572 +
573 + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
574 + __dma_sync(addr + offset, size, direction);
575 + }
576 +}
577 +
578 +EXPORT_SYMBOL(dma_sync_single_range_for_device);
579 +
580 +void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
581 + enum dma_data_direction direction)
582 +{
583 + int i;
584 +
585 + BUG_ON(direction == DMA_NONE);
586 +
587 + /* Make sure that gcc doesn't leave the empty loop body. */
588 + for (i = 0; i < nelems; i++, sg++) {
589 + if (!plat_device_is_coherent(dev))
590 + __dma_sync((unsigned long)page_address(sg->page),
591 + sg->length, direction);
592 + plat_unmap_dma_mem(sg->dma_address);
593 + }
594 +}
595 +
596 +EXPORT_SYMBOL(dma_sync_sg_for_cpu);
597 +
598 +void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
599 + enum dma_data_direction direction)
600 +{
601 + int i;
602 +
603 + BUG_ON(direction == DMA_NONE);
604 +
605 + /* Make sure that gcc doesn't leave the empty loop body. */
606 + for (i = 0; i < nelems; i++, sg++) {
607 + if (!plat_device_is_coherent(dev))
608 + __dma_sync((unsigned long)page_address(sg->page),
609 + sg->length, direction);
610 + plat_unmap_dma_mem(sg->dma_address);
611 + }
612 +}
613 +
614 +EXPORT_SYMBOL(dma_sync_sg_for_device);
615 +
616 +int dma_mapping_error(dma_addr_t dma_addr)
617 +{
618 + return 0;
619 +}
620 +
621 +EXPORT_SYMBOL(dma_mapping_error);
622 +
623 +int dma_supported(struct device *dev, u64 mask)
624 +{
625 + /*
626 + * we fall back to GFP_DMA when the mask isn't all 1s,
627 + * so we can't guarantee allocations that must be
628 + * within a tighter range than GFP_DMA.
629 + */
630 + if (mask < 0x00ffffff)
631 + return 0;
632 +
633 + return 1;
634 +}
635 +
636 +EXPORT_SYMBOL(dma_supported);
637 +
638 +int dma_is_consistent(dma_addr_t dma_addr)
639 +{
640 + return plat_device_is_coherent(NULL);
641 +}
642 +
643 +EXPORT_SYMBOL(dma_is_consistent);
644 +
645 +void dma_cache_sync(void *vaddr, size_t size,
646 + enum dma_data_direction direction)
647 +{
648 + BUG_ON(direction == DMA_NONE);
649 +
650 + if (!plat_device_is_coherent(NULL))
651 + dma_cache_wback_inv((unsigned long)vaddr, size);
652 +}
653 +
654 +EXPORT_SYMBOL(dma_cache_sync);
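
To make the control flow above concrete, a hedged sketch of a driver-side
transmit using the standard 2.6-era streaming API (device, buffer and function
name are placeholders): on a non-coherent platform dma_map_single() passes
through __dma_sync() above, while on a coherent one it reduces to
plat_map_dma_mem() with no cache maintenance.

/* Sketch only - not part of this patch. */
static void example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... program the device with 'handle' and start the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}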
655 diff -urN linux.old/arch/mips/mm/dma-ip27.c linux.dev/arch/mips/mm/dma-ip27.c
656 --- linux.old/arch/mips/mm/dma-ip27.c 2007-01-10 20:10:37.000000000 +0100
657 +++ linux.dev/arch/mips/mm/dma-ip27.c 1970-01-01 01:00:00.000000000 +0100
658 @@ -1,257 +0,0 @@
659 -/*
660 - * This file is subject to the terms and conditions of the GNU General Public
661 - * License. See the file "COPYING" in the main directory of this archive
662 - * for more details.
663 - *
664 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
665 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
666 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
667 - */
668 -#include <linux/types.h>
669 -#include <linux/mm.h>
670 -#include <linux/module.h>
671 -#include <linux/string.h>
672 -#include <linux/pci.h>
673 -
674 -#include <asm/cache.h>
675 -#include <asm/pci/bridge.h>
676 -
677 -#define pdev_to_baddr(pdev, addr) \
678 - (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
679 -#define dev_to_baddr(dev, addr) \
680 - pdev_to_baddr(to_pci_dev(dev), (addr))
681 -
682 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
683 - dma_addr_t * dma_handle, gfp_t gfp)
684 -{
685 - void *ret;
686 -
687 - /* ignore region specifiers */
688 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
689 -
690 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
691 - gfp |= GFP_DMA;
692 - ret = (void *) __get_free_pages(gfp, get_order(size));
693 -
694 - if (ret != NULL) {
695 - memset(ret, 0, size);
696 - *dma_handle = dev_to_baddr(dev, virt_to_phys(ret));
697 - }
698 -
699 - return ret;
700 -}
701 -
702 -EXPORT_SYMBOL(dma_alloc_noncoherent);
703 -
704 -void *dma_alloc_coherent(struct device *dev, size_t size,
705 - dma_addr_t * dma_handle, gfp_t gfp)
706 - __attribute__((alias("dma_alloc_noncoherent")));
707 -
708 -EXPORT_SYMBOL(dma_alloc_coherent);
709 -
710 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
711 - dma_addr_t dma_handle)
712 -{
713 - unsigned long addr = (unsigned long) vaddr;
714 -
715 - free_pages(addr, get_order(size));
716 -}
717 -
718 -EXPORT_SYMBOL(dma_free_noncoherent);
719 -
720 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
721 - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
722 -
723 -EXPORT_SYMBOL(dma_free_coherent);
724 -
725 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
726 - enum dma_data_direction direction)
727 -{
728 - BUG_ON(direction == DMA_NONE);
729 -
730 - return dev_to_baddr(dev, __pa(ptr));
731 -}
732 -
733 -EXPORT_SYMBOL(dma_map_single);
734 -
735 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
736 - enum dma_data_direction direction)
737 -{
738 - BUG_ON(direction == DMA_NONE);
739 -}
740 -
741 -EXPORT_SYMBOL(dma_unmap_single);
742 -
743 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
744 - enum dma_data_direction direction)
745 -{
746 - int i;
747 -
748 - BUG_ON(direction == DMA_NONE);
749 -
750 - for (i = 0; i < nents; i++, sg++) {
751 - sg->dma_address = (dma_addr_t) dev_to_baddr(dev,
752 - page_to_phys(sg->page) + sg->offset);
753 - }
754 -
755 - return nents;
756 -}
757 -
758 -EXPORT_SYMBOL(dma_map_sg);
759 -
760 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
761 - unsigned long offset, size_t size, enum dma_data_direction direction)
762 -{
763 - BUG_ON(direction == DMA_NONE);
764 -
765 - return dev_to_baddr(dev, page_to_phys(page) + offset);
766 -}
767 -
768 -EXPORT_SYMBOL(dma_map_page);
769 -
770 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
771 - enum dma_data_direction direction)
772 -{
773 - BUG_ON(direction == DMA_NONE);
774 -}
775 -
776 -EXPORT_SYMBOL(dma_unmap_page);
777 -
778 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
779 - enum dma_data_direction direction)
780 -{
781 - BUG_ON(direction == DMA_NONE);
782 -}
783 -
784 -EXPORT_SYMBOL(dma_unmap_sg);
785 -
786 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
787 - enum dma_data_direction direction)
788 -{
789 - BUG_ON(direction == DMA_NONE);
790 -}
791 -
792 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
793 -
794 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
795 - enum dma_data_direction direction)
796 -{
797 - BUG_ON(direction == DMA_NONE);
798 -}
799 -
800 -EXPORT_SYMBOL(dma_sync_single_for_device);
801 -
802 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
803 - unsigned long offset, size_t size,
804 - enum dma_data_direction direction)
805 -{
806 - BUG_ON(direction == DMA_NONE);
807 -}
808 -
809 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
810 -
811 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
812 - unsigned long offset, size_t size,
813 - enum dma_data_direction direction)
814 -{
815 - BUG_ON(direction == DMA_NONE);
816 -}
817 -
818 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
819 -
820 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
821 - enum dma_data_direction direction)
822 -{
823 - BUG_ON(direction == DMA_NONE);
824 -}
825 -
826 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
827 -
828 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
829 - enum dma_data_direction direction)
830 -{
831 - BUG_ON(direction == DMA_NONE);
832 -}
833 -
834 -EXPORT_SYMBOL(dma_sync_sg_for_device);
835 -
836 -int dma_mapping_error(dma_addr_t dma_addr)
837 -{
838 - return 0;
839 -}
840 -
841 -EXPORT_SYMBOL(dma_mapping_error);
842 -
843 -int dma_supported(struct device *dev, u64 mask)
844 -{
845 - /*
846 - * we fall back to GFP_DMA when the mask isn't all 1s,
847 - * so we can't guarantee allocations that must be
848 - * within a tighter range than GFP_DMA..
849 - */
850 - if (mask < 0x00ffffff)
851 - return 0;
852 -
853 - return 1;
854 -}
855 -
856 -EXPORT_SYMBOL(dma_supported);
857 -
858 -int dma_is_consistent(dma_addr_t dma_addr)
859 -{
860 - return 1;
861 -}
862 -
863 -EXPORT_SYMBOL(dma_is_consistent);
864 -
865 -void dma_cache_sync(void *vaddr, size_t size,
866 - enum dma_data_direction direction)
867 -{
868 - BUG_ON(direction == DMA_NONE);
869 -}
870 -
871 -EXPORT_SYMBOL(dma_cache_sync);
872 -
873 -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
874 - struct page *page, unsigned long offset, int direction)
875 -{
876 - dma64_addr_t addr = page_to_phys(page) + offset;
877 -
878 - return (dma64_addr_t) pdev_to_baddr(pdev, addr);
879 -}
880 -
881 -EXPORT_SYMBOL(pci_dac_page_to_dma);
882 -
883 -struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
884 - dma64_addr_t dma_addr)
885 -{
886 - struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
887 -
888 - return pfn_to_page((dma_addr - bc->baddr) >> PAGE_SHIFT);
889 -}
890 -
891 -EXPORT_SYMBOL(pci_dac_dma_to_page);
892 -
893 -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
894 - dma64_addr_t dma_addr)
895 -{
896 - return dma_addr & ~PAGE_MASK;
897 -}
898 -
899 -EXPORT_SYMBOL(pci_dac_dma_to_offset);
900 -
901 -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
902 - dma64_addr_t dma_addr, size_t len, int direction)
903 -{
904 - BUG_ON(direction == PCI_DMA_NONE);
905 -}
906 -
907 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
908 -
909 -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
910 - dma64_addr_t dma_addr, size_t len, int direction)
911 -{
912 - BUG_ON(direction == PCI_DMA_NONE);
913 -}
914 -
915 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
916 diff -urN linux.old/arch/mips/mm/dma-ip32.c linux.dev/arch/mips/mm/dma-ip32.c
917 --- linux.old/arch/mips/mm/dma-ip32.c 2007-01-10 20:10:37.000000000 +0100
918 +++ linux.dev/arch/mips/mm/dma-ip32.c 1970-01-01 01:00:00.000000000 +0100
919 @@ -1,382 +0,0 @@
920 -/*
921 - * This file is subject to the terms and conditions of the GNU General Public
922 - * License. See the file "COPYING" in the main directory of this archive
923 - * for more details.
924 - *
925 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
926 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
927 - * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
928 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
929 - * IP32 changes by Ilya.
930 - */
931 -#include <linux/types.h>
932 -#include <linux/mm.h>
933 -#include <linux/module.h>
934 -#include <linux/string.h>
935 -#include <linux/dma-mapping.h>
936 -
937 -#include <asm/cache.h>
938 -#include <asm/io.h>
939 -#include <asm/ip32/crime.h>
940 -
941 -/*
942 - * Warning on the terminology - Linux calls an uncached area coherent;
943 - * MIPS terminology calls memory areas with hardware maintained coherency
944 - * coherent.
945 - */
946 -
947 -/*
948 - * Few notes.
949 - * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
950 - * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for native-endian)
951 - * 3. All other devices see memory as one big chunk at 0x40000000
952 - * 4. Non-PCI devices will pass NULL as struct device*
953 - * Thus we translate differently, depending on device.
954 - */
955 -
956 -#define RAM_OFFSET_MASK 0x3fffffff
957 -
958 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
959 - dma_addr_t * dma_handle, gfp_t gfp)
960 -{
961 - void *ret;
962 - /* ignore region specifiers */
963 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
964 -
965 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
966 - gfp |= GFP_DMA;
967 - ret = (void *) __get_free_pages(gfp, get_order(size));
968 -
969 - if (ret != NULL) {
970 - unsigned long addr = virt_to_phys(ret)&RAM_OFFSET_MASK;
971 - memset(ret, 0, size);
972 - if(dev==NULL)
973 - addr+= CRIME_HI_MEM_BASE;
974 - *dma_handle = addr;
975 - }
976 -
977 - return ret;
978 -}
979 -
980 -EXPORT_SYMBOL(dma_alloc_noncoherent);
981 -
982 -void *dma_alloc_coherent(struct device *dev, size_t size,
983 - dma_addr_t * dma_handle, gfp_t gfp)
984 -{
985 - void *ret;
986 -
987 - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
988 - if (ret) {
989 - dma_cache_wback_inv((unsigned long) ret, size);
990 - ret = UNCAC_ADDR(ret);
991 - }
992 -
993 - return ret;
994 -}
995 -
996 -EXPORT_SYMBOL(dma_alloc_coherent);
997 -
998 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
999 - dma_addr_t dma_handle)
1000 -{
1001 - free_pages((unsigned long) vaddr, get_order(size));
1002 -}
1003 -
1004 -EXPORT_SYMBOL(dma_free_noncoherent);
1005 -
1006 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
1007 - dma_addr_t dma_handle)
1008 -{
1009 - unsigned long addr = (unsigned long) vaddr;
1010 -
1011 - addr = CAC_ADDR(addr);
1012 - free_pages(addr, get_order(size));
1013 -}
1014 -
1015 -EXPORT_SYMBOL(dma_free_coherent);
1016 -
1017 -static inline void __dma_sync(unsigned long addr, size_t size,
1018 - enum dma_data_direction direction)
1019 -{
1020 - switch (direction) {
1021 - case DMA_TO_DEVICE:
1022 - dma_cache_wback(addr, size);
1023 - break;
1024 -
1025 - case DMA_FROM_DEVICE:
1026 - dma_cache_inv(addr, size);
1027 - break;
1028 -
1029 - case DMA_BIDIRECTIONAL:
1030 - dma_cache_wback_inv(addr, size);
1031 - break;
1032 -
1033 - default:
1034 - BUG();
1035 - }
1036 -}
1037 -
1038 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
1039 - enum dma_data_direction direction)
1040 -{
1041 - unsigned long addr = (unsigned long) ptr;
1042 -
1043 - switch (direction) {
1044 - case DMA_TO_DEVICE:
1045 - dma_cache_wback(addr, size);
1046 - break;
1047 -
1048 - case DMA_FROM_DEVICE:
1049 - dma_cache_inv(addr, size);
1050 - break;
1051 -
1052 - case DMA_BIDIRECTIONAL:
1053 - dma_cache_wback_inv(addr, size);
1054 - break;
1055 -
1056 - default:
1057 - BUG();
1058 - }
1059 -
1060 - addr = virt_to_phys(ptr)&RAM_OFFSET_MASK;
1061 - if(dev == NULL)
1062 - addr+=CRIME_HI_MEM_BASE;
1063 - return (dma_addr_t)addr;
1064 -}
1065 -
1066 -EXPORT_SYMBOL(dma_map_single);
1067 -
1068 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
1069 - enum dma_data_direction direction)
1070 -{
1071 - switch (direction) {
1072 - case DMA_TO_DEVICE:
1073 - break;
1074 -
1075 - case DMA_FROM_DEVICE:
1076 - break;
1077 -
1078 - case DMA_BIDIRECTIONAL:
1079 - break;
1080 -
1081 - default:
1082 - BUG();
1083 - }
1084 -}
1085 -
1086 -EXPORT_SYMBOL(dma_unmap_single);
1087 -
1088 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1089 - enum dma_data_direction direction)
1090 -{
1091 - int i;
1092 -
1093 - BUG_ON(direction == DMA_NONE);
1094 -
1095 - for (i = 0; i < nents; i++, sg++) {
1096 - unsigned long addr;
1097 -
1098 - addr = (unsigned long) page_address(sg->page)+sg->offset;
1099 - if (addr)
1100 - __dma_sync(addr, sg->length, direction);
1101 - addr = __pa(addr)&RAM_OFFSET_MASK;
1102 - if(dev == NULL)
1103 - addr += CRIME_HI_MEM_BASE;
1104 - sg->dma_address = (dma_addr_t)addr;
1105 - }
1106 -
1107 - return nents;
1108 -}
1109 -
1110 -EXPORT_SYMBOL(dma_map_sg);
1111 -
1112 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
1113 - unsigned long offset, size_t size, enum dma_data_direction direction)
1114 -{
1115 - unsigned long addr;
1116 -
1117 - BUG_ON(direction == DMA_NONE);
1118 -
1119 - addr = (unsigned long) page_address(page) + offset;
1120 - dma_cache_wback_inv(addr, size);
1121 - addr = __pa(addr)&RAM_OFFSET_MASK;
1122 - if(dev == NULL)
1123 - addr += CRIME_HI_MEM_BASE;
1124 -
1125 - return (dma_addr_t)addr;
1126 -}
1127 -
1128 -EXPORT_SYMBOL(dma_map_page);
1129 -
1130 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
1131 - enum dma_data_direction direction)
1132 -{
1133 - BUG_ON(direction == DMA_NONE);
1134 -
1135 - if (direction != DMA_TO_DEVICE) {
1136 - unsigned long addr;
1137 -
1138 - dma_address&=RAM_OFFSET_MASK;
1139 - addr = dma_address + PAGE_OFFSET;
1140 - if(dma_address>=256*1024*1024)
1141 - addr+=CRIME_HI_MEM_BASE;
1142 - dma_cache_wback_inv(addr, size);
1143 - }
1144 -}
1145 -
1146 -EXPORT_SYMBOL(dma_unmap_page);
1147 -
1148 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
1149 - enum dma_data_direction direction)
1150 -{
1151 - unsigned long addr;
1152 - int i;
1153 -
1154 - BUG_ON(direction == DMA_NONE);
1155 -
1156 - if (direction == DMA_TO_DEVICE)
1157 - return;
1158 -
1159 - for (i = 0; i < nhwentries; i++, sg++) {
1160 - addr = (unsigned long) page_address(sg->page);
1161 - if (!addr)
1162 - continue;
1163 - dma_cache_wback_inv(addr + sg->offset, sg->length);
1164 - }
1165 -}
1166 -
1167 -EXPORT_SYMBOL(dma_unmap_sg);
1168 -
1169 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1170 - size_t size, enum dma_data_direction direction)
1171 -{
1172 - unsigned long addr;
1173 -
1174 - BUG_ON(direction == DMA_NONE);
1175 -
1176 - dma_handle&=RAM_OFFSET_MASK;
1177 - addr = dma_handle + PAGE_OFFSET;
1178 - if(dma_handle>=256*1024*1024)
1179 - addr+=CRIME_HI_MEM_BASE;
1180 - __dma_sync(addr, size, direction);
1181 -}
1182 -
1183 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
1184 -
1185 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
1186 - size_t size, enum dma_data_direction direction)
1187 -{
1188 - unsigned long addr;
1189 -
1190 - BUG_ON(direction == DMA_NONE);
1191 -
1192 - dma_handle&=RAM_OFFSET_MASK;
1193 - addr = dma_handle + PAGE_OFFSET;
1194 - if(dma_handle>=256*1024*1024)
1195 - addr+=CRIME_HI_MEM_BASE;
1196 - __dma_sync(addr, size, direction);
1197 -}
1198 -
1199 -EXPORT_SYMBOL(dma_sync_single_for_device);
1200 -
1201 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
1202 - unsigned long offset, size_t size, enum dma_data_direction direction)
1203 -{
1204 - unsigned long addr;
1205 -
1206 - BUG_ON(direction == DMA_NONE);
1207 -
1208 - dma_handle&=RAM_OFFSET_MASK;
1209 - addr = dma_handle + offset + PAGE_OFFSET;
1210 - if(dma_handle>=256*1024*1024)
1211 - addr+=CRIME_HI_MEM_BASE;
1212 - __dma_sync(addr, size, direction);
1213 -}
1214 -
1215 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
1216 -
1217 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
1218 - unsigned long offset, size_t size, enum dma_data_direction direction)
1219 -{
1220 - unsigned long addr;
1221 -
1222 - BUG_ON(direction == DMA_NONE);
1223 -
1224 - dma_handle&=RAM_OFFSET_MASK;
1225 - addr = dma_handle + offset + PAGE_OFFSET;
1226 - if(dma_handle>=256*1024*1024)
1227 - addr+=CRIME_HI_MEM_BASE;
1228 - __dma_sync(addr, size, direction);
1229 -}
1230 -
1231 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
1232 -
1233 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
1234 - enum dma_data_direction direction)
1235 -{
1236 - int i;
1237 -
1238 - BUG_ON(direction == DMA_NONE);
1239 -
1240 - /* Make sure that gcc doesn't leave the empty loop body. */
1241 - for (i = 0; i < nelems; i++, sg++)
1242 - __dma_sync((unsigned long)page_address(sg->page),
1243 - sg->length, direction);
1244 -}
1245 -
1246 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
1247 -
1248 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
1249 - enum dma_data_direction direction)
1250 -{
1251 - int i;
1252 -
1253 - BUG_ON(direction == DMA_NONE);
1254 -
1255 - /* Make sure that gcc doesn't leave the empty loop body. */
1256 - for (i = 0; i < nelems; i++, sg++)
1257 - __dma_sync((unsigned long)page_address(sg->page),
1258 - sg->length, direction);
1259 -}
1260 -
1261 -EXPORT_SYMBOL(dma_sync_sg_for_device);
1262 -
1263 -int dma_mapping_error(dma_addr_t dma_addr)
1264 -{
1265 - return 0;
1266 -}
1267 -
1268 -EXPORT_SYMBOL(dma_mapping_error);
1269 -
1270 -int dma_supported(struct device *dev, u64 mask)
1271 -{
1272 - /*
1273 - * we fall back to GFP_DMA when the mask isn't all 1s,
1274 - * so we can't guarantee allocations that must be
1275 - * within a tighter range than GFP_DMA..
1276 - */
1277 - if (mask < 0x00ffffff)
1278 - return 0;
1279 -
1280 - return 1;
1281 -}
1282 -
1283 -EXPORT_SYMBOL(dma_supported);
1284 -
1285 -int dma_is_consistent(dma_addr_t dma_addr)
1286 -{
1287 - return 1;
1288 -}
1289 -
1290 -EXPORT_SYMBOL(dma_is_consistent);
1291 -
1292 -void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
1293 -{
1294 - if (direction == DMA_NONE)
1295 - return;
1296 -
1297 - dma_cache_wback_inv((unsigned long)vaddr, size);
1298 -}
1299 -
1300 -EXPORT_SYMBOL(dma_cache_sync);
1301 -
1302 diff -urN linux.old/arch/mips/mm/dma-noncoherent.c linux.dev/arch/mips/mm/dma-noncoherent.c
1303 --- linux.old/arch/mips/mm/dma-noncoherent.c 2007-01-10 20:10:37.000000000 +0100
1304 +++ linux.dev/arch/mips/mm/dma-noncoherent.c 1970-01-01 01:00:00.000000000 +0100
1305 @@ -1,369 +0,0 @@
1306 -/*
1307 - * This file is subject to the terms and conditions of the GNU General Public
1308 - * License. See the file "COPYING" in the main directory of this archive
1309 - * for more details.
1310 - *
1311 - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
1312 - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
1313 - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
1314 - */
1315 -#include <linux/types.h>
1316 -#include <linux/mm.h>
1317 -#include <linux/module.h>
1318 -#include <linux/string.h>
1319 -#include <linux/dma-mapping.h>
1320 -
1321 -#include <asm/cache.h>
1322 -#include <asm/io.h>
1323 -
1324 -/*
1325 - * Warning on the terminology - Linux calls an uncached area coherent;
1326 - * MIPS terminology calls memory areas with hardware maintained coherency
1327 - * coherent.
1328 - */
1329 -
1330 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
1331 - dma_addr_t * dma_handle, gfp_t gfp)
1332 -{
1333 - void *ret;
1334 - /* ignore region specifiers */
1335 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
1336 -
1337 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
1338 - gfp |= GFP_DMA;
1339 - ret = (void *) __get_free_pages(gfp, get_order(size));
1340 -
1341 - if (ret != NULL) {
1342 - memset(ret, 0, size);
1343 - *dma_handle = virt_to_phys(ret);
1344 - }
1345 -
1346 - return ret;
1347 -}
1348 -
1349 -EXPORT_SYMBOL(dma_alloc_noncoherent);
1350 -
1351 -void *dma_alloc_coherent(struct device *dev, size_t size,
1352 - dma_addr_t * dma_handle, gfp_t gfp)
1353 -{
1354 - void *ret;
1355 -
1356 - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
1357 - if (ret) {
1358 - dma_cache_wback_inv((unsigned long) ret, size);
1359 - ret = UNCAC_ADDR(ret);
1360 - }
1361 -
1362 - return ret;
1363 -}
1364 -
1365 -EXPORT_SYMBOL(dma_alloc_coherent);
1366 -
1367 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
1368 - dma_addr_t dma_handle)
1369 -{
1370 - free_pages((unsigned long) vaddr, get_order(size));
1371 -}
1372 -
1373 -EXPORT_SYMBOL(dma_free_noncoherent);
1374 -
1375 -void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
1376 - dma_addr_t dma_handle)
1377 -{
1378 - unsigned long addr = (unsigned long) vaddr;
1379 -
1380 - addr = CAC_ADDR(addr);
1381 - free_pages(addr, get_order(size));
1382 -}
1383 -
1384 -EXPORT_SYMBOL(dma_free_coherent);
1385 -
1386 -static inline void __dma_sync(unsigned long addr, size_t size,
1387 - enum dma_data_direction direction)
1388 -{
1389 - switch (direction) {
1390 - case DMA_TO_DEVICE:
1391 - dma_cache_wback(addr, size);
1392 - break;
1393 -
1394 - case DMA_FROM_DEVICE:
1395 - dma_cache_inv(addr, size);
1396 - break;
1397 -
1398 - case DMA_BIDIRECTIONAL:
1399 - dma_cache_wback_inv(addr, size);
1400 - break;
1401 -
1402 - default:
1403 - BUG();
1404 - }
1405 -}
1406 -
1407 -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
1408 - enum dma_data_direction direction)
1409 -{
1410 - unsigned long addr = (unsigned long) ptr;
1411 -
1412 - __dma_sync(addr, size, direction);
1413 -
1414 - return virt_to_phys(ptr);
1415 -}
1416 -
1417 -EXPORT_SYMBOL(dma_map_single);
1418 -
1419 -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
1420 - enum dma_data_direction direction)
1421 -{
1422 - unsigned long addr;
1423 - addr = dma_addr + PAGE_OFFSET;
1424 -
1425 - //__dma_sync(addr, size, direction);
1426 -}
1427 -
1428 -EXPORT_SYMBOL(dma_unmap_single);
1429 -
1430 -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1431 - enum dma_data_direction direction)
1432 -{
1433 - int i;
1434 -
1435 - BUG_ON(direction == DMA_NONE);
1436 -
1437 - for (i = 0; i < nents; i++, sg++) {
1438 - unsigned long addr;
1439 -
1440 - addr = (unsigned long) page_address(sg->page);
1441 - if (addr) {
1442 - __dma_sync(addr + sg->offset, sg->length, direction);
1443 - sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
1444 - + sg->offset;
1445 - }
1446 - }
1447 -
1448 - return nents;
1449 -}
1450 -
1451 -EXPORT_SYMBOL(dma_map_sg);
1452 -
1453 -dma_addr_t dma_map_page(struct device *dev, struct page *page,
1454 - unsigned long offset, size_t size, enum dma_data_direction direction)
1455 -{
1456 - unsigned long addr;
1457 -
1458 - BUG_ON(direction == DMA_NONE);
1459 -
1460 - addr = (unsigned long) page_address(page) + offset;
1461 - dma_cache_wback_inv(addr, size);
1462 -
1463 - return page_to_phys(page) + offset;
1464 -}
1465 -
1466 -EXPORT_SYMBOL(dma_map_page);
1467 -
1468 -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
1469 - enum dma_data_direction direction)
1470 -{
1471 - BUG_ON(direction == DMA_NONE);
1472 -
1473 - if (direction != DMA_TO_DEVICE) {
1474 - unsigned long addr;
1475 -
1476 - addr = dma_address + PAGE_OFFSET;
1477 - dma_cache_wback_inv(addr, size);
1478 - }
1479 -}
1480 -
1481 -EXPORT_SYMBOL(dma_unmap_page);
1482 -
1483 -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
1484 - enum dma_data_direction direction)
1485 -{
1486 - unsigned long addr;
1487 - int i;
1488 -
1489 - BUG_ON(direction == DMA_NONE);
1490 -
1491 - if (direction == DMA_TO_DEVICE)
1492 - return;
1493 -
1494 - for (i = 0; i < nhwentries; i++, sg++) {
1495 - addr = (unsigned long) page_address(sg->page);
1496 - if (addr)
1497 - __dma_sync(addr + sg->offset, sg->length, direction);
1498 - }
1499 -}
1500 -
1501 -EXPORT_SYMBOL(dma_unmap_sg);
1502 -
1503 -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1504 - size_t size, enum dma_data_direction direction)
1505 -{
1506 - unsigned long addr;
1507 -
1508 - BUG_ON(direction == DMA_NONE);
1509 -
1510 - addr = dma_handle + PAGE_OFFSET;
1511 - __dma_sync(addr, size, direction);
1512 -}
1513 -
1514 -EXPORT_SYMBOL(dma_sync_single_for_cpu);
1515 -
1516 -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
1517 - size_t size, enum dma_data_direction direction)
1518 -{
1519 - unsigned long addr;
1520 -
1521 - BUG_ON(direction == DMA_NONE);
1522 -
1523 - addr = dma_handle + PAGE_OFFSET;
1524 - __dma_sync(addr, size, direction);
1525 -}
1526 -
1527 -EXPORT_SYMBOL(dma_sync_single_for_device);
1528 -
1529 -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
1530 - unsigned long offset, size_t size, enum dma_data_direction direction)
1531 -{
1532 - unsigned long addr;
1533 -
1534 - BUG_ON(direction == DMA_NONE);
1535 -
1536 - addr = dma_handle + offset + PAGE_OFFSET;
1537 - __dma_sync(addr, size, direction);
1538 -}
1539 -
1540 -EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
1541 -
1542 -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
1543 - unsigned long offset, size_t size, enum dma_data_direction direction)
1544 -{
1545 - unsigned long addr;
1546 -
1547 - BUG_ON(direction == DMA_NONE);
1548 -
1549 - addr = dma_handle + offset + PAGE_OFFSET;
1550 - __dma_sync(addr, size, direction);
1551 -}
1552 -
1553 -EXPORT_SYMBOL(dma_sync_single_range_for_device);
1554 -
1555 -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
1556 - enum dma_data_direction direction)
1557 -{
1558 - int i;
1559 -
1560 - BUG_ON(direction == DMA_NONE);
1561 -
1562 - /* Make sure that gcc doesn't leave the empty loop body. */
1563 - for (i = 0; i < nelems; i++, sg++)
1564 - __dma_sync((unsigned long)page_address(sg->page),
1565 - sg->length, direction);
1566 -}
1567 -
1568 -EXPORT_SYMBOL(dma_sync_sg_for_cpu);
1569 -
1570 -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
1571 - enum dma_data_direction direction)
1572 -{
1573 - int i;
1574 -
1575 - BUG_ON(direction == DMA_NONE);
1576 -
1577 - /* Make sure that gcc doesn't leave the empty loop body. */
1578 - for (i = 0; i < nelems; i++, sg++)
1579 - __dma_sync((unsigned long)page_address(sg->page),
1580 - sg->length, direction);
1581 -}
1582 -
1583 -EXPORT_SYMBOL(dma_sync_sg_for_device);
1584 -
1585 -int dma_mapping_error(dma_addr_t dma_addr)
1586 -{
1587 - return 0;
1588 -}
1589 -
1590 -EXPORT_SYMBOL(dma_mapping_error);
1591 -
1592 -int dma_supported(struct device *dev, u64 mask)
1593 -{
1594 - /*
1595 - * we fall back to GFP_DMA when the mask isn't all 1s,
1596 - * so we can't guarantee allocations that must be
1597 - * within a tighter range than GFP_DMA..
1598 - */
1599 - if (mask < 0x00ffffff)
1600 - return 0;
1601 -
1602 - return 1;
1603 -}
1604 -
1605 -EXPORT_SYMBOL(dma_supported);
1606 -
1607 -int dma_is_consistent(dma_addr_t dma_addr)
1608 -{
1609 - return 1;
1610 -}
1611 -
1612 -EXPORT_SYMBOL(dma_is_consistent);
1613 -
1614 -void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
1615 -{
1616 - if (direction == DMA_NONE)
1617 - return;
1618 -
1619 - dma_cache_wback_inv((unsigned long)vaddr, size);
1620 -}
1621 -
1622 -EXPORT_SYMBOL(dma_cache_sync);
1623 -
1624 -/* The DAC routines are a PCIism.. */
1625 -
1626 -#ifdef CONFIG_PCI
1627 -
1628 -#include <linux/pci.h>
1629 -
1630 -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
1631 - struct page *page, unsigned long offset, int direction)
1632 -{
1633 - return (dma64_addr_t)page_to_phys(page) + offset;
1634 -}
1635 -
1636 -EXPORT_SYMBOL(pci_dac_page_to_dma);
1637 -
1638 -struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
1639 - dma64_addr_t dma_addr)
1640 -{
1641 - return mem_map + (dma_addr >> PAGE_SHIFT);
1642 -}
1643 -
1644 -EXPORT_SYMBOL(pci_dac_dma_to_page);
1645 -
1646 -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
1647 - dma64_addr_t dma_addr)
1648 -{
1649 - return dma_addr & ~PAGE_MASK;
1650 -}
1651 -
1652 -EXPORT_SYMBOL(pci_dac_dma_to_offset);
1653 -
1654 -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
1655 - dma64_addr_t dma_addr, size_t len, int direction)
1656 -{
1657 - BUG_ON(direction == PCI_DMA_NONE);
1658 -
1659 - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1660 -}
1661 -
1662 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
1663 -
1664 -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
1665 - dma64_addr_t dma_addr, size_t len, int direction)
1666 -{
1667 - BUG_ON(direction == PCI_DMA_NONE);
1668 -
1669 - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1670 -}
1671 -
1672 -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
1673 -
1674 -#endif /* CONFIG_PCI */
1675 diff -urN linux.old/arch/mips/mm/Makefile linux.dev/arch/mips/mm/Makefile
1676 --- linux.old/arch/mips/mm/Makefile 2007-01-10 20:10:37.000000000 +0100
1677 +++ linux.dev/arch/mips/mm/Makefile 2007-02-09 20:26:45.376386784 +0100
1678 @@ -2,8 +2,8 @@
1679 # Makefile for the Linux/MIPS-specific parts of the memory manager.
1680 #
1681
1682 -obj-y += cache.o extable.o fault.o init.o pgtable.o \
1683 - tlbex.o tlbex-fault.o
1684 +obj-y += cache.o dma-default.o extable.o fault.o \
1685 + init.o pgtable.o tlbex.o tlbex-fault.o
1686
1687 obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
1688 obj-$(CONFIG_64BIT) += pgtable-64.o
1689 @@ -32,14 +32,4 @@
1690 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
1691 obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
1692
1693 -#
1694 -# Choose one DMA coherency model
1695 -#
1696 -ifndef CONFIG_OWN_DMA
1697 -obj-$(CONFIG_DMA_COHERENT) += dma-coherent.o
1698 -obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
1699 -endif
1700 -obj-$(CONFIG_DMA_IP27) += dma-ip27.o
1701 -obj-$(CONFIG_DMA_IP32) += dma-ip32.o
1702 -
1703 EXTRA_AFLAGS := $(CFLAGS)
1704 diff -urN linux.old/arch/mips/pci/Makefile linux.dev/arch/mips/pci/Makefile
1705 --- linux.old/arch/mips/pci/Makefile 2007-01-10 20:10:37.000000000 +0100
1706 +++ linux.dev/arch/mips/pci/Makefile 2007-02-09 20:26:50.961537712 +0100
1707 @@ -2,7 +2,7 @@
1708 # Makefile for the PCI specific kernel interface routines under Linux.
1709 #
1710
1711 -obj-y += pci.o
1712 +obj-y += pci.o pci-dac.o
1713
1714 #
1715 # PCI bus host bridge specific code
1716 diff -urN linux.old/arch/mips/pci/pci-dac.c linux.dev/arch/mips/pci/pci-dac.c
1717 --- linux.old/arch/mips/pci/pci-dac.c 1970-01-01 01:00:00.000000000 +0100
1718 +++ linux.dev/arch/mips/pci/pci-dac.c 2007-02-09 20:26:50.961537712 +0100
1719 @@ -0,0 +1,79 @@
1720 +/*
1721 + * This file is subject to the terms and conditions of the GNU General Public
1722 + * License. See the file "COPYING" in the main directory of this archive
1723 + * for more details.
1724 + *
1725 + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
1726 + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
1727 + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
1728 + */
1729 +
1730 +#include <linux/types.h>
1731 +#include <linux/dma-mapping.h>
1732 +#include <linux/mm.h>
1733 +#include <linux/module.h>
1734 +#include <linux/string.h>
1735 +
1736 +#include <asm/cache.h>
1737 +#include <asm/io.h>
1738 +
1739 +#include <dma-coherence.h>
1740 +
1741 +#include <linux/pci.h>
1742 +
1743 +dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
1744 + struct page *page, unsigned long offset, int direction)
1745 +{
1746 + struct device *dev = &pdev->dev;
1747 +
1748 + BUG_ON(direction == DMA_NONE);
1749 +
1750 + if (!plat_device_is_coherent(dev)) {
1751 + unsigned long addr;
1752 +
1753 + addr = (unsigned long) page_address(page) + offset;
1754 + dma_cache_wback_inv(addr, PAGE_SIZE);
1755 + }
1756 +
1757 + return plat_map_dma_mem_page(dev, page) + offset;
1758 +}
1759 +
1760 +EXPORT_SYMBOL(pci_dac_page_to_dma);
1761 +
1762 +struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
1763 + dma64_addr_t dma_addr)
1764 +{
1765 + return pfn_to_page(plat_dma_addr_to_phys(dma_addr) >> PAGE_SHIFT);
1766 +}
1767 +
1768 +EXPORT_SYMBOL(pci_dac_dma_to_page);
1769 +
1770 +unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
1771 + dma64_addr_t dma_addr)
1772 +{
1773 + return dma_addr & ~PAGE_MASK;
1774 +}
1775 +
1776 +EXPORT_SYMBOL(pci_dac_dma_to_offset);
1777 +
1778 +void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
1779 + dma64_addr_t dma_addr, size_t len, int direction)
1780 +{
1781 + BUG_ON(direction == PCI_DMA_NONE);
1782 +
1783 + if (!plat_device_is_coherent(&pdev->dev))
1784 + dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1785 +}
1786 +
1787 +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
1788 +
1789 +void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
1790 + dma64_addr_t dma_addr, size_t len, int direction)
1791 +{
1792 + BUG_ON(direction == PCI_DMA_NONE);
1793 +
1794 + if (!plat_device_is_coherent(&pdev->dev))
1795 + dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
1796 +}
1797 +
1798 +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
1799 diff -urN linux.old/include/asm-mips/mach-generic/dma-coherence.h linux.dev/include/asm-mips/mach-generic/dma-coherence.h
1800 --- linux.old/include/asm-mips/mach-generic/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1801 +++ linux.dev/include/asm-mips/mach-generic/dma-coherence.h 2007-02-09 20:26:50.962537560 +0100
1802 @@ -0,0 +1,43 @@
1803 +/*
1804 + * This file is subject to the terms and conditions of the GNU General Public
1805 + * License. See the file "COPYING" in the main directory of this archive
1806 + * for more details.
1807 + *
1808 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1809 + *
1810 + */
1811 +#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H
1812 +#define __ASM_MACH_GENERIC_DMA_COHERENCE_H
1813 +
1814 +struct device;
1815 +
1816 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
1817 +{
1818 + return virt_to_phys(addr);
1819 +}
1820 +
1821 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
1822 +{
1823 + return page_to_phys(page);
1824 +}
1825 +
1826 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
1827 +{
1828 + return dma_addr;
1829 +}
1830 +
1831 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
1832 +{
1833 +}
1834 +
1835 +static inline int plat_device_is_coherent(struct device *dev)
1836 +{
1837 +#ifdef CONFIG_DMA_COHERENT
1838 + return 1;
1839 +#endif
1840 +#ifdef CONFIG_DMA_NONCOHERENT
1841 + return 0;
1842 +#endif
1843 +}
1844 +
1845 +#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
1846 diff -urN linux.old/include/asm-mips/mach-generic/kmalloc.h linux.dev/include/asm-mips/mach-generic/kmalloc.h
1847 --- linux.old/include/asm-mips/mach-generic/kmalloc.h 2007-01-10 20:10:37.000000000 +0100
1848 +++ linux.dev/include/asm-mips/mach-generic/kmalloc.h 2007-02-09 20:26:50.962537560 +0100
1849 @@ -5,6 +5,7 @@
1850 #ifndef CONFIG_DMA_COHERENT
1851 /*
1852 * Total overkill for most systems but need as a safe default.
1853 + * Set this one if any device in the system might do non-coherent DMA.
1854 */
1855 #define ARCH_KMALLOC_MINALIGN 128
1856 #endif
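
The 128-byte minimum exists because cache maintenance works on whole lines: two
kmalloc() objects sharing one line would corrupt each other under non-coherent
DMA. A small illustration (line size and addresses are made up):

/*
 * Assume a 32-byte cache line and no enforced kmalloc() alignment:
 *
 *   a = kmalloc(16, GFP_KERNEL);    // say, returns 0x80100000
 *   b = kmalloc(16, GFP_KERNEL);    // say, returns 0x80100010 - same line
 *
 * dma_cache_inv(a, 16) before a device write into 'a' discards the
 * whole line, silently destroying dirty CPU stores to 'b'.  Aligning
 * kmalloc() to the largest line size in the system (128 here) makes
 * such sharing impossible.
 */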
1857 diff -urN linux.old/include/asm-mips/mach-ip27/dma-coherence.h linux.dev/include/asm-mips/mach-ip27/dma-coherence.h
1858 --- linux.old/include/asm-mips/mach-ip27/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1859 +++ linux.dev/include/asm-mips/mach-ip27/dma-coherence.h 2007-02-09 20:26:50.962537560 +0100
1860 @@ -0,0 +1,49 @@
1861 +/*
1862 + * This file is subject to the terms and conditions of the GNU General Public
1863 + * License. See the file "COPYING" in the main directory of this archive
1864 + * for more details.
1865 + *
1866 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1867 + *
1868 + */
1869 +#ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
1870 +#define __ASM_MACH_IP27_DMA_COHERENCE_H
1871 +
1872 +#include <asm/pci/bridge.h>
1873 +
1874 +#define pdev_to_baddr(pdev, addr) \
1875 + (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
1876 +#define dev_to_baddr(dev, addr) \
1877 + pdev_to_baddr(to_pci_dev(dev), (addr))
1878 +
1879 +struct device;
1880 +
1881 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
1882 +{
1883 + dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
1884 +
1885 + return pa;
1886 +}
1887 +
1888 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
1889 +{
1890 + dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
1891 +
1892 + return pa;
1893 +}
1894 +
1895 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
1896 +{
1897 + return dma_addr & ~(0xffUL << 56); /* strip the bridge baddr bits */
1898 +}
1899 +
1900 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
1901 +{
1902 +}
1903 +
1904 +static inline int plat_device_is_coherent(struct device *dev)
1905 +{
1906 + return 1; /* IP27 non-coherent mode is unsupported */
1907 +}
1908 +
1909 +#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
1910 diff -urN linux.old/include/asm-mips/mach-ip32/dma-coherence.h linux.dev/include/asm-mips/mach-ip32/dma-coherence.h
1911 --- linux.old/include/asm-mips/mach-ip32/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1912 +++ linux.dev/include/asm-mips/mach-ip32/dma-coherence.h 2007-02-09 20:26:50.963537408 +0100
1913 @@ -0,0 +1,71 @@
1914 +/*
1915 + * This file is subject to the terms and conditions of the GNU General Public
1916 + * License. See the file "COPYING" in the main directory of this archive
1917 + * for more details.
1918 + *
1919 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1920 + *
1921 + */
1922 +#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
1923 +#define __ASM_MACH_IP32_DMA_COHERENCE_H
1924 +
1925 +#include <asm/ip32/crime.h>
1926 +
1927 +struct device;
1928 +
1929 +/*
1930 + * Few notes.
1931 + * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
1932 + * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
1933 + * native-endian)
1934 + * 3. All other devices see memory as one big chunk at 0x40000000
1935 + * 4. Non-PCI devices will pass NULL as struct device*
1936 + *
1937 + * Thus we translate differently, depending on device.
1938 + */
1939 +
1940 +#define RAM_OFFSET_MASK 0x3fffffffUL
1941 +
1942 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
1943 +{
1944 + dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
1945 +
1946 + if (dev == NULL)
1947 + pa += CRIME_HI_MEM_BASE;
1948 +
1949 + return pa;
1950 +}
1951 +
1952 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
1953 +{
1954 + dma_addr_t pa;
1955 +
1956 + pa = page_to_phys(page) & RAM_OFFSET_MASK;
1957 +
1958 + if (dev == NULL)
1959 + pa += CRIME_HI_MEM_BASE;
1960 +
1961 + return pa;
1962 +}
1963 +
1964 +/* This is almost certainly wrong but it's what dma-ip32.c used to use */
1965 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
1966 +{
1967 + unsigned long addr = dma_addr & RAM_OFFSET_MASK;
1968 +
1969 + if (dma_addr >= 256*1024*1024)
1970 + addr += CRIME_HI_MEM_BASE;
1971 +
1972 + return addr;
1973 +}
1974 +
1975 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
1976 +{
1977 +}
1978 +
1979 +static inline int plat_device_is_coherent(struct device *dev)
1980 +{
1981 + return 0; /* IP32 is non-coherent */
1982 +}
1983 +
1984 +#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
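
To make the memory-map notes and the "almost certainly wrong" caveat above
concrete, a worked example (illustration only; assumes CRIME_HI_MEM_BASE is
0x40000000):

/*
 * A buffer at physical 0x40001000, i.e. in the "rest @0x40000000" chunk:
 *
 *   masked:          0x40001000 & RAM_OFFSET_MASK = 0x00001000
 *   PCI device:      dma_addr = 0x00001000
 *   non-PCI (NULL):  dma_addr = 0x00001000 + CRIME_HI_MEM_BASE = 0x40001000
 *
 * plat_dma_addr_to_phys() guesses the chunk from the address alone:
 *
 *   0x40001000 >= 256 MB -> 0x00001000 + CRIME_HI_MEM_BASE  (round-trips)
 *   0x00001000 <  256 MB -> 0x00001000                      (loses the PCI case)
 *
 * The PCI case is exactly why the comment above calls the inverse
 * translation "almost certainly wrong".
 */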
1985 diff -urN linux.old/include/asm-mips/mach-jazz/dma-coherence.h linux.dev/include/asm-mips/mach-jazz/dma-coherence.h
1986 --- linux.old/include/asm-mips/mach-jazz/dma-coherence.h 1970-01-01 01:00:00.000000000 +0100
1987 +++ linux.dev/include/asm-mips/mach-jazz/dma-coherence.h 2007-02-09 20:26:50.963537408 +0100
1988 @@ -0,0 +1,40 @@
1989 +/*
1990 + * This file is subject to the terms and conditions of the GNU General Public
1991 + * License. See the file "COPYING" in the main directory of this archive
1992 + * for more details.
1993 + *
1994 + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
1995 + */
1996 +#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H
1997 +#define __ASM_MACH_JAZZ_DMA_COHERENCE_H
1998 +
1999 +#include <asm/jazzdma.h>
2000 +
2001 +struct device;
2002 +
2003 +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
2004 +{
2005 + return vdma_alloc(virt_to_phys(addr), size);
2006 +}
2007 +
2008 +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
2009 +{
2010 + return vdma_alloc(page_to_phys(page), PAGE_SIZE);
2011 +}
2012 +
2013 +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
2014 +{
2015 + return vdma_log2phys(dma_addr);
2016 +}
2017 +
2018 +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
2019 +{
2020 + vdma_free(dma_addr);
2021 +}
2022 +
2023 +static inline int plat_device_is_coherent(struct device *dev)
2024 +{
2025 + return 0;
2026 +}
2027 +
2028 +#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */
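
Jazz is the one variant above where plat_unmap_dma_mem() is not a no-op: DMA
addresses are logical VDMA addresses handed out by vdma_alloc(), so every
mapping consumes a page-table entry that must be released. A hedged round-trip
sketch using only the calls shown above (names are placeholders):

/* Sketch only - not part of this patch. */
static void jazz_dma_example(void *buf, size_t size)
{
	dma_addr_t laddr;

	laddr = vdma_alloc(virt_to_phys(buf), size);	/* logical DMA address */
	/* ... device transfers against 'laddr' ... */
	vdma_free(laddr);				/* entry must be freed */
}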