atheros: do not alter the network vlan config if swconfig is required and missing
[openwrt.git] / target / linux / coldfire / patches / 002-mcfv4e_coldfire_headers.patch
1 From 127797e5cf8a036825007586b914b75897aba554 Mon Sep 17 00:00:00 2001
2 From: Kurt Mahan <kmahan@freescale.com>
3 Date: Wed, 31 Oct 2007 16:39:31 -0600
4 Subject: [PATCH] Add Coldfire Support into existing headers.
5
6 Modifications to the various M68k header files to add
7 Coldfire processor support.
8
9 LTIBName: mcfv4e-coldfire-headers
10 Signed-off-by: Kurt Mahan <kmahan@freescale.com>
11 ---
12 include/asm-m68k/atomic.h | 23 ++-
13 include/asm-m68k/bitops.h | 426 ++++++++++++++++++++++++++++++++++++++++
14 include/asm-m68k/bootinfo.h | 13 ++
15 include/asm-m68k/byteorder.h | 12 +-
16 include/asm-m68k/cacheflush.h | 4 +
17 include/asm-m68k/checksum.h | 10 +
18 include/asm-m68k/delay.h | 26 +++
19 include/asm-m68k/div64.h | 4 +
20 include/asm-m68k/elf.h | 2 +-
21 include/asm-m68k/fpu.h | 2 +
22 include/asm-m68k/io.h | 26 +++-
23 include/asm-m68k/irq.h | 5 +-
24 include/asm-m68k/machdep.h | 7 +
25 include/asm-m68k/mmu_context.h | 84 ++++++++-
26 include/asm-m68k/page.h | 20 ++-
27 include/asm-m68k/page_offset.h | 7 +-
28 include/asm-m68k/pci.h | 99 ++++++----
29 include/asm-m68k/pgalloc.h | 4 +-
30 include/asm-m68k/pgtable.h | 15 ++
31 include/asm-m68k/processor.h | 46 ++++-
32 include/asm-m68k/ptrace.h | 11 +
33 include/asm-m68k/raw_io.h | 58 ++++++
34 include/asm-m68k/segment.h | 10 +
35 include/asm-m68k/setup.h | 27 +++
36 include/asm-m68k/signal.h | 5 +
37 include/asm-m68k/string.h | 2 +
38 include/asm-m68k/system.h | 17 ++-
39 include/asm-m68k/thread_info.h | 1 +
40 include/asm-m68k/tlbflush.h | 16 ++-
41 include/asm-m68k/uaccess.h | 4 +
42 30 files changed, 925 insertions(+), 61 deletions(-)
43
44 --- a/include/asm-m68k/atomic.h
45 +++ b/include/asm-m68k/atomic.h
46 @@ -2,7 +2,7 @@
47 #define __ARCH_M68K_ATOMIC__
48
49
50 -#include <asm/system.h>
51 +#include <asm/system.h> /* local_irq_XXX() */
52
53 /*
54 * Atomic operations that C can't guarantee us. Useful for
55 @@ -21,12 +21,20 @@ typedef struct { int counter; } atomic_t
56
57 static inline void atomic_add(int i, atomic_t *v)
58 {
59 +#ifndef CONFIG_COLDFIRE
60 __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
61 +#else
62 + __asm__ __volatile__("addl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
63 +#endif
64 }
65
66 static inline void atomic_sub(int i, atomic_t *v)
67 {
68 +#ifndef CONFIG_COLDFIRE
69 __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
70 +#else
71 + __asm__ __volatile__("subl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
72 +#endif
73 }
74
75 static inline void atomic_inc(atomic_t *v)
76 @@ -46,6 +54,14 @@ static inline int atomic_dec_and_test(at
77 return c != 0;
78 }
79
80 +static __inline__ int atomic_dec_and_test_lt(volatile atomic_t *v)
81 +{
82 + char c;
83 + __asm__ __volatile__("subql #1,%1; slt %0" : "=d" (c), "=m" (*v)
84 + : "m" (*v));
85 + return c != 0 ;
86 +}
87 +
88 static inline int atomic_inc_and_test(atomic_t *v)
89 {
90 char c;
91 @@ -156,7 +172,12 @@ static inline int atomic_sub_and_test(in
92 static inline int atomic_add_negative(int i, atomic_t *v)
93 {
94 char c;
95 +#ifndef CONFIG_COLDFIRE
96 __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
97 +#else
98 + __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "=m" (*v)
99 + : "d" (i) , "m" (*v));
100 +#endif
101 return c != 0;
102 }
103
104 --- a/include/asm-m68k/bitops.h
105 +++ b/include/asm-m68k/bitops.h
106 @@ -19,6 +19,7 @@
107 *
108 * They use the standard big-endian m680x0 bit ordering.
109 */
110 +#ifndef CONFIG_COLDFIRE
111
112 #define test_and_set_bit(nr,vaddr) \
113 (__builtin_constant_p(nr) ? \
114 @@ -457,4 +458,429 @@ static inline int ext2_find_next_bit(con
115
116 #endif /* __KERNEL__ */
117
118 +#else /* CONFIG_COLDFIRE */
119 +
120 +#define test_and_set_bit(nr,vaddr) \
121 + (__builtin_constant_p(nr) ? \
122 + __constant_coldfire_test_and_set_bit(nr, vaddr) : \
123 + __generic_coldfire_test_and_set_bit(nr, vaddr))
124 +
125 +
126 +static __inline__ int __constant_coldfire_test_and_set_bit(int nr,
127 + volatile void *vaddr)
128 +{
129 + char retval;
130 + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
131 +
132 + __asm__ __volatile__ ("bset %2,%1; sne %0"
133 + : "=d" (retval), "+QUd" (*p)
134 + : "di" (nr & 7));
135 + return retval;
136 +}
137 +
138 +static __inline__ int __generic_coldfire_test_and_set_bit(int nr,
139 + volatile void *vaddr)
140 +{
141 + char retval;
142 +
143 + __asm__ __volatile__ ("bset %2,%1; sne %0"
144 + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
145 + : "d" (nr)
146 + : "memory");
147 + return retval;
148 +}
149 +#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
150 +
151 +#define set_bit(nr,vaddr) \
152 + (__builtin_constant_p(nr) ? \
153 + __constant_coldfire_set_bit(nr, vaddr) : \
154 + __generic_coldfire_set_bit(nr, vaddr))
155 +
156 +static __inline__ void __constant_coldfire_set_bit(int nr,
157 + volatile void *vaddr)
158 +{
159 + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
160 + __asm__ __volatile__ ("bset %1,%0"
161 + : "+QUd" (*p) : "di" (nr & 7));
162 +}
163 +
164 +static __inline__ void __generic_coldfire_set_bit(int nr, volatile void *vaddr)
165 +{
166 + __asm__ __volatile__ ("bset %1,%0"
167 + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
168 + : "d" (nr)
169 + : "memory");
170 +}
171 +#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
172 +
173 +#define test_and_clear_bit(nr, vaddr) \
174 + (__builtin_constant_p(nr) ? \
175 + __constant_coldfire_test_and_clear_bit(nr, vaddr) : \
176 + __generic_coldfire_test_and_clear_bit(nr, vaddr))
177 +
178 +static __inline__ int __constant_coldfire_test_and_clear_bit(int nr,
179 + volatile void *vaddr)
180 +{
181 + char retval;
182 + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
183 +
184 + __asm__ __volatile__ ("bclr %2,%1; sne %0"
185 + : "=d" (retval), "+QUd" (*p)
186 + : "id" (nr & 7));
187 +
188 + return retval;
189 +}
190 +
191 +static __inline__ int __generic_coldfire_test_and_clear_bit(int nr,
192 + volatile void *vaddr)
193 +{
194 + char retval;
195 +
196 + __asm__ __volatile__ ("bclr %2,%1; sne %0"
197 + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
198 + : "d" (nr & 7)
199 + : "memory");
200 +
201 + return retval;
202 +}
203 +#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
204 +
205 +/*
206 + * clear_bit() doesn't provide any barrier for the compiler.
207 + */
208 +#define smp_mb__before_clear_bit() barrier()
209 +#define smp_mb__after_clear_bit() barrier()
210 +
211 +#define clear_bit(nr,vaddr) \
212 + (__builtin_constant_p(nr) ? \
213 + __constant_coldfire_clear_bit(nr, vaddr) : \
214 + __generic_coldfire_clear_bit(nr, vaddr))
215 +
216 +static __inline__ void __constant_coldfire_clear_bit(int nr,
217 + volatile void *vaddr)
218 +{
219 + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
220 + __asm__ __volatile__ ("bclr %1,%0"
221 + : "+QUd" (*p) : "id" (nr & 7));
222 +}
223 +
224 +static __inline__ void __generic_coldfire_clear_bit(int nr,
225 + volatile void *vaddr)
226 +{
227 + __asm__ __volatile__ ("bclr %1,%0"
228 + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
229 + : "d" (nr)
230 + : "memory");
231 +}
232 +#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
233 +
234 +#define test_and_change_bit(nr, vaddr) \
235 + (__builtin_constant_p(nr) ? \
236 + __constant_coldfire_test_and_change_bit(nr, vaddr) : \
237 + __generic_coldfire_test_and_change_bit(nr, vaddr))
238 +
239 +static __inline__ int __constant_coldfire_test_and_change_bit(int nr,
240 + volatile void *vaddr)
241 +{
242 + char retval;
243 + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
244 +
245 + __asm__ __volatile__ ("bchg %2,%1; sne %0"
246 + : "=d" (retval), "+QUd" (*p)
247 + : "id" (nr & 7));
248 +
249 + return retval;
250 +}
251 +
252 +static __inline__ int __generic_coldfire_test_and_change_bit(int nr,
253 + volatile void *vaddr)
254 +{
255 + char retval;
256 +
257 + __asm__ __volatile__ ("bchg %2,%1; sne %0"
258 + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
259 + : "id" (nr)
260 + : "memory");
261 +
262 + return retval;
263 +}
264 +#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
265 +#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
266 +
267 +#define change_bit(nr,vaddr) \
268 + (__builtin_constant_p(nr) ? \
269 + __constant_coldfire_change_bit(nr, vaddr) : \
270 + __generic_coldfire_change_bit(nr, vaddr))
271 +
272 +static __inline__ void __constant_coldfire_change_bit(int nr,
273 + volatile void *vaddr)
274 +{
275 + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
276 + __asm__ __volatile__ ("bchg %1,%0"
277 + : "+QUd" (*p) : "id" (nr & 7));
278 +}
279 +
280 +static __inline__ void __generic_coldfire_change_bit(int nr,
281 + volatile void *vaddr)
282 +{
283 + __asm__ __volatile__ ("bchg %1,%0"
284 + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
285 + : "d" (nr)
286 + : "memory");
287 +}
288 +
289 +static inline int test_bit(int nr, const unsigned long *vaddr)
290 +{
291 + return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
292 +}
293 +
294 +static __inline__ unsigned long ffz(unsigned long word)
295 +{
296 + unsigned long result = 0;
297 +
298 + while (word & 1) {
299 + result++;
300 + word >>= 1;
301 + }
302 + return result;
303 +}
304 +
305 +/* find_next_zero_bit() finds the first zero bit in a bit string of length
306 + * 'size' bits, starting the search at bit 'offset'. This is largely based
307 + * on Linus's ALPHA routines.
308 + */
309 +static __inline__ unsigned long find_next_zero_bit(void *addr,
310 + unsigned long size, unsigned long offset)
311 +{
312 + unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
313 + unsigned long result = offset & ~31UL;
314 + unsigned long tmp;
315 +
316 + if (offset >= size)
317 + return size;
318 + size -= result;
319 + offset &= 31UL;
320 + if (offset) {
321 + tmp = *(p++);
322 + tmp |= ~0UL >> (32-offset);
323 + if (size < 32)
324 + goto found_first;
325 + if (~tmp)
326 + goto found_middle;
327 + size -= 32;
328 + result += 32;
329 + }
330 + while (size & ~31UL) {
331 + tmp = *(p++);
332 + if (~tmp)
333 + goto found_middle;
334 + result += 32;
335 + size -= 32;
336 + }
337 + if (!size)
338 + return result;
339 + tmp = *p;
340 +
341 +found_first:
342 + tmp |= ~0UL >> size;
343 +found_middle:
344 + return result + ffz(tmp);
345 +}
346 +
347 +#define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \
348 + (size), 0)
349 +
350 +/* Ported from include/linux/bitops.h */
351 +static __inline__ int ffs(int x)
352 +{
353 + int r = 1;
354 +
355 + if (!x)
356 + return 0;
357 + if (!(x & 0xffff)) {
358 + x >>= 16;
359 + r += 16;
360 + }
361 + if (!(x & 0xff)) {
362 + x >>= 8;
363 + r += 8;
364 + }
365 + if (!(x & 0xf)) {
366 + x >>= 4;
367 + r += 4;
368 + }
369 + if (!(x & 3)) {
370 + x >>= 2;
371 + r += 2;
372 + }
373 + if (!(x & 1)) {
374 + x >>= 1;
375 + r += 1;
376 + }
377 + return r;
378 +}
379 +#define __ffs(x) (ffs(x) - 1)
380 +
381 +/* find_next_bit - find the next set bit in a memory region
382 + * (from asm-ppc/bitops.h)
383 + */
384 +static __inline__ unsigned long find_next_bit(const unsigned long *addr,
385 + unsigned long size, unsigned long offset)
386 +{
387 + unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
388 + unsigned int result = offset & ~31UL;
389 + unsigned int tmp;
390 +
391 + if (offset >= size)
392 + return size;
393 + size -= result;
394 + offset &= 31UL;
395 + if (offset) {
396 + tmp = *p++;
397 + tmp &= ~0UL << offset;
398 + if (size < 32)
399 + goto found_first;
400 + if (tmp)
401 + goto found_middle;
402 + size -= 32;
403 + result += 32;
404 + }
405 + while (size >= 32) {
406 + tmp = *p++;
407 + if (tmp != 0)
408 + goto found_middle;
409 + result += 32;
410 + size -= 32;
411 + }
412 + if (!size)
413 + return result;
414 + tmp = *p;
415 +
416 +found_first:
417 + tmp &= ~0UL >> (32 - size);
418 + if (tmp == 0UL) /* Are any bits set? */
419 + return result + size; /* Nope. */
420 +found_middle:
421 + return result + __ffs(tmp);
422 +}
423 +
424 +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
425 +
426 +#ifdef __KERNEL__
427 +
428 +/* Ported from include/linux/bitops.h */
429 +static __inline__ int fls(int x)
430 +{
431 + int r = 32;
432 +
433 + if (!x)
434 + return 0;
435 + if (!(x & 0xffff0000u)) {
436 + x <<= 16;
437 + r -= 16;
438 + }
439 + if (!(x & 0xff000000u)) {
440 + x <<= 8;
441 + r -= 8;
442 + }
443 + if (!(x & 0xf0000000u)) {
444 + x <<= 4;
445 + r -= 4;
446 + }
447 + if (!(x & 0xc0000000u)) {
448 + x <<= 2;
449 + r -= 2;
450 + }
451 + if (!(x & 0x80000000u)) {
452 + x <<= 1;
453 + r -= 1;
454 + }
455 + return r;
456 +}
457 +
458 +#include <asm-generic/bitops/fls64.h>
459 +#include <asm-generic/bitops/sched.h>
460 +#include <asm-generic/bitops/hweight.h>
461 +
462 +#define minix_find_first_zero_bit(addr, size) find_next_zero_bit((addr), \
463 + (size), 0)
464 +#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr), \
465 + (unsigned long *)(addr))
466 +#define minix_set_bit(nr, addr) set_bit((nr), \
467 + (unsigned long *)(addr))
468 +#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr), \
469 + (unsigned long *)(addr))
470 +
471 +static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr)
472 +{
473 + int *a = (int *)vaddr;
474 + int mask;
475 +
476 + a += nr >> 5;
477 + mask = 1 << (nr & 0x1f);
478 + return ((mask & *a) != 0);
479 +}
480 +
481 +#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, \
482 + (unsigned long *)(addr))
483 +#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, \
484 + (unsigned long *)(addr))
485 +#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, \
486 + (unsigned long *)(addr))
487 +#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, \
488 + (unsigned long *)(addr))
489 +
490 +static inline int ext2_test_bit(int nr, const void *vaddr)
491 +{
492 + const unsigned char *p = vaddr;
493 + return (p[nr >> 3] & (1U << (nr & 7))) != 0;
494 +}
495 +
496 +static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
497 +{
498 + const unsigned long *p = vaddr, *addr = vaddr;
499 + int res;
500 +
501 + if (!size)
502 + return 0;
503 +
504 + size = (size >> 5) + ((size & 31) > 0);
505 + while (*p++ == ~0UL) {
506 + if (--size == 0)
507 + return (p - addr) << 5;
508 + }
509 +
510 + --p;
511 + for (res = 0; res < 32; res++)
512 + if (!ext2_test_bit (res, p))
513 + break;
514 + return (p - addr) * 32 + res;
515 +}
516 +
517 +static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
518 + unsigned offset)
519 +{
520 + const unsigned long *addr = vaddr;
521 + const unsigned long *p = addr + (offset >> 5);
522 + int bit = offset & 31UL, res;
523 +
524 + if (offset >= size)
525 + return size;
526 +
527 + if (bit) {
528 + /* Look for zero in first longword */
529 + for (res = bit; res < 32; res++)
530 + if (!ext2_test_bit (res, p))
531 + return (p - addr) * 32 + res;
532 + p++;
533 + }
534 + /* No zero yet, search remaining full bytes for a zero */
535 + res = ext2_find_first_zero_bit(p, size - 32 * (p - addr));
536 + return (p - addr) * 32 + res;
537 +}
538 +
539 +#endif /* KERNEL */
540 +
541 +#endif /* CONFIG_COLDFIRE */
542 +
543 #endif /* _M68K_BITOPS_H */
544 --- a/include/asm-m68k/bootinfo.h
545 +++ b/include/asm-m68k/bootinfo.h
546 @@ -49,6 +49,19 @@ struct bi_record {
547 #endif /* __ASSEMBLY__ */
548
549
550 +#ifndef __ASSEMBLY__
551 +
552 +struct uboot_record {
553 + unsigned long bd_info;
554 + unsigned long initrd_start;
555 + unsigned long initrd_end;
556 + unsigned long cmd_line_start;
557 + unsigned long cmd_line_stop;
558 +};
559 +
560 +#endif /* __ASSEMBLY__ */
561 +
562 +
563 /*
564 * Tag Definitions
565 *
566 --- a/include/asm-m68k/byteorder.h
567 +++ b/include/asm-m68k/byteorder.h
568 @@ -4,8 +4,15 @@
569 #include <asm/types.h>
570 #include <linux/compiler.h>
571
572 -#ifdef __GNUC__
573 -
574 +#if defined(__GNUC__)
575 +#if defined(__mcfisaaplus__) || defined(__mcfisac__)
576 +static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 val)
577 +{
578 + __asm__ ("byterev %0" : "=d" (val) : "0" (val));
579 + return val;
580 +}
581 +#define __arch__swab32(x) ___arch__swab32(x)
582 +#elif !defined(__mcoldfire__)
583 static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 val)
584 {
585 __asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val));
586 @@ -14,6 +21,7 @@ static __inline__ __attribute_const__ __
587 #define __arch__swab32(x) ___arch__swab32(x)
588
589 #endif
590 +#endif
591
592 #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
593 # define __BYTEORDER_HAS_U64__
594 --- a/include/asm-m68k/cacheflush.h
595 +++ b/include/asm-m68k/cacheflush.h
596 @@ -6,6 +6,9 @@
597 /* cache code */
598 #define FLUSH_I_AND_D (0x00000808)
599 #define FLUSH_I (0x00000008)
600 +#ifdef CONFIG_COLDFIRE
601 +#include <asm/cf_cacheflush.h>
602 +#else /* !CONFIG_COLDFIRE */
603
604 /*
605 * Cache handling functions
606 @@ -153,4 +156,5 @@ static inline void copy_from_user_page(s
607 memcpy(dst, src, len);
608 }
609
610 +#endif /* !CONFIG_COLDFIRE */
611 #endif /* _M68K_CACHEFLUSH_H */
612 --- a/include/asm-m68k/checksum.h
613 +++ b/include/asm-m68k/checksum.h
614 @@ -34,6 +34,7 @@ extern __wsum csum_partial_copy_nocheck(
615 void *dst, int len,
616 __wsum sum);
617
618 +#ifndef CONFIG_COLDFIRE /* CF has own copy in arch/m68k/lib/checksum.c */
619 /*
620 * This is a version of ip_compute_csum() optimized for IP headers,
621 * which always checksum on 4 octet boundaries.
622 @@ -59,6 +60,9 @@ static inline __sum16 ip_fast_csum(const
623 : "memory");
624 return (__force __sum16)~sum;
625 }
626 +#else
627 +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
628 +#endif
629
630 /*
631 * Fold a partial checksum
632 @@ -67,6 +71,11 @@ static inline __sum16 ip_fast_csum(const
633 static inline __sum16 csum_fold(__wsum sum)
634 {
635 unsigned int tmp = (__force u32)sum;
636 +#ifdef CONFIG_COLDFIRE
637 + tmp = (tmp & 0xffff) + (tmp >> 16);
638 + tmp = (tmp & 0xffff) + (tmp >> 16);
639 + return (__force __sum16) ~tmp;
640 +#else
641 __asm__("swap %1\n\t"
642 "addw %1, %0\n\t"
643 "clrw %1\n\t"
644 @@ -74,6 +83,7 @@ static inline __sum16 csum_fold(__wsum s
645 : "=&d" (sum), "=&d" (tmp)
646 : "0" (sum), "1" (tmp));
647 return (__force __sum16)~sum;
648 +#endif
649 }
650
651
652 --- a/include/asm-m68k/delay.h
653 +++ b/include/asm-m68k/delay.h
654 @@ -11,8 +11,25 @@
655
656 static inline void __delay(unsigned long loops)
657 {
658 +#if defined(CONFIG_COLDFIRE)
659 + /* The coldfire runs this loop at significantly different speeds
660 + * depending upon long word alignment or not. We'll pad it to
661 + * long word alignment which is the faster version.
662 + * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
663 + * than using a NOP (0x4e71) instruction because it executes in one
664 + * cycle not three and doesn't allow for an arbitrary delay waiting
665 + * for bus cycles to finish. Also fp/a6 isn't likely to cause a
666 + * stall waiting for the register to become valid if such is added
667 + * to the coldfire at some stage.
668 + */
669 + __asm__ __volatile__ (".balignw 4, 0x4a8e\n\t"
670 + "1: subql #1, %0\n\t"
671 + "jcc 1b"
672 + : "=d" (loops) : "0" (loops));
673 +#else
674 __asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
675 : "=d" (loops) : "0" (loops));
676 +#endif
677 }
678
679 extern void __bad_udelay(void);
680 @@ -26,12 +43,17 @@ extern void __bad_udelay(void);
681 */
682 static inline void __const_udelay(unsigned long xloops)
683 {
684 +#if defined(CONFIG_COLDFIRE)
685 +
686 + __delay(((((unsigned long long) xloops * loops_per_jiffy))>>32)*HZ);
687 +#else
688 unsigned long tmp;
689
690 __asm__ ("mulul %2,%0:%1"
691 : "=d" (xloops), "=d" (tmp)
692 : "d" (xloops), "1" (loops_per_jiffy));
693 __delay(xloops * HZ);
694 +#endif
695 }
696
697 static inline void __udelay(unsigned long usecs)
698 @@ -46,12 +68,16 @@ static inline void __udelay(unsigned lon
699 static inline unsigned long muldiv(unsigned long a, unsigned long b,
700 unsigned long c)
701 {
702 +#if defined(CONFIG_COLDFIRE)
703 + return (long)(((unsigned long long)a * b)/c);
704 +#else
705 unsigned long tmp;
706
707 __asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
708 : "=d" (tmp), "=d" (a)
709 : "d" (b), "d" (c), "1" (a));
710 return a;
711 +#endif
712 }
713
714 #endif /* defined(_M68K_DELAY_H) */
715 --- a/include/asm-m68k/div64.h
716 +++ b/include/asm-m68k/div64.h
717 @@ -5,6 +5,7 @@
718
719 /* n = n / base; return rem; */
720
721 +#ifndef CONFIG_COLDFIRE
722 #define do_div(n, base) ({ \
723 union { \
724 unsigned long n32[2]; \
725 @@ -24,6 +25,9 @@
726 (n) = __n.n64; \
727 __rem; \
728 })
729 +#else
730 +# include <asm-generic/div64.h>
731 +#endif
732
733 extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
734 #endif /* _M68K_DIV64_H */
735 --- a/include/asm-m68k/elf.h
736 +++ b/include/asm-m68k/elf.h
737 @@ -60,7 +60,7 @@ typedef struct user_m68kfp_struct elf_fp
738 #define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
739
740 #define USE_ELF_CORE_DUMP
741 -#ifndef CONFIG_SUN3
742 +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
743 #define ELF_EXEC_PAGESIZE 4096
744 #else
745 #define ELF_EXEC_PAGESIZE 8192
746 --- a/include/asm-m68k/fpu.h
747 +++ b/include/asm-m68k/fpu.h
748 @@ -12,6 +12,8 @@
749 #define FPSTATESIZE (96/sizeof(unsigned char))
750 #elif defined(CONFIG_M68KFPU_EMU)
751 #define FPSTATESIZE (28/sizeof(unsigned char))
752 +#elif defined(CONFIG_CFV4E)
753 +#define FPSTATESIZE (16/sizeof(unsigned char))
754 #elif defined(CONFIG_M68060)
755 #define FPSTATESIZE (12/sizeof(unsigned char))
756 #else
757 --- a/include/asm-m68k/io.h
758 +++ b/include/asm-m68k/io.h
759 @@ -397,10 +397,12 @@ static inline void memcpy_toio(volatile
760 __builtin_memcpy((void __force *) dst, src, count);
761 }
762
763 -#ifndef CONFIG_SUN3
764 -#define IO_SPACE_LIMIT 0xffff
765 -#else
766 +#if defined(CONFIG_SUN3)
767 #define IO_SPACE_LIMIT 0x0fffffff
768 +#elif defined(CONFIG_COLDFIRE)
769 +#define IO_SPACE_LIMIT 0xffffffff
770 +#else
771 +#define IO_SPACE_LIMIT 0xffff
772 #endif
773
774 #endif /* __KERNEL__ */
775 @@ -418,4 +420,22 @@ static inline void memcpy_toio(volatile
776 */
777 #define xlate_dev_kmem_ptr(p) p
778
779 +#ifdef CONFIG_COLDFIRE
780 +
781 +#define memset_io(a, b, c) memset((void *)(a), (b), (c))
782 +#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
783 +#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
784 +#if !defined(readb)
785 +#define readb(addr) \
786 + ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
787 +#define readw(addr) \
788 + ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
789 +#define readl(addr) \
790 + ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
791 +#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
792 +#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
793 +#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
794 +#endif /* readb */
795 +#endif /* CONFIG_COLDFIRE */
796 +
797 #endif /* _IO_H */
798 --- a/include/asm-m68k/irq.h
799 +++ b/include/asm-m68k/irq.h
800 @@ -11,7 +11,10 @@
801 * Currently the Atari has 72 and the Amiga 24, but if both are
802 * supported in the kernel it is better to make room for 72.
803 */
804 -#if defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
805 +#if defined(CONFIG_COLDFIRE)
806 +#define SYS_IRQS 256
807 +#define NR_IRQS SYS_IRQS
808 +#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
809 #define NR_IRQS 200
810 #elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
811 #define NR_IRQS 72
812 --- a/include/asm-m68k/machdep.h
813 +++ b/include/asm-m68k/machdep.h
814 @@ -32,4 +32,11 @@ extern void (*mach_heartbeat) (int);
815 extern void (*mach_l2_flush) (int);
816 extern void (*mach_beep) (unsigned int, unsigned int);
817
818 +#ifdef CONFIG_COLDFIRE
819 +extern void __init config_coldfire(void);
820 +extern void __init mmu_context_init(void);
821 +extern irq_handler_t mach_default_handler;
822 +extern void (*mach_tick)(void);
823 +#endif
824 +
825 #endif /* _M68K_MACHDEP_H */
826 --- a/include/asm-m68k/mmu_context.h
827 +++ b/include/asm-m68k/mmu_context.h
828 @@ -7,7 +7,7 @@ static inline void enter_lazy_tlb(struct
829 {
830 }
831
832 -#ifndef CONFIG_SUN3
833 +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
834
835 #include <asm/setup.h>
836 #include <asm/page.h>
837 @@ -102,7 +102,7 @@ static inline void activate_mm(struct mm
838 switch_mm_0460(next_mm);
839 }
840
841 -#else /* CONFIG_SUN3 */
842 +#elif defined(CONFIG_SUN3)
843 #include <asm/sun3mmu.h>
844 #include <linux/sched.h>
845
846 @@ -150,5 +150,83 @@ static inline void activate_mm(struct mm
847 activate_context(next_mm);
848 }
849
850 -#endif
851 +#else /* CONFIG_COLDFIRE */
852 +
853 +#include <asm/atomic.h>
854 +#include <asm/bitops.h>
855 +#include <asm/mmu.h>
856 +
857 +#define NO_CONTEXT 256
858 +#define LAST_CONTEXT 255
859 +#define FIRST_CONTEXT 1
860 +
861 +extern void set_context(mm_context_t context, pgd_t *pgd);
862 +extern unsigned long context_map[];
863 +extern mm_context_t next_mmu_context;
864 +
865 +extern atomic_t nr_free_contexts;
866 +extern struct mm_struct *context_mm[LAST_CONTEXT+1];
867 +extern void steal_context(void);
868 +
869 +static inline void get_mmu_context(struct mm_struct *mm)
870 +{
871 + mm_context_t ctx;
872 +
873 + if (mm->context != NO_CONTEXT)
874 + return;
875 + while (atomic_dec_and_test_lt(&nr_free_contexts)) {
876 + atomic_inc(&nr_free_contexts);
877 + steal_context();
878 + }
879 + ctx = next_mmu_context;
880 + while (test_and_set_bit(ctx, context_map)) {
881 + ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
882 + if (ctx > LAST_CONTEXT)
883 + ctx = 0;
884 + }
885 + next_mmu_context = (ctx + 1) & LAST_CONTEXT;
886 + mm->context = ctx;
887 + context_mm[ctx] = mm;
888 +}
889 +
890 +/*
891 + * Set up the context for a new address space.
892 + */
893 +#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
894 +
895 +/*
896 + * We're finished using the context for an address space.
897 + */
898 +static inline void destroy_context(struct mm_struct *mm)
899 +{
900 + if (mm->context != NO_CONTEXT) {
901 + clear_bit(mm->context, context_map);
902 + mm->context = NO_CONTEXT;
903 + atomic_inc(&nr_free_contexts);
904 + }
905 +}
906 +
907 +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
908 + struct task_struct *tsk)
909 +{
910 + get_mmu_context(tsk->mm);
911 + set_context(tsk->mm->context, next->pgd);
912 +}
913 +
914 +/*
915 + * After we have set current->mm to a new value, this activates
916 + * the context for the new mm so we see the new mappings.
917 + */
918 +static inline void activate_mm(struct mm_struct *active_mm,
919 + struct mm_struct *mm)
920 +{
921 + get_mmu_context(mm);
922 + set_context(mm->context, mm->pgd);
923 +}
924 +
925 +#define deactivate_mm(tsk, mm) do { } while (0)
926 +
927 +extern void mmu_context_init(void);
928 +
929 +#endif /* CONFIG_COLDFIRE */
930 #endif
931 --- a/include/asm-m68k/page.h
932 +++ b/include/asm-m68k/page.h
933 @@ -4,7 +4,7 @@
934 #include <linux/const.h>
935
936 /* PAGE_SHIFT determines the page size */
937 -#ifndef CONFIG_SUN3
938 +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
939 #define PAGE_SHIFT (12)
940 #else
941 #define PAGE_SHIFT (13)
942 @@ -116,10 +116,23 @@ typedef struct page *pgtable_t;
943
944 extern unsigned long m68k_memoffset;
945
946 -#ifndef CONFIG_SUN3
947 +#if !defined(CONFIG_SUN3)
948
949 #define WANT_PAGE_VIRTUAL
950
951 +#if defined(CONFIG_COLDFIRE)
952 +static inline unsigned long ___pa(void *vaddr)
953 +{
954 + return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE);
955 +}
956 +#define __pa(vaddr) ___pa((void *)(vaddr))
957 +
958 +static inline void *__va(unsigned long paddr)
959 +{
960 + return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
961 +}
962 +
963 +#else
964 static inline unsigned long ___pa(void *vaddr)
965 {
966 unsigned long paddr;
967 @@ -141,6 +154,7 @@ static inline void *__va(unsigned long p
968 : "0" (paddr), "i" (m68k_fixup_memoffset));
969 return vaddr;
970 }
971 +#endif
972
973 #else /* !CONFIG_SUN3 */
974 /* This #define is a horrible hack to suppress lots of warnings. --m */
975 @@ -172,6 +186,8 @@ static inline void *__va(unsigned long x
976 * memory node, but we have no highmem, so that works for now.
977 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
978 * of the shifts unnecessary.
979 + *
980 + * PFNs are used to map physical pages. So PFN[0] maps to the base phys addr.
981 */
982 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
983 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
984 --- a/include/asm-m68k/page_offset.h
985 +++ b/include/asm-m68k/page_offset.h
986 @@ -1,8 +1,11 @@
987
988 /* This handles the memory map.. */
989 -#ifndef CONFIG_SUN3
990 +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
991 #define PAGE_OFFSET_RAW 0x00000000
992 -#else
993 +#elif defined(CONFIG_SUN3)
994 #define PAGE_OFFSET_RAW 0x0E000000
995 +#else /* CONFIG_COLDFIRE */
996 +#define PAGE_OFFSET_RAW 0xC0000000
997 +#define PHYS_OFFSET 0x40000000
998 #endif
999
1000 --- a/include/asm-m68k/pci.h
1001 +++ b/include/asm-m68k/pci.h
1002 @@ -1,57 +1,86 @@
1003 -#ifndef _ASM_M68K_PCI_H
1004 -#define _ASM_M68K_PCI_H
1005 -
1006 /*
1007 - * asm-m68k/pci_m68k.h - m68k specific PCI declarations.
1008 + * asm-m68k/pci.h - m68k specific PCI declarations.
1009 *
1010 - * Written by Wout Klaren.
1011 + * Coldfire Implementation Copyright (c) 2007 Freescale Semiconductor, Inc.
1012 + * Kurt Mahan <kmahan@freescale.com>
1013 */
1014 +#ifndef _ASM_M68K_PCI_H
1015 +#define _ASM_M68K_PCI_H
1016
1017 -#include <asm/scatterlist.h>
1018 +#ifdef CONFIG_PCI
1019
1020 -struct pci_ops;
1021 +#include <asm-generic/pci-dma-compat.h>
1022
1023 /*
1024 - * Structure with hardware dependent information and functions of the
1025 - * PCI bus.
1026 + * The PCI address space does equal the physical memory
1027 + * address space. The networking and block device layers use
1028 + * this boolean for bounce buffer decisions.
1029 */
1030 +#define PCI_DMA_BUS_IS_PHYS (1)
1031
1032 -struct pci_bus_info
1033 -{
1034 - /*
1035 - * Resources of the PCI bus.
1036 - */
1037 -
1038 - struct resource mem_space;
1039 - struct resource io_space;
1040 +#define PCIBIOS_MIN_IO 0x00004000
1041 +#define PCIBIOS_MIN_MEM 0x02000000
1042
1043 - /*
1044 - * System dependent functions.
1045 - */
1046 +#define pcibios_assign_all_busses() 0
1047 +#define pcibios_scan_all_fns(a, b) 0
1048
1049 - struct pci_ops *m68k_pci_ops;
1050 +static inline void
1051 +pcibios_set_master(struct pci_dev *dev)
1052 +{
1053 + /* no special bus mastering setup handling */
1054 +}
1055
1056 - void (*fixup)(int pci_modify);
1057 - void (*conf_device)(struct pci_dev *dev);
1058 -};
1059 +static inline void
1060 +pcibios_penalize_isa_irq(int irq, int active)
1061 +{
1062 + /* no dynamic PCI IRQ allocation */
1063 +}
1064
1065 -#define pcibios_assign_all_busses() 0
1066 -#define pcibios_scan_all_fns(a, b) 0
1067 +static inline void
1068 +pcibios_add_platform_entries(struct pci_dev *dev)
1069 +{
1070 + /* no special handling */
1071 +}
1072
1073 -static inline void pcibios_set_master(struct pci_dev *dev)
1074 +static inline void
1075 +pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
1076 + struct resource *res)
1077 {
1078 - /* No special bus mastering setup handling */
1079 +#ifdef CONFIG_M54455
1080 + if ((res->start == 0xa0000000) || (res->start == 0xa8000000)) {
1081 + /* HACK! FIX! kludge to fix bridge mapping */
1082 + region->start = res->start & 0x0fffffff;
1083 + region->end = res->end & 0x0fffffff;
1084 + } else {
1085 + region->start = res->start;
1086 + region->end = res->end;
1087 + }
1088 +#else
1089 + region->start = res->start;
1090 + region->end = res->end;
1091 +#endif
1092 }
1093
1094 -static inline void pcibios_penalize_isa_irq(int irq, int active)
1095 +static inline void
1096 +pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
1097 + struct pci_bus_region *region)
1098 {
1099 - /* We don't do dynamic PCI IRQ allocation */
1100 + res->start = region->start;
1101 + res->end = region->end;
1102 }
1103
1104 -/* The PCI address space does equal the physical memory
1105 - * address space. The networking and block device layers use
1106 - * this boolean for bounce buffer decisions.
1107 - */
1108 -#define PCI_DMA_BUS_IS_PHYS (1)
1109 +static inline struct resource *
1110 +pcibios_select_root(struct pci_dev *pdev, struct resource *res)
1111 +{
1112 + struct resource *root = NULL;
1113 +
1114 + if (res->flags & IORESOURCE_IO)
1115 + root = &ioport_resource;
1116 + if (res->flags & IORESOURCE_MEM)
1117 + root = &iomem_resource;
1118 +
1119 + return root;
1120 +}
1121
1122 +#endif /* CONFIG_PCI */
1123 #endif /* _ASM_M68K_PCI_H */
1124 --- a/include/asm-m68k/pgalloc.h
1125 +++ b/include/asm-m68k/pgalloc.h
1126 @@ -8,8 +8,10 @@
1127 #include <asm/virtconvert.h>
1128
1129
1130 -#ifdef CONFIG_SUN3
1131 +#if defined(CONFIG_SUN3)
1132 #include <asm/sun3_pgalloc.h>
1133 +#elif defined(CONFIG_COLDFIRE)
1134 +#include <asm/cf_pgalloc.h>
1135 #else
1136 #include <asm/motorola_pgalloc.h>
1137 #endif
1138 --- a/include/asm-m68k/pgtable.h
1139 +++ b/include/asm-m68k/pgtable.h
1140 @@ -40,6 +40,8 @@
1141 /* PGDIR_SHIFT determines what a third-level page table entry can map */
1142 #ifdef CONFIG_SUN3
1143 #define PGDIR_SHIFT 17
1144 +#elif defined(CONFIG_COLDFIRE)
1145 +#define PGDIR_SHIFT 22
1146 #else
1147 #define PGDIR_SHIFT 25
1148 #endif
1149 @@ -54,6 +56,10 @@
1150 #define PTRS_PER_PTE 16
1151 #define PTRS_PER_PMD 1
1152 #define PTRS_PER_PGD 2048
1153 +#elif defined(CONFIG_COLDFIRE)
1154 +#define PTRS_PER_PTE 512
1155 +#define PTRS_PER_PMD 1
1156 +#define PTRS_PER_PGD 1024
1157 #else
1158 #define PTRS_PER_PTE 1024
1159 #define PTRS_PER_PMD 8
1160 @@ -66,6 +72,9 @@
1161 #ifdef CONFIG_SUN3
1162 #define KMAP_START 0x0DC00000
1163 #define KMAP_END 0x0E000000
1164 +#elif defined(CONFIG_COLDFIRE)
1165 +#define KMAP_START 0xe0000000
1166 +#define KMAP_END 0xf0000000
1167 #else
1168 #define KMAP_START 0xd0000000
1169 #define KMAP_END 0xf0000000
1170 @@ -130,6 +139,8 @@ static inline void update_mmu_cache(stru
1171
1172 #ifdef CONFIG_SUN3
1173 #include <asm/sun3_pgtable.h>
1174 +#elif defined(CONFIG_COLDFIRE)
1175 +#include <asm/cf_pgtable.h>
1176 #else
1177 #include <asm/motorola_pgtable.h>
1178 #endif
1179 @@ -140,6 +151,9 @@ static inline void update_mmu_cache(stru
1180 /*
1181 * Macro to mark a page protection value as "uncacheable".
1182 */
1183 +#ifdef CONFIG_COLDFIRE
1184 +# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
1185 +#else /* CONFIG_COLDFIRE */
1186 #ifdef SUN3_PAGE_NOCACHE
1187 # define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE
1188 #else
1189 @@ -154,6 +168,7 @@ static inline void update_mmu_cache(stru
1190 ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
1191 : (prot)))
1192
1193 +#endif /* CONFIG_COLDFIRE */
1194 #endif /* !__ASSEMBLY__ */
1195
1196 /*
1197 --- a/include/asm-m68k/processor.h
1198 +++ b/include/asm-m68k/processor.h
1199 @@ -22,24 +22,38 @@ static inline unsigned long rdusp(void)
1200 {
1201 unsigned long usp;
1202
1203 +#ifndef CONFIG_COLDFIRE
1204 __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
1205 +#else
1206 + __asm__ __volatile__("movel %/usp,%0" : "=a" (usp));
1207 +#endif
1208 return usp;
1209 }
1210
1211 static inline void wrusp(unsigned long usp)
1212 {
1213 +#ifndef CONFIG_COLDFIRE
1214 __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
1215 +#else
1216 + __asm__ __volatile__("movel %0,%/usp" : : "a" (usp));
1217 +#endif
1218 }
1219
1220 /*
1221 * User space process size: 3.75GB. This is hardcoded into a few places,
1222 * so don't change it unless you know what you are doing.
1223 */
1224 -#ifndef CONFIG_SUN3
1225 +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
1226 #define TASK_SIZE (0xF0000000UL)
1227 +#elif defined(CONFIG_COLDFIRE)
1228 +#define TASK_SIZE (0xC0000000UL)
1229 +#else /* CONFIG_SUN3 */
1230 +#ifdef __ASSEMBLY__
1231 +#define TASK_SIZE (0x0E000000)
1232 #else
1233 #define TASK_SIZE (0x0E000000UL)
1234 #endif
1235 +#endif
1236
1237 #ifdef __KERNEL__
1238 #define STACK_TOP TASK_SIZE
1239 @@ -49,9 +63,11 @@ static inline void wrusp(unsigned long u
1240 /* This decides where the kernel will search for a free chunk of vm
1241 * space during mmap's.
1242 */
1243 -#ifndef CONFIG_SUN3
1244 -#define TASK_UNMAPPED_BASE 0xC0000000UL
1245 -#else
1246 +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
1247 +#define TASK_UNMAPPED_BASE 0xC0000000UL
1248 +#elif defined(CONFIG_COLDFIRE)
1249 +#define TASK_UNMAPPED_BASE 0x80000000UL
1250 +#else /* CONFIG_SUN3 */
1251 #define TASK_UNMAPPED_BASE 0x0A000000UL
1252 #endif
1253 #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
1254 @@ -60,7 +76,11 @@ struct thread_struct {
1255 unsigned long ksp; /* kernel stack pointer */
1256 unsigned long usp; /* user stack pointer */
1257 unsigned short sr; /* saved status register */
1258 +#ifndef CONFIG_COLDFIRE
1259 unsigned short fs; /* saved fs (sfc, dfc) */
1260 +#else
1261 + mm_segment_t fs;
1262 +#endif
1263 unsigned long crp[2]; /* cpu root pointer */
1264 unsigned long esp0; /* points to SR of stack frame */
1265 unsigned long faddr; /* info about last fault */
1266 @@ -81,6 +101,7 @@ struct thread_struct {
1267 /*
1268 * Do necessary setup to start up a newly executed thread.
1269 */
1270 +#ifndef CONFIG_COLDFIRE
1271 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
1272 unsigned long usp)
1273 {
1274 @@ -91,6 +112,23 @@ static inline void start_thread(struct p
1275 regs->sr &= ~0x2000;
1276 wrusp(usp);
1277 }
1278 +#else
1279 +/*
1280 + * Do necessary setup to start up a newly executed thread.
1281 + *
1282 + * pass the data segment into user programs if it exists,
1283 + * it can't hurt anything as far as I can tell
1284 + */
1285 +#define start_thread(_regs, _pc, _usp) \
1286 +do { \
1287 + set_fs(USER_DS); /* reads from user space */ \
1288 + (_regs)->pc = (_pc); \
1289 + if (current->mm) \
1290 + (_regs)->d5 = current->mm->start_data; \
1291 + (_regs)->sr &= ~0x2000; \
1292 + wrusp(_usp); \
1293 +} while (0)
1294 +#endif
1295
1296 /* Forward declaration, a strange C thing */
1297 struct task_struct;
1298 --- a/include/asm-m68k/ptrace.h
1299 +++ b/include/asm-m68k/ptrace.h
1300 @@ -38,10 +38,21 @@ struct pt_regs {
1301 long d0;
1302 long orig_d0;
1303 long stkadj;
1304 +#ifndef CONFIG_COLDFIRE
1305 unsigned short sr;
1306 unsigned long pc;
1307 unsigned format : 4; /* frame format specifier */
1308 unsigned vector : 12; /* vector offset */
1309 +#else
1310 + unsigned long mmuar;
1311 + unsigned long mmusr;
1312 + unsigned format : 4; /* frame format specifier */
1313 + unsigned fs2 : 2;
1314 + unsigned vector: 8;
1315 + unsigned fs1 : 2;
1316 + unsigned short sr;
1317 + unsigned long pc;
1318 +#endif
1319 };
1320
1321 /*
1322 --- a/include/asm-m68k/raw_io.h
1323 +++ b/include/asm-m68k/raw_io.h
1324 @@ -77,6 +77,7 @@ static inline void raw_outsb(volatile u8
1325 out_8(port, *buf++);
1326 }
1327
1328 +#ifndef CONFIG_COLDFIRE
1329 static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
1330 {
1331 unsigned int tmp;
1332 @@ -342,6 +343,63 @@ static inline void raw_outsw_swapw(volat
1333 : "d0", "a0", "a1", "d6");
1334 }
1335
1336 +
1337 +#else /*CONFIG_COLDFIRE */
1338 +
1339 +static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
1340 +{
1341 + unsigned int i;
1342 +
1343 + for (i = 0; i < nr; i++)
1344 + *buf++ = raw_inw(port);
1345 +}
1346 +
1347 +static inline void raw_outsw(volatile u16 *port, const u16 *buf,
1348 + unsigned int nr)
1349 +{
1350 + unsigned int i;
1351 +
1352 + for (i = 0; i < nr; i++, buf++)
1353 + raw_outw(*buf, port);
1354 +}
1355 +
1356 +static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
1357 +{
1358 + unsigned int i;
1359 +
1360 + for (i = 0; i < nr; i++)
1361 + *buf++ = raw_inl(port);
1362 +}
1363 +
1364 +static inline void raw_outsl(volatile u32 *port, const u32 *buf,
1365 + unsigned int nr)
1366 +{
1367 + unsigned int i;
1368 +
1369 + for (i = 0; i < nr; i++, buf++)
1370 + raw_outl(*buf, port);
1371 +}
1372 +
1373 +static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
1374 + unsigned int nr)
1375 +{
1376 + unsigned int i;
1377 +
1378 + for (i = 0; i < nr; i++)
1379 + *buf++ = in_le16(port);
1380 +
1381 +}
1382 +
1383 +static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
1384 + unsigned int nr)
1385 +{
1386 + unsigned int i;
1387 +
1388 + for (i = 0; i < nr; i++, buf++)
1389 + out_le16(port, *buf);
1390 +}
1391 +#endif /*CONFIG_COLDFIRE */
1392 +
1393 #endif /* __KERNEL__ */
1394
1395 #endif /* _RAW_IO_H */
1396 --- a/include/asm-m68k/segment.h
1397 +++ b/include/asm-m68k/segment.h
1398 @@ -29,6 +29,7 @@ typedef struct {
1399 * Get/set the SFC/DFC registers for MOVES instructions
1400 */
1401
1402 +#ifndef CONFIG_COLDFIRE
1403 static inline mm_segment_t get_fs(void)
1404 {
1405 mm_segment_t _v;
1406 @@ -50,6 +51,15 @@ static inline void set_fs(mm_segment_t v
1407 : /* no outputs */ : "r" (val.seg) : "memory");
1408 }
1409
1410 +#else /* CONFIG_COLDFIRE */
1411 +
1412 +#include <asm/current.h>
1413 +#define get_fs() (current->thread.fs)
1414 +#define set_fs(val) (current->thread.fs = (val))
1415 +#define get_ds() (KERNEL_DS)
1416 +
1417 +#endif /* CONFIG_COLDFIRE */
1418 +
1419 #define segment_eq(a,b) ((a).seg == (b).seg)
1420
1421 #endif /* __ASSEMBLY__ */
1422 --- a/include/asm-m68k/setup.h
1423 +++ b/include/asm-m68k/setup.h
1424 @@ -40,6 +40,7 @@
1425 #define MACH_HP300 9
1426 #define MACH_Q40 10
1427 #define MACH_SUN3X 11
1428 +#define MACH_CFMMU 12
1429
1430 #define COMMAND_LINE_SIZE 256
1431
1432 @@ -189,6 +190,14 @@ extern unsigned long m68k_machtype;
1433 # define MACH_TYPE (MACH_SUN3X)
1434 #endif
1435
1436 +#if !defined(CONFIG_COLDFIRE)
1437 +# define MACH_IS_COLDFIRE (0)
1438 +#else
1439 +# define CONFIG_COLDFIRE_ONLY
1440 +# define MACH_IS_COLDFIRE (1)
1441 +# define MACH_TYPE (MACH_CFMMU)
1442 +#endif
1443 +
1444 #ifndef MACH_TYPE
1445 # define MACH_TYPE (m68k_machtype)
1446 #endif
1447 @@ -211,23 +220,31 @@ extern unsigned long m68k_machtype;
1448 #define CPUB_68030 1
1449 #define CPUB_68040 2
1450 #define CPUB_68060 3
1451 +#define CPUB_CFV4E 4
1452
1453 #define CPU_68020 (1<<CPUB_68020)
1454 #define CPU_68030 (1<<CPUB_68030)
1455 #define CPU_68040 (1<<CPUB_68040)
1456 #define CPU_68060 (1<<CPUB_68060)
1457 +#define CPU_CFV4E (1<<CPUB_CFV4E)
1458
1459 #define FPUB_68881 0
1460 #define FPUB_68882 1
1461 #define FPUB_68040 2 /* Internal FPU */
1462 #define FPUB_68060 3 /* Internal FPU */
1463 #define FPUB_SUNFPA 4 /* Sun-3 FPA */
1464 +#define FPUB_CFV4E 5
1465
1466 #define FPU_68881 (1<<FPUB_68881)
1467 #define FPU_68882 (1<<FPUB_68882)
1468 #define FPU_68040 (1<<FPUB_68040)
1469 #define FPU_68060 (1<<FPUB_68060)
1470 #define FPU_SUNFPA (1<<FPUB_SUNFPA)
1471 +#ifndef CONFIG_M54455
1472 +#define FPU_CFV4E (1<<FPUB_CFV4E)
1473 +#else
1474 +#define FPU_CFV4E 0
1475 +#endif
1476
1477 #define MMUB_68851 0
1478 #define MMUB_68030 1 /* Internal MMU */
1479 @@ -235,6 +252,7 @@ extern unsigned long m68k_machtype;
1480 #define MMUB_68060 3 /* Internal MMU */
1481 #define MMUB_APOLLO 4 /* Custom Apollo */
1482 #define MMUB_SUN3 5 /* Custom Sun-3 */
1483 +#define MMUB_CFV4E 6
1484
1485 #define MMU_68851 (1<<MMUB_68851)
1486 #define MMU_68030 (1<<MMUB_68030)
1487 @@ -242,6 +260,7 @@ extern unsigned long m68k_machtype;
1488 #define MMU_68060 (1<<MMUB_68060)
1489 #define MMU_SUN3 (1<<MMUB_SUN3)
1490 #define MMU_APOLLO (1<<MMUB_APOLLO)
1491 +#define MMU_CFV4E (1<<MMUB_CFV4E)
1492
1493 #ifdef __KERNEL__
1494
1495 @@ -341,6 +360,14 @@ extern int m68k_is040or060;
1496 # endif
1497 #endif
1498
1499 +#if !defined(CONFIG_CFV4E)
1500 +# define CPU_IS_COLDFIRE (0)
1501 +#else
1502 +# define CPU_IS_COLDFIRE (1)
1503 +# define CPU_IS_CFV4E (1)
1504 +# define MMU_IS_CFV4E (1)
1505 +#endif
1506 +
1507 #define CPU_TYPE (m68k_cputype)
1508
1509 #ifdef CONFIG_M68KFPU_EMU
1510 --- a/include/asm-m68k/signal.h
1511 +++ b/include/asm-m68k/signal.h
1512 @@ -150,6 +150,7 @@ typedef struct sigaltstack {
1513 #ifdef __KERNEL__
1514 #include <asm/sigcontext.h>
1515
1516 +#ifndef CONFIG_COLDFIRE
1517 #define __HAVE_ARCH_SIG_BITOPS
1518
1519 static inline void sigaddset(sigset_t *set, int _sig)
1520 @@ -200,6 +201,10 @@ static inline int sigfindinword(unsigned
1521
1522 struct pt_regs;
1523 extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
1524 +#else
1525 +
1526 +#define ptrace_signal_deliver(regs, cookie) do { } while (0)
1527 +#endif /* CONFIG_COLDFIRE */
1528
1529 #endif /* __KERNEL__ */
1530
1531 --- a/include/asm-m68k/string.h
1532 +++ b/include/asm-m68k/string.h
1533 @@ -93,6 +93,7 @@ static inline char *strchr(const char *s
1534 return (char *)s - 1;
1535 }
1536
1537 +#ifndef CONFIG_COLDFIRE
1538 #define __HAVE_ARCH_STRCMP
1539 static inline int strcmp(const char *cs, const char *ct)
1540 {
1541 @@ -110,6 +111,7 @@ static inline int strcmp(const char *cs,
1542 : "+a" (cs), "+a" (ct), "=d" (res));
1543 return res;
1544 }
1545 +#endif
1546
1547 #define __HAVE_ARCH_MEMSET
1548 extern void *memset(void *, int, __kernel_size_t);
1549 --- a/include/asm-m68k/system.h
1550 +++ b/include/asm-m68k/system.h
1551 @@ -63,16 +63,25 @@ asmlinkage void resume(void);
1552 #define smp_read_barrier_depends() ((void)0)
1553
1554 /* interrupt control.. */
1555 -#if 0
1556 -#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
1557 -#else
1558 #include <linux/hardirq.h>
1559 +#ifndef CONFIG_COLDFIRE
1560 #define local_irq_enable() ({ \
1561 if (MACH_IS_Q40 || !hardirq_count()) \
1562 asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
1563 })
1564 -#endif
1565 #define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
1566 +#else /* CONFIG_COLDFIRE */
1567 +#define local_irq_enable() \
1568 + asm volatile ("move.w %%sr, %%d0\n\t" \
1569 + "andil #0xf8ff,%%d0\n\t" \
1570 + "move.w %%d0, %%sr\n" \
1571 + : : : "cc", "d0", "memory")
1572 +#define local_irq_disable() \
1573 + asm volatile ("move %/sr,%%d0\n\t" \
1574 + "ori.l #0x0700,%%d0\n\t" \
1575 + "move %%d0,%/sr\n" \
1576 + : : : "cc", "%d0", "memory")
1577 +#endif
1578 #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
1579 #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
1580
1581 --- a/include/asm-m68k/thread_info.h
1582 +++ b/include/asm-m68k/thread_info.h
1583 @@ -58,5 +58,6 @@ struct thread_info {
1584 #define TIF_DELAYED_TRACE 14 /* single step a syscall */
1585 #define TIF_SYSCALL_TRACE 15 /* syscall trace active */
1586 #define TIF_MEMDIE 16
1587 +#define TIF_FREEZE 17 /* freezing processes */
1588
1589 #endif /* _ASM_M68K_THREAD_INFO_H */
1590 --- a/include/asm-m68k/tlbflush.h
1591 +++ b/include/asm-m68k/tlbflush.h
1592 @@ -2,7 +2,7 @@
1593 #define _M68K_TLBFLUSH_H
1594
1595
1596 -#ifndef CONFIG_SUN3
1597 +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
1598
1599 #include <asm/current.h>
1600
1601 @@ -92,7 +92,12 @@ static inline void flush_tlb_kernel_rang
1602 flush_tlb_all();
1603 }
1604
1605 -#else
1606 +static inline void flush_tlb_pgtables(struct mm_struct *mm,
1607 + unsigned long start, unsigned long end)
1608 +{
1609 +}
1610 +
1611 +#elif defined(CONFIG_SUN3)
1612
1613
1614 /* Reserved PMEGs. */
1615 @@ -214,6 +219,13 @@ static inline void flush_tlb_kernel_page
1616 sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
1617 }
1618
1619 +static inline void flush_tlb_pgtables(struct mm_struct *mm,
1620 + unsigned long start, unsigned long end)
1621 +{
1622 +}
1623 +
1624 +#else /* CONFIG_COLDFIRE */
1625 +#include <asm/cf_tlbflush.h>
1626 #endif
1627
1628 #endif /* _M68K_TLBFLUSH_H */
1629 --- a/include/asm-m68k/uaccess.h
1630 +++ b/include/asm-m68k/uaccess.h
1631 @@ -1,6 +1,9 @@
1632 #ifndef __M68K_UACCESS_H
1633 #define __M68K_UACCESS_H
1634
1635 +#ifdef CONFIG_COLDFIRE
1636 +#include <asm/cf_uaccess.h>
1637 +#else
1638 /*
1639 * User space memory access functions
1640 */
1641 @@ -367,4 +370,5 @@ unsigned long __clear_user(void __user *
1642
1643 #define strlen_user(str) strnlen_user(str, 32767)
1644
1645 +#endif /* CONFIG_COLDFIRE */
1646 #endif /* _M68K_UACCESS_H */