1 From 70b2bd01829b38a1a79caeda05d436b2e5fecf82 Mon Sep 17 00:00:00 2001
2 From: Kurt Mahan <kmahan@freescale.com>
3 Date: Wed, 31 Oct 2007 17:00:18 -0600
4 Subject: [PATCH] Core Coldfire/MCF5445x specific code.
6 LTIBName: mcfv4e-coldfire-code
7 Signed-off-by: Kurt Mahan <kmahan@freescale.com>
9 arch/m68k/coldfire/Makefile | 11 +
10 arch/m68k/coldfire/cache.c | 215 +++++++++
11 arch/m68k/coldfire/config.c | 420 ++++++++++++++++++
12 arch/m68k/coldfire/entry.S | 701 ++++++++++++++++++++++++++++++
13 arch/m68k/coldfire/head.S | 474 ++++++++++++++++++++
14 arch/m68k/coldfire/ints.c | 384 ++++++++++++++++
15 arch/m68k/coldfire/iomap.c | 54 +++
16 arch/m68k/coldfire/mcf5445x-pci.c | 427 ++++++++++++++++++
17 arch/m68k/coldfire/muldi3.S | 64 +++
18 arch/m68k/coldfire/pci.c | 245 +++++++++++
19 arch/m68k/coldfire/signal.c | 868 +++++++++++++++++++++++++++++++++++++
20 arch/m68k/coldfire/traps.c | 454 +++++++++++++++++++
21 arch/m68k/coldfire/vmlinux-cf.lds | 92 ++++
22 13 files changed, 4409 insertions(+), 0 deletions(-)
23 create mode 100644 arch/m68k/coldfire/Makefile
24 create mode 100644 arch/m68k/coldfire/cache.c
25 create mode 100644 arch/m68k/coldfire/config.c
26 create mode 100644 arch/m68k/coldfire/entry.S
27 create mode 100644 arch/m68k/coldfire/head.S
28 create mode 100644 arch/m68k/coldfire/ints.c
29 create mode 100644 arch/m68k/coldfire/iomap.c
30 create mode 100644 arch/m68k/coldfire/mcf5445x-pci.c
31 create mode 100644 arch/m68k/coldfire/muldi3.S
32 create mode 100644 arch/m68k/coldfire/pci.c
33 create mode 100644 arch/m68k/coldfire/signal.c
34 create mode 100644 arch/m68k/coldfire/traps.c
35 create mode 100644 arch/m68k/coldfire/vmlinux-cf.lds
38 +++ b/arch/m68k/coldfire/Makefile
41 +# Makefile for Linux arch/m68k/coldfire source directory
44 +obj-y:= entry.o config.o cache.o signal.o muldi3.o traps.o ints.o
46 +ifneq ($(strip $(CONFIG_USB) $(CONFIG_USB_GADGET_MCF5445X)),)
50 +obj-$(CONFIG_PCI) += pci.o mcf5445x-pci.o iomap.o
52 +++ b/arch/m68k/coldfire/cache.c
55 + * linux/arch/m68k/coldfire/cache.c
57 + * Matt Waddel Matt.Waddel@freescale.com
58 + * Copyright Freescale Semiconductor, Inc. 2007
60 + * This program is free software; you can redistribute it and/or modify
61 + * it under the terms of the GNU General Public License as published by
62 + * the Free Software Foundation; either version 2 of the License, or
63 + * (at your option) any later version.
66 +#include <linux/interrupt.h>
67 +#include <asm/cfcache.h>
68 +#include <asm/coldfire.h>
69 +#include <asm/system.h>
71 +#define _DCACHE_SIZE (2*16384)
72 +#define _ICACHE_SIZE (2*16384)
77 + * Masks for cache sizes.  Programming note: because the number of sets is a
78 + * power of two, the mask is also the address of the last set.
81 +#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
82 +#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
83 +#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
84 +#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
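/*
 * Worked example of the arithmetic above (assuming _SET_SHIFT is 4, i.e. the
 * 16-byte line size implied by the 0x10 set stride in the flush loops below):
 * _DCACHE_SIZE/64 = 32768/64 = 512 sets (16-byte lines x 4 ways), so
 * _DCACHE_SET_MASK = (512 - 1) << 4 = 0x1ff0, which is also the address of
 * the last set -- hence LAST_DCACHE_ADDR is simply the mask itself.
 */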
86 +/************************************************************
87 + * Routine to cleanly flush the cache, pushing all lines and
88 + * invalidating them.
90 + * This is the flash-resident version, used after copying the .text
91 + * segment from flash to ram.
92 + *************************************************************/
93 +void FLASHDcacheFlushInvalidate(void)
94 + __attribute__ ((section (".text_loader")));
96 +void FLASHDcacheFlushInvalidate()
99 + unsigned long start_set;
100 + unsigned long end_set;
103 + end_set = (unsigned long)LAST_DCACHE_ADDR;
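	/*
	 * Each pass of the loop below pushes all four ways of one cache set:
	 * cpushl takes the set index from the upper bits of the address
	 * register and the way from its low bits, the addq #1 steps walk
	 * ways 1-3, and the (0x10 - 3) stride then advances to the next set
	 * while undoing those three increments.  The same pattern is used by
	 * the other flush loops in this file.
	 */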
105 + for (set = start_set; set < end_set; set += (0x10 - 3))
106 + asm volatile("cpushl %%dc,(%0)\n"
107 + "\taddq%.l #1,%0\n"
108 + "\tcpushl %%dc,(%0)\n"
109 + "\taddq%.l #1,%0\n"
110 + "\tcpushl %%dc,(%0)\n"
111 + "\taddq%.l #1,%0\n"
112 + "\tcpushl %%dc,(%0)" : : "a" (set));
115 +/************************************************************
116 + * Routine to cleanly flush the cache, pushing all lines and
117 + * invalidating them.
119 + *************************************************************/
120 +void DcacheFlushInvalidate()
123 + unsigned long start_set;
124 + unsigned long end_set;
127 + end_set = (unsigned long)LAST_DCACHE_ADDR;
129 + for (set = start_set; set < end_set; set += (0x10 - 3))
130 + asm volatile("cpushl %%dc,(%0)\n"
131 + "\taddq%.l #1,%0\n"
132 + "\tcpushl %%dc,(%0)\n"
133 + "\taddq%.l #1,%0\n"
134 + "\tcpushl %%dc,(%0)\n"
135 + "\taddq%.l #1,%0\n"
136 + "\tcpushl %%dc,(%0)" : : "a" (set));
141 +/******************************************************************************
142 + * Routine to cleanly flush a block of the cache, pushing all relevant lines
143 + * and invalidating them.
145 + ******************************************************************************/
146 +void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size)
149 + unsigned long start_set;
150 + unsigned long end_set;
152 + /* if size is bigger than the cache can store
153 + * set the size to the maximum amount
156 + if (size > LAST_DCACHE_ADDR)
157 + size = LAST_DCACHE_ADDR;
159 + start_set = ((unsigned long)start) & _DCACHE_SET_MASK;
160 + end_set = ((unsigned long)(start+size-1)) & _DCACHE_SET_MASK;
162 + if (start_set > end_set) {
163 +		/* region wraps: flush from set 0 up to the wrapped end address */
164 + for (set = 0; set <= end_set; set += (0x10 - 3))
165 + asm volatile("cpushl %%dc,(%0)\n"
166 + "\taddq%.l #1,%0\n"
167 + "\tcpushl %%dc,(%0)\n"
168 + "\taddq%.l #1,%0\n"
169 + "\tcpushl %%dc,(%0)\n"
170 + "\taddq%.l #1,%0\n"
171 + "\tcpushl %%dc,(%0)" : : "a" (set));
173 +		/* the next loop flushes from start_set to the end of the cache, i.e. past the hole */
174 + end_set = LAST_DCACHE_ADDR;
176 + for (set = start_set; set <= end_set; set += (0x10 - 3))
177 + asm volatile("cpushl %%dc,(%0)\n"
178 + "\taddq%.l #1,%0\n"
179 + "\tcpushl %%dc,(%0)\n"
180 + "\taddq%.l #1,%0\n"
181 + "\tcpushl %%dc,(%0)\n"
182 + "\taddq%.l #1,%0\n"
183 + "\tcpushl %%dc,(%0)" : : "a" (set));
187 +void IcacheInvalidateCacheBlock(void *start, unsigned long size)
190 + unsigned long start_set;
191 + unsigned long end_set;
193 + /* if size is bigger than the cache can store
194 + * set the size to the maximum amount
197 + if (size > LAST_ICACHE_ADDR)
198 + size = LAST_ICACHE_ADDR;
200 + start_set = ((unsigned long)start) & _ICACHE_SET_MASK;
201 + end_set = ((unsigned long)(start+size-1)) & _ICACHE_SET_MASK;
203 + if (start_set > end_set) {
204 +		/* region wraps: flush from set 0 up to the wrapped end address */
205 + for (set = 0; set <= end_set; set += (0x10 - 3))
206 + asm volatile("cpushl %%ic,(%0)\n"
207 + "\taddq%.l #1,%0\n"
208 + "\tcpushl %%ic,(%0)\n"
209 + "\taddq%.l #1,%0\n"
210 + "\tcpushl %%ic,(%0)\n"
211 + "\taddq%.l #1,%0\n"
212 + "\tcpushl %%ic,(%0)" : : "a" (set));
214 +		/* the next loop flushes from start_set to the end of the cache, i.e. past the hole */
215 + end_set = LAST_ICACHE_ADDR;
217 + for (set = start_set; set <= end_set; set += (0x10 - 3))
218 + asm volatile("cpushl %%ic,(%0)\n"
219 + "\taddq%.l #1,%0\n"
220 + "\tcpushl %%ic,(%0)\n"
221 + "\taddq%.l #1,%0\n"
222 + "\tcpushl %%ic,(%0)\n"
223 + "\taddq%.l #1,%0\n"
224 + "\tcpushl %%ic,(%0)" : : "a" (set));
228 +/********************************************************************
229 + * Disable the data cache completely
230 + ********************************************************************/
231 +void DcacheDisable(void)
234 + unsigned long flags;
236 + local_save_flags(flags);
237 + local_irq_disable();
239 + DcacheFlushInvalidate(); /* begin by flushing the cache */
240 + newValue = CACHE_DISABLE_MODE; /* disable it */
241 + cacr_set(newValue);
242 + local_irq_restore(flags);
245 +/********************************************************************
246 + * Unconditionally enable the data cache
247 + ********************************************************************/
248 +void DcacheEnable(void)
250 + cacr_set(CACHE_INITIAL_MODE);
254 +unsigned long shadow_cacr;
256 +void cacr_set(unsigned long x)
260 + __asm__ __volatile__ ("movec %0, %%cacr"
262 + : "r" (shadow_cacr));
265 +unsigned long cacr_get(void)
267 + return shadow_cacr;
270 +++ b/arch/m68k/coldfire/config.c
273 + * linux/arch/m68k/coldfire/config.c
275 + * Matt Waddel Matt.Waddel@freescale.com
276 + * Copyright Freescale Semiconductor, Inc. 2007
278 + * This program is free software; you can redistribute it and/or modify
279 + * it under the terms of the GNU General Public License as published by
280 + * the Free Software Foundation; either version 2 of the License, or
281 + * (at your option) any later version.
284 +#include <linux/module.h>
285 +#include <linux/init.h>
286 +#include <linux/string.h>
287 +#include <linux/kernel.h>
288 +#include <linux/console.h>
289 +#include <linux/bootmem.h>
290 +#include <linux/mm.h>
291 +#include <asm/bootinfo.h>
292 +#include <asm/machdep.h>
293 +#include <asm/coldfire.h>
294 +#include <asm/cfcache.h>
295 +#include <asm/bootinfo.h>
297 +#include <asm/cfmmu.h>
298 +#include <asm/setup.h>
299 +#include <asm/irq.h>
300 +#include <asm/traps.h>
301 +#include <asm/movs.h>
303 +#include <asm/page.h>
304 +#include <asm/pgalloc.h>
305 +#include <asm/mcf5445x_intc.h>
306 +#include <asm/mcf5445x_sdramc.h>
307 +#include <asm/mcf5445x_fbcs.h>
308 +#include <asm/mcf5445x_dtim.h>
310 +/* JKM -- testing */
311 +#include <linux/pfn.h>
314 +extern int get_irq_list(struct seq_file *p, void *v);
315 +extern char _text, _end;
316 +extern char _etext, _edata, __init_begin, __init_end;
317 +extern struct console mcfrs_console;
318 +extern char m68k_command_line[CL_SIZE];
319 +extern unsigned long availmem;
321 +static int irq_enable[NR_IRQS];
322 +unsigned long num_pages;
324 +void coldfire_sort_memrec(void)
328 + /* Sort the m68k_memory records by address */
329 + for (i = 0; i < m68k_num_memory; ++i) {
330 + for (j = i + 1; j < m68k_num_memory; ++j) {
331 + if (m68k_memory[i].addr > m68k_memory[j].addr) {
332 + struct mem_info tmp;
333 + tmp = m68k_memory[i];
334 + m68k_memory[i] = m68k_memory[j];
335 + m68k_memory[j] = tmp;
339 + /* Trim off discontiguous bits */
340 + for (i = 1; i < m68k_num_memory; ++i) {
341 + if ((m68k_memory[i-1].addr + m68k_memory[i-1].size) !=
342 + m68k_memory[i].addr) {
343 + printk(KERN_DEBUG "m68k_parse_bootinfo: addr gap between \
345 + m68k_memory[i-1].addr+m68k_memory[i-1].size,
346 + m68k_memory[i].addr);
347 + m68k_num_memory = i;
353 +int __init uboot_commandline(char *bootargs)
355 + int len = 0, cmd_line_len;
356 + static struct uboot_record uboot_info;
358 + extern unsigned long uboot_info_stk;
360 + /* Add 0x80000000 to get post-remapped kernel memory location */
361 + uboot_info.bd_info = (*(u32 *)(uboot_info_stk)) + 0x80000000;
362 + uboot_info.initrd_start = (*(u32 *)(uboot_info_stk+4)) + 0x80000000;
363 + uboot_info.initrd_end = (*(u32 *)(uboot_info_stk+8)) + 0x80000000;
364 + uboot_info.cmd_line_start = (*(u32 *)(uboot_info_stk+12)) + 0x80000000;
365 + uboot_info.cmd_line_stop = (*(u32 *)(uboot_info_stk+16)) + 0x80000000;
367 + cmd_line_len = uboot_info.cmd_line_stop - uboot_info.cmd_line_start;
368 + if ((cmd_line_len > 0) && (cmd_line_len < CL_SIZE-1))
369 + len = (int)strncpy(bootargs, (char *)uboot_info.cmd_line_start,\
376 + * This routine does things not done in the bootloader.
378 +#define DEFAULT_COMMAND_LINE "root=/dev/mtdblock1 rw rootfstype=jffs2 ip=none mtdparts=physmap-flash.0:5M(kernel)ro,-(jffs2)"
379 +asmlinkage void __init cf_early_init(void)
381 + struct bi_record *record = (struct bi_record *) &_end;
385 + SET_VBR((void *)MCF_RAMBAR1);
387 + /* Mask all interrupts */
388 + MCF_INTC0_IMRL = 0xFFFFFFFF;
389 + MCF_INTC0_IMRH = 0xFFFFFFFF;
390 + MCF_INTC1_IMRL = 0xFFFFFFFF;
391 + MCF_INTC1_IMRH = 0xFFFFFFFF;
393 +#if defined(CONFIG_NOR_FLASH_BASE)
394 + MCF_FBCS_CSAR(1) = CONFIG_NOR_FLASH_BASE;
396 + MCF_FBCS_CSAR(1) = 0x00000000;
399 +#if CONFIG_SDRAM_SIZE > (256*1024*1024)
400 + /* Init optional SDRAM chip select */
401 + MCF_SDRAMC_SDCS(1) = (256*1024*1024) | 0x1B;
404 + m68k_machtype = MACH_CFMMU;
405 + m68k_fputype = FPU_CFV4E;
406 + m68k_mmutype = MMU_CFV4E;
407 + m68k_cputype = CPU_CFV4E;
409 + m68k_num_memory = 0;
410 + m68k_memory[m68k_num_memory].addr = CONFIG_SDRAM_BASE;
411 + m68k_memory[m68k_num_memory++].size = CONFIG_SDRAM_SIZE;
413 + if (!uboot_commandline(m68k_command_line)) {
414 +#if defined(CONFIG_BOOTPARAM)
415 + strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE-1);
417 + strcpy(m68k_command_line, DEFAULT_COMMAND_LINE);
422 +#if defined(CONFIG_BLK_DEV_INITRD)
423 + /* add initrd image */
424 + record = (struct bi_record *) ((void *)record + record->size);
425 + record->tag = BI_RAMDISK;
426 + record->size = sizeof(record->tag) + sizeof(record->size)
427 + + sizeof(record->data[0]) + sizeof(record->data[1]);
430 + /* Mark end of tags. */
431 + record = (struct bi_record *) ((void *) record + record->size);
433 + record->data[0] = 0;
434 + record->data[1] = 0;
435 + record->size = sizeof(record->tag) + sizeof(record->size)
436 + + sizeof(record->data[0]) + sizeof(record->data[1]);
438 + /* Invalidate caches via CACR */
439 + cacr_set(CACHE_DISABLE_MODE);
441 + /* Turn on caches via CACR, enable EUSP */
442 + cacr_set(CACHE_INITIAL_MODE);
445 +void settimericr(unsigned int timer, unsigned int level)
447 + volatile unsigned char *icrp;
453 + case 2: irq = 33; icr = MCFSIM_ICR_TIMER2; break;
454 + default: irq = 32; icr = MCFSIM_ICR_TIMER1; break;
457 + icrp = (volatile unsigned char *) (icr);
459 + coldfire_enable_irq0(irq);
463 +/* Assembler routines */
464 +asmlinkage void buserr(void);
465 +asmlinkage void trap(void);
466 +asmlinkage void system_call(void);
467 +asmlinkage void inthandler(void);
469 +void __init coldfire_trap_init(void)
474 + vectors = (e_vector *)MCF_RAMBAR1;
476 + * There is a common trap handler and common interrupt
477 + * handler that handle almost every vector. We treat
478 + * the system call and bus error special, they get their
479 + * own first level handlers.
481 + for (i = 3; (i <= 23); i++)
483 + for (i = 33; (i <= 63); i++)
485 + for (i = 24; (i <= 31); i++)
486 + vectors[i] = inthandler;
487 + for (i = 64; (i < 255); i++)
488 + vectors[i] = inthandler;
491 + vectors[2] = buserr;
492 + vectors[32] = system_call;
495 +void coldfire_tick(void)
497 + /* Reset the ColdFire timer */
498 + __raw_writeb(MCF_DTIM_DTER_CAP | MCF_DTIM_DTER_REF, MCF_DTIM0_DTER);
501 +void __init coldfire_sched_init(irq_handler_t handler)
503 + unsigned int mcf_timerlevel = 5;
504 + unsigned int mcf_timervector = 64+32;
506 + __raw_writew(MCF_DTIM_DTMR_RST_RST, MCF_DTIM0_DTMR);
507 + __raw_writel(((MCF_BUSCLK / 16) / HZ), MCF_DTIM0_DTRR);
508 + __raw_writew(MCF_DTIM_DTMR_ORRI | MCF_DTIM_DTMR_CLK_DIV16 |
509 + MCF_DTIM_DTMR_FRR | MCF_DTIM_DTMR_RST_EN, \
512 + request_irq(mcf_timervector, handler, SA_INTERRUPT, \
513 + "timer", (void *)MCF_DTIM0_DTMR);
515 + settimericr(1, mcf_timerlevel);
518 +int timerirqpending(int timer)
520 + unsigned int imr = 0;
523 + case 1: imr = 0x1; break;
524 + case 2: imr = 0x2; break;
528 + return (getiprh() & imr);
531 +unsigned long coldfire_gettimeoffset(void)
533 + volatile unsigned long trr, tcn, offset;
535 + tcn = __raw_readw(MCF_DTIM0_DTCN);
536 + trr = __raw_readl(MCF_DTIM0_DTRR);
537 + offset = (tcn * (1000000 / HZ)) / trr;
539 + /* Check if we just wrapped the counters and maybe missed a tick */
540 + if ((offset < (1000000 / HZ / 2)) && timerirqpending(1))
541 + offset += 1000000 / HZ;
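	/*
	 * Numeric sketch of the scaling (hypothetical 120 MHz bus clock and
	 * HZ = 100 -- illustrative values, not taken from this patch):
	 * DTRR = (120000000/16)/100 = 75000, so a DTCN reading of 37500 gives
	 * offset = (37500 * (1000000/100)) / 75000 = 5000 us, i.e. half a tick.
	 */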
545 +void coldfire_reboot(void)
547 + /* disable interrupts and do a software reset */
548 + asm("movew #0x2700, %%sr\n\t"
549 + "moveb #0x80, %%d0\n\t"
550 + "moveb %%d0, 0xfc0a0000\n\t"
554 +/* int coldfire_hwclk(int i, struct rtc_time *t)
556 + printk ("Real time clock needs porting.\n");
560 +static void coldfire_get_model(char *model)
562 + sprintf(model, "Version 4 ColdFire");
565 +void coldfire_enable_irq(unsigned int vec)
567 + unsigned long flags;
571 + if (((int)vec < 0) || (vec > 63)) {
572 + printk(KERN_WARNING "enable_irq %d failed\n", vec);
576 + local_irq_save(flags);
579 + MCF_INTC0_IMRL &= ~(1 << vec);
581 + MCF_INTC0_IMRH &= ~(1 << (vec - 32));
582 + local_irq_restore(flags);
585 +void coldfire_disable_irq(unsigned int vec)
587 + unsigned long flags;
591 + if (((int)vec < 0) || (vec > 63)) {
592 + printk(KERN_WARNING "disable_irq %d failed\n", vec);
596 + local_irq_save(flags);
597 + if (--irq_enable[vec] == 0) {
599 + MCF_INTC0_IMRL |= (1 << vec);
601 + MCF_INTC0_IMRH |= (1 << (vec - 32));
604 + local_irq_restore(flags);
608 +coldfire_bootmem_alloc(unsigned long memory_start, unsigned long memory_end)
610 + unsigned long base_pfn;
612 + /* compute total pages in system */
613 + num_pages = PAGE_ALIGN(memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
615 +	/* align start/end to page boundaries */
616 + memory_start = PAGE_ALIGN(memory_start);
617 + memory_end = memory_end & PAGE_MASK;
620 + base_pfn = __pa(PAGE_OFFSET) >> PAGE_SHIFT;
621 + min_low_pfn = __pa(memory_start) >> PAGE_SHIFT;
622 + max_low_pfn = __pa(memory_end) >> PAGE_SHIFT;
624 + high_memory = (void *)memory_end;
625 + availmem = memory_start;
627 + /* setup bootmem data */
628 + m68k_setup_node(0);
629 + availmem += init_bootmem_node(NODE_DATA(0), min_low_pfn,
630 + base_pfn, max_low_pfn);
631 + availmem = PAGE_ALIGN(availmem);
632 + free_bootmem(__pa(availmem), memory_end - (availmem));
635 +void __init config_coldfire(void)
637 + unsigned long endmem, startmem;
641 + * Calculate endmem from m68k_memory, assume all are contiguous
643 +	startmem = ((((unsigned long) &_end) + (PAGE_SIZE - 1)) & PAGE_MASK);
644 + endmem = PAGE_OFFSET;
645 + for (i = 0; i < m68k_num_memory; ++i)
646 + endmem += m68k_memory[i].size;
648 +	printk(KERN_INFO "starting up linux startmem 0x%lx, endmem 0x%lx, "
649 +		"size %luMB\n", startmem, endmem, (endmem - startmem) >> 20);
651 + memset(irq_enable, 0, sizeof(irq_enable));
654 + * Setup coldfire mach-specific handlers
656 + mach_max_dma_address = 0xffffffff;
657 + mach_sched_init = coldfire_sched_init;
658 + mach_tick = coldfire_tick;
659 + mach_gettimeoffset = coldfire_gettimeoffset;
660 + mach_reset = coldfire_reboot;
661 +/* mach_hwclk = coldfire_hwclk; to be done */
662 + mach_get_model = coldfire_get_model;
664 + coldfire_bootmem_alloc(startmem, endmem);
669 +/* #ifdef CONFIG_BLK_DEV_INITRD
670 + if (m68k_ramdisk.size) {
671 + reserve_bootmem (__pa(m68k_ramdisk.addr), m68k_ramdisk.size);
672 + initrd_start = (unsigned long) m68k_ramdisk.addr;
673 + initrd_end = initrd_start + m68k_ramdisk.size;
674 + printk (KERN_DEBUG "initrd: %08lx - %08lx\n", initrd_start,
679 +#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_FRAMEBUFFER_CONSOLE)
680 + conswitchp = &dummy_con;
683 +#if defined(CONFIG_SERIAL_COLDFIRE)
685 + * This causes trouble when it is re-registered later.
686 + * Currently this is fixed by conditionally commenting
687 + * out the register_console in mcf_serial.c
689 + register_console(&mcfrs_console);
693 +++ b/arch/m68k/coldfire/entry.S
696 + * arch/m68k/coldfire/entry.S
698 + * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
699 + * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
700 + * Kenneth Albanowski <kjahds@kjahds.com>,
701 + * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
702 + * Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
703 + * Matt Waddel Matt.Waddel@freescale.com
704 + * Kurt Mahan kmahan@freescale.com
705 + * Copyright Freescale Semiconductor, Inc. 2007
709 + * arch/m68knommu/platform/5307/entry.S &
710 + * arch/m68k/kernel/entry.S
712 + * Copyright (C) 1991, 1992 Linus Torvalds
714 + * This file is subject to the terms and conditions of the GNU General Public
715 + * License. See the file README.legal in the main directory of this archive
716 + * for more details.
718 + * Linux/m68k support by Hamish Macdonald
720 + * ColdFire support by Greg Ungerer (gerg@snapgear.com)
721 + * 5307 fixes by David W. Miller
722 + * linux 2.4 support David McCullough <davidm@snapgear.com>
723 + * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
724 + * Ported to MMU ColdFire by Matt Waddel
727 +#include <linux/sys.h>
728 +#include <linux/linkage.h>
729 +#include <asm/cf_entry.h>
730 +#include <asm/errno.h>
731 +#include <asm/setup.h>
732 +#include <asm/segment.h>
733 +#include <asm/traps.h>
734 +#include <asm/unistd.h>
739 + * - TINFO_PREEMPT (struct thread_info / preempt_count)
740 + * Used to keep track of preemptability
741 + * - TINFO_FLAGS (struct thread_info / flags - include/asm-m68k/thread_info.h)
742 + * Various bit flags that are checked for scheduling/tracing
743 + * Bits 0-7 are checked every exception exit
744 + * 8-15 are checked every syscall exit
747 + * TIF_NEED_RESCHED 7
748 + * TIF_DELAYED_TRACE 14
749 + * TIF_SYSCALL_TRACE 15
750 + * TIF_MEMDIE 16 (never checked here)
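 *
 * Note on the byte offsets used on TINFO_FLAGS below: flags is a 32-bit
 * big-endian word, so the byte at TINFO_FLAGS+2 holds bits 15-8 (the
 * syscall-exit work bits; TIF_SYSCALL_TRACE is the sign bit of that byte,
 * hence the tstb/jmi pair) and the byte at TINFO_FLAGS+3 holds bits 7-0
 * (the exception-exit work bits such as TIF_SIGPENDING and TIF_NEED_RESCHED).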
767 +.globl ret_from_exception
768 +.globl ret_from_signal
769 +.globl sys_call_table
770 +.globl ret_from_interrupt
776 + movel %sp,%sp@- /* stack frame pointer argument */
779 + jra .Lret_from_exception
784 + movel %sp,%sp@- /* stack frame pointer argument */
787 + jra .Lret_from_exception
789 + /* After a fork we jump here directly from resume,
790 + %d1 contains the previous task schedule_tail */
791 +ENTRY(ret_from_fork)
795 + jra .Lret_from_exception
798 + movel #-ENOSYS,%d1 /* needed for strace */
799 + movel %d1,%sp@(PT_D0)
803 + RESTORE_SWITCH_STACK
805 + movel %sp@(PT_ORIG_D0),%d0
806 + cmpl #NR_syscalls,%d0
810 + movel %d1,%sp@(PT_D0)
811 + jra ret_from_exception
817 + RESTORE_SWITCH_STACK
819 + jra .Lret_from_exception
821 +ENTRY(ret_from_signal)
822 + RESTORE_SWITCH_STACK
824 + jra .Lret_from_exception
830 + /* save top of frame */
831 + movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
833 + /* syscall trace */
834 + tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
835 + jmi do_trace_entry /* SYSCALL_TRACE is set */
836 + cmpl #NR_syscalls,%d0
839 + movel #sys_call_table,%a0
844 + movel %d0,%sp@(PT_D0) /* save the return value */
846 + movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
847 + jne syscall_exit_work /* flags set so process */
851 + btst #5,%sp@(PT_SR) /* check if returning to kernel */
852 + bnes 1b /* if so, skip resched, signals */
854 + btstl #15,%d0 /* check if SYSCALL_TRACE */
856 + btstl #14,%d0 /* check if DELAYED_TRACE */
857 + jne do_delayed_trace
858 + btstl #6,%d0 /* check if SIGPENDING */
859 + jne do_signal_return
860 + pea resume_userspace
863 +ENTRY(ret_from_exception)
864 +.Lret_from_exception:
865 + btst #5,%sp@(PT_SR) /* check if returning to kernel */
866 + bnes 1f /* if so, skip resched, signals */
867 + movel %d0,%sp@- /* Only allow interrupts when we are */
868 + move %sr,%d0 /* last one on the kernel stack, */
869 + andl #ALLOWINT,%d0 /* otherwise stack overflow can occur */
870 + move %d0,%sr /* during heavy interrupt load. */
874 + moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
875 + jne exit_work /* SIGPENDING and/or NEED_RESCHED set */
879 + /* save top of frame */
880 + movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
881 + btstl #6,%d0 /* check for SIGPENDING in flags */
882 + jne do_signal_return
883 + pea resume_userspace
887 + subql #4,%sp /* dummy return address */
889 + pea %sp@(SWITCH_STACK_SIZE)
893 + RESTORE_SWITCH_STACK
895 + jbra resume_userspace
898 + bclr #7,%sp@(PT_SR) /* clear trace bit in SR */
899 + pea 1 /* send SIGTRAP */
900 + movel %curptr,%sp@-
905 + jbra resume_userspace
908 + * This is the interrupt handler (for all hardware interrupt
909 + * sources). It figures out the vector number and calls the appropriate
910 + * interrupt service routine directly.
915 + addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
916 + /* put exception # in d0 */
917 + movel %sp@(PT_VECTOR),%d0
918 + swap %d0 /* extract bits 25:18 */
923 + movel %d0,%sp@- /* put vector # on stack */
924 +auto_irqhandler_fixup = . + 2
925 + jbsr process_int /* process the IRQ */
926 + addql #8,%sp /* pop parameters off stack */
928 +ENTRY(ret_from_interrupt)
931 + subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
932 + jeq ret_from_last_interrupt
936 +ret_from_last_interrupt:
937 + moveb %sp@(PT_SR),%d0
938 + andl #(~ALLOWINT>>8)&0xff,%d0
941 + /* check if we need to do software interrupts */
942 + tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
943 + jeq .Lret_from_exception
944 + pea ret_from_exception
947 +ENTRY(user_inthandler)
950 + addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
951 + /* put exception # in d0 */
952 + movel %sp@(PT_VECTOR),%d0
953 +user_irqvec_fixup = . + 2
954 + swap %d0 /* extract bits 25:18 */
959 + movel %d0,%sp@- /* put vector # on stack */
960 +user_irqhandler_fixup = . + 2
961 + jbsr process_int /* process the IRQ */
962 + addql #8,%sp /* pop parameters off stack */
964 + subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
965 + jeq ret_from_last_interrupt
968 +/* Handler for uninitialized and spurious interrupts */
970 +ENTRY(bad_inthandler)
973 + addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
979 + subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
980 + jeq ret_from_last_interrupt
985 + pea %sp@(SWITCH_STACK_SIZE)
988 + RESTORE_SWITCH_STACK
993 + pea %sp@(SWITCH_STACK_SIZE)
996 + RESTORE_SWITCH_STACK
1001 + pea %sp@(SWITCH_STACK_SIZE)
1004 + RESTORE_SWITCH_STACK
1007 +ENTRY(sys_sigsuspend)
1009 + pea %sp@(SWITCH_STACK_SIZE)
1010 + jbsr do_sigsuspend
1012 + RESTORE_SWITCH_STACK
1015 +ENTRY(sys_rt_sigsuspend)
1017 + pea %sp@(SWITCH_STACK_SIZE)
1018 + jbsr do_rt_sigsuspend
1020 + RESTORE_SWITCH_STACK
1023 +ENTRY(sys_sigreturn)
1026 + RESTORE_SWITCH_STACK
1029 +ENTRY(sys_rt_sigreturn)
1031 + jbsr do_rt_sigreturn
1032 + RESTORE_SWITCH_STACK
1037 + * Beware - when entering resume, prev (the current task) is
1038 + * in a0, next (the new task) is in a1, so don't change these
1039 + * registers until their contents are no longer needed.
1044 + movew %d0,%a0@(TASK_THREAD+THREAD_SR)
1047 + /* Save USP via %a1 (which is saved/restored from %d0) */
1050 + movel %a1,%a0@(TASK_THREAD+THREAD_USP)
1053 + /* save non-scratch registers on stack */
1056 + /* save current kernel stack pointer */
1057 + movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
1059 + /* Return previous task in %d1 */
1062 + /* switch to new task (a1 contains new task) */
1065 + /* restore the kernel stack pointer */
1066 + movel %a1@(TASK_THREAD+THREAD_KSP),%sp
1068 + /* restore non-scratch registers */
1069 + RESTORE_SWITCH_STACK
1071 + /* restore user stack pointer */
1072 + movel %a1@(TASK_THREAD+THREAD_USP),%a0
1075 + /* restore status register */
1076 + movew %a1@(TASK_THREAD+THREAD_SR),%d0
1084 + .long sys_ni_syscall /* 0 - old "setup()" system call*/
1089 + .long sys_open /* 5 */
1094 + .long sys_unlink /* 10 */
1099 + .long sys_chmod /* 15 */
1101 + .long sys_ni_syscall /* old break syscall holder */
1104 + .long sys_getpid /* 20 */
1106 + .long sys_oldumount
1107 + .long sys_setuid16
1108 + .long sys_getuid16
1109 + .long sys_stime /* 25 */
1114 + .long sys_utime /* 30 */
1115 + .long sys_ni_syscall /* old stty syscall holder */
1116 + .long sys_ni_syscall /* old gtty syscall holder */
1119 + .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
1124 + .long sys_rmdir /* 40 */
1128 + .long sys_ni_syscall /* old prof syscall holder */
1129 + .long sys_brk /* 45 */
1130 + .long sys_setgid16
1131 + .long sys_getgid16
1133 + .long sys_geteuid16
1134 + .long sys_getegid16 /* 50 */
1136 + .long sys_umount /* recycled never used phys() */
1137 + .long sys_ni_syscall /* old lock syscall holder */
1139 + .long sys_fcntl /* 55 */
1140 + .long sys_ni_syscall /* old mpx syscall holder */
1142 + .long sys_ni_syscall /* old ulimit syscall holder */
1143 + .long sys_ni_syscall
1144 + .long sys_umask /* 60 */
1149 + .long sys_getpgrp /* 65 */
1151 + .long sys_sigaction
1152 + .long sys_sgetmask
1153 + .long sys_ssetmask
1154 + .long sys_setreuid16 /* 70 */
1155 + .long sys_setregid16
1156 + .long sys_sigsuspend
1157 + .long sys_sigpending
1158 + .long sys_sethostname
1159 + .long sys_setrlimit /* 75 */
1160 + .long sys_old_getrlimit
1161 + .long sys_getrusage
1162 + .long sys_gettimeofday
1163 + .long sys_settimeofday
1164 + .long sys_getgroups16 /* 80 */
1165 + .long sys_setgroups16
1169 + .long sys_readlink /* 85 */
1174 + .long old_mmap /* 90 */
1176 + .long sys_truncate
1177 + .long sys_ftruncate
1179 + .long sys_fchown16 /* 95 */
1180 + .long sys_getpriority
1181 + .long sys_setpriority
1182 + .long sys_ni_syscall /* old profil syscall holder */
1184 + .long sys_fstatfs /* 100 */
1185 + .long sys_ni_syscall /* ioperm for i386 */
1186 + .long sys_socketcall
1188 + .long sys_setitimer
1189 + .long sys_getitimer /* 105 */
1191 + .long sys_newlstat
1192 + .long sys_newfstat
1193 + .long sys_ni_syscall
1194 + .long sys_ni_syscall /* 110 */ /* iopl for i386 */
1196 + .long sys_ni_syscall /* obsolete idle() syscall */
1197 + .long sys_ni_syscall /* vm86old for i386 */
1199 + .long sys_swapoff /* 115 */
1203 + .long sys_sigreturn
1204 + .long sys_clone /* 120 */
1205 + .long sys_setdomainname
1206 + .long sys_newuname
1207 + .long sys_cacheflush /* modify_ldt for i386 */
1208 + .long sys_adjtimex
1209 + .long sys_mprotect /* 125 */
1210 + .long sys_sigprocmask
1211 + .long sys_ni_syscall /* old "create_module" */
1212 + .long sys_init_module
1213 + .long sys_delete_module
1214 + .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
1215 + .long sys_quotactl
1219 + .long sys_sysfs /* 135 */
1220 + .long sys_personality
1221 + .long sys_ni_syscall /* for afs_syscall */
1222 + .long sys_setfsuid16
1223 + .long sys_setfsgid16
1224 + .long sys_llseek /* 140 */
1225 + .long sys_getdents
1229 + .long sys_readv /* 145 */
1232 + .long sys_fdatasync
1234 + .long sys_mlock /* 150 */
1236 + .long sys_mlockall
1237 + .long sys_munlockall
1238 + .long sys_sched_setparam
1239 + .long sys_sched_getparam /* 155 */
1240 + .long sys_sched_setscheduler
1241 + .long sys_sched_getscheduler
1242 + .long sys_sched_yield
1243 + .long sys_sched_get_priority_max
1244 + .long sys_sched_get_priority_min /* 160 */
1245 + .long sys_sched_rr_get_interval
1246 + .long sys_nanosleep
1248 + .long sys_setresuid16
1249 + .long sys_getresuid16 /* 165 */
1250 + .long sys_getpagesize
1251 + .long sys_ni_syscall /* old sys_query_module */
1253 + .long sys_nfsservctl
1254 + .long sys_setresgid16 /* 170 */
1255 + .long sys_getresgid16
1257 + .long sys_rt_sigreturn
1258 + .long sys_rt_sigaction
1259 + .long sys_rt_sigprocmask /* 175 */
1260 + .long sys_rt_sigpending
1261 + .long sys_rt_sigtimedwait
1262 + .long sys_rt_sigqueueinfo
1263 + .long sys_rt_sigsuspend
1264 + .long sys_pread64 /* 180 */
1265 + .long sys_pwrite64
1266 +	.long	sys_lchown16
1269 + .long sys_capset /* 185 */
1270 + .long sys_sigaltstack
1271 + .long sys_sendfile
1272 + .long sys_ni_syscall /* streams1 */
1273 + .long sys_ni_syscall /* streams2 */
1274 + .long sys_vfork /* 190 */
1275 + .long sys_getrlimit
1277 + .long sys_truncate64
1278 + .long sys_ftruncate64
1279 + .long sys_stat64 /* 195 */
1284 + .long sys_getgid /* 200 */
1287 + .long sys_setreuid
1288 + .long sys_setregid
1289 + .long sys_getgroups /* 205 */
1290 + .long sys_setgroups
1292 + .long sys_setresuid
1293 + .long sys_getresuid
1294 + .long sys_setresgid /* 210 */
1295 + .long sys_getresgid
1299 + .long sys_setfsuid /* 215 */
1300 + .long sys_setfsgid
1301 + .long sys_pivot_root
1302 + .long sys_ni_syscall
1303 + .long sys_ni_syscall
1304 + .long sys_getdents64 /* 220 */
1307 + .long sys_setxattr
1308 + .long sys_lsetxattr
1309 + .long sys_fsetxattr /* 225 */
1310 + .long sys_getxattr
1311 + .long sys_lgetxattr
1312 + .long sys_fgetxattr
1313 + .long sys_listxattr
1314 + .long sys_llistxattr /* 230 */
1315 + .long sys_flistxattr
1316 + .long sys_removexattr
1317 + .long sys_lremovexattr
1318 + .long sys_fremovexattr
1319 + .long sys_futex /* 235 */
1320 + .long sys_sendfile64
1324 + .long sys_readahead /* 240 */
1325 + .long sys_io_setup
1326 + .long sys_io_destroy
1327 + .long sys_io_getevents
1328 + .long sys_io_submit
1329 + .long sys_io_cancel /* 245 */
1330 + .long sys_fadvise64
1331 + .long sys_exit_group
1332 + .long sys_lookup_dcookie
1333 + .long sys_epoll_create
1334 + .long sys_epoll_ctl /* 250 */
1335 + .long sys_epoll_wait
1336 + .long sys_remap_file_pages
1337 + .long sys_set_tid_address
1338 + .long sys_timer_create
1339 + .long sys_timer_settime /* 255 */
1340 + .long sys_timer_gettime
1341 + .long sys_timer_getoverrun
1342 + .long sys_timer_delete
1343 + .long sys_clock_settime
1344 + .long sys_clock_gettime /* 260 */
1345 + .long sys_clock_getres
1346 + .long sys_clock_nanosleep
1347 + .long sys_statfs64
1348 + .long sys_fstatfs64
1349 + .long sys_tgkill /* 265 */
1351 + .long sys_fadvise64_64
1353 + .long sys_get_mempolicy
1354 + .long sys_set_mempolicy /* 270 */
1356 + .long sys_mq_unlink
1357 + .long sys_mq_timedsend
1358 + .long sys_mq_timedreceive
1359 + .long sys_mq_notify /* 275 */
1360 + .long sys_mq_getsetattr
1362 + .long sys_ni_syscall /* for sys_vserver */
1364 + .long sys_request_key /* 280 */
1366 + .long sys_ioprio_set
1367 + .long sys_ioprio_get
1368 + .long sys_inotify_init
1369 + .long sys_inotify_add_watch /* 285 */
1370 + .long sys_inotify_rm_watch
1371 + .long sys_migrate_pages
1374 + .long sys_mknodat /* 290 */
1375 + .long sys_fchownat
1376 + .long sys_futimesat
1377 + .long sys_fstatat64
1378 + .long sys_unlinkat
1379 + .long sys_renameat /* 295 */
1381 + .long sys_symlinkat
1382 + .long sys_readlinkat
1383 + .long sys_fchmodat
1384 + .long sys_faccessat /* 300 */
1385 + .long sys_ni_syscall /* Reserved for pselect6 */
1386 + .long sys_ni_syscall /* Reserved for ppoll */
1388 + .long sys_set_robust_list
1389 + .long sys_get_robust_list /* 305 */
1391 + .long sys_sync_file_range
1393 + .long sys_vmsplice
1394 + .long sys_move_pages /* 310 */
1397 +++ b/arch/m68k/coldfire/head.S
1400 + * head.S is the MMU enabled ColdFire specific initial boot code
1402 + * Ported to ColdFire by
1403 + * Matt Waddel Matt.Waddel@freescale.com
1404 + * Kurt Mahan kmahan@freescale.com
1405 + * Copyright Freescale Semiconductor, Inc. 2007
1407 + * This program is free software; you can redistribute it and/or modify
1408 + * it under the terms of the GNU General Public License as published by
1409 + * the Free Software Foundation; either version 2 of the License, or
1410 + * (at your option) any later version.
1412 + * Parts of this code came from arch/m68k/kernel/head.S
1414 +#include <linux/linkage.h>
1415 +#include <linux/init.h>
1416 +#include <asm/bootinfo.h>
1417 +#include <asm/setup.h>
1418 +#include <asm/entry.h>
1419 +#include <asm/pgtable.h>
1420 +#include <asm/page.h>
1421 +#include <asm/coldfire.h>
1422 +#include <asm/mcfuart.h>
1423 +#include <asm/cfcache.h>
1427 +.globl kernel_pg_dir
1433 +/* When debugging use readable names for labels */
1435 +#define L(name) .head.S.##name
1437 +#define L(name) .head.S./**/name
1441 +#define L(name) .L##name
1443 +#define L(name) .L/**/name
1447 +/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
1449 +#define __INITDATA .data
1450 +#define __FINIT .previous
1454 + * Setup ACR mappings to provide the following memory map:
1456 + * 0xA0000000 -> 0xAFFFFFFF [0] NO CACHE / PRECISE / SUPER ONLY
1457 + * 0xFC000000 -> 0xFCFFFFFF [1] NO CACHE / PRECISE / SUPER ONLY
1459 + * None currently (mapped via TLBs)
1462 +#define ACR0_DEFAULT #0xA00FA048 /* ACR0 default value */
1463 +#define ACR1_DEFAULT #0xFC00A040 /* ACR1 default value */
1464 +#define ACR2_DEFAULT #0x00000000 /* ACR2 default value */
1465 +#define ACR3_DEFAULT #0x00000000 /* ACR3 default value */
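/*
 * Decoding ACR0_DEFAULT against the map above (assuming the usual ColdFire
 * V4e ACR field layout -- confirm against the MCF5445x reference manual):
 *   0xA00FA048: bits 31-24 = 0xA0 base address     -> 0xA0000000
 *               bits 23-16 = 0x0F address mask     -> 256 MB window
 *               bit  15    = 1    enable
 *               bits 14-13 = 01   match supervisor accesses only
 *               bits  6-5  = 10   cache-inhibited, precise
 *               bit   3    = 1    supervisor protect
 */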
1467 +/* ACR mapping for FPGA (maps 0) */
1468 +#define ACR0_FPGA #0x000FA048 /* ACR0 enable FPGA */
1470 +/* Several macros to make the writing of subroutines easier:
1471 + * - func_start marks the beginning of the routine, which sets up the frame
1472 + *   register and saves the registers; it also defines another macro
1473 + * to automatically restore the registers again.
1474 + * - func_return marks the end of the routine and simply calls the prepared
1475 + * macro to restore registers and jump back to the caller.
1476 + * - func_define generates another macro to automatically put arguments
1477 + *   onto the stack, call the subroutine, and clean up the stack again.
1480 +.macro load_symbol_address symbol,register
1481 + movel #\symbol,\register
1484 +.macro func_start name,saveregs,savesize,stack=0
1486 + linkw %a6,#-\stack
1487 + subal #(\savesize),%sp
1488 + moveml \saveregs,%sp@
1489 +.set stackstart,-\stack
1491 +.macro func_return_\name
1492 + moveml %sp@,\saveregs
1493 + addal #(\savesize),%sp
1499 +.macro func_return name
1503 +.macro func_call name
1507 +.macro move_stack nr,arg1,arg2,arg3,arg4
1509 + move_stack "(\nr-1)",\arg2,\arg3,\arg4
1514 +.macro func_define name,nr=0
1515 +.macro \name arg1,arg2,arg3,arg4
1516 + move_stack \nr,\arg1,\arg2,\arg3,\arg4
1519 + lea %sp@(\nr*4),%sp
1524 +func_define serial_putc,1
1528 + func_call serial_putc
1547 + mmu_map - creates a new TLB entry
1549 + virt_addr Must be on proper boundary
1550 + phys_addr Must be on proper boundary
1551 + itlb MMUOR_ITLB if instruction TLB or 0
1552 + asid address space ID
1553 + shared_global MMUTR_SG if shared between different ASIDs or 0
1554 + size_code MMUDR_SZ1M 1 MB
1558 + cache_mode MMUDR_INC instruction non-cacheable
1559 + MMUDR_IC instruction cacheable
1560 + MMUDR_DWT data writethrough
1561 + MMUDR_DCB data copyback
1562 + MMUDR_DNCP data non-cacheable, precise
1563 + MMUDR_DNCIP data non-cacheable, imprecise
1564 + super_prot MMUDR_SP if user mode generates exception or 0
1565 + readable MMUDR_R if permits read access (data TLB) or 0
1566 + writable MMUDR_W if permits write access (data TLB) or 0
1567 + executable MMUDR_X if permits execute access (instruction TLB) or 0
1568 + locked MMUDR_LK prevents TLB entry from being replaced or 0
1569 + temp_data_reg a data register to use for temporary values
1571 +.macro mmu_map virt_addr,phys_addr,itlb,asid,shared_global,size_code,cache_mode,super_prot,readable,writable,executable,locked,temp_data_reg
1572 + /* Set up search of TLB. */
1573 + movel #(\virt_addr+1), \temp_data_reg
1574 + movel \temp_data_reg, MMUAR
1576 + movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
1577 + movew \temp_data_reg, (MMUOR)
1578 + /* Set up tag value. */
1579 + movel #(\virt_addr + \asid + \shared_global + MMUTR_V), \temp_data_reg
1580 + movel \temp_data_reg, MMUTR
1581 + /* Set up data value. */
1582 + movel #(\phys_addr + \size_code + \cache_mode + \super_prot + \readable + \writable + \executable + \locked), \temp_data_reg
1583 + movel \temp_data_reg, MMUDR
1585 + movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
1586 + movew \temp_data_reg, (MMUOR)
1587 +.endm /* mmu_map */
1589 +.macro mmu_unmap virt_addr,itlb,temp_data_reg
1590 + /* Set up search of TLB. */
1591 + movel #(\virt_addr+1), \temp_data_reg
1592 + movel \temp_data_reg, MMUAR
1594 + movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
1595 + movew \temp_data_reg, (MMUOR)
1596 + /* Test for hit. */
1597 + movel MMUSR,\temp_data_reg
1598 + btst #MMUSR_HITN,\temp_data_reg
1600 + /* Read the TLB. */
1601 + movel #(MMUOR_RW + MMUOR_ACC +\itlb), \temp_data_reg
1602 + movew \temp_data_reg, (MMUOR)
1603 + movel MMUSR,\temp_data_reg
1604 + /* Set up tag value. */
1605 + movel #0, \temp_data_reg
1606 + movel \temp_data_reg, MMUTR
1607 + /* Set up data value. */
1608 + movel #0, \temp_data_reg
1609 + movel \temp_data_reg, MMUDR
1611 + movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
1612 + movew \temp_data_reg, (MMUOR)
1614 +.endm /* mmu_unmap */
1617 +.section ".text.head","ax"
1619 +/* Version numbers of the bootinfo interface -- if we later pass info
1620 + * from boot ROM we might want to put something real here.
1622 + * The area from _stext to _start will later be used as kernel pointer table
1624 + bras 1f /* Jump over bootinfo version numbers */
1626 + .long BOOTINFOV_MAGIC
1628 +1: jmp __start-0x80000000
1630 +.equ kernel_pg_dir,_stext
1631 +.equ .,_stext+0x1000
1638 +/* Save the location of u-boot info - cmd line, bd_info, etc. */
1639 + movel %a7,%a4 /* Don't use %a4 before cf_early_init */
1640 + addl #0x80000004,%a4 /* 0x80000004= 1 stack push + high mem offset */
1642 +/* Setup initial stack pointer */
1643 + movel #0x40001000,%sp
1649 + movel #(MCF_RAMBAR1 + 0x221), %d0
1650 + movec %d0, %rambar1
1653 + movel #(MMU_BASE+1),%d0
1654 + movecl %d0,%mmubar
1655 + movel #MMUOR_CA,%a0 /* Clear tlb entries */
1657 + movel #(MMUOR_CA + MMUOR_ITLB),%a0 /* Use ITLB for searches */
1659 + movel #0,%a0 /* Clear Addr Space User ID */
1663 + movel ACR0_DEFAULT, %d0 /* ACR0 (DATA) setup */
1665 + movel ACR1_DEFAULT, %d0 /* ACR1 (DATA) setup */
1667 + movel ACR2_DEFAULT, %d0 /* ACR2 (CODE) setup */
1669 + movel ACR3_DEFAULT, %d0 /* ACR3 (CODE) setup */
1672 +	/* If you change the memory size to another value, make a matching
1673 +	   change to zones_size[] in paging_init() (cf-mmu.c). */
1675 + /* Map 256MB as code */
1676 + mmu_map (PAGE_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), \
1677 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1678 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1679 + mmu_map (PAGE_OFFSET+1*0x1000000), (PHYS_OFFSET+1*0x1000000), \
1680 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1681 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1682 + mmu_map (PAGE_OFFSET+2*0x1000000), (PHYS_OFFSET+2*0x1000000), \
1683 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1684 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1685 + mmu_map (PAGE_OFFSET+3*0x1000000), (PHYS_OFFSET+3*0x1000000), \
1686 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1687 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1688 + mmu_map (PAGE_OFFSET+4*0x1000000), (PHYS_OFFSET+4*0x1000000), \
1689 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1690 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1691 + mmu_map (PAGE_OFFSET+5*0x1000000), (PHYS_OFFSET+5*0x1000000), \
1692 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1693 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1694 + mmu_map (PAGE_OFFSET+6*0x1000000), (PHYS_OFFSET+6*0x1000000), \
1695 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1696 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1697 + mmu_map (PAGE_OFFSET+7*0x1000000), (PHYS_OFFSET+7*0x1000000), \
1698 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1699 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1700 + mmu_map (PAGE_OFFSET+8*0x1000000), (PHYS_OFFSET+8*0x1000000), \
1701 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1702 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1703 + mmu_map (PAGE_OFFSET+9*0x1000000), (PHYS_OFFSET+9*0x1000000), \
1704 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1705 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1706 + mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), \
1707 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1708 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1709 + mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), \
1710 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1711 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1712 + mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), \
1713 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1714 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1715 + mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), \
1716 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1717 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1718 + mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), \
1719 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1720 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1721 + mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), \
1722 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
1723 + 0, 0, MMUDR_X, MMUDR_LK, %d0
1725 + /* Map 256MB as data also */
1726 + mmu_map (PAGE_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), 0, 0, \
1727 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1729 + mmu_map (PAGE_OFFSET+1*0x1000000), (PHYS_OFFSET+1*0x1000000), 0, 0, \
1730 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1732 + mmu_map (PAGE_OFFSET+2*0x1000000), (PHYS_OFFSET+2*0x1000000), 0, 0, \
1733 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1735 + mmu_map (PAGE_OFFSET+3*0x1000000), (PHYS_OFFSET+3*0x1000000), 0, 0, \
1736 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1738 + mmu_map (PAGE_OFFSET+4*0x1000000), (PHYS_OFFSET+4*0x1000000), 0, 0, \
1739 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1741 + mmu_map (PAGE_OFFSET+5*0x1000000), (PHYS_OFFSET+5*0x1000000), 0, 0, \
1742 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1744 + mmu_map (PAGE_OFFSET+6*0x1000000), (PHYS_OFFSET+6*0x1000000), 0, 0, \
1745 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1747 + mmu_map (PAGE_OFFSET+7*0x1000000), (PHYS_OFFSET+7*0x1000000), 0, 0, \
1748 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1750 + mmu_map (PAGE_OFFSET+8*0x1000000), (PHYS_OFFSET+8*0x1000000), 0, 0, \
1751 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1753 + mmu_map (PAGE_OFFSET+9*0x1000000), (PHYS_OFFSET+9*0x1000000), 0, 0, \
1754 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1756 + mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), 0, 0, \
1757 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1759 + mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), 0, 0, \
1760 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1762 + mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), 0, 0, \
1763 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1765 + mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), 0, 0, \
1766 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1768 + mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), 0, 0, \
1769 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1771 + mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), 0, 0, \
1772 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1775 + /* Do unity mapping to enable the MMU. Map first 16 MB in place as
1776 + code (delete TLBs after MMU is enabled and we are executing in high
1778 + mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), \
1779 + MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_INC, MMUDR_SP, 0, \
1780 + 0, MMUDR_X, 0, %d0
1781 + /* Map first 16 MB as data too. */
1782 + mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), 0, 0, \
1783 + MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1787 + movel #(MMUCR_EN),%a0
1789 +	nop		/* This synchronizes the pipeline after a write to MMUCR */
1791 + movel #__running_high,%a0 /* Get around PC-relative addressing. */
1794 +ENTRY(__running_high)
1795 + load_symbol_address _stext,%sp
1796 + movel L(memory_start),%a0
1797 + movel %a0,availmem
1798 + load_symbol_address L(phys_kernel_start),%a0
1799 + load_symbol_address _stext,%a1
1801 + addl #PAGE_OFFSET,%a1
1804 + /* Unmap first 16 MB, code and data. */
1805 + mmu_unmap (PHYS_OFFSET+0*0x1000000), MMUOR_ITLB, %d0
1806 + mmu_unmap (PHYS_OFFSET+0*0x1000000), 0, %d0
1808 +/* Setup initial stack pointer */
1810 + lea init_thread_union+THREAD_SIZE,%sp
1811 + subl %a6,%a6 /* clear a6 for gdb */
1813 +#ifdef CONFIG_MCF_USER_HALT
1814 +/* Setup debug control reg to allow halts from user space */
1819 + movel %a4,uboot_info_stk /* save uboot info to variable */
1823 +.section ".text.head","ax"
1825 +func_start set_context,%d0,(1*4)
1828 +func_return set_context
1831 + * set_fpga(addr,val)
1833 + * Map in 0x00000000 -> 0x0fffffff and then do the write.
1838 + movel ACR0_FPGA, %d0
1843 + movel ACR0_DEFAULT, %d0
1854 +L(phys_kernel_start):
1859 + .long PAGE_OFFSET_RAW
1861 +#ifdef CONFIG_MCF_USER_HALT
1863 + * Set the User Halt Enable (UHE) bit in the debug control register.
1866 + .word 0x2c80 /* DR0 */
1867 + .word 0x00b0 /* 31:16 */
1868 + .word 0x0400 /* 15:0 -- enable UHE */
1869 + .word 0x0000 /* unused */
1874 +++ b/arch/m68k/coldfire/ints.c
1877 + * linux/arch/m68k/coldfire/ints.c -- General interrupt handling code
1879 + * Copyright (C) 1999-2002 Greg Ungerer (gerg@snapgear.com)
1880 + * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
1881 + * Kenneth Albanowski <kjahds@kjahds.com>,
1882 + * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
1883 + * Matt Waddel Matt.Waddel@freescale.com
1884 + * Copyright Freescale Semiconductor, Inc. 2007
1885 + * Kurt Mahan kmahan@freescale.com
1888 + * linux/arch/m68k/kernel/ints.c &
1889 + * linux/arch/m68knommu/5307/ints.c
1891 + * This file is subject to the terms and conditions of the GNU General Public
1892 + * License. See the file COPYING in the main directory of this archive
1893 + * for more details.
1896 +#include <linux/module.h>
1897 +#include <linux/types.h>
1898 +#include <linux/init.h>
1899 +#include <linux/sched.h>
1900 +#include <linux/kernel_stat.h>
1901 +#include <linux/errno.h>
1902 +#include <linux/seq_file.h>
1903 +#include <linux/interrupt.h>
1905 +#include <asm/system.h>
1906 +#include <asm/irq.h>
1907 +#include <asm/traps.h>
1908 +#include <asm/page.h>
1909 +#include <asm/machdep.h>
1910 +#include <asm/irq_regs.h>
1912 +#include <asm/mcfsim.h>
1915 + * IRQ Handler lists.
1917 +static struct irq_node *irq_list[SYS_IRQS];
1918 +static struct irq_controller *irq_controller[SYS_IRQS];
1919 +static int irq_depth[SYS_IRQS];
1924 +#ifdef CONFIG_M54455
1925 +void m5445x_irq_enable(unsigned int irq);
1926 +void m5445x_irq_disable(unsigned int irq);
1927 +static struct irq_controller m5445x_irq_controller = {
1929 + .lock = SPIN_LOCK_UNLOCKED,
1930 + .enable = m5445x_irq_enable,
1931 + .disable = m5445x_irq_disable,
1935 +#define POOL_SIZE SYS_IRQS
1936 +static struct irq_node pool[POOL_SIZE];
1937 +static struct irq_node *get_irq_node(void);
1939 +/* The number of spurious interrupts */
1940 +unsigned int num_spurious;
1941 +asmlinkage void handle_badint(struct pt_regs *regs);
1944 + * void init_IRQ(void)
1946 + * This function should be called during kernel startup to initialize
1947 + * the IRQ handling routines.
1949 +void __init init_IRQ(void)
1953 +#ifdef CONFIG_M54455
1954 + for (i = 0; i < SYS_IRQS; i++)
1955 + irq_controller[i] = &m5445x_irq_controller;
1960 + * process_int(unsigned long vec, struct pt_regs *fp)
1962 + * Process an interrupt. Called from entry.S.
1964 +asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
1966 + struct pt_regs *old_regs;
1967 + struct irq_node *node;
1968 + old_regs = set_irq_regs(fp);
1969 + kstat_cpu(0).irqs[vec]++;
1971 + node = irq_list[vec];
1973 + handle_badint(fp);
1976 + node->handler(vec, node->dev_id);
1977 + node = node->next;
1981 + set_irq_regs(old_regs);
1985 + * show_interrupts( struct seq_file *p, void *v)
1987 + * Called to show all the current interrupt information.
1989 +int show_interrupts(struct seq_file *p, void *v)
1991 + struct irq_controller *contr;
1992 + struct irq_node *node;
1993 + int i = *(loff_t *) v;
1995 + if ((i < NR_IRQS) && (irq_list[i])) {
1996 + contr = irq_controller[i];
1997 + node = irq_list[i];
1998 + seq_printf(p, "%-8s %3u: %10u %s", contr->name, i,
1999 + kstat_cpu(0).irqs[i], node->devname);
2000 + while ((node = node->next))
2001 + seq_printf(p, ", %s", node->devname);
2003 + seq_printf(p, "\n");
2010 + * get_irq_node(void)
2012 + * Get an irq node from the pool.
2014 +struct irq_node *get_irq_node(void)
2016 + struct irq_node *p = pool;
2019 + for (i = 0; i < POOL_SIZE; i++, p++) {
2020 + if (!p->handler) {
2021 + memset(p, 0, sizeof(struct irq_node));
2025 +	printk(KERN_INFO "%s(%s:%d): No more irq nodes, I suggest you "
2026 +		"increase POOL_SIZE\n", __FUNCTION__, __FILE__, __LINE__);
2030 +void init_irq_proc(void)
2032 + /* Insert /proc/irq driver here */
2035 +int setup_irq(unsigned int irq, struct irq_node *node)
2037 + struct irq_controller *contr;
2038 + struct irq_node **prev;
2039 + unsigned long flags;
2041 + if (irq >= NR_IRQS || !irq_controller[irq]) {
2042 + printk("%s: Incorrect IRQ %d from %s\n",
2043 + __FUNCTION__, irq, node->devname);
2047 + contr = irq_controller[irq];
2048 + spin_lock_irqsave(&contr->lock, flags);
2050 + prev = irq_list + irq;
2052 + /* Can't share interrupts unless both agree to */
2053 + if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
2054 + spin_unlock_irqrestore(&contr->lock, flags);
2058 + prev = &(*prev)->next;
2061 + if (!irq_list[irq]) {
2062 + if (contr->startup)
2063 + contr->startup(irq);
2065 + contr->enable(irq);
2067 + node->next = NULL;
2070 + spin_unlock_irqrestore(&contr->lock, flags);
2075 +int request_irq(unsigned int irq,
2076 + irq_handler_t handler,
2077 + unsigned long flags, const char *devname, void *dev_id)
2079 + struct irq_node *node = get_irq_node();
2085 + node->handler = handler;
2086 + node->flags = flags;
2087 + node->dev_id = dev_id;
2088 + node->devname = devname;
2090 + res = setup_irq(irq, node);
2092 + node->handler = NULL;
2096 +EXPORT_SYMBOL(request_irq);
2098 +void free_irq(unsigned int irq, void *dev_id)
2100 + struct irq_controller *contr;
2101 + struct irq_node **p, *node;
2102 + unsigned long flags;
2104 + if (irq >= NR_IRQS || !irq_controller[irq]) {
2105 + printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
2109 + contr = irq_controller[irq];
2110 + spin_lock_irqsave(&contr->lock, flags);
2112 + p = irq_list + irq;
2113 + while ((node = *p)) {
2114 + if (node->dev_id == dev_id)
2121 + node->handler = NULL;
2123 + printk(KERN_DEBUG "%s: Removing probably wrong IRQ %d\n",
2124 + __FUNCTION__, irq);
2126 + if (!irq_list[irq]) {
2127 + if (contr->shutdown)
2128 + contr->shutdown(irq);
2130 + contr->disable(irq);
2133 + spin_unlock_irqrestore(&contr->lock, flags);
2135 +EXPORT_SYMBOL(free_irq);
2137 +void enable_irq(unsigned int irq)
2139 + struct irq_controller *contr;
2140 + unsigned long flags;
2142 + if (irq >= NR_IRQS || !irq_controller[irq]) {
2143 + printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
2147 + contr = irq_controller[irq];
2148 + spin_lock_irqsave(&contr->lock, flags);
2149 + if (irq_depth[irq]) {
2150 + if (!--irq_depth[irq]) {
2151 + if (contr->enable)
2152 + contr->enable(irq);
2156 + spin_unlock_irqrestore(&contr->lock, flags);
2158 +EXPORT_SYMBOL(enable_irq);
2160 +void disable_irq(unsigned int irq)
2162 + struct irq_controller *contr;
2163 + unsigned long flags;
2165 + if (irq >= NR_IRQS || !irq_controller[irq]) {
2166 + printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
2170 + contr = irq_controller[irq];
2171 + spin_lock_irqsave(&contr->lock, flags);
2172 + if (!irq_depth[irq]++) {
2173 + if (contr->disable)
2174 + contr->disable(irq);
2176 + spin_unlock_irqrestore(&contr->lock, flags);
2178 +EXPORT_SYMBOL(disable_irq);
2180 +unsigned long probe_irq_on(void)
2184 +EXPORT_SYMBOL(probe_irq_on);
2186 +int probe_irq_off(unsigned long irqs)
2190 +EXPORT_SYMBOL(probe_irq_off);
2192 +asmlinkage void handle_badint(struct pt_regs *regs)
2194 + kstat_cpu(0).irqs[0]++;
2196 + printk(KERN_DEBUG "unexpected interrupt from %u\n", regs->vector);
2198 +EXPORT_SYMBOL(handle_badint);
2200 +#ifdef CONFIG_M54455
2202 + * M5445X Implementation
2204 +void m5445x_irq_enable(unsigned int irq)
2206 + /* enable the interrupt hardware */
2210 + /* adjust past non-hardware ints */
2213 + /* check for eport */
2214 + if ((irq > 0) && (irq < 8)) {
2215 + /* enable eport */
2216 + MCF_EPORT_EPPAR &= ~(3 << (irq*2)); /* level */
2217 + MCF_EPORT_EPDDR &= ~(1 << irq); /* input */
2218 + MCF_EPORT_EPIER |= 1 << irq; /* irq enabled */
2222 + /* controller 0 */
2223 + MCF_INTC0_ICR(irq) = 0x02;
2224 + MCF_INTC0_CIMR = irq;
2226 + /* controller 1 */
2228 + MCF_INTC1_ICR(irq) = 0x02;
2229 + MCF_INTC1_CIMR = irq;
2233 +void m5445x_irq_disable(unsigned int irq)
2235 + /* disable the interrupt hardware */
2239 + /* adjust past non-hardware ints */
2242 + /* check for eport */
2243 + if ((irq > 0) && (irq < 8)) {
2244 + /* disable eport */
2245 + MCF_EPORT_EPIER &= ~(1 << irq);
2249 + /* controller 0 */
2250 + MCF_INTC0_ICR(irq) = 0x00;
2251 + MCF_INTC0_SIMR = irq;
2253 + /* controller 1 */
2255 + MCF_INTC1_ICR(irq) = 0x00;
2256 + MCF_INTC1_SIMR = irq;
2261 +++ b/arch/m68k/coldfire/iomap.c
2264 + * arch/m68k/coldfire/iomap.c
2266 + * Generic coldfire iomap interface
2268 + * Based on the sh64 iomap.c by Paul Mundt.
2270 + * This file is subject to the terms and conditions of the GNU General Public
2271 + * License. See the file "COPYING" in the main directory of this archive
2272 + * for more details.
2274 +#include <linux/pci.h>
2275 +#include <asm/io.h>
2277 +void __iomem *__attribute__ ((weak))
2278 +ioport_map(unsigned long port, unsigned int len)
2280 + return (void __iomem *)port;
2282 +EXPORT_SYMBOL(pci_iomap);
2284 +void ioport_unmap(void __iomem *addr)
2288 +EXPORT_SYMBOL(pci_iounmap);
2290 +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
2292 + unsigned long start = pci_resource_start(dev, bar);
2293 + unsigned long len = pci_resource_len(dev, bar);
2294 + unsigned long flags = pci_resource_flags(dev, bar);
2295 +	printk(KERN_INFO "PCI_IOMAP: BAR=%d START=0x%lx LEN=0x%lx FLAGS=0x%lx\n",
2296 + bar, start, len, flags);
2300 + if (max && len > max)
2302 + if (flags & IORESOURCE_IO)
2303 + return ioport_map(start, len);
2304 + if (flags & IORESOURCE_MEM)
2305 + return (void __iomem *)start;
2310 +EXPORT_SYMBOL(pci_iomap);
2312 +void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
2316 +EXPORT_SYMBOL(pci_iounmap);
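A PCI driver built on these helpers maps a BAR once and then uses the ioread/iowrite accessors on the returned cookie; on this port a MEM BAR simply maps to its bus address. A minimal sketch (hypothetical pci_dev pointer and register offset, not taken from this patch):

	void __iomem *regs;
	u32 status;

	regs = pci_iomap(pdev, 0, 0);	/* map BAR0, no length cap */
	if (!regs)
		return -ENOMEM;
	status = ioread32(regs + 0x10);	/* hypothetical status register */
	pci_iounmap(pdev, regs);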
2318 +++ b/arch/m68k/coldfire/mcf5445x-pci.c
2321 + * arch/m68k/coldfire/mcf5445x-pci.c
2323 + * Coldfire M5445x specific PCI implementation.
2325 + * Copyright (c) 2007 Freescale Semiconductor, Inc.
2326 + * Kurt Mahan <kmahan@freescale.com>
2329 +#include <linux/delay.h>
2330 +#include <linux/pci.h>
2332 +#include <asm/mcfsim.h>
2333 +#include <asm/pci.h>
2334 +#include <asm/irq.h>
2337 + * Layout of the MCF5445x-to-PCI memory mappings:
2339 + * WIN MCF5445x PCI TYPE
2340 + * --- -------- --- ----
2341 + * [0] 0xA0000000 -> 0xA7FFFFFF 0xA0000000 -> 0xA7FFFFFF MEM
2342 + * [1] 0xA8000000 -> 0xABFFFFFF 0xA8000000 -> 0xABFFFFFF MEM
2343 + * [2] 0xAC000000 -> 0xAFFFFFFF 0xAC000000 -> 0xAFFFFFFF IO
2346 +#define MCF5445X_PCI_MEM_BASE 0xA0000000
2347 +#define MCF5445X_PCI_MEM_SIZE 0x0C000000
2349 +#define MCF5445X_PCI_CONFIG_BASE 0xAC000000
2350 +#define MCF5445X_PCI_CONFIG_SIZE 0x04000000
2352 +#define MCF5445X_PCI_IO_BASE 0xAC000000
2353 +#define MCF5445X_PCI_IO_SIZE 0x04000000
2355 +/* PCI Bus memory resource block */
2356 +struct resource pci_iomem_resource = {
2357 + .name = "PCI memory space",
2358 + .start = MCF5445X_PCI_MEM_BASE,
2359 + .flags = IORESOURCE_MEM,
2360 + .end = MCF5445X_PCI_MEM_BASE + MCF5445X_PCI_MEM_SIZE - 1
2363 +/* PCI Bus ioport resource block */
2364 +struct resource pci_ioport_resource = {
2365 + .name = "PCI I/O space",
2366 + .start = MCF5445X_PCI_IO_BASE,
2367 + .flags = IORESOURCE_IO,
2368 + .end = MCF5445X_PCI_IO_BASE + MCF5445X_PCI_IO_SIZE - 1
2372 + * The M54455EVB multiplexes all the PCI interrupts via
2373 + * the FPGA and routes them to a single interrupt. The
2374 + * PCI spec requires all PCI interrupt routines be smart
2375 + * enough to sort out their own interrupts.
2376 + * The interrupt source from the FPGA is configured
2379 +#define MCF5445X_PCI_IRQ 0x43
2381 +#define PCI_SLOTS 4
2386 +#define FPGA_PCI_IRQ_ENABLE (u32 *)0x09000000
2387 +#define FPGA_PCI_IRQ_STATUS (u32 *)0x09000004
2388 +#define FPGA_PCI_IRQ_ROUTE (u32 *)0x0900000c
2389 +#define FPGA_SEVEN_LED (u32 *)0x09000014
2391 +extern void set_fpga(u32 *addr, u32 val);
2394 +void mcf5445x_pci_dumpregs(void);
2398 + * static void mcf5445x_conf_device(struct pci_dev *dev)
2400 + * Machine-dependent configuration of the given device.
2404 + * dev - the pci device.
2407 +mcf5445x_conf_device(struct pci_dev *dev)
2409 + set_fpga(FPGA_PCI_IRQ_ENABLE, 0x0f);
2413 + * int mcf5445x_pci_config_read(unsigned int seg, unsigned int bus,
2414 + * unsigned int devfn, int reg,
2417 + * Read from PCI configuration space.
2420 +int mcf5445x_pci_config_read(unsigned int seg, unsigned int bus,
2421 + unsigned int devfn, int reg, int len, u32 *value)
2423 + u32 addr = MCF_PCI_PCICAR_BUSNUM(bus) |
2424 + MCF_PCI_PCICAR_DEVNUM(PCI_SLOT(devfn)) |
2425 + MCF_PCI_PCICAR_FUNCNUM(PCI_FUNC(devfn)) |
2426 + MCF_PCI_PCICAR_DWORD(reg) |
2429 + if ((bus > 255) || (devfn > 255) || (reg > 255)) {
2434 + /* setup for config mode */
2435 + MCF_PCI_PCICAR = addr;
2436 + __asm__ __volatile__("nop");
2440 + *value = *(volatile u8 *)(MCF5445X_PCI_CONFIG_BASE+(reg&3));
2443 + *value = le16_to_cpu(*(volatile u16 *)
2444 + (MCF5445X_PCI_CONFIG_BASE + (reg&2)));
2447 + *value = le32_to_cpu(*(volatile u32 *)
2448 + (MCF5445X_PCI_CONFIG_BASE));
2452 + /* clear config mode */
2453 + MCF_PCI_PCICAR = ~MCF_PCI_PCICAR_E;
2454 + __asm__ __volatile__("nop");
2460 + * int mcf5445x_pci_config_write(unsigned int seg, unsigned int bus,
2461 + * unsigned int devfn, int reg,
2464 + * Write to PCI configuration space
2466 +int mcf5445x_pci_config_write(unsigned int seg, unsigned int bus,
2467 + unsigned int devfn, int reg, int len, u32 value)
2469 + u32 addr = MCF_PCI_PCICAR_BUSNUM(bus) |
2470 + MCF_PCI_PCICAR_DEVNUM(PCI_SLOT(devfn)) |
2471 + MCF_PCI_PCICAR_FUNCNUM(PCI_FUNC(devfn)) |
2472 + MCF_PCI_PCICAR_DWORD(reg) |
2475 + if ((bus > 255) || (devfn > 255) || (reg > 255))
2478 + /* setup for config mode */
2479 + MCF_PCI_PCICAR = addr;
2480 + __asm__ __volatile__("nop");
2484 + *(volatile u8 *)(MCF5445X_PCI_CONFIG_BASE+(reg&3)) = (u8)value;
2487 + *(volatile u16 *)(MCF5445X_PCI_CONFIG_BASE+(reg&2)) =
2488 + cpu_to_le16((u16)value);
2491 + *(volatile u32 *)(MCF5445X_PCI_CONFIG_BASE) =
2492 + cpu_to_le32(value);
2496 + /* clear config mode */
2497 + MCF_PCI_PCICAR = ~MCF_PCI_PCICAR_E;
2498 + __asm__ __volatile__("nop");
2503 +/* hardware operations */
2504 +static struct pci_raw_ops mcf5445x_pci_ops = {
2505 + .read = mcf5445x_pci_config_read,
2506 + .write = mcf5445x_pci_config_write,
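With these raw ops installed (see raw_pci_ops in pci.c below), every configuration cycle goes through PCICAR as coded above. Driving them directly looks roughly like this, assuming the usual convention that the ops return 0 on success (hypothetical slot variable, not part of the patch):

	u32 id;

	if (!mcf5445x_pci_config_read(0, 0, PCI_DEVFN(slot, 0),
				      PCI_VENDOR_ID, 4, &id))
		printk(KERN_INFO "slot %d: vendor/device 0x%08x\n", slot, id);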
2510 + * irqreturn_t mcf5445x_pci_interrupt(int irq, void *dev)
2512 + * PCI controller interrupt handler.
2515 +mcf5445x_pci_interrupt(int irq, void *dev)
2517 + u32 status = MCF_PCI_PCIGSCR;
2519 + printk(KERN_INFO "PCI: Controller irq status=0x%08x\n", status);
2522 + MCF_PCI_PCIGSCR = status;
2524 + return IRQ_HANDLED;
2528 + * irqreturn_t mcf5445x_pci_arb_interrupt(int irq, void *dev)
2530 + * PCI Arbiter interrupt handler.
2533 +mcf5445x_pci_arb_interrupt(int irq, void *dev)
2535 + u32 status = MCF_PCIARB_PASR;
2537 + printk(KERN_INFO "PCI: Arbiter irq status=0x%08x\n", status);
2540 + MCF_PCIARB_PASR = status;
2541 + return IRQ_HANDLED;
2545 + * int init_mcf5445x_pci(void)
2547 + * Machine specific initialisation:
2549 + * - Initialise the PCI controller, arbiter and address windows
2550 + * - Install the controller/arbiter interrupt handlers and bus resources
2552 + * Result: status of the initialisation.
2555 +init_mcf5445x_pci(void)
2558 + * Initialize the PCI core
2561 + /* arbitration controller */
2562 + MCF_PCIARB_PACR = MCF_PCIARB_PACR_INTMPRI |
2563 + MCF_PCIARB_PACR_EXTMPRI(0x0f) |
2564 + MCF_PCIARB_PACR_INTMINTEN |
2565 + MCF_PCIARB_PACR_EXTMINTEN(0x0f);
2567 + /* pci pin assignment regs */
2568 + MCF_GPIO_PAR_PCI = MCF_GPIO_PAR_PCI_GNT0 |
2569 + MCF_GPIO_PAR_PCI_GNT1 |
2570 + MCF_GPIO_PAR_PCI_GNT2 |
2571 + MCF_GPIO_PAR_PCI_GNT3_GNT3 |
2572 + MCF_GPIO_PAR_PCI_REQ0 |
2573 + MCF_GPIO_PAR_PCI_REQ1 |
2574 + MCF_GPIO_PAR_PCI_REQ2 |
2575 + MCF_GPIO_PAR_PCI_REQ3_REQ3;
2577 + /* target control reg */
2578 + MCF_PCI_PCITCR = MCF_PCI_PCITCR_P |
2579 + MCF_PCI_PCITCR_WCT(8);
2581 + /* PCI MEM address */
2582 + MCF_PCI_PCIIW0BTAR = 0xA007A000;
2584 + /* PCI MEM address */
2585 + MCF_PCI_PCIIW1BTAR = 0xA803A800;
2587 + /* PCI IO address */
2588 + MCF_PCI_PCIIW2BTAR = 0xAC03AC00;
2590 + /* window control */
2591 + MCF_PCI_PCIIWCR = MCF_PCI_PCIIWCR_WINCTRL0_ENABLE |
2592 + MCF_PCI_PCIIWCR_WINCTRL0_MEMREAD |
2593 + MCF_PCI_PCIIWCR_WINCTRL1_ENABLE |
2594 + MCF_PCI_PCIIWCR_WINCTRL1_MEMREAD |
2595 + MCF_PCI_PCIIWCR_WINCTRL2_ENABLE |
2596 + MCF_PCI_PCIIWCR_WINCTRL2_IO;
2598 + /* initiator control reg */
2599 + MCF_PCI_PCIICR = 0x00ff;
2601 + /* type 0 - command */
2602 + MCF_PCI_PCISCR = MCF_PCI_PCISCR_MW | /* mem write/inval */
2603 + MCF_PCI_PCISCR_B | /* bus master enable */
2604 + MCF_PCI_PCISCR_M; /* mem access enable */
2606 + /* type 0 - config reg */
2607 + MCF_PCI_PCICR1 = MCF_PCI_PCICR1_CACHELINESIZE(8) |
2608 + MCF_PCI_PCICR1_LATTIMER(0xff);
2610 + /* type 0 - config 2 reg */
2611 + MCF_PCI_PCICR2 = 0;
2613 + /* target control reg */
2614 + MCF_PCI_PCITCR2 = MCF_PCI_PCITCR2_B0E |
2615 + MCF_PCI_PCITCR2_B4E;
2617 + /* translate addresses from PCI[0] to CF[SDRAM] */
2618 + MCF_PCI_PCITBATR0 = MCF_RAMBAR1 | MCF_PCI_PCITBATR0_EN;
2619 + MCF_PCI_PCITBATR4 = MCF_RAMBAR1 | MCF_PCI_PCITBATR4_EN;
2621 + /* setup controller interrupt handlers */
2622 + if (request_irq(55+128, mcf5445x_pci_interrupt, IRQF_SHARED,
2623 + "PCI Controller", NULL))
2624 + printk(KERN_ERR "PCI: Unable to register controller irq\n");
2626 +	if (request_irq(56+128, mcf5445x_pci_arb_interrupt, IRQF_SHARED, "PCI Arbiter", NULL))
2627 + printk(KERN_ERR "PCI: Unable to register arbiter irq\n");
2629 + /* global control - clear reset bit */
2630 + MCF_PCI_PCIGSCR = MCF_PCI_PCIGSCR_SEE |
2631 + MCF_PCI_PCIGSCR_PEE;
2633 + /* let everything settle */
2636 + /* allocate bus ioport resource */
2637 + if (request_resource(&ioport_resource, &pci_ioport_resource) < 0)
2638 + printk(KERN_ERR "PCI: Unable to alloc ioport resource\n");
2640 + /* allocate bus iomem resource */
2641 + if (request_resource(&iomem_resource, &pci_iomem_resource) < 0)
2642 + printk(KERN_ERR "PCI: Unable to alloc iomem resource\n");
2644 + /* setup FPGA to route PCI to IRQ3(67), SW7 to IRQ7, SW6 to IRQ4 */
2645 + set_fpga(FPGA_PCI_IRQ_ENABLE, 0x00000000);
2646 + set_fpga(FPGA_PCI_IRQ_ROUTE, 0x00000039);
2647 + set_fpga(FPGA_SEVEN_LED, 0x000000FF);
2649 + raw_pci_ops = &mcf5445x_pci_ops;
2664 +struct regdump type0regs[] = {
2665 + { 0xfc0a8000, "PCIIDR" },
2666 + { 0xfc0a8004, "PCISCR" },
2667 + { 0xfc0a8008, "PCICCRIR" },
2668 + { 0xfc0a800c, "PCICR1" },
2669 + { 0xfc0a8010, "PCIBAR0" },
2670 + { 0xfc0a8014, "PCIBAR1" },
2671 + { 0xfc0a8018, "PCIBAR2" },
2672 + { 0xfc0a801c, "PCIBAR3" },
2673 + { 0xfc0a8020, "PCIBAR4" },
2674 + { 0xfc0a8024, "PCIBAR5" },
2675 + { 0xfc0a8028, "PCICCPR" },
2676 + { 0xfc0a802c, "PCISID" },
2677 + { 0xfc0a8030, "PCIERBAR" },
2678 + { 0xfc0a8034, "PCICPR" },
2679 + { 0xfc0a803c, "PCICR2" },
2683 +struct regdump genregs[] = {
2684 + { 0xfc0a8060, "PCIGSCR" },
2685 + { 0xfc0a8064, "PCITBATR0" },
2686 + { 0xfc0a8068, "PCITBATR1" },
2687 + { 0xfc0a806c, "PCITCR1" },
2688 + { 0xfc0a8070, "PCIIW0BTAR" },
2689 + { 0xfc0a8074, "PCIIW1BTAR" },
2690 + { 0xfc0a8078, "PCIIW2BTAR" },
2691 + { 0xfc0a8080, "PCIIWCR" },
2692 + { 0xfc0a8084, "PCIICR" },
2693 + { 0xfc0a8088, "PCIISR" },
2694 + { 0xfc0a808c, "PCITCR2" },
2695 + { 0xfc0a8090, "PCITBATR0" },
2696 + { 0xfc0a8094, "PCITBATR1" },
2697 + { 0xfc0a8098, "PCITBATR2" },
2698 + { 0xfc0a809c, "PCITBATR3" },
2699 + { 0xfc0a80a0, "PCITBATR4" },
2700 + { 0xfc0a80a4, "PCITBATR5" },
2701 + { 0xfc0a80a8, "PCIINTR" },
2702 + { 0xfc0a80f8, "PCICAR" },
2706 +struct regdump arbregs[] = {
2707 + { 0xfc0ac000, "PACR" },
2708 + { 0xfc0ac004, "PASR" }, /* documentation error */
2713 + * void mcf5445x_pci_dumpregs()
2715 + * Dump out all the PCI registers
2718 +mcf5445x_pci_dumpregs(void)
2720 + struct regdump *reg;
2722 + printk(KERN_INFO "*** MCF5445x PCI TARGET 0 REGISTERS ***\n");
2725 + while (reg->addr) {
2726 + printk(KERN_INFO "0x%08x 0x%08x %s\n", reg->addr,
2727 + *((u32 *)reg->addr), reg->regname);
2731 + printk(KERN_INFO "\n*** MCF5445x PCI GENERAL REGISTERS ***\n");
2733 + while (reg->addr) {
2734 + printk(KERN_INFO "0x%08x 0x%08x %s\n", reg->addr,
2735 + *((u32 *)reg->addr), reg->regname);
2738 + printk(KERN_INFO "\n*** MCF5445x PCI ARBITER REGISTERS ***\n");
2740 + while (reg->addr) {
2741 + printk(KERN_INFO "0x%08x 0x%08x %s\n", reg->addr,
2742 + *((u32 *)reg->addr), reg->regname);
2748 +++ b/arch/m68k/coldfire/muldi3.S
2751 + * Coldfire muldi3 assembly version
2754 +#include <linux/linkage.h>
2760 + moveml %d2-%d7/%a2-%a3,%sp@
2761 + moveal %fp@(8), %a2
2762 + moveal %fp@(12), %a3
2763 + moveal %fp@(16), %a0
2764 + moveal %fp@(20),%a1
2810 + moveml %sp@, %d2-%d7/%a2-%a3
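This file implements gcc's __muldi3 helper (64 x 64 -> 64-bit multiply) in ColdFire assembly. In rough C terms the computation is the usual split into 32-bit halves, where the cross products only contribute to the upper word; the following is a sketch of the math rather than a transcription of the .S file (on the real target the low 32 x 32 -> 64 product is itself built from narrower multiplies):

	#include <linux/types.h>

	static u64 muldi3_sketch(u64 a, u64 b)
	{
		u32 ah = a >> 32, al = (u32)a;
		u32 bh = b >> 32, bl = (u32)b;

		/* cross terms are taken modulo 2^32 and land in the
		 * upper half of the 64-bit result */
		return (u64)al * bl + ((u64)(ah * bl + al * bh) << 32);
	}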
2815 +++ b/arch/m68k/coldfire/pci.c
2818 + * linux/arch/m68k/coldfire/pci.c
2820 + * PCI initialization for Coldfire architectures.
2822 + * Currently Supported:
2825 + * Copyright (c) 2007 Freescale Semiconductor, Inc.
2826 + * Kurt Mahan <kmahan@freescale.com>
2829 +#include <linux/kernel.h>
2830 +#include <linux/init.h>
2831 +#include <linux/pci.h>
2833 +#include <asm/mcfsim.h>
2834 +#include <asm/pci.h>
2836 +/* pci ops for reading/writing config */
2837 +struct pci_raw_ops *raw_pci_ops;
2839 +/* pci debug flag */
2840 +static int debug_pci;
2842 +#ifdef CONFIG_M54455
2843 +extern int init_mcf5445x_pci(void);
2844 +extern void mcf5445x_conf_device(struct pci_dev *dev);
2845 +extern void mcf5445x_pci_dumpregs(void);
2847 +extern struct resource pci_ioport_resource;
2848 +extern struct resource pci_iomem_resource;
2852 +pci_read(struct pci_bus *bus, unsigned int devfn, int where,
2853 + int size, u32 *value)
2855 + return raw_pci_ops->read(0, bus->number, devfn, where, size, value);
2859 +pci_write(struct pci_bus *bus, unsigned int devfn, int where,
2860 + int size, u32 value)
2862 + return raw_pci_ops->write(0, bus->number, devfn, where, size, value);
2865 +struct pci_ops pci_root_ops = {
2867 + .write = pci_write,
2871 + * pcibios_setup(char *)
2873 + * Initialize the pcibios based on cmd line params.
2876 +pcibios_setup(char *str)
2878 + if (!strcmp(str, "debug")) {
2886 + * We need to avoid collisions with `mirrored' VGA ports
2887 + * and other strange ISA hardware, so we always want the
2888 + * addresses to be allocated in the 0x000-0x0ff region
2891 + * Why? Because some silly external IO cards only decode
2892 + * the low 10 bits of the IO address. The 0x00-0xff region
2893 + * is reserved for motherboard devices that decode all 16
2894 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
2895 + * but we want to try to avoid allocating at 0x2900-0x2bff
2896 + * which might be mirrored at 0x0100-0x03ff.
2899 +pcibios_align_resource(void *data, struct resource *res, resource_size_t size,
2900 + resource_size_t align)
2902 + struct pci_dev *dev = data;
2904 + if (res->flags & IORESOURCE_IO) {
2905 + resource_size_t start = res->start;
2908 + printk(KERN_ERR "PCI: I/O Region %s/%d too large"
2909 + " (%ld bytes)\n", pci_name(dev),
2910 +				res - dev->resource, (long int)size);
2912 + if (start & 0x300) {
2913 + start = (start + 0x3ff) & ~0x3ff;
2914 + res->start = start;
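As a concrete example of this rule: an I/O region handed out at 0x2900 aliases 0x100 on a card that decodes only ten address bits, so it is bumped up with (0x2900 + 0x3ff) & ~0x3ff = 0x2c00, whose low bits fall back into the safe 0x000-0x0ff range.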
2920 + * Swizzle the device pin each time we cross a bridge
2921 + * and return the slot number.
2923 +static u8 __devinit
2924 +pcibios_swizzle(struct pci_dev *dev, u8 *pin)
2930 + * Map a slot/pin to an IRQ.
2933 +pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
2939 + * pcibios_update_irq(struct pci_dev *dev, int irq)
2941 + * Update a PCI interrupt.
2944 +pcibios_update_irq(struct pci_dev *dev, int irq)
2946 + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
2950 + * pcibios_enable_device(struct pci_dev *dev, int mask)
2952 + * Enable a device on the PCI bus.
2955 +pcibios_enable_device(struct pci_dev *dev, int mask)
2959 + struct resource *r;
2961 + pci_read_config_word(dev, PCI_COMMAND, &cmd);
2963 + for (idx = 0; idx < 6; idx++) {
2964 + r = &dev->resource[idx];
2965 + if (!r->start && r->end) {
2966 + printk(KERN_ERR "PCI: Device %s not available because "
2967 + "of resource collisions\n", pci_name(dev));
2970 + if (r->flags & IORESOURCE_IO)
2971 + cmd |= PCI_COMMAND_IO;
2972 + if (r->flags & IORESOURCE_MEM)
2973 + cmd |= PCI_COMMAND_MEMORY;
2975 + if (cmd != old_cmd) {
2976 + printk("PCI: Enabling device %s (%04x -> %04x)\n",
2977 + pci_name(dev), old_cmd, cmd);
2978 + pci_write_config_word(dev, PCI_COMMAND, cmd);
2979 +#ifdef CONFIG_M54455
2980 + mcf5445x_conf_device(dev);
2988 + * pcibios_fixup_bus(struct pci_bus *bus)
2991 +pcibios_fixup_bus(struct pci_bus *bus)
2993 + struct pci_dev *dev = bus->self;
2997 +#ifdef CONFIG_M54455
2998 + bus->resource[0] = &pci_ioport_resource;
2999 + bus->resource[1] = &pci_iomem_resource;
3005 + * pcibios_init(void)
3007 + * Allocate/initialize low level pci bus/devices.
3012 + struct pci_bus *bus;
3014 + if (!raw_pci_ops) {
3015 + printk(KERN_WARNING "PCIBIOS: FATAL: NO PCI Hardware found\n");
3019 + /* allocate and scan the (only) bus */
3020 + bus = pci_scan_bus_parented(NULL, 0, &pci_root_ops, NULL);
3022 + /* setup everything */
3024 + /* compute the bridge window sizes */
3025 + pci_bus_size_bridges(bus);
3027 + /* (re)assign device resources */
3028 + pci_bus_assign_resources(bus);
3030 + /* add the bus to the system */
3031 + pci_bus_add_devices(bus);
3034 + pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
3043 + * Initialize the PCI Hardware.
3048 +#if defined(CONFIG_M54455)
3049 + init_mcf5445x_pci();
3052 + printk(KERN_ERR "PCI: FATAL: NO PCI Detected\n");
3057 +/* low level hardware (first) */
3058 +arch_initcall(pci_init);
3060 +/* basic bios init (second) */
3061 +subsys_initcall(pcibios_init);
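Ordering note: arch_initcall() runs at an earlier initcall level than subsys_initcall(), so pci_init() has programmed the controller and installed raw_pci_ops before pcibios_init() attempts the bus scan.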
3063 +++ b/arch/m68k/coldfire/signal.c
3066 + * linux/arch/m68k/kernel/signal.c
3068 + * Copyright (C) 1991, 1992 Linus Torvalds
3070 + * This file is subject to the terms and conditions of the GNU General Public
3071 + * License. See the file COPYING in the main directory of this archive
3072 + * for more details.
3076 + * Derived from m68k/kernel/signal.c and the original authors are credited
3079 + * Coldfire support by:
3080 + * Matt Waddel Matt.Waddel@freescale.com
3081 + * Copyright Freescale Semiconductor, Inc 2007
3084 +#include <linux/sched.h>
3085 +#include <linux/mm.h>
3086 +#include <linux/kernel.h>
3087 +#include <linux/signal.h>
3088 +#include <linux/syscalls.h>
3089 +#include <linux/errno.h>
3090 +#include <linux/wait.h>
3091 +#include <linux/ptrace.h>
3092 +#include <linux/unistd.h>
3093 +#include <linux/stddef.h>
3094 +#include <linux/highuid.h>
3095 +#include <linux/personality.h>
3096 +#include <linux/tty.h>
3097 +#include <linux/binfmts.h>
3099 +#include <asm/setup.h>
3100 +#include <asm/cf_uaccess.h>
3101 +#include <asm/cf_pgtable.h>
3102 +#include <asm/traps.h>
3103 +#include <asm/ucontext.h>
3105 +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
3107 +asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
3109 +const int frame_extra_sizes[16] = {
3111 + [2] = sizeof(((struct frame *)0)->un.fmt2),
3112 + [3] = sizeof(((struct frame *)0)->un.fmt3),
3116 + [7] = sizeof(((struct frame *)0)->un.fmt7),
3118 + [9] = sizeof(((struct frame *)0)->un.fmt9),
3119 + [10] = sizeof(((struct frame *)0)->un.fmta),
3120 + [11] = sizeof(((struct frame *)0)->un.fmtb),
3128 + * Atomically swap in the new signal mask, and wait for a signal.
3130 +asmlinkage int do_sigsuspend(struct pt_regs *regs)
3132 + old_sigset_t mask = regs->d3;
3135 + mask &= _BLOCKABLE;
3136 + spin_lock_irq(¤t->sighand->siglock);
3137 + saveset = current->blocked;
3138 + siginitset(¤t->blocked, mask);
3139 + recalc_sigpending();
3140 + spin_unlock_irq(¤t->sighand->siglock);
3142 + regs->d0 = -EINTR;
3144 + current->state = TASK_INTERRUPTIBLE;
3146 + if (do_signal(&saveset, regs))
3152 +do_rt_sigsuspend(struct pt_regs *regs)
3154 + sigset_t __user *unewset = (sigset_t __user *)regs->d1;
3155 + size_t sigsetsize = (size_t)regs->d2;
3156 + sigset_t saveset, newset;
3158 + /* XXX: Don't preclude handling different sized sigset_t's. */
3159 + if (sigsetsize != sizeof(sigset_t))
3162 + if (copy_from_user(&newset, unewset, sizeof(newset)))
3164 + sigdelsetmask(&newset, ~_BLOCKABLE);
3166 + spin_lock_irq(¤t->sighand->siglock);
3167 + saveset = current->blocked;
3168 + current->blocked = newset;
3169 + recalc_sigpending();
3170 + spin_unlock_irq(¤t->sighand->siglock);
3172 + regs->d0 = -EINTR;
3174 + current->state = TASK_INTERRUPTIBLE;
3176 + if (do_signal(&saveset, regs))
3182 +sys_sigaction(int sig, const struct old_sigaction __user *act,
3183 + struct old_sigaction __user *oact)
3185 + struct k_sigaction new_ka, old_ka;
3189 + old_sigset_t mask;
3190 + if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3191 + __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3192 + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
3194 + __get_user(new_ka.sa.sa_flags, &act->sa_flags);
3195 + __get_user(mask, &act->sa_mask);
3196 + siginitset(&new_ka.sa.sa_mask, mask);
3199 + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3201 + if (!ret && oact) {
3202 + if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3203 + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3204 + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
3206 + __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3207 + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
3214 +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
3216 + return do_sigaltstack(uss, uoss, rdusp());
3221 + * Do a signal return; undo the signal stack.
3223 + * Keep the return code on the stack quadword aligned!
3224 + * That makes the cache flush below easier.
3229 + char __user *pretcode;
3232 + struct sigcontext __user *psc;
3234 + unsigned long extramask[_NSIG_WORDS-1];
3235 + struct sigcontext sc;
3240 + char __user *pretcode;
3242 + struct siginfo __user *pinfo;
3245 + struct siginfo info;
3246 + struct ucontext uc;
3249 +#define FPCONTEXT_SIZE 216
3250 +#define uc_fpstate uc_filler[0]
3251 +#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
3252 +#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
3255 +static unsigned char fpu_version; /* version num of fpu, set by setup_frame */
3257 +static inline int restore_fpu_state(struct sigcontext *sc)
3262 + /* restore registers */
3263 + memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
3264 + memcpy(current->thread.fp, sc->sc_fpregs, 24);
3268 + if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
3269 + /* Verify the frame format. */
3270 + if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
3272 + if (CPU_IS_020_OR_030) {
3273 + if (m68k_fputype & FPU_68881 &&
3274 + !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
3276 + if (m68k_fputype & FPU_68882 &&
3277 + !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
3279 + } else if (CPU_IS_040) {
3280 + if (!(sc->sc_fpstate[1] == 0x00 ||
3281 + sc->sc_fpstate[1] == 0x28 ||
3282 + sc->sc_fpstate[1] == 0x60))
3284 + } else if (CPU_IS_060) {
3285 + if (!(sc->sc_fpstate[3] == 0x00 ||
3286 + sc->sc_fpstate[3] == 0x60 ||
3287 + sc->sc_fpstate[3] == 0xe0))
3299 +static inline int rt_restore_fpu_state(struct ucontext __user *uc)
3301 + unsigned char fpstate[FPCONTEXT_SIZE];
3302 + int context_size = CPU_IS_060 ? 8 : 0;
3303 + fpregset_t fpregs;
3307 + /* restore fpu control register */
3308 + if (__copy_from_user(current->thread.fpcntl,
3309 + uc->uc_mcontext.fpregs.f_fpcntl, 12))
3311 + /* restore all other fpu register */
3312 + if (__copy_from_user(current->thread.fp,
3313 + uc->uc_mcontext.fpregs.f_fpregs, 96))
3318 + if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
3320 + if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
3322 + context_size = fpstate[1];
3323 + /* Verify the frame format. */
3324 + if (!CPU_IS_060 && (fpstate[0] != fpu_version))
3326 + if (CPU_IS_020_OR_030) {
3327 + if (m68k_fputype & FPU_68881 &&
3328 + !(context_size == 0x18 || context_size == 0xb4))
3330 + if (m68k_fputype & FPU_68882 &&
3331 + !(context_size == 0x38 || context_size == 0xd4))
3333 + } else if (CPU_IS_040) {
3334 + if (!(context_size == 0x00 ||
3335 + context_size == 0x28 ||
3336 + context_size == 0x60))
3338 + } else if (CPU_IS_060) {
3339 + if (!(fpstate[3] == 0x00 ||
3340 + fpstate[3] == 0x60 ||
3341 + fpstate[3] == 0xe0))
3345 + if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
3349 + if (context_size &&
3350 + __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
3361 +restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc,
3362 + void __user *fp, int *pd0)
3364 + int fsize, formatvec;
3365 + struct sigcontext context;
3368 + /* get previous context */
3369 + if (copy_from_user(&context, usc, sizeof(context)))
3372 + /* restore passed registers */
3373 + regs->d1 = context.sc_d1;
3374 + regs->a0 = context.sc_a0;
3375 + regs->a1 = context.sc_a1;
3376 + regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
3377 + regs->pc = context.sc_pc;
3378 + regs->orig_d0 = -1; /* disable syscall checks */
3379 + wrusp(context.sc_usp);
3380 + formatvec = context.sc_formatvec;
3381 + regs->format = formatvec >> 12;
3382 + regs->vector = formatvec & 0xfff;
3385 + err = restore_fpu_state(&context);
3388 + fsize = frame_extra_sizes[regs->format];
3391 + * user process trying to return with weird frame format
3394 + printk(KERN_DEBUG "user process returning with weird \
3400 + /* OK. Make room on the supervisor stack for the extra junk,
3405 + struct switch_stack *sw = (struct switch_stack *)regs - 1;
3406 + regs->d0 = context.sc_d0;
3407 +#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
3408 + __asm__ __volatile__
3409 + (" movel %0,%/sp\n\t"
3410 + " bra ret_from_signal\n"
3412 + ".section __ex_table,\"a\"\n"
3416 + : /* no outputs, it doesn't ever return */
3417 + : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
3418 + "n" (frame_offset), "a" (fp)
3420 +#undef frame_offset
3422 + * If we ever get here an exception occurred while
3423 + * building the above stack-frame.
3428 + *pd0 = context.sc_d0;
3436 +rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
3437 + struct ucontext __user *uc, int *pd0)
3440 + greg_t __user *gregs = uc->uc_mcontext.gregs;
3441 + unsigned long usp;
3444 + err = __get_user(temp, &uc->uc_mcontext.version);
3445 + if (temp != MCONTEXT_VERSION)
3447 + /* restore passed registers */
3448 + err |= __get_user(regs->d0, &gregs[0]);
3449 + err |= __get_user(regs->d1, &gregs[1]);
3450 + err |= __get_user(regs->d2, &gregs[2]);
3451 + err |= __get_user(regs->d3, &gregs[3]);
3452 + err |= __get_user(regs->d4, &gregs[4]);
3453 + err |= __get_user(regs->d5, &gregs[5]);
3454 + err |= __get_user(sw->d6, &gregs[6]);
3455 + err |= __get_user(sw->d7, &gregs[7]);
3456 + err |= __get_user(regs->a0, &gregs[8]);
3457 + err |= __get_user(regs->a1, &gregs[9]);
3458 + err |= __get_user(regs->a2, &gregs[10]);
3459 + err |= __get_user(sw->a3, &gregs[11]);
3460 + err |= __get_user(sw->a4, &gregs[12]);
3461 + err |= __get_user(sw->a5, &gregs[13]);
3462 + err |= __get_user(sw->a6, &gregs[14]);
3463 + err |= __get_user(usp, &gregs[15]);
3465 + err |= __get_user(regs->pc, &gregs[16]);
3466 + err |= __get_user(temp, &gregs[17]);
3467 + regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
3468 + regs->orig_d0 = -1; /* disable syscall checks */
3469 + err |= __get_user(temp, &uc->uc_formatvec);
3470 + regs->format = temp >> 12;
3471 + regs->vector = temp & 0xfff;
3474 + err |= rt_restore_fpu_state(uc);
3477 + if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
3480 + fsize = frame_extra_sizes[regs->format];
3483 + * user process trying to return with weird frame format
3486 + printk(KERN_DEBUG "user process returning with weird \
3492 + /* OK. Make room on the supervisor stack for the extra junk,
3497 +#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
3498 + __asm__ __volatile__
3499 + (" movel %0,%/sp\n\t"
3500 + " bra ret_from_signal\n"
3502 + ".section __ex_table,\"a\"\n"
3506 + : /* no outputs, it doesn't ever return */
3507 + : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
3508 + "n" (frame_offset), "a" (&uc->uc_extra)
3510 +#undef frame_offset
3512 + * If we ever get here an exception occurred while
3513 + * building the above stack-frame.
3525 +asmlinkage int do_sigreturn(unsigned long __unused)
3527 + struct switch_stack *sw = (struct switch_stack *) &__unused;
3528 + struct pt_regs *regs = (struct pt_regs *) (sw + 1);
3529 + unsigned long usp = rdusp();
3530 + struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
3534 + if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
3536 + if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
3537 + (_NSIG_WORDS > 1 &&
3538 + __copy_from_user(&set.sig[1], &frame->extramask,
3539 + sizeof(frame->extramask))))
3542 + sigdelsetmask(&set, ~_BLOCKABLE);
3543 + spin_lock_irq(¤t->sighand->siglock);
3544 + current->blocked = set;
3545 + recalc_sigpending();
3546 + spin_unlock_irq(¤t->sighand->siglock);
3548 + if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
3553 + force_sig(SIGSEGV, current);
3557 +asmlinkage int do_rt_sigreturn(unsigned long __unused)
3559 + struct switch_stack *sw = (struct switch_stack *) &__unused;
3560 + struct pt_regs *regs = (struct pt_regs *) (sw + 1);
3561 + unsigned long usp = rdusp();
3562 + struct rt_sigframe __user *frame =
3563 + (struct rt_sigframe __user *)(usp - 4);
3567 + if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
3569 + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
3572 + sigdelsetmask(&set, ~_BLOCKABLE);
3573 + spin_lock_irq(¤t->sighand->siglock);
3574 + current->blocked = set;
3575 + recalc_sigpending();
3576 + spin_unlock_irq(¤t->sighand->siglock);
3578 + if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
3583 + force_sig(SIGSEGV, current);
3589 + * Set up a signal frame.
3592 +static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
3595 + /* save registers */
3596 + memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
3597 + memcpy(sc->sc_fpregs, current->thread.fp, 24);
3602 +static inline int rt_save_fpu_state(struct ucontext __user *uc,
3603 + struct pt_regs *regs)
3608 + /* save fpu control register */
3609 + err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
3610 + current->thread.fpcntl, 12);
3611 + /* save all other fpu register */
3612 + err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
3613 + current->thread.fp, 96);
3621 +static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
3622 + unsigned long mask)
3624 + sc->sc_mask = mask;
3625 + sc->sc_usp = rdusp();
3626 + sc->sc_d0 = regs->d0;
3627 + sc->sc_d1 = regs->d1;
3628 + sc->sc_a0 = regs->a0;
3629 + sc->sc_a1 = regs->a1;
3630 + sc->sc_sr = regs->sr;
3631 + sc->sc_pc = regs->pc;
3632 + sc->sc_formatvec = regs->format << 12 | regs->vector;
3634 + save_fpu_state(sc, regs);
3638 +static inline int rt_setup_ucontext(struct ucontext __user *uc,
3639 + struct pt_regs *regs)
3641 + struct switch_stack *sw = (struct switch_stack *)regs - 1;
3642 + greg_t __user *gregs = uc->uc_mcontext.gregs;
3645 + err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
3646 + err |= __put_user(regs->d0, &gregs[0]);
3647 + err |= __put_user(regs->d1, &gregs[1]);
3648 + err |= __put_user(regs->d2, &gregs[2]);
3649 + err |= __put_user(regs->d3, &gregs[3]);
3650 + err |= __put_user(regs->d4, &gregs[4]);
3651 + err |= __put_user(regs->d5, &gregs[5]);
3652 + err |= __put_user(sw->d6, &gregs[6]);
3653 + err |= __put_user(sw->d7, &gregs[7]);
3654 + err |= __put_user(regs->a0, &gregs[8]);
3655 + err |= __put_user(regs->a1, &gregs[9]);
3656 + err |= __put_user(regs->a2, &gregs[10]);
3657 + err |= __put_user(sw->a3, &gregs[11]);
3658 + err |= __put_user(sw->a4, &gregs[12]);
3659 + err |= __put_user(sw->a5, &gregs[13]);
3660 + err |= __put_user(sw->a6, &gregs[14]);
3661 + err |= __put_user(rdusp(), &gregs[15]);
3662 + err |= __put_user(regs->pc, &gregs[16]);
3663 + err |= __put_user(regs->sr, &gregs[17]);
3664 + err |= __put_user((regs->format << 12) | regs->vector,
3665 + &uc->uc_formatvec);
3667 + err |= rt_save_fpu_state(uc, regs);
3672 +extern void IcacheInvalidateCacheBlock(void *, unsigned long);
3673 +static inline void push_cache(unsigned long vaddr)
3675 + IcacheInvalidateCacheBlock((void *)vaddr, 8);
3678 +static inline void __user *
3679 +get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
3681 + unsigned long usp;
3683 + /* Default to using normal stack. */
3686 + /* This is the X/Open sanctioned signal stack switching. */
3687 + if (ka->sa.sa_flags & SA_ONSTACK) {
3688 + if (!sas_ss_flags(usp))
3689 + usp = current->sas_ss_sp + current->sas_ss_size;
3691 + return (void __user *)((usp - frame_size) & -8UL);
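The & -8UL above masks off the low three address bits, rounding the frame address down to an 8-byte boundary so that the return code stays quadword aligned, as the comment at the top of this section requires for the cache flush in push_cache().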
3694 +static void setup_frame(int sig, struct k_sigaction *ka,
3695 + sigset_t *set, struct pt_regs *regs)
3697 + struct sigframe __user *frame;
3698 + int fsize = frame_extra_sizes[regs->format];
3699 + struct sigcontext context;
3704 + printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
3707 + goto give_sigsegv;
3710 + frame = get_sigframe(ka, regs, sizeof(*frame));
3712 + err |= __put_user((current_thread_info()->exec_domain
3713 + && current_thread_info()->exec_domain->signal_invmap
3715 + ? current_thread_info()->exec_domain->signal_invmap[sig]
3719 + err |= __put_user(regs->vector, &frame->code);
3720 + err |= __put_user(&frame->sc, &frame->psc);
3722 + if (_NSIG_WORDS > 1)
3723 + err |= copy_to_user(frame->extramask, &set->sig[1],
3724 + sizeof(frame->extramask));
3726 + setup_sigcontext(&context, regs, set->sig[0]);
3727 + err |= copy_to_user(&frame->sc, &context, sizeof(context));
3729 + /* Set up to return from userspace. */
3730 + err |= __put_user(frame->retcode, &frame->pretcode);
3731 + /* moveq #,d0; trap #0 */
3732 + err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
3733 + (long __user *)(frame->retcode));
3736 + goto give_sigsegv;
3738 + push_cache((unsigned long) &frame->retcode);
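The constant stored in retcode above assembles to exactly the two-instruction trampoline named in the comment: the high word 0x7000 is moveq #0,%d0 with its immediate byte filled in by (__NR_sigreturn << 16), and the low word 0x4e40 is trap #0, so returning from the handler enters the sigreturn system call; push_cache() then makes the freshly written instructions visible to instruction fetch.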
3740 + /* Set up registers for signal handler */
3741 + wrusp((unsigned long) frame);
3742 + regs->pc = (unsigned long) ka->sa.sa_handler;
3745 + /* Prepare to skip over the extra stuff in the exception frame. */
3746 + if (regs->stkadj) {
3747 + struct pt_regs *tregs =
3748 + (struct pt_regs *)((ulong)regs + regs->stkadj);
3750 + printk(KERN_DEBUG "Performing stackadjust=%04x\n",
3753 + /* This must be copied with decreasing addresses to
3754 + handle overlaps. */
3755 + tregs->vector = 0;
3756 + tregs->format = 0;
3757 + tregs->pc = regs->pc;
3758 + tregs->sr = regs->sr;
3763 + force_sigsegv(sig, current);
3764 + goto adjust_stack;
3767 +static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
3768 + sigset_t *set, struct pt_regs *regs)
3770 + struct rt_sigframe __user *frame;
3771 + int fsize = frame_extra_sizes[regs->format];
3776 + printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
3779 + goto give_sigsegv;
3782 + frame = get_sigframe(ka, regs, sizeof(*frame));
3785 + err |= copy_to_user(&frame->uc.uc_extra, regs + 1, fsize);
3786 + regs->stkadj = fsize;
3789 + err |= __put_user((current_thread_info()->exec_domain
3790 + && current_thread_info()->exec_domain->signal_invmap
3792 + ? current_thread_info()->exec_domain->signal_invmap[sig]
3795 + err |= __put_user(&frame->info, &frame->pinfo);
3796 + err |= __put_user(&frame->uc, &frame->puc);
3797 + err |= copy_siginfo_to_user(&frame->info, info);
3799 + /* Create the ucontext. */
3800 + err |= __put_user(0, &frame->uc.uc_flags);
3801 + err |= __put_user(NULL, &frame->uc.uc_link);
3802 + err |= __put_user((void __user *)current->sas_ss_sp,
3803 + &frame->uc.uc_stack.ss_sp);
3804 + err |= __put_user(sas_ss_flags(rdusp()),
3805 + &frame->uc.uc_stack.ss_flags);
3806 + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
3807 + err |= rt_setup_ucontext(&frame->uc, regs);
3808 + err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
3810 + /* Set up to return from userspace. */
3811 + err |= __put_user(frame->retcode, &frame->pretcode);
3813 +	/* moveq #,d0; andi.l #,d0; trap #0 */
3814 + err |= __put_user(0x70AD0280, (long *)(frame->retcode + 0));
3815 + err |= __put_user(0x000000ff, (long *)(frame->retcode + 4));
3816 + err |= __put_user(0x4e400000, (long *)(frame->retcode + 8));
3819 + goto give_sigsegv;
3821 + push_cache((unsigned long) &frame->retcode);
3823 + /* Set up registers for signal handler */
3824 + wrusp((unsigned long) frame);
3825 + regs->pc = (unsigned long) ka->sa.sa_handler;
3828 + /* Prepare to skip over the extra stuff in the exception frame. */
3829 + if (regs->stkadj) {
3830 + struct pt_regs *tregs =
3831 + (struct pt_regs *)((ulong)regs + regs->stkadj);
3833 + printk(KERN_DEBUG "Performing stackadjust=%04x\n",
3836 + /* This must be copied with decreasing addresses to
3837 + handle overlaps. */
3838 + tregs->vector = 0;
3839 + tregs->format = 0;
3840 + tregs->pc = regs->pc;
3841 + tregs->sr = regs->sr;
3846 + force_sigsegv(sig, current);
3847 + goto adjust_stack;
3851 +handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
3853 + switch (regs->d0) {
3854 + case -ERESTARTNOHAND:
3857 + regs->d0 = -EINTR;
3860 + case -ERESTARTSYS:
3861 + if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
3862 + regs->d0 = -EINTR;
3866 + case -ERESTARTNOINTR:
3868 + regs->d0 = regs->orig_d0;
3875 + * OK, we're invoking a handler
3878 +handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
3879 + sigset_t *oldset, struct pt_regs *regs)
3881 + /* are we from a system call? */
3882 + if (regs->orig_d0 >= 0)
3883 + /* If so, check system call restarting.. */
3884 + handle_restart(regs, ka, 1);
3886 + /* set up the stack frame */
3887 + if (ka->sa.sa_flags & SA_SIGINFO)
3888 + setup_rt_frame(sig, ka, info, oldset, regs);
3890 + setup_frame(sig, ka, oldset, regs);
3892 + if (ka->sa.sa_flags & SA_ONESHOT)
3893 + ka->sa.sa_handler = SIG_DFL;
3895 + spin_lock_irq(¤t->sighand->siglock);
3896 + sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask);
3897 + if (!(ka->sa.sa_flags & SA_NODEFER))
3898 + sigaddset(¤t->blocked, sig);
3899 + recalc_sigpending();
3900 + spin_unlock_irq(¤t->sighand->siglock);
3904 + * Note that 'init' is a special process: it doesn't get signals it doesn't
3905 + * want to handle. Thus you cannot kill init even with a SIGKILL even by
3908 +asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
3911 + struct k_sigaction ka;
3914 + current->thread.esp0 = (unsigned long) regs;
3917 + oldset = ¤t->blocked;
3919 + signr = get_signal_to_deliver(&info, &ka, regs, NULL);
3921 + /* Whee! Actually deliver the signal. */
3922 + handle_signal(signr, &ka, &info, oldset, regs);
3926 + /* Did we come from a system call? */
3927 + if (regs->orig_d0 >= 0)
3928 + /* Restart the system call - no handlers present */
3929 + handle_restart(regs, NULL, 0);
3934 +++ b/arch/m68k/coldfire/traps.c
3937 + * linux/arch/m68knommu/kernel/traps.c
3939 + * Copyright (C) 1993, 1994 by Hamish Macdonald
3941 + * 68040 fixes by Michael Rausch
3942 + * 68040 fixes by Martin Apel
3943 + * 68060 fixes by Roman Hodek
3944 + * 68060 fixes by Jesper Skov
3946 + * This file is subject to the terms and conditions of the GNU General Public
3947 + * License. See the file COPYING in the main directory of this archive
3948 + * for more details.
3952 + * Sets up all exception vectors
3954 +#include <linux/sched.h>
3955 +#include <linux/signal.h>
3956 +#include <linux/kernel.h>
3957 +#include <linux/mm.h>
3958 +#include <linux/module.h>
3959 +#include <linux/types.h>
3960 +#include <linux/a.out.h>
3961 +#include <linux/user.h>
3962 +#include <linux/string.h>
3963 +#include <linux/linkage.h>
3964 +#include <linux/init.h>
3965 +#include <linux/ptrace.h>
3966 +#include <linux/kallsyms.h>
3968 +#include <asm/setup.h>
3969 +#include <asm/fpu.h>
3970 +#include <asm/system.h>
3971 +#include <asm/uaccess.h>
3972 +#include <asm/traps.h>
3973 +#include <asm/pgtable.h>
3974 +#include <asm/machdep.h>
3975 +#include <asm/siginfo.h>
3977 +static char const * const vec_names[] = {
3978 + "RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
3979 + "ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
3980 + "PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
3981 + "UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
3982 + "FORMAT ERROR", "UNINITIALIZED INTERRUPT",
3983 + "UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
3984 + "UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
3985 + "UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
3986 + "UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
3987 + "SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
3988 + "LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
3989 + "SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
3990 + "TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
3991 + "TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
3992 + "TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
3993 + "FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
3994 + "FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
3995 + "FPCP UNSUPPORTED OPERATION",
3996 + "MMU CONFIGURATION ERROR"
3999 +asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
4000 + unsigned long error_code);
4001 +asmlinkage void trap_c(struct frame *fp);
4002 +extern void __init coldfire_trap_init(void);
4004 +void __init trap_init(void)
4006 + coldfire_trap_init();
4009 +/* The following table converts the FS encoding of a ColdFire
4010 + exception stack frame into the error_code value needed by
4013 +static const unsigned char fs_err_code[] = {
4033 +static const char *fs_err_msg[16] = {
4036 + "Interrupt during debug service routine",
4039 + "TLB X miss (opword)",
4040 + "TLB X miss (ext. word)",
4041 + "IFP in emulator mode",
4047 + "R/RMW Protection",
4049 + "OEP in emulator mode",
4053 +static inline void access_errorCF(struct frame *fp)
4055 + unsigned long int mmusr, complainingAddress;
4056 + unsigned int err_code, fs;
4057 + int need_page_fault;
4059 + mmusr = fp->ptregs.mmusr;
4060 + complainingAddress = fp->ptregs.mmuar;
4062 +	printk(KERN_DEBUG "pc %#lx, mmusr %#lx, complainingAddress %#lx\n",
4063 + fp->ptregs.pc, mmusr, complainingAddress);
4068 + * bit 0 == 0 means no page found, 1 means protection fault
4069 + * bit 1 == 0 means read, 1 means write
4072 + fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
4074 + case 5: /* 0101 TLB opword X miss */
4075 + need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
4076 + complainingAddress = fp->ptregs.pc;
4078 + case 6: /* 0110 TLB extension word X miss */
4079 + need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
4080 + complainingAddress = fp->ptregs.pc + sizeof(long);
4082 + case 10: /* 1010 TLB W miss */
4083 + need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
4085 + case 14: /* 1110 TLB R miss */
4086 + need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
4090 + /* 0001 Reserved */
4091 + /* 0010 Interrupt during debug service routine */
4092 + /* 0011 Reserved */
4093 + /* 0100 X Protection */
4094 + /* 0111 IFP in emulator mode */
4095 + /* 1000 W Protection*/
4096 + /* 1001 Write error*/
4097 + /* 1011 Reserved*/
4098 + /* 1100 R Protection*/
4099 + /* 1101 R Protection*/
4100 + /* 1111 OEP in emulator mode*/
4101 + need_page_fault = 1;
4105 + if (need_page_fault) {
4106 + err_code = fs_err_code[fs];
4107 + if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
4108 + err_code |= 2; /* bit1 - write, bit0 - protection */
4109 + do_page_fault(&fp->ptregs, complainingAddress, err_code);
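The fs1/fs2 fields combined above are the two halves of the ColdFire fault-status (FS) code, which the hardware splits across the exception frame's format word; the recombined 4-bit value selects both the fs_err_code[] entry passed to do_page_fault() and the fs_err_msg[] string printed by buserr_c().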
4113 +void die_if_kernel(char *str, struct pt_regs *fp, int nr)
4115 + if (!(fp->sr & PS_S))
4118 + console_verbose();
4119 + printk(KERN_EMERG "%s: %08x\n", str, nr);
4120 + printk(KERN_EMERG "PC: [<%08lx>]", fp->pc);
4121 + print_symbol(" %s", fp->pc);
4122 + printk(KERN_EMERG "\nSR: %04x SP: %p a2: %08lx\n",
4123 + fp->sr, fp, fp->a2);
4124 + printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
4125 + fp->d0, fp->d1, fp->d2, fp->d3);
4126 + printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
4127 + fp->d4, fp->d5, fp->a0, fp->a1);
4129 + printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
4130 + current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
4131 + show_stack(NULL, (unsigned long *)fp);
4135 +asmlinkage void buserr_c(struct frame *fp)
4139 + /* Only set esp0 if coming from user mode */
4140 + if (user_mode(&fp->ptregs))
4141 + current->thread.esp0 = (unsigned long) fp;
4143 + fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
4145 + printk(KERN_DEBUG "*** Bus Error *** (%x)%s\n", fs,
4146 + fs_err_msg[fs & 0xf]);
4157 + access_errorCF(fp);
4160 + die_if_kernel("bad frame format", &fp->ptregs, 0);
4162 + printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
4164 + force_sig(SIGSEGV, current);
4169 +int kstack_depth_to_print = 48;
4171 +void show_stack(struct task_struct *task, unsigned long *stack)
4173 + unsigned long *endstack, addr, symaddr;
4174 + extern char _start, _etext;
4179 + stack = (unsigned long *)task->thread.ksp;
4181 + stack = (unsigned long *)&stack;
4184 + addr = (unsigned long) stack;
4185 + endstack = (unsigned long *) PAGE_ALIGN(addr);
4187 + printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
4188 + for (i = 0; i < kstack_depth_to_print; i++) {
4189 + if (stack + 1 > endstack)
4192 + printk("\n" KERN_EMERG " ");
4194 + printk(KERN_EMERG " %08lx", *stack++);
4195 + if ((symaddr >= 0xc0000000) && (symaddr < 0xc1000000))
4196 + print_symbol("(%s)", symaddr);
4200 + printk(KERN_EMERG "Call Trace:");
4202 + while (stack + 1 <= endstack) {
4205 + * If the address is either in the text segment of the
4206 + * kernel, or in the region which contains vmalloc'ed
4207 + * memory, it *may* be the address of a calling
4208 + * routine; if so, print it so that someone tracing
4209 + * down the cause of the crash will be able to figure
4210 + * out the call path that was taken.
4212 + if (((addr >= (unsigned long) &_start) &&
4213 + (addr <= (unsigned long) &_etext))) {
4215 + printk("\n" KERN_EMERG " ");
4216 + printk(KERN_EMERG " [<%08lx>]", addr);
4223 +void bad_super_trap(struct frame *fp)
4225 + console_verbose();
4226 + if (fp->ptregs.vector < 4*sizeof(vec_names)/sizeof(vec_names[0]))
4227 + printk(KERN_WARNING "*** %s *** FORMAT=%X\n",
4228 + vec_names[(fp->ptregs.vector) >> 2],
4229 + fp->ptregs.format);
4231 + printk(KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
4232 + (fp->ptregs.vector) >> 2,
4233 + fp->ptregs.format);
4234 + printk(KERN_WARNING "Current process id is %d\n", current->pid);
4235 + die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
4238 +asmlinkage void trap_c(struct frame *fp)
4243 + if (fp->ptregs.sr & PS_S) {
4244 + if ((fp->ptregs.vector >> 2) == VEC_TRACE) {
4245 + /* traced a trapping instruction */
4246 + current->ptrace |= PT_DTRACE;
4248 + bad_super_trap(fp);
4252 + /* send the appropriate signal to the user program */
4253 + switch ((fp->ptregs.vector) >> 2) {
4255 + info.si_code = BUS_ADRALN;
4261 + info.si_code = ILL_ILLOPC;
4265 + info.si_code = ILL_PRVOPC;
4269 + info.si_code = ILL_COPROC;
4272 + case VEC_TRAP1: /* gdbserver breakpoint */
4273 + fp->ptregs.pc -= 2;
4274 + info.si_code = TRAP_TRACE;
4290 + info.si_code = ILL_ILLTRP;
4296 + info.si_code = FPE_FLTINV;
4300 + info.si_code = FPE_FLTRES;
4304 + info.si_code = FPE_FLTDIV;
4308 + info.si_code = FPE_FLTUND;
4312 + info.si_code = FPE_FLTOVF;
4316 + info.si_code = FPE_INTDIV;
4321 + info.si_code = FPE_INTOVF;
4324 + case VEC_TRACE: /* ptrace single step */
4325 + info.si_code = TRAP_TRACE;
4328 + case VEC_TRAP15: /* breakpoint */
4329 + info.si_code = TRAP_BRKPT;
4333 + info.si_code = ILL_ILLOPC;
4337 + info.si_signo = sig;
4338 + info.si_errno = 0;
4339 + switch (fp->ptregs.format) {
4341 + info.si_addr = (void *) fp->ptregs.pc;
4344 + info.si_addr = (void *) fp->un.fmt2.iaddr;
4347 + info.si_addr = (void *) fp->un.fmt7.effaddr;
4350 + info.si_addr = (void *) fp->un.fmt9.iaddr;
4353 + info.si_addr = (void *) fp->un.fmta.daddr;
4356 + info.si_addr = (void *) fp->un.fmtb.daddr;
4359 + force_sig_info(sig, &info, current);
4362 +asmlinkage void set_esp0(unsigned long ssp)
4364 + current->thread.esp0 = ssp;
4368 + * The architecture-independent backtrace generator
4370 +void dump_stack(void)
4372 + unsigned long stack;
4374 + show_stack(current, &stack);
4376 +EXPORT_SYMBOL(dump_stack);
4378 +#ifdef CONFIG_M68KFPU_EMU
4379 +asmlinkage void fpemu_signal(int signal, int code, void *addr)
4383 + info.si_signo = signal;
4384 + info.si_errno = 0;
4385 + info.si_code = code;
4386 + info.si_addr = addr;
4387 + force_sig_info(signal, &info, current);
4391 +++ b/arch/m68k/coldfire/vmlinux-cf.lds
4393 +/* ld script to make m68k Coldfire Linux kernel */
4395 +#include <asm-generic/vmlinux.lds.h>
4397 +OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
4400 +jiffies = jiffies_64 + 4;
4404 + _text = .; /* Text and read-only data */
4414 + _etext = .; /* End of text section */
4417 + __start___ex_table = .;
4418 + __ex_table : { *(__ex_table) }
4419 + __stop___ex_table = .;
4423 + .data : { /* Data */
4428 + .bss : { *(.bss) } /* BSS */
4431 + .data.cacheline_aligned : { *(.data.cacheline_aligned) } :data
4433 + _edata = .; /* End of data section */
4435 + . = ALIGN(8192); /* Initrd */
4442 + .init.data : { *(.init.data) }
4444 + __setup_start = .;
4445 + .init.setup : { *(.init.setup) }
4447 + __initcall_start = .;
4448 + .initcall.init : {
4451 + __initcall_end = .;
4452 + __con_initcall_start = .;
4453 + .con_initcall.init : { *(.con_initcall.init) }
4454 + __con_initcall_end = .;
4456 +#ifdef CONFIG_BLK_DEV_INITRD
4458 + __initramfs_start = .;
4459 + .init.ramfs : { *(.init.ramfs) }
4460 + __initramfs_end = .;
4465 + .data.init_task : { *(.data.init_task) } /* The initial task and kernel stack */
4469 + /* Sections to be discarded */
4476 + /* Stabs debugging sections. */
4477 + .stab 0 : { *(.stab) }
4478 + .stabstr 0 : { *(.stabstr) }
4479 + .stab.excl 0 : { *(.stab.excl) }
4480 + .stab.exclstr 0 : { *(.stab.exclstr) }
4481 + .stab.index 0 : { *(.stab.index) }
4482 + .stab.indexstr 0 : { *(.stab.indexstr) }
4483 + .comment 0 : { *(.comment) }