++struct psbl_rec {
++ unsigned int psbl_size;
++ unsigned int env_base;
++ unsigned int env_size;
++ unsigned int ffs_base;
++ unsigned int ffs_size;
++};
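++/* Boot parameter block left by the boot loader at 0x94000300; env_base
++ * and env_size locate its environment area, while ffs_base/ffs_size
++ * presumably describe the flash file system image. */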
++
++static const char psp_env_version[] = "TIENV0.8";
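++/* PSPBoot marks the start of its environment area with this signature;
++ * if it is missing we assume the Adam2 layout instead. */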
++
++int __init prom_init(int argc, char **argv, char **envp)
++{
++ int i;
++
++ t_env_var *env = (t_env_var *) envp;
++ struct psbl_rec *psbl = (struct psbl_rec *)(KSEG1ADDR(0x94000300));
++ void *psp_env = (void *)KSEG1ADDR(psbl->env_base);
++
++ prom_argc = argc;
++ _prom_argv = (int *)argv;
++ _prom_envp = (int *)envp;
++
++ if(strcmp(psp_env, psp_env_version) == 0) {
++ /* PSPBOOT */
++
++ env_type = 1;
++ _prom_envp = psp_env;
++ max_env_entry = (psbl->env_size / 16) - 1;
++ } else {
++ /* Copy what we need locally so we are not dependent on
++ * bootloader RAM. In Adam2, the environment parameters
++ * are in flash but the table that references them is in
++ * RAM
++ */
++
++ for(i=0; i < MAX_ENV_ENTRY; i++, env++) {
++ if (env->name) {
++ local_envp[i].name = env->name;
++ local_envp[i].val = env->val;
++ } else {
++ local_envp[i].name = NULL;
++ local_envp[i].val = NULL;
++ }
++ }
++ }
++
++ set_io_port_base(0);
++
++ prom_printf("\nLINUX started...\n");
++ prom_init_cmdline();
++ prom_meminit();
++
++ return 0;
++}
+diff -urN linux.old/arch/mips/ar7/irq.c linux.dev/arch/mips/ar7/irq.c
+--- linux.old/arch/mips/ar7/irq.c 1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/ar7/irq.c 2005-08-12 23:42:18.679820112 +0200
+@@ -0,0 +1,709 @@
++/*
++ * Nitin Dhingra, iamnd@ti.com
++ * Copyright (C) 2002 Texas Instruments, Inc. All rights reserved.
++ *
++ * ########################################################################
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ * ########################################################################
++ *
++ * Routines for generic manipulation of the interrupts found on the Texas
++ * Instruments avalanche board
++ *
++ */
++
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/proc_fs.h>
++#include <asm/irq.h>
++#include <asm/mips-boards/prom.h>
++#include <asm/ar7/ar7.h>
++#include <asm/ar7/avalanche_intc.h>
++#include <asm/gdb-stub.h>
++
++
++#define shutdown_avalanche_irq disable_avalanche_irq
++#define mask_and_ack_avalanche_irq disable_avalanche_irq
++
++static unsigned int startup_avalanche_irq(unsigned int irq);
++static void end_avalanche_irq(unsigned int irq);
++void enable_avalanche_irq(unsigned int irq_nr);
++void disable_avalanche_irq(unsigned int irq_nr);
++
++static struct hw_interrupt_type avalanche_irq_type = {
++ "TI AVALANCHE",
++ startup_avalanche_irq,
++ shutdown_avalanche_irq,
++ enable_avalanche_irq,
++ disable_avalanche_irq,
++ mask_and_ack_avalanche_irq,
++ end_avalanche_irq,
++ NULL
++};
++
++irq_desc_t irq_desc_ti[AVALANCHE_INT_END+1] __cacheline_aligned =
++{ [0 ... AVALANCHE_INT_END] = { 0, &avalanche_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
++
++
++unsigned long spurious_count = 0;
++
++struct avalanche_ictrl_regs *avalanche_hw0_icregs; /* Interrupt control regs (primary) */
++struct avalanche_exctrl_regs *avalanche_hw0_ecregs; /* Exception control regs (secondary) */
++struct avalanche_ipace_regs *avalanche_hw0_ipaceregs;
++struct avalanche_channel_int_number *avalanche_hw0_chregs; /* Channel control registers */
++
++extern asmlinkage void mipsIRQ(void);
++
++
++/*
++ * The avalanche/MIPS interrupt line numbers are used to represent the
++ * interrupts within the irqaction arrays. The index notation
++ * is as follows:
++ *
++ * 0-7 MIPS CPU Exceptions (HW/SW)
++ * 8-47 Primary Interrupts (Avalanche)
++ * 48-79 Secondary Interrupts (Avalanche)
++ *
++ */
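++/* AVINTNUM()/LNXINTNUM() (presumably defined in avalanche_intc.h) convert
++ * between these flat Linux IRQ numbers and the avalanche interrupt
++ * numbers by subtracting/adding MIPS_EXCEPTION_OFFSET. */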
++
++
++static struct irqaction *hw0_irq_action_primary[AVINTNUM(AVALANCHE_INT_END_PRIMARY)] =
++{
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL
++};
++
++static struct irqaction *hw0_irq_action_secondary[AVINTNUM(AVALANCHE_INT_END_SECONDARY)] =
++{
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL
++};
++
++/*
++ * This remaps interrupts onto channels other than their default
++ * channels. Essentially, the line number can be used as the index
++ * into this array.
++ */
++
++
++static unsigned long line_to_channel[AVINTNUM(AVALANCHE_INT_END_PRIMARY)];
++unsigned long uni_secondary_interrupt = 0;
++
++static struct irqaction r4ktimer_action = {
++ NULL, 0, 0, "R4000 timer/counter", NULL, NULL,
++};
++
++static struct irqaction *irq_action[8] = {
++ NULL, /* SW int 0 */
++ NULL, /* SW int 1 */
++ NULL, /* HW int 0 */
++ NULL,
++ NULL,
++ NULL, /* HW int 3 */
++ NULL, /* HW int 4 */
++ &r4ktimer_action /* HW int 5 */
++};
++
++static void end_avalanche_irq(unsigned int irq)
++{
++ if (!(irq_desc_ti[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
++ enable_avalanche_irq(irq);
++}
++
++void disable_avalanche_irq(unsigned int irq_nr)
++{
++ unsigned long flags;
++ unsigned long chan_nr=0;
++ unsigned long int_bit=0;
++
++ if(irq_nr >= AVALANCHE_INT_END)
++ {
++ printk("whee, invalid irq_nr %d\n", irq_nr);
++ panic("IRQ, you lose...");
++ }
++
++ save_and_cli(flags);
++
++
++ if(irq_nr < MIPS_EXCEPTION_OFFSET)
++ {
++ /* disable mips exception */
++
++ int_bit = read_c0_status() & ~(1 << (8+irq_nr));
++ change_c0_status(ST0_IM,int_bit);
++ restore_flags(flags);
++ return;
++ }
++
++ /* irq_nr represents the line number for the interrupt. We must
++ * disable the channel number associated with that line number.
++ */
++
++ if(irq_nr > AVALANCHE_INT_END_PRIMARY_REG2)
++ chan_nr = AVINTNUM(irq_nr); /*CHECK THIS ALSO*/
++ else
++ chan_nr = line_to_channel[AVINTNUM(irq_nr)];/* WE NEED A LINE TO CHANNEL MAPPING FUNCTION HERE*/
++
++ /* disable the interrupt channel bit */
++
++ /* primary interrupt #'s 0-31 */
++
++ if(chan_nr <= AVINTNUM(AVALANCHE_INT_END_PRIMARY_REG1))
++ avalanche_hw0_icregs->intecr1 = (1 << chan_nr);
++
++ /* primary interrupt #'s 32-39 */
++
++ else if ((chan_nr <= AVINTNUM(AVALANCHE_INT_END_PRIMARY_REG2)) &&
++ (chan_nr > AVINTNUM(AVALANCHE_INT_END_PRIMARY_REG1)))
++ avalanche_hw0_icregs->intecr2 = (1 << (chan_nr - AVINTNUM(AVALANCHE_INT_END_SECONDARY)));
++
++ else /* secondary interrupt #'s 0-31 */
++ avalanche_hw0_ecregs->exiecr = (1 << (chan_nr - AVINTNUM(AVALANCHE_INT_END_PRIMARY)));
++
++ restore_flags(flags);
++}
++
++void enable_avalanche_irq(unsigned int irq_nr)
++{
++ unsigned long flags;
++ unsigned long chan_nr=0;
++ unsigned long int_bit=0;
++
++ if(irq_nr > AVALANCHE_INT_END) {
++ printk("whee, invalid irq_nr %d\n", irq_nr);
++ panic("IRQ, you lose...");
++ }
++
++ save_and_cli(flags);
++
++
++ if(irq_nr < MIPS_EXCEPTION_OFFSET)
++ {
++ /* Enable MIPS exceptions */
++ int_bit = read_c0_status();
++ change_c0_status(ST0_IM,int_bit | (1<<(8+irq_nr)));
++ restore_flags(flags);
++ return;
++ }
++
++ /* irq_nr represents the line number for the interrupt. We must
++ * enable the channel number associated with that line number.
++ */
++
++ if(irq_nr > AVALANCHE_INT_END_PRIMARY_REG2)
++ chan_nr = AVINTNUM(irq_nr);
++ else
++ chan_nr = line_to_channel[AVINTNUM(irq_nr)];
++
++ /* enable the interrupt channel bit */
++
++ /* primary interrupt #'s 0-31 */
++ if(chan_nr <= AVINTNUM(AVALANCHE_INT_END_PRIMARY_REG1))
++ avalanche_hw0_icregs->intesr1 = (1 << chan_nr);
++
++ /* primary interrupt #'s 32 through 39 */
++ else if ((chan_nr <= AVINTNUM(AVALANCHE_INT_END_PRIMARY_REG2)) &&
++ (chan_nr > AVINTNUM(AVALANCHE_INT_END_PRIMARY_REG1)))
++ avalanche_hw0_icregs->intesr2 = (1 << (chan_nr - AVINTNUM(AVALANCHE_INT_END_SECONDARY)));
++
++ else /* secondary interrupt #'s 0-31 */
++ avalanche_hw0_ecregs->exiesr = (1 << (chan_nr - AVINTNUM(AVALANCHE_INT_END_PRIMARY)));
++
++ restore_flags(flags);
++}
++
++static unsigned int startup_avalanche_irq(unsigned int irq)
++{
++ enable_avalanche_irq(irq);
++ return 0; /* never anything pending */
++}
++
++
++int get_irq_list(char *buf)
++{
++ int i, len = 0;
++ int num = 0;
++ struct irqaction *action;
++
++ for (i = 0; i < MIPS_EXCEPTION_OFFSET; i++, num++)
++ {
++ action = irq_action[i];
++ if (!action)
++ continue;
++ len += sprintf(buf+len, "%2d: %8d %c %s",
++ num, kstat.irqs[0][num],
++ (action->flags & SA_INTERRUPT) ? '+' : ' ',
++ action->name);
++ for (action=action->next; action; action = action->next) {
++ len += sprintf(buf+len, ",%s %s",
++ (action->flags & SA_INTERRUPT) ? " +" : "",
++ action->name);
++ }
++ len += sprintf(buf+len, " [MIPS interrupt]\n");
++ }
++
++
++ for (i = 0; i < AVINTNUM(AVALANCHE_INT_END); i++,num++)
++ {
++ if(i < AVINTNUM(AVALANCHE_INT_END_PRIMARY))
++ action = hw0_irq_action_primary[i];
++ else
++ action = hw0_irq_action_secondary[i-AVINTNUM(AVALANCHE_INT_END_PRIMARY)];
++ if (!action)
++ continue;
++ len += sprintf(buf+len, "%2d: %8d %c %s",
++ num, kstat.irqs[0][ LNXINTNUM(i) ],
++ (action->flags & SA_INTERRUPT) ? '+' : ' ',
++ action->name);
++
++ for (action=action->next; action; action = action->next)
++ {
++ len += sprintf(buf+len, ",%s %s",
++ (action->flags & SA_INTERRUPT) ? " +" : "",
++ action->name);
++ }
++
++ if(i < AVINTNUM(AVALANCHE_INT_END_PRIMARY))
++ len += sprintf(buf+len, " [hw0 (Avalanche Primary)]\n");
++ else
++ len += sprintf(buf+len, " [hw0 (Avalanche Secondary)]\n");
++
++ }
++
++ return len;
++}
++
++int request_irq(unsigned int irq,
++ void (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char * devname,
++ void *dev_id)
++{
++ struct irqaction *action;
++
++ if (irq > AVALANCHE_INT_END)
++ return -EINVAL;
++ if (!handler)
++ return -EINVAL;
++
++ action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
++ if(!action)
++ return -ENOMEM;
++
++ action->handler = handler;
++ action->flags = irqflags;
++ action->mask = 0;
++ action->name = devname;
++ irq_desc_ti[irq].action = action;
++ action->dev_id = dev_id;
++
++ action->next = 0;
++
++ if(irq < MIPS_EXCEPTION_OFFSET)
++ {
++ irq_action[irq] = action;
++ enable_avalanche_irq(irq);
++ return 0;
++ }
++
++ if(irq < AVALANCHE_INT_END_PRIMARY)
++ hw0_irq_action_primary[line_to_channel[AVINTNUM(irq)]] = action;
++ else
++ hw0_irq_action_secondary[irq - AVALANCHE_INT_END_PRIMARY] = action;
++
++ enable_avalanche_irq(irq);
++
++ return 0;
++}
++
++void free_irq(unsigned int irq, void *dev_id)
++{
++ struct irqaction *action;
++
++ if (irq > AVALANCHE_INT_END) {
++ printk("Trying to free IRQ%d\n",irq);
++ return;
++ }
++
++ if(irq < MIPS_EXCEPTION_OFFSET)
++ {
++ action = irq_action[irq];
++ irq_action[irq] = NULL;
++ irq_desc_ti[irq].action = NULL;
++ disable_avalanche_irq(irq);
++ kfree(action);
++ return;
++ }
++
++ if(irq < AVALANCHE_INT_END_PRIMARY) {
++ action = hw0_irq_action_primary[line_to_channel[AVINTNUM(irq)]];
++ hw0_irq_action_primary[line_to_channel[AVINTNUM(irq)]] = NULL;
++ irq_desc_ti[irq].action = NULL;
++ }
++ else {
++ action = hw0_irq_action_secondary[irq - AVALANCHE_INT_END_PRIMARY];
++ hw0_irq_action_secondary[irq - AVALANCHE_INT_END_PRIMARY] = NULL;
++ irq_desc_ti[irq].action = NULL;
++ }
++
++ disable_avalanche_irq(irq);
++ kfree(action);
++}
++
++#ifdef CONFIG_KGDB
++extern void breakpoint(void);
++extern int remote_debug;
++#endif
++
++
++void __init init_IRQ(void)
++{
++ int i;
++
++ avalanche_hw0_icregs = (struct avalanche_ictrl_regs *)AVALANCHE_ICTRL_REGS_BASE;
++ avalanche_hw0_ecregs = (struct avalanche_exctrl_regs *)AVALANCHE_ECTRL_REGS_BASE;
++ avalanche_hw0_ipaceregs = (struct avalanche_ipace_regs *)AVALANCHE_IPACE_REGS_BASE;
++ avalanche_hw0_chregs = (struct avalanche_channel_int_number *)AVALANCHE_CHCTRL_REGS_BASE;
++
++ /* Disable interrupts and clear pending
++ */
++
++ avalanche_hw0_icregs->intecr1 = 0xffffffff; /* disable interrupts 0:31 */
++ avalanche_hw0_icregs->intcr1 = 0xffffffff; /* clear interrupts 0:31 */
++ avalanche_hw0_icregs->intecr2 = 0xff; /* disable interrupts 32:39 */
++ avalanche_hw0_icregs->intcr2 = 0xff; /* clear interrupts 32:39 */
++ avalanche_hw0_ecregs->exiecr = 0xffffffff; /* disable secondary interrupts 0:31 */
++ avalanche_hw0_ecregs->excr = 0xffffffff; /* clear secondary interrupts 0:31 */
++
++
++ // avalanche_hw0_ipaceregs->ipacep = (2*get_avalanche_vbus_freq()/1000000)*4;
++ /* hack for speeding up the pacing. */
++ printk("the pacing pre-scalar has been set as 600.\n");
++ avalanche_hw0_ipaceregs->ipacep = 600;
++ /* Channel to line mapping, Line to Channel mapping */
++
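++ /* Start with an identity mapping: channel i is routed to interrupt
++ * line i. Individual interrupts can be remapped later through
++ * avalanche_int_set(). */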
++ for(i = 0; i < 40; i++)
++ avalanche_int_set(i,i);
++
++ /* Now safe to set the exception vector. */
++ set_except_vector(0, mipsIRQ);
++
++ /* Set up the IRQ descriptor array. These are mapped as flat
++ * interrupt numbers. The mapping is as follows:
++ *
++ * 0-7 MIPS CPU Exceptions (HW/SW)
++ * 8-47 Primary Interrupts (Avalanche)
++ * 48-79 Secondary Interrupts (Avalanche)
++ */
++
++ for (i = 0; i <= AVALANCHE_INT_END; i++)
++ {
++ irq_desc_ti[i].status = IRQ_DISABLED;
++ irq_desc_ti[i].action = 0;
++ irq_desc_ti[i].depth = 1;
++ irq_desc_ti[i].handler = &avalanche_irq_type;
++ }
++
++#ifdef CONFIG_KGDB
++ if (remote_debug)
++ {
++ set_debug_traps();
++ breakpoint();
++ }
++#endif
++}
++
++void avalanche_hw0_irqdispatch(struct pt_regs *regs)
++{
++ struct irqaction *action;
++ int irq, cpu = smp_processor_id();
++ unsigned long int_line_number,status;
++ int i,secondary = 0;
++ int chan_nr=0;
++
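++ /* The priority interrupt index register reports the line number in
++ * bits 21:16 and the channel number in bits 5:0 of the highest
++ * priority pending interrupt. */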
++ int_line_number = ((avalanche_hw0_icregs->pintir >> 16) & 0x3F);
++ chan_nr = ((avalanche_hw0_icregs->pintir) & 0x3F);
++
++
++ if(chan_nr < 32)
++ {
++ if( chan_nr != uni_secondary_interrupt)
++ avalanche_hw0_icregs->intcr1 = (1<<chan_nr);
++
++ }
++
++ if((chan_nr < 40) && (chan_nr > 31))
++ {
++ avalanche_hw0_icregs->intcr2 = (1<<(chan_nr-AVINTNUM(AVALANCHE_INT_END_SECONDARY)));
++ }
++
++
++ /* If the Priority Interrupt Index Register returns 40 then no
++ * interrupts are pending
++ */
++
++ if(chan_nr == 40)
++ return;
++
++ if(chan_nr == uni_secondary_interrupt)
++ {
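++ /* All secondary (exception) interrupts are funnelled through this
++ * single primary channel; scan the exception status register for
++ * the first pending secondary line. */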
++ status = avalanche_hw0_ecregs->exsr;
++ for(i=0; i < AVINTNUM(AVALANCHE_INT_END_SECONDARY); i++)
++ {
++ if (status & 1<<i)
++ {
++ /* clear secondary interrupt */
++ avalanche_hw0_ecregs->excr = 1 << i;
++ break;
++ }
++ }
++ irq = i;
++ secondary = 1;
++
++ /* clear the universal secondary interrupt */
++ avalanche_hw0_icregs->intcr1 = 1 << uni_secondary_interrupt;
++
++ }
++ else
++ irq = chan_nr;
++
++ /* Suraj Add code to clear secondary interrupt */
++
++ if(secondary)
++ action = hw0_irq_action_secondary[irq];
++ else
++ action = hw0_irq_action_primary[irq];
++
++ /* if action == NULL, then we don't have a handler for the irq */
++
++ if ( action == NULL ) {
++ printk("No handler for hw0 irq: %i\n", irq);
++ return;
++ }
++
++ irq_enter(cpu,irq);
++ if(secondary)
++ {
++ kstat.irqs[0][(irq + AVINTNUM(AVALANCHE_INT_END_PRIMARY)) + 8]++;
++ action->handler((irq + AVALANCHE_INT_END_PRIMARY), action->dev_id, regs);
++ }
++ else
++ {
++ kstat.irqs[0][irq + 8]++;
++ action->handler(LNXINTNUM(irq), action->dev_id, regs);
++ }
++
++ irq_exit(cpu,irq);
++
++ if(softirq_pending(cpu))
++ do_softirq();
++
++ return;
++}
++
++void avalanche_int_set(int channel, int line)
++{
++ switch(channel)
++ {
++ case(0):
++ avalanche_hw0_chregs->cintnr0 = line;
++ break;
++ case(1):
++ avalanche_hw0_chregs->cintnr1 = line;
++ break;
++ case(2):
++ avalanche_hw0_chregs->cintnr2 = line;
++ break;
++ case(3):
++ avalanche_hw0_chregs->cintnr3 = line;
++ break;
++ case(4):
++ avalanche_hw0_chregs->cintnr4 = line;
++ break;
++ case(5):
++ avalanche_hw0_chregs->cintnr5 = line;
++ break;
++ case(6):
++ avalanche_hw0_chregs->cintnr6 = line;
++ break;
++ case(7):
++ avalanche_hw0_chregs->cintnr7 = line;
++ break;
++ case(8):
++ avalanche_hw0_chregs->cintnr8 = line;
++ break;
++ case(9):
++ avalanche_hw0_chregs->cintnr9 = line;
++ break;
++ case(10):
++ avalanche_hw0_chregs->cintnr10 = line;
++ break;
++ case(11):
++ avalanche_hw0_chregs->cintnr11 = line;
++ break;
++ case(12):
++ avalanche_hw0_chregs->cintnr12 = line;
++ break;
++ case(13):
++ avalanche_hw0_chregs->cintnr13 = line;
++ break;
++ case(14):
++ avalanche_hw0_chregs->cintnr14 = line;
++ break;
++ case(15):
++ avalanche_hw0_chregs->cintnr15 = line;
++ break;
++ case(16):
++ avalanche_hw0_chregs->cintnr16 = line;
++ break;
++ case(17):
++ avalanche_hw0_chregs->cintnr17 = line;
++ break;
++ case(18):
++ avalanche_hw0_chregs->cintnr18 = line;
++ break;
++ case(19):
++ avalanche_hw0_chregs->cintnr19 = line;
++ break;
++ case(20):
++ avalanche_hw0_chregs->cintnr20 = line;
++ break;
++ case(21):
++ avalanche_hw0_chregs->cintnr21 = line;
++ break;
++ case(22):
++ avalanche_hw0_chregs->cintnr22 = line;
++ break;
++ case(23):
++ avalanche_hw0_chregs->cintnr23 = line;
++ break;
++ case(24):
++ avalanche_hw0_chregs->cintnr24 = line;
++ break;
++ case(25):
++ avalanche_hw0_chregs->cintnr25 = line;
++ break;
++ case(26):
++ avalanche_hw0_chregs->cintnr26 = line;
++ break;
++ case(27):
++ avalanche_hw0_chregs->cintnr27 = line;
++ break;
++ case(28):
++ avalanche_hw0_chregs->cintnr28 = line;
++ break;
++ case(29):
++ avalanche_hw0_chregs->cintnr29 = line;
++ break;
++ case(30):
++ avalanche_hw0_chregs->cintnr30 = line;
++ break;
++ case(31):
++ avalanche_hw0_chregs->cintnr31 = line;
++ break;
++ case(32):
++ avalanche_hw0_chregs->cintnr32 = line;
++ break;
++ case(33):
++ avalanche_hw0_chregs->cintnr33 = line;
++ break;
++ case(34):
++ avalanche_hw0_chregs->cintnr34 = line;
++ break;
++ case(35):
++ avalanche_hw0_chregs->cintnr35 = line;
++ break;
++ case(36):
++ avalanche_hw0_chregs->cintnr36 = line;
++ break;
++ case(37):
++ avalanche_hw0_chregs->cintnr37 = line;
++ break;
++ case(38):
++ avalanche_hw0_chregs->cintnr38 = line;
++ break;
++ case(39):
++ avalanche_hw0_chregs->cintnr39 = line;
++ break;
++ default:
++ printk("Error: Unknown Avalanche interrupt channel\n");
++ }
++
++ line_to_channel[line] = channel; /* Suraj check */
++
++ if (channel == UNIFIED_SECONDARY_INTERRUPT)
++ uni_secondary_interrupt = line;
++
++}
++
++
++#define AVALANCHE_MAX_PACING_BLK 3
++#define AVALANCHE_PACING_LOW_VAL 2
++#define AVALANCHE_PACING_HIGH_VAL 63
++
++int avalanche_request_pacing(int irq_nr, unsigned int blk_num,
++ unsigned int pace_value)
++{
++ unsigned int blk_offset;
++ unsigned long flags;
++
++ if(irq_nr < MIPS_EXCEPTION_OFFSET &&
++ irq_nr >= AVALANCHE_INT_END_PRIMARY)
++ return (0);
++
++ if(blk_num > AVALANCHE_MAX_PACING_BLK)
++ return(-1);
++
++ if(pace_value > AVALANCHE_PACING_HIGH_VAL &&
++ pace_value < AVALANCHE_PACING_LOW_VAL)
++ return(-1);
++
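++ /* each pacing block occupies an 8-bit field in ipacemap/ipacemax */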
++ blk_offset = blk_num*8;
++
++ save_and_cli(flags);
++
++ /* disable the interrupt pacing, if enabled previously */
++ avalanche_hw0_ipaceregs->ipacemax &= ~(0xff << blk_offset);
++
++ /* clear the pacing map */
++ avalanche_hw0_ipaceregs->ipacemap &= ~(0xff << blk_offset);
++
++ /* setup the new values */
++ avalanche_hw0_ipaceregs->ipacemap |= ((AVINTNUM(irq_nr)) << blk_offset);
++ avalanche_hw0_ipaceregs->ipacemax |= ((0x80 | pace_value) << blk_offset);
++
++ restore_flags(flags);
++
++ return(0);
++}
+diff -urN linux.old/arch/mips/ar7/Makefile linux.dev/arch/mips/ar7/Makefile
+--- linux.old/arch/mips/ar7/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/ar7/Makefile 2005-08-12 21:21:30.425150040 +0200
+@@ -0,0 +1,14 @@
++.S.s:
++ $(CPP) $(AFLAGS) $< -o $*.s
++
++.S.o:
++ $(CC) $(AFLAGS) -c $< -o $*.o
++
++EXTRA_CFLAGS := -I$(TOPDIR)/include/asm/ar7 -DLITTLE_ENDIAN -D_LINK_KSEG0_
++O_TARGET := ar7.o
++
++obj-y := tnetd73xx_misc.o misc.o
++export-objs := misc.o
++obj-y += setup.o irq.o mipsIRQ.o reset.o init.o psp_env.o memory.o printf.o cmdline.o time.o
++
++include $(TOPDIR)/Rules.make
+diff -urN linux.old/arch/mips/ar7/memory.c linux.dev/arch/mips/ar7/memory.c
+--- linux.old/arch/mips/ar7/memory.c 1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/ar7/memory.c 2005-08-12 19:52:25.301732312 +0200
+@@ -0,0 +1,131 @@
++/*
++ * Carsten Langgaard, carstenl@mips.com
++ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
++ *
++ * ########################################################################
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ * ########################################################################
++ *
++ * PROM library functions for acquiring/using memory descriptors given to
++ * us from the YAMON.
++ *
++ */
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/bootmem.h>
++
++#include <asm/bootinfo.h>
++#include <asm/page.h>
++#include <asm/mips-boards/prom.h>
++
++enum yamon_memtypes {
++ yamon_dontuse,
++ yamon_prom,
++ yamon_free,
++};
++struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
++
++/* References to section boundaries */
++extern char _end;
++
++#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
++
++
++struct prom_pmemblock * __init prom_getmdesc(void)
++{
++ char *memsize_str;
++ unsigned int memsize;
++
++ memsize_str = prom_getenv("memsize");
++ if (!memsize_str) {
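++ /* no "memsize" variable in the bootloader environment; assume 32 MB */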
++ memsize = 0x02000000;
++ } else {
++ memsize = simple_strtol(memsize_str, NULL, 0);
++ }
++
++ memset(mdesc, 0, sizeof(mdesc));
++
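++ /* Three-entry map: the low CONFIG_AR7_MEMORY bytes are left to the
++ * bootloader, the next 128 KB hold the PROM/environment data, and the
++ * rest (up to CONFIG_AR7_MEMORY + memsize) is free RAM for the kernel. */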
++ mdesc[0].type = yamon_dontuse;
++ mdesc[0].base = 0x00000000;
++ mdesc[0].size = CONFIG_AR7_MEMORY;
++
++ mdesc[1].type = yamon_prom;
++ mdesc[1].base = CONFIG_AR7_MEMORY;
++ mdesc[1].size = 0x00020000;
++
++ mdesc[2].type = yamon_free;
++ mdesc[2].base = CONFIG_AR7_MEMORY + 0x00020000;
++ mdesc[2].size = (memsize + CONFIG_AR7_MEMORY) - mdesc[2].base;
++
++ return &mdesc[0];
++}
++
++static int __init prom_memtype_classify (unsigned int type)
++{
++ switch (type) {
++ case yamon_free:
++ return BOOT_MEM_RAM;
++ case yamon_prom:
++ return BOOT_MEM_ROM_DATA;
++ default:
++ return BOOT_MEM_RESERVED;
++ }
++}
++
++void __init prom_meminit(void)
++{
++ struct prom_pmemblock *p;
++
++ p = prom_getmdesc();
++
++ while (p->size) {
++ long type;
++ unsigned long base, size;
++
++ type = prom_memtype_classify (p->type);
++ base = p->base;
++ size = p->size;
++
++ add_memory_region(base, size, type);
++ p++;
++ }
++}
++
++void __init prom_free_prom_memory (void)
++{
++#if 0
++ int i;
++ unsigned long freed = 0;
++ unsigned long addr;
++
++ for (i = 0; i < boot_mem_map.nr_map; i++) {
++ if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
++ continue;
++
++ addr = boot_mem_map.map[i].addr;
++ while (addr < boot_mem_map.map[i].addr
++ + boot_mem_map.map[i].size) {
++ ClearPageReserved(virt_to_page(__va(addr)));
++ set_page_count(virt_to_page(__va(addr)), 1);
++ free_page((unsigned long)__va(addr));
++ addr += PAGE_SIZE;
++ freed += PAGE_SIZE;
++ }
++ }
++ printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
++#endif
++}
+diff -urN linux.old/arch/mips/ar7/mipsIRQ.S linux.dev/arch/mips/ar7/mipsIRQ.S
+--- linux.old/arch/mips/ar7/mipsIRQ.S 1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/ar7/mipsIRQ.S 2005-08-12 19:32:05.138225360 +0200
+@@ -0,0 +1,120 @@
++/*
++ * Carsten Langgaard, carstenl@mips.com
++ * Copyright (C) 1999, 2000 MIPS Technologies, Inc. All rights reserved.
++ *
++ * ########################################################################
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ * ########################################################################
++ *
++ * Interrupt exception dispatch code.
++ *
++ */
++#include <linux/config.h>
++
++#include <asm/asm.h>
++#include <asm/mipsregs.h>
++#include <asm/regdef.h>
++#include <asm/stackframe.h>
++
++/* A lot of complication here is taken away because:
++ *
++ * 1) We handle one interrupt and return, sitting in a loop and moving across
++ * all the pending IRQ bits in the cause register is _NOT_ the answer, the
++ * common case is one pending IRQ so optimize in that direction.
++ *
++ * 2) We need not check against bits in the status register IRQ mask, that
++ * would make this routine slow as hell.
++ *
++ * 3) Linux only thinks in terms of all IRQs on or all IRQs off, nothing in
++ * between like BSD spl() brain-damage.
++ *
++ * Furthermore, the IRQs on the MIPS board look basically like this
++ * (barring software IRQs, which we don't use at all; all external
++ * interrupt sources are combined together on hardware interrupt 0
++ * (MIPS IRQ 2)):
++ *
++ * MIPS IRQ Source
++ * -------- ------
++ * 0 Software (ignored)
++ * 1 Software (ignored)
++ * 2 Combined hardware interrupt (hw0)
++ * 3 Hardware (ignored)
++ * 4 Hardware (ignored)
++ * 5 Hardware (ignored)
++ * 6 Hardware (ignored)
++ * 7 R4k timer (what we use)
++ *
++ * Note: On the SEAD board things are a little bit different.
++ * There, IRQ 2 (hw0) is wired to UART0 and IRQ 3 (hw1) is
++ * wired to UART1.
++ *
++ * We handle the IRQ according to _our_ priority which is:
++ *
++ * Highest ---- R4k Timer
++ * Lowest ---- Combined hardware interrupt
++ *
++ * then we just return, if multiple IRQs are pending then we will just take
++ * another exception, big deal.
++ */
++
++.text
++.set noreorder
++.set noat
++ .align 5
++NESTED(mipsIRQ, PT_SIZE, sp)
++ SAVE_ALL
++ CLI
++ .set at
++
++ mfc0 s0, CP0_CAUSE # get irq bits
++
++ /* First we check for r4k counter/timer IRQ. */
++ andi a0, s0, CAUSEF_IP7
++ beq a0, zero, 1f
++ andi a0, s0, CAUSEF_IP2 # delay slot, check hw0 interrupt
++
++ /* Wheee, a timer interrupt. */
++ move a0, sp
++ jal ar7_timer_interrupt
++ nop
++
++ j ret_from_irq
++ nop
++
++ 1:
++ beq a0, zero, 1f # hw0 (IP2) not pending, spurious interrupt
++ nop
++
++ /* Wheee, combined hardware level zero interrupt. */
++ jal avalanche_hw0_irqdispatch
++ move a0, sp # delay slot
++
++ j ret_from_irq
++ nop # delay slot
++
++ 1:
++ /*
++ * Here by mistake? This is possible, what can happen is that by the
++ * time we take the exception the IRQ pin goes low, so just leave if
++ * this is the case.
++ */
++ move a1,s0
++ PRINT("Got interrupt: c0_cause = %08x\n")
++ mfc0 a1, CP0_EPC
++ PRINT("c0_epc = %08x\n")
++
++ j ret_from_irq
++ nop
++END(mipsIRQ)
+diff -urN linux.old/arch/mips/ar7/misc.c linux.dev/arch/mips/ar7/misc.c
+--- linux.old/arch/mips/ar7/misc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/ar7/misc.c 2005-08-12 19:32:05.136225664 +0200
+@@ -0,0 +1,319 @@
++#include <asm/ar7/sangam.h>
++#include <asm/ar7/avalanche_misc.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++
++#define TRUE 1
++
++static unsigned int avalanche_vbus_freq;
++
++REMOTE_VLYNQ_DEV_RESET_CTRL_FN p_remote_vlynq_dev_reset_ctrl = NULL;
++
++/*****************************************************************************
++ * Reset Control Module.
++ *****************************************************************************/
++void avalanche_reset_ctrl(unsigned int module_reset_bit,
++ AVALANCHE_RESET_CTRL_T reset_ctrl)
++{
++ volatile unsigned int *reset_reg = (unsigned int*) AVALANCHE_RST_CTRL_PRCR;
++
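++ /* Reset bits 0-31 are driven through the local PRCR register, bits
++ * 32-63 are not handled here, and bits 64 and up are forwarded to the
++ * registered remote VLYNQ reset handler, if any. */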
++ if(module_reset_bit >= 32 && module_reset_bit < 64)
++ return;
++
++ if(module_reset_bit >= 64)
++ {
++ if(p_remote_vlynq_dev_reset_ctrl)
++ return(p_remote_vlynq_dev_reset_ctrl(module_reset_bit - 64, reset_ctrl));
++ else
++ return;
++ }
++
++ if(reset_ctrl == OUT_OF_RESET)
++ *reset_reg |= 1 << module_reset_bit;
++ else
++ *reset_reg &= ~(1 << module_reset_bit);
++}
++
++AVALANCHE_RESET_CTRL_T avalanche_get_reset_status(unsigned int module_reset_bit)
++{
++ volatile unsigned int *reset_reg = (unsigned int*) AVALANCHE_RST_CTRL_PRCR;
++
++ return (((*reset_reg) & (1 << module_reset_bit)) ? OUT_OF_RESET : IN_RESET );
++}
++
++void avalanche_sys_reset(AVALANCHE_SYS_RST_MODE_T mode)
++{
++ volatile unsigned int *sw_reset_reg = (unsigned int*) AVALANCHE_RST_CTRL_SWRCR;
++ *sw_reset_reg = mode;
++}
++
++#define AVALANCHE_RST_CTRL_RSR_MASK 0x3
++
++AVALANCHE_SYS_RESET_STATUS_T avalanche_get_sys_last_reset_status()
++{
++ volatile unsigned int *sys_reset_status = (unsigned int*) AVALANCHE_RST_CTRL_RSR;
++
++ return ( (AVALANCHE_SYS_RESET_STATUS_T) (*sys_reset_status & AVALANCHE_RST_CTRL_RSR_MASK) );
++}
++
++
++/*****************************************************************************
++ * Power Control Module
++ *****************************************************************************/
++#define AVALANCHE_GLOBAL_POWER_DOWN_MASK 0x3FFFFFFF /* bit 31, 30 masked */
++#define AVALANCHE_GLOBAL_POWER_DOWN_BIT 30 /* shift to bit 30, 31 */
++
++
++void avalanche_power_ctrl(unsigned int module_power_bit, AVALANCHE_POWER_CTRL_T power_ctrl)
++{
++ volatile unsigned int *power_reg = (unsigned int*)AVALANCHE_POWER_CTRL_PDCR;
++
++ if (power_ctrl == POWER_CTRL_POWER_DOWN)
++ /* power down the module */
++ *power_reg |= (1 << module_power_bit);
++ else
++ /* power on the module */
++ *power_reg &= (~(1 << module_power_bit));
++}
++
++AVALANCHE_POWER_CTRL_T avalanche_get_power_status(unsigned int module_power_bit)
++{
++ volatile unsigned int *power_status_reg = (unsigned int*)AVALANCHE_POWER_CTRL_PDCR;
++
++ return (((*power_status_reg) & (1 << module_power_bit)) ? POWER_CTRL_POWER_DOWN : POWER_CTRL_POWER_UP);
++}
++
++void avalanche_set_global_power_mode(AVALANCHE_SYS_POWER_MODE_T power_mode)
++{
++ volatile unsigned int *power_status_reg = (unsigned int*)AVALANCHE_POWER_CTRL_PDCR;
++
++ *power_status_reg &= AVALANCHE_GLOBAL_POWER_DOWN_MASK;
++ *power_status_reg |= ( power_mode << AVALANCHE_GLOBAL_POWER_DOWN_BIT);
++}
++
++AVALANCHE_SYS_POWER_MODE_T avalanche_get_global_power_mode(void)
++{
++ volatile unsigned int *power_status_reg = (unsigned int*)AVALANCHE_POWER_CTRL_PDCR;
++
++ return((AVALANCHE_SYS_POWER_MODE_T) (((*power_status_reg) & (~AVALANCHE_GLOBAL_POWER_DOWN_MASK))
++ >> AVALANCHE_GLOBAL_POWER_DOWN_BIT));
++}
++
++/*****************************************************************************
++ * GPIO Control
++ *****************************************************************************/