1 diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
2 index f9cc2b6..9274d3f 100644
3 --- a/drivers/net/arm/Kconfig
4 +++ b/drivers/net/arm/Kconfig
5 @@ -47,3 +47,13 @@ config EP93XX_ETH
7 This is a driver for the ethernet hardware included in EP93xx CPUs.
8 Say Y if you are building a kernel for EP93xx based devices.
11 + tristate "IXP4xx Ethernet support"
12 + depends on NET_ETHERNET && ARM && ARCH_IXP4XX
17 + Say Y here if you want to use built-in Ethernet ports
18 +	  on IXP4xx processors.
19 diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
20 index a4c8682..7c812ac 100644
21 --- a/drivers/net/arm/Makefile
22 +++ b/drivers/net/arm/Makefile
23 @@ -9,3 +9,4 @@ obj-$(CONFIG_ARM_ETHER3) += ether3.o
24 obj-$(CONFIG_ARM_ETHER1) += ether1.o
25 obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
26 obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
27 +obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
28 diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
30 index 0000000..d8bbbc7
32 +++ b/drivers/net/arm/ixp4xx_eth.c
35 + * Intel IXP4xx Ethernet driver for Linux
37 + * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
39 + * This program is free software; you can redistribute it and/or modify it
40 + * under the terms of version 2 of the GNU General Public License
41 + * as published by the Free Software Foundation.
43 + * Ethernet port config (0x00 is not present on IXP42X):
45 + * logical port 0x00 0x10 0x20
46 + * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C)
47 + * physical PortId 2 0 1
49 + * RX-free queue 26 27 28
50 + * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
54 + * bits 0 -> 1 - NPE ID (RX and TX-done)
55 + * bits 0 -> 2 - priority (TX, per 802.1D)
56 + * bits 3 -> 4 - port ID (user-set?)
57 + * bits 5 -> 31 - physical descriptor address
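+ *
+ * Example (decoding per the layout above): a TX-done entry of 0x01234561
+ * carries NPE ID 1 (bits 0-1) and descriptor address 0x01234560
+ * (entry & ~0x1F).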
60 +#include <linux/delay.h>
61 +#include <linux/dma-mapping.h>
62 +#include <linux/dmapool.h>
63 +#include <linux/etherdevice.h>
64 +#include <linux/io.h>
65 +#include <linux/kernel.h>
66 +#include <linux/mii.h>
67 +#include <linux/platform_device.h>
68 +#include <asm/arch/npe.h>
69 +#include <asm/arch/qmgr.h>
71 +#define DEBUG_QUEUES 0
75 +#define DEBUG_PKT_BYTES 0
77 +#define DEBUG_CLOSE 0
79 +#define DRV_NAME "ixp4xx_eth"
83 +#define RX_DESCS 64 /* also length of all RX queues */
84 +#define TX_DESCS 16 /* also length of all TX queues */
85 +#define TXDONE_QUEUE_LEN 64 /* dwords */
87 +#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
88 +#define REGS_SIZE 0x1000
89 +#define MAX_MRU 1536 /* 0x600 */
90 +#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
92 +#define NAPI_WEIGHT 16
93 +#define MDIO_INTERVAL (3 * HZ)
94 +#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
95 +#define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */
96 +#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
98 +#define NPE_ID(port_id) ((port_id) >> 4)
99 +#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
100 +#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
101 +#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
102 +#define TXDONE_QUEUE 31
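+/* Example: logical port 0x10 (NPE-B) maps to physical PortId 0,
+ * TX queue 24 and RX-free queue 27; TX-done queue 31 is shared. */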
104 +/* TX Control Registers */
105 +#define TX_CNTRL0_TX_EN 0x01
106 +#define TX_CNTRL0_HALFDUPLEX 0x02
107 +#define TX_CNTRL0_RETRY 0x04
108 +#define TX_CNTRL0_PAD_EN 0x08
109 +#define TX_CNTRL0_APPEND_FCS 0x10
110 +#define TX_CNTRL0_2DEFER 0x20
111 +#define TX_CNTRL0_RMII 0x40 /* reduced MII */
112 +#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
114 +/* RX Control Registers */
115 +#define RX_CNTRL0_RX_EN 0x01
116 +#define RX_CNTRL0_PADSTRIP_EN 0x02
117 +#define RX_CNTRL0_SEND_FCS 0x04
118 +#define RX_CNTRL0_PAUSE_EN 0x08
119 +#define RX_CNTRL0_LOOP_EN 0x10
120 +#define RX_CNTRL0_ADDR_FLTR_EN 0x20
121 +#define RX_CNTRL0_RX_RUNT_EN 0x40
122 +#define RX_CNTRL0_BCAST_DIS 0x80
123 +#define RX_CNTRL1_DEFER_EN 0x01
125 +/* Core Control Register */
126 +#define CORE_RESET 0x01
127 +#define CORE_RX_FIFO_FLUSH 0x02
128 +#define CORE_TX_FIFO_FLUSH 0x04
129 +#define CORE_SEND_JAM 0x08
130 +#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
132 +#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
133 + TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
135 +#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
136 +#define DEFAULT_CORE_CNTRL CORE_MDC_EN
139 +/* NPE message codes */
140 +#define NPE_GETSTATUS 0x00
141 +#define NPE_EDB_SETPORTADDRESS 0x01
142 +#define NPE_EDB_GETMACADDRESSDATABASE 0x02
143 +#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
144 +#define NPE_GETSTATS 0x04
145 +#define NPE_RESETSTATS 0x05
146 +#define NPE_SETMAXFRAMELENGTHS 0x06
147 +#define NPE_VLAN_SETRXTAGMODE 0x07
148 +#define NPE_VLAN_SETDEFAULTRXVID 0x08
149 +#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
150 +#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
151 +#define NPE_VLAN_SETRXQOSENTRY 0x0B
152 +#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
153 +#define NPE_STP_SETBLOCKINGSTATE 0x0D
154 +#define NPE_FW_SETFIREWALLMODE 0x0E
155 +#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
156 +#define NPE_PC_SETAPMACTABLE 0x11
157 +#define NPE_SETLOOPBACK_MODE 0x12
158 +#define NPE_PC_SETBSSIDTABLE 0x13
159 +#define NPE_ADDRESS_FILTER_CONFIG 0x14
160 +#define NPE_APPENDFCSCONFIG 0x15
161 +#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
162 +#define NPE_MAC_RECOVERY_START 0x17
166 +typedef struct sk_buff buffer_t;
167 +#define free_buffer dev_kfree_skb
168 +#define free_buffer_irq dev_kfree_skb_irq
170 +typedef void buffer_t;
171 +#define free_buffer kfree
172 +#define free_buffer_irq kfree
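+/* Big-endian builds DMA-map skb data directly; little-endian builds copy
+ * packets through plain kmalloc() buffers with memcpy_swab32(), since the
+ * NPEs expect big-endian data. */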
176 + u32 tx_control[2], __res1[2]; /* 000 */
177 + u32 rx_control[2], __res2[2]; /* 010 */
178 + u32 random_seed, __res3[3]; /* 020 */
179 + u32 partial_empty_threshold, __res4; /* 030 */
180 + u32 partial_full_threshold, __res5; /* 038 */
181 + u32 tx_start_bytes, __res6[3]; /* 040 */
182 +	u32 tx_deferral, rx_deferral, __res7[2];	/* 050 */
183 + u32 tx_2part_deferral[2], __res8[2]; /* 060 */
184 + u32 slot_time, __res9[3]; /* 070 */
185 + u32 mdio_command[4]; /* 080 */
186 + u32 mdio_status[4]; /* 090 */
187 + u32 mcast_mask[6], __res10[2]; /* 0A0 */
188 + u32 mcast_addr[6], __res11[2]; /* 0C0 */
189 + u32 int_clock_threshold, __res12[3]; /* 0E0 */
190 + u32 hw_addr[6], __res13[61]; /* 0F0 */
191 + u32 core_control; /* 1FC */
195 + struct resource *mem_res;
196 + struct eth_regs __iomem *regs;
198 + struct net_device *netdev;
199 + struct napi_struct napi;
200 + struct net_device_stats stat;
201 + struct mii_if_info mii;
202 + struct delayed_work mdio_thread;
203 + struct eth_plat_info *plat;
204 + buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
205 + struct desc *desc_tab; /* coherent */
207 + int id; /* logical port ID */
211 +/* NPE message structure */
214 + u8 cmd, eth_id, byte2, byte3;
215 + u8 byte4, byte5, byte6, byte7;
217 + u8 byte3, byte2, eth_id, cmd;
218 + u8 byte7, byte6, byte5, byte4;
222 +/* Ethernet packet descriptor */
224 + u32 next; /* pointer to next buffer, unused */
227 + u16 buf_len; /* buffer length */
228 + u16 pkt_len; /* packet length */
229 + u32 data; /* pointer to data buffer in RAM */
237 + u16 pkt_len; /* packet length */
238 + u16 buf_len; /* buffer length */
239 + u32 data; /* pointer to data buffer in RAM */
249 + u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
250 + u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
251 + u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
253 + u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
254 + u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
255 + u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
260 +#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
261 + (n) * sizeof(struct desc))
262 +#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
264 +#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
265 + ((n) + RX_DESCS) * sizeof(struct desc))
266 +#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
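+/* One coherent POOL_ALLOC_SIZE block holds RX_DESCS RX descriptors
+ * followed by TX_DESCS TX descriptors; the macros above index into it. */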
269 +static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
272 + for (i = 0; i < cnt; i++)
273 + dest[i] = swab32(src[i]);
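+/* Both buffers must be 32-bit aligned and cnt counts whole words;
+ * callers round byte lengths up with ALIGN(..., 4) before dividing by 4. */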
277 +static spinlock_t mdio_lock;
278 +static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
279 +static int ports_open;
280 +static struct port *npe_port_tab[MAX_NPES];
281 +static struct dma_pool *dma_pool;
284 +static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
285 + int write, u16 cmd)
289 + if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
290 + printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
295 + __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
296 + __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
298 + __raw_writel(((phy_id << 5) | location) & 0xFF,
299 + &mdio_regs->mdio_command[2]);
300 + __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
301 + &mdio_regs->mdio_command[3]);
303 + while ((cycles < MAX_MDIO_RETRIES) &&
304 + (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
309 + if (cycles == MAX_MDIO_RETRIES) {
310 + printk(KERN_ERR "%s: MII write failed\n", dev->name);
315 + printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
322 + if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
323 + printk(KERN_ERR "%s: MII read failed\n", dev->name);
327 + return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
328 + (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
331 +static int mdio_read(struct net_device *dev, int phy_id, int location)
333 + unsigned long flags;
336 + spin_lock_irqsave(&mdio_lock, flags);
337 + val = mdio_cmd(dev, phy_id, location, 0, 0);
338 + spin_unlock_irqrestore(&mdio_lock, flags);
342 +static void mdio_write(struct net_device *dev, int phy_id, int location,
345 + unsigned long flags;
347 + spin_lock_irqsave(&mdio_lock, flags);
348 + mdio_cmd(dev, phy_id, location, 1, val);
349 + spin_unlock_irqrestore(&mdio_lock, flags);
352 +static void phy_reset(struct net_device *dev, int phy_id)
354 + struct port *port = netdev_priv(dev);
357 + mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
359 + while (cycles < MAX_MII_RESET_RETRIES) {
360 + if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
362 + printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
363 + dev->name, cycles);
371 + printk(KERN_ERR "%s: MII reset failed\n", dev->name);
374 +static void eth_set_duplex(struct port *port)
376 + if (port->mii.full_duplex)
377 + __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
378 + &port->regs->tx_control[0]);
380 + __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
381 + &port->regs->tx_control[0]);
385 +static void phy_check_media(struct port *port, int init)
387 + if (mii_check_media(&port->mii, 1, init))
388 + eth_set_duplex(port);
389 + if (port->mii.force_media) { /* mii_check_media() doesn't work */
390 + struct net_device *dev = port->netdev;
391 + int cur_link = mii_link_ok(&port->mii);
392 + int prev_link = netif_carrier_ok(dev);
394 + if (!prev_link && cur_link) {
395 + printk(KERN_INFO "%s: link up\n", dev->name);
396 + netif_carrier_on(dev);
397 + } else if (prev_link && !cur_link) {
398 + printk(KERN_INFO "%s: link down\n", dev->name);
399 + netif_carrier_off(dev);
405 +static void mdio_thread(struct work_struct *work)
407 + struct port *port = container_of(work, struct port, mdio_thread.work);
409 + phy_check_media(port, 0);
410 + schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
414 +static inline void debug_pkt(struct net_device *dev, const char *func,
420 + printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
421 + for (i = 0; i < len; i++) {
422 + if (i >= DEBUG_PKT_BYTES)
425 + ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
433 +static inline void debug_desc(u32 phys, struct desc *desc)
436 + printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
437 + " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
438 + phys, desc->next, desc->buf_len, desc->pkt_len,
439 + desc->data, desc->dest_id, desc->src_id, desc->flags,
440 + desc->qos, desc->padlen, desc->vlan_tci,
441 + desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
442 + desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
443 + desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
444 + desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
448 +static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
455 + { TX_QUEUE(0x10), "TX#0 " },
456 + { TX_QUEUE(0x20), "TX#1 " },
457 + { TX_QUEUE(0x00), "TX#2 " },
458 + { RXFREE_QUEUE(0x10), "RX-free#0 " },
459 + { RXFREE_QUEUE(0x20), "RX-free#1 " },
460 + { RXFREE_QUEUE(0x00), "RX-free#2 " },
461 + { TXDONE_QUEUE, "TX-done " },
465 + for (i = 0; i < ARRAY_SIZE(names); i++)
466 + if (names[i].queue == queue)
469 + printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
470 + i < ARRAY_SIZE(names) ? names[i].name : "",
471 + is_get ? "->" : "<-", phys);
475 +static inline u32 queue_get_entry(unsigned int queue)
477 + u32 phys = qmgr_get_entry(queue);
478 + debug_queue(queue, 1, phys);
482 +static inline int queue_get_desc(unsigned int queue, struct port *port,
485 + u32 phys, tab_phys, n_desc;
488 + if (!(phys = queue_get_entry(queue)))
491 + phys &= ~0x1F; /* mask out non-address bits */
492 + tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
493 + tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
494 + n_desc = (phys - tab_phys) / sizeof(struct desc);
495 + BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
496 + debug_desc(phys, &tab[n_desc]);
497 + BUG_ON(tab[n_desc].next);
501 +static inline void queue_put_desc(unsigned int queue, u32 phys,
504 + debug_queue(queue, 0, phys);
505 + debug_desc(phys, desc);
506 + BUG_ON(phys & 0x1F);
507 + qmgr_put_entry(queue, phys);
508 + BUG_ON(qmgr_stat_overflow(queue));
512 +static inline void dma_unmap_tx(struct port *port, struct desc *desc)
515 + dma_unmap_single(&port->netdev->dev, desc->data,
516 + desc->buf_len, DMA_TO_DEVICE);
518 + dma_unmap_single(&port->netdev->dev, desc->data & ~3,
519 + ALIGN((desc->data & 3) + desc->buf_len, 4),
525 +static void eth_rx_irq(void *pdev)
527 + struct net_device *dev = pdev;
528 + struct port *port = netdev_priv(dev);
531 + printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
533 + qmgr_disable_irq(port->plat->rxq);
534 + netif_rx_schedule(dev, &port->napi);
537 +static int eth_poll(struct napi_struct *napi, int budget)
539 + struct port *port = container_of(napi, struct port, napi);
540 + struct net_device *dev = port->netdev;
541 + unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
545 + printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
548 + while (received < budget) {
549 + struct sk_buff *skb;
553 + struct sk_buff *temp;
557 + if ((n = queue_get_desc(rxq, port, 0)) < 0) {
558 + received = 0; /* No packet received */
560 + printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
563 + netif_rx_complete(dev, napi);
564 + qmgr_enable_irq(rxq);
565 + if (!qmgr_stat_empty(rxq) &&
566 + netif_rx_reschedule(dev, napi)) {
568 + printk(KERN_DEBUG "%s: eth_poll"
569 +				" netif_rx_reschedule succeeded\n",
572 + qmgr_disable_irq(rxq);
576 + printk(KERN_DEBUG "%s: eth_poll all done\n",
579 + return 0; /* all work done */
582 + desc = rx_desc_ptr(port, n);
585 + if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
586 + phys = dma_map_single(&dev->dev, skb->data,
587 + RX_BUFF_SIZE, DMA_FROM_DEVICE);
588 + if (dma_mapping_error(phys)) {
589 + dev_kfree_skb(skb);
594 + skb = netdev_alloc_skb(dev,
595 + ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
599 + port->stat.rx_dropped++;
600 + /* put the desc back on RX-ready queue */
601 + desc->buf_len = MAX_MRU;
603 + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
607 + /* process received frame */
610 + skb = port->rx_buff_tab[n];
611 + dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
612 + RX_BUFF_SIZE, DMA_FROM_DEVICE);
614 + dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
615 + RX_BUFF_SIZE, DMA_FROM_DEVICE);
616 + memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
617 + ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
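+		/* The copy starts at the buffer head, so NET_IP_ALIGN pad
+		 * bytes precede the frame; skb_reserve() below skips them. */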
619 + skb_reserve(skb, NET_IP_ALIGN);
620 + skb_put(skb, desc->pkt_len);
622 + debug_pkt(dev, "eth_poll", skb->data, skb->len);
624 + skb->protocol = eth_type_trans(skb, dev);
625 + dev->last_rx = jiffies;
626 + port->stat.rx_packets++;
627 + port->stat.rx_bytes += skb->len;
628 + netif_receive_skb(skb);
630 + /* put the new buffer on RX-free queue */
632 + port->rx_buff_tab[n] = temp;
633 + desc->data = phys + NET_IP_ALIGN;
635 + desc->buf_len = MAX_MRU;
637 + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
642 + printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
644 + return received; /* not all work done */
648 +static void eth_txdone_irq(void *unused)
653 + printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
655 + while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
656 + u32 npe_id, n_desc;
662 + BUG_ON(npe_id >= MAX_NPES);
663 + port = npe_port_tab[npe_id];
665 + phys &= ~0x1F; /* mask out non-address bits */
666 + n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
667 + BUG_ON(n_desc >= TX_DESCS);
668 + desc = tx_desc_ptr(port, n_desc);
669 + debug_desc(phys, desc);
671 + if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
672 + port->stat.tx_packets++;
673 + port->stat.tx_bytes += desc->pkt_len;
675 + dma_unmap_tx(port, desc);
677 + printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
678 + port->netdev->name, port->tx_buff_tab[n_desc]);
680 + free_buffer_irq(port->tx_buff_tab[n_desc]);
681 + port->tx_buff_tab[n_desc] = NULL;
684 + start = qmgr_stat_empty(port->plat->txreadyq);
685 + queue_put_desc(port->plat->txreadyq, phys, desc);
688 + printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
689 + port->netdev->name);
691 + netif_wake_queue(port->netdev);
696 +static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
698 + struct port *port = netdev_priv(dev);
699 + unsigned int txreadyq = port->plat->txreadyq;
700 + int len, offset, bytes, n;
706 + printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
709 + if (unlikely(skb->len > MAX_MRU)) {
710 + dev_kfree_skb(skb);
711 + port->stat.tx_errors++;
712 + return NETDEV_TX_OK;
715 + debug_pkt(dev, "eth_xmit", skb->data, skb->len);
719 + offset = 0; /* no need to keep alignment */
723 + offset = (int)skb->data & 3; /* keep 32-bit alignment */
724 + bytes = ALIGN(offset + len, 4);
725 + if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
726 + dev_kfree_skb(skb);
727 + port->stat.tx_dropped++;
728 + return NETDEV_TX_OK;
730 + memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
731 + dev_kfree_skb(skb);
734 + phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
735 + if (dma_mapping_error(phys)) {
737 + dev_kfree_skb(skb);
741 + port->stat.tx_dropped++;
742 + return NETDEV_TX_OK;
745 + n = queue_get_desc(txreadyq, port, 1);
747 + desc = tx_desc_ptr(port, n);
750 + port->tx_buff_tab[n] = skb;
752 + port->tx_buff_tab[n] = mem;
754 + desc->data = phys + offset;
755 + desc->buf_len = desc->pkt_len = len;
757 + /* NPE firmware pads short frames with zeros internally */
759 + queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
760 + dev->trans_start = jiffies;
762 + if (qmgr_stat_empty(txreadyq)) {
764 + printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
766 + netif_stop_queue(dev);
767 + /* we could miss TX ready interrupt */
768 + if (!qmgr_stat_empty(txreadyq)) {
770 + printk(KERN_DEBUG "%s: eth_xmit ready again\n",
773 + netif_wake_queue(dev);
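+			/* Re-checking closes the race: a TX-done interrupt
+			 * between the first test and netif_stop_queue() would
+			 * otherwise leave the queue stopped forever. */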
778 + printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
780 + return NETDEV_TX_OK;
784 +static struct net_device_stats *eth_stats(struct net_device *dev)
786 + struct port *port = netdev_priv(dev);
787 + return &port->stat;
790 +static void eth_set_mcast_list(struct net_device *dev)
792 + struct port *port = netdev_priv(dev);
793 + struct dev_mc_list *mclist = dev->mc_list;
794 + u8 diffs[ETH_ALEN], *addr;
795 + int cnt = dev->mc_count, i;
797 + if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
798 + __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
799 + &port->regs->rx_control[0]);
803 + memset(diffs, 0, ETH_ALEN);
804 + addr = mclist->dmi_addr; /* first MAC address */
806 + while (--cnt && (mclist = mclist->next))
807 + for (i = 0; i < ETH_ALEN; i++)
808 + diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
810 + for (i = 0; i < ETH_ALEN; i++) {
811 + __raw_writel(addr[i], &port->regs->mcast_addr[i]);
812 + __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
815 + __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
816 + &port->regs->rx_control[0]);
820 +static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
822 + struct port *port = netdev_priv(dev);
823 + unsigned int duplex_chg;
826 + if (!netif_running(dev))
828 + err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
830 + eth_set_duplex(port);
835 +static int request_queues(struct port *port)
839 + err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
843 + err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
847 + err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
851 + err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
855 + /* TX-done queue handles skbs sent out by the NPEs */
857 + err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
864 + qmgr_release_queue(port->plat->txreadyq);
866 + qmgr_release_queue(TX_QUEUE(port->id));
868 + qmgr_release_queue(port->plat->rxq);
870 + qmgr_release_queue(RXFREE_QUEUE(port->id));
871 + printk(KERN_DEBUG "%s: unable to request hardware queues\n",
872 + port->netdev->name);
876 +static void release_queues(struct port *port)
878 + qmgr_release_queue(RXFREE_QUEUE(port->id));
879 + qmgr_release_queue(port->plat->rxq);
880 + qmgr_release_queue(TX_QUEUE(port->id));
881 + qmgr_release_queue(port->plat->txreadyq);
884 + qmgr_release_queue(TXDONE_QUEUE);
887 +static int init_queues(struct port *port)
892 + if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
893 + POOL_ALLOC_SIZE, 32, 0)))
896 + if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
897 + &port->desc_tab_phys)))
899 + memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
900 + memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
901 + memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
903 + /* Setup RX buffers */
904 + for (i = 0; i < RX_DESCS; i++) {
905 + struct desc *desc = rx_desc_ptr(port, i);
906 + buffer_t *buff; /* skb or kmalloc()ated memory */
909 + if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
913 + if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
917 + desc->buf_len = MAX_MRU;
918 + desc->data = dma_map_single(&port->netdev->dev, data,
919 + RX_BUFF_SIZE, DMA_FROM_DEVICE);
920 + if (dma_mapping_error(desc->data)) {
924 + desc->data += NET_IP_ALIGN;
925 + port->rx_buff_tab[i] = buff;
931 +static void destroy_queues(struct port *port)
935 + if (port->desc_tab) {
936 + for (i = 0; i < RX_DESCS; i++) {
937 + struct desc *desc = rx_desc_ptr(port, i);
938 + buffer_t *buff = port->rx_buff_tab[i];
940 + dma_unmap_single(&port->netdev->dev,
941 + desc->data - NET_IP_ALIGN,
942 + RX_BUFF_SIZE, DMA_FROM_DEVICE);
946 + for (i = 0; i < TX_DESCS; i++) {
947 + struct desc *desc = tx_desc_ptr(port, i);
948 + buffer_t *buff = port->tx_buff_tab[i];
950 + dma_unmap_tx(port, desc);
954 + dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
955 + port->desc_tab = NULL;
958 + if (!ports_open && dma_pool) {
959 + dma_pool_destroy(dma_pool);
964 +static int eth_open(struct net_device *dev)
966 + struct port *port = netdev_priv(dev);
967 + struct npe *npe = port->npe;
971 + if (!npe_running(npe)) {
972 + err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
976 + if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
977 + printk(KERN_ERR "%s: %s not responding\n", dev->name,
983 + mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
985 + memset(&msg, 0, sizeof(msg));
986 + msg.cmd = NPE_VLAN_SETRXQOSENTRY;
987 + msg.eth_id = port->id;
988 + msg.byte5 = port->plat->rxq | 0x80;
989 + msg.byte7 = port->plat->rxq << 4;
990 + for (i = 0; i < 8; i++) {
992 + if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
996 + msg.cmd = NPE_EDB_SETPORTADDRESS;
997 + msg.eth_id = PHYSICAL_ID(port->id);
998 + msg.byte2 = dev->dev_addr[0];
999 + msg.byte3 = dev->dev_addr[1];
1000 + msg.byte4 = dev->dev_addr[2];
1001 + msg.byte5 = dev->dev_addr[3];
1002 + msg.byte6 = dev->dev_addr[4];
1003 + msg.byte7 = dev->dev_addr[5];
1004 + if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
1007 + memset(&msg, 0, sizeof(msg));
1008 + msg.cmd = NPE_FW_SETFIREWALLMODE;
1009 + msg.eth_id = port->id;
1010 + if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
1013 + if ((err = request_queues(port)) != 0)
1016 + if ((err = init_queues(port)) != 0) {
1017 + destroy_queues(port);
1018 + release_queues(port);
1022 + for (i = 0; i < ETH_ALEN; i++)
1023 + __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
1024 + __raw_writel(0x08, &port->regs->random_seed);
1025 + __raw_writel(0x12, &port->regs->partial_empty_threshold);
1026 + __raw_writel(0x30, &port->regs->partial_full_threshold);
1027 + __raw_writel(0x08, &port->regs->tx_start_bytes);
1028 + __raw_writel(0x15, &port->regs->tx_deferral);
1029 + __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
1030 + __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
1031 + __raw_writel(0x80, &port->regs->slot_time);
1032 + __raw_writel(0x01, &port->regs->int_clock_threshold);
1034 + /* Populate queues with buffers, no failure after this point */
1035 + for (i = 0; i < TX_DESCS; i++)
1036 + queue_put_desc(port->plat->txreadyq,
1037 + tx_desc_phys(port, i), tx_desc_ptr(port, i));
1039 + for (i = 0; i < RX_DESCS; i++)
1040 + queue_put_desc(RXFREE_QUEUE(port->id),
1041 + rx_desc_phys(port, i), rx_desc_ptr(port, i));
1043 + __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
1044 + __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
1045 + __raw_writel(0, &port->regs->rx_control[1]);
1046 + __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
1048 + napi_enable(&port->napi);
1049 + phy_check_media(port, 1);
1050 + eth_set_mcast_list(dev);
1051 + netif_start_queue(dev);
1052 + schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
1054 + qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
1056 + if (!ports_open) {
1057 + qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
1058 + eth_txdone_irq, NULL);
1059 + qmgr_enable_irq(TXDONE_QUEUE);
1062 + netif_rx_schedule(dev, &port->napi); /* we may already have RX data, enables IRQ */
1066 +static int eth_close(struct net_device *dev)
1068 + struct port *port = netdev_priv(dev);
1070 + int buffs = RX_DESCS; /* allocated RX buffers */
1074 + qmgr_disable_irq(port->plat->rxq);
1075 + napi_disable(&port->napi);
1076 + netif_stop_queue(dev);
1078 + while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
1081 + memset(&msg, 0, sizeof(msg));
1082 + msg.cmd = NPE_SETLOOPBACK_MODE;
1083 + msg.eth_id = port->id;
1085 + if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
1086 + printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
1089 + do { /* drain RX buffers */
1090 + while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
1094 + if (qmgr_stat_empty(TX_QUEUE(port->id))) {
1095 + /* we have to inject some packet */
1096 + struct desc *desc;
1098 + int n = queue_get_desc(port->plat->txreadyq, port, 1);
1100 + desc = tx_desc_ptr(port, n);
1101 + phys = tx_desc_phys(port, n);
1102 + desc->buf_len = desc->pkt_len = 1;
1104 + queue_put_desc(TX_QUEUE(port->id), phys, desc);
1107 + } while (++i < MAX_CLOSE_WAIT);
1110 + printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
1111 + " left in NPE\n", dev->name, buffs);
1114 + printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
1118 + while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
1119 + buffs--; /* cancel TX */
1123 + while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
1127 + } while (++i < MAX_CLOSE_WAIT);
1130 + printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
1131 + "left in NPE\n", dev->name, buffs);
1134 + printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
1138 + if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
1139 + printk(KERN_CRIT "%s: unable to disable loopback\n",
1142 + port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
1143 + ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
1144 + mdio_write(dev, port->plat->phy, MII_BMCR,
1145 + port->mii_bmcr | BMCR_PDOWN);
1148 + qmgr_disable_irq(TXDONE_QUEUE);
1149 + cancel_rearming_delayed_work(&port->mdio_thread);
1150 + destroy_queues(port);
1151 + release_queues(port);
1155 +static int __devinit eth_init_one(struct platform_device *pdev)
1157 + struct port *port;
1158 + struct net_device *dev;
1159 + struct eth_plat_info *plat = pdev->dev.platform_data;
1163 + if (!(dev = alloc_etherdev(sizeof(struct port))))
1166 + SET_NETDEV_DEV(dev, &pdev->dev);
1167 + port = netdev_priv(dev);
1168 + port->netdev = dev;
1169 + port->id = pdev->id;
1171 + switch (port->id) {
1172 + case IXP4XX_ETH_NPEA:
1173 + port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
1174 + regs_phys = IXP4XX_EthA_BASE_PHYS;
1176 + case IXP4XX_ETH_NPEB:
1177 + port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
1178 + regs_phys = IXP4XX_EthB_BASE_PHYS;
1180 + case IXP4XX_ETH_NPEC:
1181 + port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
1182 + regs_phys = IXP4XX_EthC_BASE_PHYS;
1189 + dev->open = eth_open;
1190 + dev->hard_start_xmit = eth_xmit;
1191 + dev->stop = eth_close;
1192 + dev->get_stats = eth_stats;
1193 + dev->do_ioctl = eth_ioctl;
1194 + dev->set_multicast_list = eth_set_mcast_list;
1195 + dev->tx_queue_len = 100;
1197 + netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
1199 + if (!(port->npe = npe_request(NPE_ID(port->id)))) {
1204 + if (register_netdev(dev)) {
1209 + port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
1210 + if (!port->mem_res) {
1215 + port->plat = plat;
1216 + npe_port_tab[NPE_ID(port->id)] = port;
1217 + memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
1219 + platform_set_drvdata(pdev, dev);
1221 + __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
1222 + &port->regs->core_control);
1224 + __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
1227 + port->mii.dev = dev;
1228 + port->mii.mdio_read = mdio_read;
1229 + port->mii.mdio_write = mdio_write;
1230 + port->mii.phy_id = plat->phy;
1231 + port->mii.phy_id_mask = 0x1F;
1232 + port->mii.reg_num_mask = 0x1F;
1234 + printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
1235 + npe_name(port->npe));
1237 + phy_reset(dev, plat->phy);
1238 + port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
1239 + ~(BMCR_RESET | BMCR_PDOWN);
1240 + mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
1242 + INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
1246 + unregister_netdev(dev);
1248 + npe_release(port->npe);
1254 +static int __devexit eth_remove_one(struct platform_device *pdev)
1256 + struct net_device *dev = platform_get_drvdata(pdev);
1257 + struct port *port = netdev_priv(dev);
1259 + unregister_netdev(dev);
1260 + npe_port_tab[NPE_ID(port->id)] = NULL;
1261 + platform_set_drvdata(pdev, NULL);
1262 + npe_release(port->npe);
1263 + release_resource(port->mem_res);
1268 +static struct platform_driver drv = {
1269 + .driver.name = DRV_NAME,
1270 + .probe = eth_init_one,
1271 + .remove = eth_remove_one,
1274 +static int __init eth_init_module(void)
1276 + if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
1279 + /* All MII PHY accesses use NPE-B Ethernet registers */
1280 + spin_lock_init(&mdio_lock);
1281 + mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
1282 + __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
1284 + return platform_driver_register(&drv);
1287 +static void __exit eth_cleanup_module(void)
1289 + platform_driver_unregister(&drv);
1292 +MODULE_AUTHOR("Krzysztof Halasa");
1293 +MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
1294 +MODULE_LICENSE("GPL v2");
1295 +MODULE_ALIAS("platform:ixp4xx_eth");
1296 +module_init(eth_init_module);
1297 +module_exit(eth_cleanup_module);
1298 diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
1299 index a3df09e..94e7aa7 100644
1300 --- a/drivers/net/wan/Kconfig
1301 +++ b/drivers/net/wan/Kconfig
1302 @@ -334,6 +334,15 @@ config DSCC4_PCI_RST
1304 Say Y if your card supports this feature.
1307 + tristate "IXP4xx HSS (synchronous serial port) support"
1308 + depends on HDLC && ARM && ARCH_IXP4XX
1310 + select IXP4XX_QMGR
1312 + Say Y here if you want to use built-in HSS ports
1313 +	  on IXP4xx processors.
1316 tristate "Frame Relay DLCI support"
1318 diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
1319 index d61fef3..1b1d116 100644
1320 --- a/drivers/net/wan/Makefile
1321 +++ b/drivers/net/wan/Makefile
1322 @@ -42,6 +42,7 @@ obj-$(CONFIG_C101) += c101.o
1323 obj-$(CONFIG_WANXL) += wanxl.o
1324 obj-$(CONFIG_PCI200SYN) += pci200syn.o
1325 obj-$(CONFIG_PC300TOO) += pc300too.o
1326 +obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
1328 clean-files := wanxlfw.inc
1329 $(obj)/wanxl.o: $(obj)/wanxlfw.inc
1330 diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
1331 new file mode 100644
1332 index 0000000..cf971b3
1334 +++ b/drivers/net/wan/ixp4xx_hss.c
1337 + * Intel IXP4xx HSS (synchronous serial port) driver for Linux
1339 + * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
1341 + * This program is free software; you can redistribute it and/or modify it
1342 + * under the terms of version 2 of the GNU General Public License
1343 + * as published by the Free Software Foundation.
1346 +#include <linux/bitops.h>
1347 +#include <linux/cdev.h>
1348 +#include <linux/dma-mapping.h>
1349 +#include <linux/dmapool.h>
1350 +#include <linux/fs.h>
1351 +#include <linux/io.h>
1352 +#include <linux/kernel.h>
1353 +#include <linux/hdlc.h>
1354 +#include <linux/platform_device.h>
1355 +#include <linux/poll.h>
1356 +#include <asm/arch/npe.h>
1357 +#include <asm/arch/qmgr.h>
1359 +#define DEBUG_QUEUES 0
1360 +#define DEBUG_DESC 0
1363 +#define DEBUG_PKT_BYTES 0
1364 +#define DEBUG_CLOSE 0
1365 +#define DEBUG_FRAMER 0
1367 +#define DRV_NAME "ixp4xx_hss"
1369 +#define PKT_EXTRA_FLAGS 0 /* orig 1 */
1370 +#define TX_FRAME_SYNC_OFFSET 0 /* channelized */
1371 +#define PKT_NUM_PIPES 1 /* 1, 2 or 4 */
1372 +#define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */
1374 +#define RX_DESCS 16 /* also length of all RX queues */
1375 +#define TX_DESCS 16 /* also length of all TX queues */
1377 +#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
1378 +#define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */
1379 +#define MAX_CLOSE_WAIT 1000 /* microseconds */
1380 +#define HSS_COUNT 2
1381 +#define MIN_FRAME_SIZE 16 /* bits */
1382 +#define MAX_FRAME_SIZE 257 /* 256 bits + framing bit */
1383 +#define MAX_CHANNELS (MAX_FRAME_SIZE / 8)
1384 +#define MAX_CHAN_DEVICES 32
1385 +#define CHANNEL_HDLC 0xFE
1386 +#define CHANNEL_UNUSED 0xFF
1388 +#define NAPI_WEIGHT 16
1389 +#define CHAN_RX_TRIGGER 16 /* 8 RX frames = 1 ms @ E1 */
1390 +#define CHAN_RX_FRAMES 64
1391 +#define MAX_CHAN_RX_BAD_SYNC (CHAN_RX_TRIGGER / 2 /* pairs */ - 3)
1392 +#define CHAN_TX_LIST_FRAMES 16 /* bytes/channel per list, 16 - 48 */
1393 +#define CHAN_TX_LISTS 8
1394 +#define CHAN_TX_FRAMES (CHAN_TX_LIST_FRAMES * CHAN_TX_LISTS)
1395 +#define CHAN_QUEUE_LEN 16 /* minimum possible */
1399 +#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */
1400 +#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
1401 +#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
1402 +#define HSS0_PKT_TX1_QUEUE 15
1403 +#define HSS0_PKT_TX2_QUEUE 16
1404 +#define HSS0_PKT_TX3_QUEUE 17
1405 +#define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */
1406 +#define HSS0_PKT_RXFREE1_QUEUE 19
1407 +#define HSS0_PKT_RXFREE2_QUEUE 20
1408 +#define HSS0_PKT_RXFREE3_QUEUE 21
1409 +#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */
1411 +#define HSS1_CHL_RXTRIG_QUEUE 10
1412 +#define HSS1_PKT_RX_QUEUE 0
1413 +#define HSS1_PKT_TX0_QUEUE 5
1414 +#define HSS1_PKT_TX1_QUEUE 6
1415 +#define HSS1_PKT_TX2_QUEUE 7
1416 +#define HSS1_PKT_TX3_QUEUE 8
1417 +#define HSS1_PKT_RXFREE0_QUEUE 1
1418 +#define HSS1_PKT_RXFREE1_QUEUE 2
1419 +#define HSS1_PKT_RXFREE2_QUEUE 3
1420 +#define HSS1_PKT_RXFREE3_QUEUE 4
1421 +#define HSS1_PKT_TXDONE_QUEUE 9
1423 +#define NPE_PKT_MODE_HDLC 0
1424 +#define NPE_PKT_MODE_RAW 1
1425 +#define NPE_PKT_MODE_56KMODE 2
1426 +#define NPE_PKT_MODE_56KENDIAN_MSB 4
1428 +/* PKT_PIPE_HDLC_CFG_WRITE flags */
1429 +#define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */
1430 +#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
1431 +#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
1434 +/* hss_config, PCRs */
1435 +/* Frame sync sampling, default = active low */
1436 +#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
1437 +#define PCR_FRM_SYNC_FALLINGEDGE 0x80000000
1438 +#define PCR_FRM_SYNC_RISINGEDGE 0xC0000000
1440 +/* Frame sync pin: input (default) or output generated off a given clk edge */
1441 +#define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000
1442 +#define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000
1444 +/* Frame and data clock sampling on edge, default = falling */
1445 +#define PCR_FCLK_EDGE_RISING 0x08000000
1446 +#define PCR_DCLK_EDGE_RISING 0x04000000
1448 +/* Clock direction, default = input */
1449 +#define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000
1451 +/* Generate/Receive frame pulses, default = enabled */
1452 +#define PCR_FRM_PULSE_DISABLED 0x01000000
1454 +/* Data rate is full (default) or half the configured clk speed */
1455 +#define PCR_HALF_CLK_RATE 0x00200000
1457 +/* Invert data between NPE and HSS FIFOs? (default = no) */
1458 +#define PCR_DATA_POLARITY_INVERT 0x00100000
1460 +/* TX/RX endianness, default = LSB */
1461 +#define PCR_MSB_ENDIAN 0x00080000
1463 +/* Normal (default) / open drain mode (TX only) */
1464 +#define PCR_TX_PINS_OPEN_DRAIN 0x00040000
1466 +/* No framing bit transmitted and expected on RX? (default = framing bit) */
1467 +#define PCR_SOF_NO_FBIT 0x00020000
1469 +/* Drive data pins? */
1470 +#define PCR_TX_DATA_ENABLE 0x00010000
1472 +/* Voice 56k type: drive the data pins low (default), high, high Z */
1473 +#define PCR_TX_V56K_HIGH 0x00002000
1474 +#define PCR_TX_V56K_HIGH_IMP 0x00004000
1476 +/* Unassigned type: drive the data pins low (default), high, high Z */
1477 +#define PCR_TX_UNASS_HIGH 0x00000800
1478 +#define PCR_TX_UNASS_HIGH_IMP 0x00001000
1480 +/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
1481 +#define PCR_TX_FB_HIGH_IMP 0x00000400
1483 +/* 56k data endianness - which bit unused: high (default) or low */
1484 +#define PCR_TX_56KE_BIT_0_UNUSED 0x00000200
1486 +/* 56k data transmission type: 32/8 bit data (default) or 56K data */
1487 +#define PCR_TX_56KS_56K_DATA 0x00000100
1489 +/* hss_config, cCR */
1490 +/* Number of packetized clients, default = 1 */
1491 +#define CCR_NPE_HFIFO_2_HDLC 0x04000000
1492 +#define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000
1494 +/* default = no loopback */
1495 +#define CCR_LOOPBACK 0x02000000
1497 +/* HSS number, default = 0 (first) */
1498 +#define CCR_SECOND_HSS 0x01000000
1501 +/* hss_config, clkCR: main:10, num:10, denom:12 */
1502 +#define CLK42X_SPEED_EXP	((0x3FF << 22) | ( 2 << 12) | 15) /* 65 kHz */
1504 +#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
1505 +#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
1506 +#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
1507 +#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
1508 +#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
1509 +#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)
1511 +#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
1512 +#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
1513 +#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
1514 +#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
1515 +#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
1516 +#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
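+/* Example decode per the clkCR layout above: CLK42X_SPEED_2048KHZ packs
+ * main = 32, num = 34, denom = 63 as (main << 22) | (num << 12) | denom. */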
1519 +/* hss_config, LUT entries */
1520 +#define TDMMAP_UNASSIGNED 0
1521 +#define TDMMAP_HDLC 1 /* HDLC - packetized */
1522 +#define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */
1523 +#define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */
1525 +/* offsets into HSS config */
1526 +#define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */
1527 +#define HSS_CONFIG_RX_PCR 0x04
1528 +#define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */
1529 +#define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */
1530 +#define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */
1531 +#define HSS_CONFIG_RX_FCR 0x14
1532 +#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
1533 +#define HSS_CONFIG_RX_LUT 0x38
1536 +/* NPE command codes */
1537 +/* writes the ConfigWord value to the location specified by offset */
1538 +#define PORT_CONFIG_WRITE 0x40
1540 +/* triggers the NPE to load the contents of the configuration table */
1541 +#define PORT_CONFIG_LOAD 0x41
1543 +/* triggers the NPE to return an HssErrorReadResponse message */
1544 +#define PORT_ERROR_READ 0x42
1546 +/* reset NPE internal status and enable the HssChannelized operation */
1547 +#define CHAN_FLOW_ENABLE 0x43
1548 +#define CHAN_FLOW_DISABLE 0x44
1549 +#define CHAN_IDLE_PATTERN_WRITE 0x45
1550 +#define CHAN_NUM_CHANS_WRITE 0x46
1551 +#define CHAN_RX_BUF_ADDR_WRITE 0x47
1552 +#define CHAN_RX_BUF_CFG_WRITE 0x48
1553 +#define CHAN_TX_BLK_CFG_WRITE 0x49
1554 +#define CHAN_TX_BUF_ADDR_WRITE 0x4A
1555 +#define CHAN_TX_BUF_SIZE_WRITE 0x4B
1556 +#define CHAN_TSLOTSWITCH_ENABLE 0x4C
1557 +#define CHAN_TSLOTSWITCH_DISABLE 0x4D
1559 +/* downloads the gainWord value for a timeslot switching channel associated
1561 +#define CHAN_TSLOTSWITCH_GCT_DOWNLOAD 0x4E
1563 +/* triggers the NPE to reset internal status and enable the HssPacketized
1564 + operation for the flow specified by pPipe */
1565 +#define PKT_PIPE_FLOW_ENABLE 0x50
1566 +#define PKT_PIPE_FLOW_DISABLE 0x51
1567 +#define PKT_NUM_PIPES_WRITE 0x52
1568 +#define PKT_PIPE_FIFO_SIZEW_WRITE 0x53
1569 +#define PKT_PIPE_HDLC_CFG_WRITE 0x54
1570 +#define PKT_PIPE_IDLE_PATTERN_WRITE 0x55
1571 +#define PKT_PIPE_RX_SIZE_WRITE 0x56
1572 +#define PKT_PIPE_MODE_WRITE 0x57
1574 +/* HDLC packet status values - desc->status */
1575 +#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
1576 +#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
1577 +#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sequence error */
1578 +#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
1579 + this packet (if buf_len < pkt_len) */
1580 +#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
1581 +#define ERR_HDLC_ABORT 6 /* abort sequence received */
1582 +#define ERR_DISCONNECTING 7 /* disconnect is in progress */
1585 +enum mode {MODE_HDLC = 0, MODE_RAW, MODE_G704};
1586 +enum error_bit {TX_ERROR_BIT = 0, RX_ERROR_BIT = 1};
1587 +enum alignment { NOT_ALIGNED = 0, EVEN_FIRST, ODD_FIRST };
1590 +typedef struct sk_buff buffer_t;
1591 +#define free_buffer dev_kfree_skb
1592 +#define free_buffer_irq dev_kfree_skb_irq
1594 +typedef void buffer_t;
1595 +#define free_buffer kfree
1596 +#define free_buffer_irq kfree
1599 +struct chan_device {
1601 + struct device *dev;
1602 + struct port *port;
1603 + unsigned int open_count, excl_open;
1604 + unsigned int tx_first, tx_count, rx_first, rx_count; /* bytes */
1605 + unsigned long errors_bitmap;
1606 + u8 id, chan_count;
1607 + u8 log_channels[MAX_CHANNELS];
1611 + struct device *dev;
1613 + struct net_device *netdev;
1614 + struct napi_struct napi;
1615 + struct hss_plat_info *plat;
1616 + buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
1617 + struct desc *desc_tab; /* coherent */
1618 + u32 desc_tab_phys;
1620 + atomic_t chan_tx_irq_number, chan_rx_irq_number;
1621 + wait_queue_head_t chan_tx_waitq, chan_rx_waitq;
1624 + /* the following fields must be protected by npe_lock */
1626 + unsigned int clock_type, clock_rate, loopback;
1627 + unsigned int frame_size, frame_sync_offset;
1629 + struct chan_device *chan_devices[MAX_CHAN_DEVICES];
1631 + u32 chan_tx_buf_phys, chan_rx_buf_phys;
1632 + unsigned int chan_open_count, hdlc_open;
1633 + unsigned int chan_started, initialized, just_set_offset;
1634 + enum alignment aligned, carrier;
1635 + unsigned int chan_last_rx, chan_last_tx;
1636 + /* assigned channels, may be invalid with given frame length or mode */
1637 + u8 channels[MAX_CHANNELS];
1641 +/* NPE message structure */
1644 + u8 cmd, unused, hss_port, index;
1646 + struct { u8 data8a, data8b, data8c, data8d; };
1647 + struct { u16 data16a, data16b; };
1648 + struct { u32 data32; };
1651 + u8 index, hss_port, unused, cmd;
1653 + struct { u8 data8d, data8c, data8b, data8a; };
1654 + struct { u16 data16b, data16a; };
1655 + struct { u32 data32; };
1660 +/* HDLC packet descriptor */
1662 + u32 next; /* pointer to next buffer, unused */
1665 + u16 buf_len; /* buffer length */
1666 + u16 pkt_len; /* packet length */
1667 + u32 data; /* pointer to data buffer in RAM */
1672 + u16 pkt_len; /* packet length */
1673 + u16 buf_len; /* buffer length */
1674 + u32 data; /* pointer to data buffer in RAM */
1679 + u32 __reserved1[4];
1683 +#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
1684 + (n) * sizeof(struct desc))
1685 +#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
1687 +#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
1688 + ((n) + RX_DESCS) * sizeof(struct desc))
1689 +#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
1691 +#define chan_tx_buf_len(port) (port->frame_size / 8 * CHAN_TX_FRAMES)
1692 +#define chan_tx_lists_len(port) (port->frame_size / 8 * CHAN_TX_LISTS * \
1694 +#define chan_rx_buf_len(port) (port->frame_size / 8 * CHAN_RX_FRAMES)
1696 +#define chan_tx_buf(port) ((port)->chan_buf)
1697 +#define chan_tx_lists(port) (chan_tx_buf(port) + chan_tx_buf_len(port))
1698 +#define chan_rx_buf(port) (chan_tx_lists(port) + chan_tx_lists_len(port))
1700 +#define chan_tx_lists_phys(port) ((port)->chan_tx_buf_phys + \
1701 + chan_tx_buf_len(port))
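+/* The chan_buf allocation is one block laid out as [ TX frame data |
+ * TX descriptor lists | RX frame data ], sized at frame_size / 8 bytes
+ * per frame. */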
1703 +static int hss_prepare_chan(struct port *port);
1704 +void hss_chan_stop(struct port *port);
1706 +/*****************************************************************************
1707 + * global variables
1708 + ****************************************************************************/
1710 +static struct class *hss_class;
1711 +static int chan_major;
1712 +static int ports_open;
1713 +static struct dma_pool *dma_pool;
1714 +static spinlock_t npe_lock;
1716 +static const struct {
1717 + int tx, txdone, rx, rxfree, chan;
1718 +} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
1719 + HSS0_PKT_RXFREE0_QUEUE, HSS0_CHL_RXTRIG_QUEUE},
1720 + {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
1721 + HSS1_PKT_RXFREE0_QUEUE, HSS1_CHL_RXTRIG_QUEUE},
1724 +/*****************************************************************************
1725 + * utility functions
1726 + ****************************************************************************/
1728 +static inline struct port *dev_to_port(struct net_device *dev)
1730 + return dev_to_hdlc(dev)->priv;
1733 +static inline struct chan_device *inode_to_chan_dev(struct inode *inode)
1735 + return container_of(inode->i_cdev, struct chan_device, cdev);
1739 +static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
1742 + for (i = 0; i < cnt; i++)
1743 + dest[i] = swab32(src[i]);
1747 +static int get_number(const char **buf, size_t *len, unsigned int *ptr,
1748 + unsigned int min, unsigned int max)
1751 + unsigned long val = simple_strtoul(*buf, &endp, 10);
1753 + if (endp == *buf || endp - *buf > *len || val < min || val > max)
1755 + *len -= endp - *buf;
1761 +static int parse_channels(const char **buf, size_t *len, u8 *channels)
1763 + unsigned int ch, next = 0;
1765 + if (*len && (*buf)[*len - 1] == '\n')
1768 + memset(channels, 0, MAX_CHANNELS);
1773 +	/* Format: "A,B-C,...", A < B < C */
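+	/* e.g. "0,2-4" (optionally newline-terminated) selects channels
+	 * 0, 2, 3 and 4; values must appear in increasing order. */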
1775 + if (get_number(buf, len, &ch, next, MAX_CHANNELS - 1))
1781 + if (**buf == ',') {
1790 + if (get_number(buf, len, &ch, next, MAX_CHANNELS - 1))
1792 + while (next <= ch)
1793 + channels[next++] = 1;
1804 +static size_t print_channels(struct port *port, char *buf, u8 id)
1806 + unsigned int ch, cnt = 0;
1809 + for (ch = 0; ch < MAX_CHANNELS; ch++)
1810 + if (port->channels[ch] == id) {
1812 + sprintf(buf + len, "%s%u", len ? "," : "", ch);
1813 + len += strlen(buf + len);
1818 + sprintf(buf + len, "-%u", ch - 1);
1819 + len += strlen(buf + len);
1824 + sprintf(buf + len, "-%u", ch - 1);
1825 + len += strlen(buf + len);
1828 + buf[len++] = '\n';
1832 +static inline unsigned int sub_offset(unsigned int a, unsigned int b,
1833 + unsigned int modulo)
1835 + return (modulo /* make sure the result >= 0 */ + a - b) % modulo;
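+/* e.g. sub_offset(2, 5, 8) == 5: the forward distance from b to a on a
+ * ring of size modulo. */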
1838 +/*****************************************************************************
1840 + ****************************************************************************/
1842 +static void hss_config_load(struct port *port)
1847 + memset(&msg, 0, sizeof(msg));
1848 + msg.cmd = PORT_CONFIG_LOAD;
1849 + msg.hss_port = port->id;
1850 + if (npe_send_message(port->npe, &msg, "HSS_LOAD_CONFIG"))
1852 + if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG"))
1855 + /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
1856 + if (msg.cmd != PORT_CONFIG_LOAD || msg.data32)
1859 + /* HDLC may stop working without this */
1860 + npe_recv_message(port->npe, &msg, "FLUSH_IT");
1864 + printk(KERN_CRIT "HSS-%i: unable to reload HSS configuration\n",
1869 +static void hss_config_set_pcr(struct port *port)
1874 + memset(&msg, 0, sizeof(msg));
1875 + msg.cmd = PORT_CONFIG_WRITE;
1876 + msg.hss_port = port->id;
1877 + msg.index = HSS_CONFIG_TX_PCR;
1878 + msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
1879 + PCR_TX_DATA_ENABLE;
1880 + if (port->frame_size % 8 == 0)
1881 + msg.data32 |= PCR_SOF_NO_FBIT;
1882 + if (port->clock_type == CLOCK_INT)
1883 + msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
1884 + if (npe_send_message(port->npe, &msg, "HSS_SET_TX_PCR"))
1887 + msg.index = HSS_CONFIG_RX_PCR;
1888 + msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
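+	/* The XOR derives the RX value from the TX one: it clears
+	 * PCR_TX_DATA_ENABLE (RX pins are not driven) and flips the
+	 * DCLK sampling edge for the receive direction. */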
1889 + if (npe_send_message(port->npe, &msg, "HSS_SET_RX_PCR"))
1894 + printk(KERN_CRIT "HSS-%i: unable to set HSS PCR registers\n", port->id);
1898 +static void hss_config_set_hdlc_cfg(struct port *port)
1902 + memset(&msg, 0, sizeof(msg));
1903 + msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
1904 + msg.hss_port = port->id;
1905 + msg.data8a = port->hdlc_cfg; /* rx_cfg */
1906 + msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
1907 + if (npe_send_message(port->npe, &msg, "HSS_SET_HDLC_CFG")) {
1908 + printk(KERN_CRIT "HSS-%i: unable to set HSS HDLC"
1909 + " configuration\n", port->id);
1914 +static void hss_config_set_core(struct port *port)
1918 + memset(&msg, 0, sizeof(msg));
1919 + msg.cmd = PORT_CONFIG_WRITE;
1920 + msg.hss_port = port->id;
1921 + msg.index = HSS_CONFIG_CORE_CR;
1922 + msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
1923 + (port->id ? CCR_SECOND_HSS : 0);
1924 + if (npe_send_message(port->npe, &msg, "HSS_SET_CORE_CR")) {
1925 + printk(KERN_CRIT "HSS-%i: unable to set HSS core control"
1926 + " register\n", port->id);
1931 +static void hss_config_set_line(struct port *port)
1935 + hss_config_set_pcr(port);
1936 + hss_config_set_core(port);
1938 + memset(&msg, 0, sizeof(msg));
1939 + msg.cmd = PORT_CONFIG_WRITE;
1940 + msg.hss_port = port->id;
1941 + msg.index = HSS_CONFIG_CLOCK_CR;
1942 + msg.data32 = CLK42X_SPEED_2048KHZ /* FIXME */;
1943 + if (npe_send_message(port->npe, &msg, "HSS_SET_CLOCK_CR")) {
1944 + printk(KERN_CRIT "HSS-%i: unable to set HSS clock control"
1945 + " register\n", port->id);
1950 +static void hss_config_set_rx_frame(struct port *port)
1954 + memset(&msg, 0, sizeof(msg));
1955 + msg.cmd = PORT_CONFIG_WRITE;
1956 + msg.hss_port = port->id;
1957 + msg.index = HSS_CONFIG_RX_FCR;
1958 + msg.data16a = port->frame_sync_offset;
1959 + msg.data16b = port->frame_size - 1;
1960 + if (npe_send_message(port->npe, &msg, "HSS_SET_RX_FCR")) {
1961 + printk(KERN_CRIT "HSS-%i: unable to set HSS RX frame size"
1962 + " and offset\n", port->id);
1967 +static void hss_config_set_frame(struct port *port)
1971 + memset(&msg, 0, sizeof(msg));
1972 + msg.cmd = PORT_CONFIG_WRITE;
1973 + msg.hss_port = port->id;
1974 + msg.index = HSS_CONFIG_TX_FCR;
1975 + msg.data16a = TX_FRAME_SYNC_OFFSET;
1976 + msg.data16b = port->frame_size - 1;
1977 + if (npe_send_message(port->npe, &msg, "HSS_SET_TX_FCR")) {
1978 + printk(KERN_CRIT "HSS-%i: unable to set HSS TX frame size"
1979 + " and offset\n", port->id);
1982 + hss_config_set_rx_frame(port);
1985 +static void hss_config_set_lut(struct port *port)
1988 + int chan_count = 0, log_chan = 0, i, ch;
1989 + u32 lut[MAX_CHANNELS / 4];
1991 + memset(lut, 0, sizeof(lut));
1992 + for (i = 0; i < MAX_CHAN_DEVICES; i++)
1993 + if (port->chan_devices[i])
1994 + port->chan_devices[i]->chan_count = 0;
1996 + memset(&msg, 0, sizeof(msg));
1997 + msg.cmd = PORT_CONFIG_WRITE;
1998 + msg.hss_port = port->id;
2000 + for (ch = 0; ch < MAX_CHANNELS; ch++) {
2001 + struct chan_device *chdev = NULL;
2002 + unsigned int entry;
2004 + if (port->channels[ch] < MAX_CHAN_DEVICES /* assigned */)
2005 + chdev = port->chan_devices[port->channels[ch]];
2007 + if (port->mode == MODE_G704 && ch == 0)
2008 + entry = TDMMAP_VOICE64K; /* PCM-31 pattern */
2009 + else if (port->mode == MODE_HDLC ||
2010 + port->channels[ch] == CHANNEL_HDLC)
2011 + entry = TDMMAP_HDLC;
2012 + else if (chdev && chdev->open_count) {
2013 + entry = TDMMAP_VOICE64K;
2014 + chdev->log_channels[chdev->chan_count++] = log_chan;
2016 + entry = TDMMAP_UNASSIGNED;
2017 + if (entry == TDMMAP_VOICE64K) {
2023 + msg.data32 |= entry << 30;
2025 + if (ch % 16 == 15) {
2026 + msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
2027 + if (npe_send_message(port->npe, &msg, "HSS_SET_TX_LUT"))
2030 + msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
2031 + if (npe_send_message(port->npe, &msg, "HSS_SET_RX_LUT"))
2035 + if (ch != MAX_CHANNELS) {
2036 + printk(KERN_CRIT "HSS-%i: unable to set HSS channel look-up"
2037 + " table\n", port->id);
2041 + hss_config_set_frame(port);
2046 + memset(&msg, 0, sizeof(msg));
2047 + msg.cmd = CHAN_NUM_CHANS_WRITE;
2048 + msg.hss_port = port->id;
2049 + msg.data8a = chan_count;
2050 + if (npe_send_message(port->npe, &msg, "CHAN_NUM_CHANS_WRITE")) {
2051 + printk(KERN_CRIT "HSS-%i: unable to set HSS channel count\n",
2056 + /* don't leak data */
2057 + // FIXME memset(chan_tx_buf(port), 0, CHAN_TX_FRAMES * chan_count);
2058 + if (port->mode == MODE_G704) /* G.704 PCM-31 sync pattern */
2059 + for (i = 0; i < CHAN_TX_FRAMES; i += 4)
2060 + *(u32*)(chan_tx_buf(port) + i) = 0x9BDF9BDF;
2062 + for (i = 0; i < CHAN_TX_LISTS; i++) {
2063 + u32 phys = port->chan_tx_buf_phys + i * CHAN_TX_LIST_FRAMES;
2064 + u32 *list = ((u32 *)chan_tx_lists(port)) + i * chan_count;
2065 + for (ch = 0; ch < chan_count; ch++)
2066 + list[ch] = phys + ch * CHAN_TX_FRAMES;
2068 + dma_sync_single(port->dev, port->chan_tx_buf_phys,
2069 + chan_tx_buf_len(port) + chan_tx_lists_len(port),
2073 +static u32 hss_config_get_status(struct port *port)
2078 + memset(&msg, 0, sizeof(msg));
2079 + msg.cmd = PORT_ERROR_READ;
2080 + msg.hss_port = port->id;
2081 + if (npe_send_message(port->npe, &msg, "PORT_ERROR_READ"))
2083 + if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ"))
2086 + return msg.data32;
2089 + printk(KERN_CRIT "HSS-%i: unable to read HSS status\n", port->id);
2093 +static void hss_config_start_chan(struct port *port)
2097 + port->chan_last_tx = 0;
2098 + port->chan_last_rx = 0;
2101 + memset(&msg, 0, sizeof(msg));
2102 + msg.cmd = CHAN_RX_BUF_ADDR_WRITE;
2103 + msg.hss_port = port->id;
2104 + msg.data32 = port->chan_rx_buf_phys;
2105 + if (npe_send_message(port->npe, &msg, "CHAN_RX_BUF_ADDR_WRITE"))
2108 + memset(&msg, 0, sizeof(msg));
2109 + msg.cmd = CHAN_TX_BUF_ADDR_WRITE;
2110 + msg.hss_port = port->id;
2111 + msg.data32 = chan_tx_lists_phys(port);
2112 + if (npe_send_message(port->npe, &msg, "CHAN_TX_BUF_ADDR_WRITE"))
2115 + memset(&msg, 0, sizeof(msg));
2116 + msg.cmd = CHAN_FLOW_ENABLE;
2117 + msg.hss_port = port->id;
2118 + if (npe_send_message(port->npe, &msg, "CHAN_FLOW_ENABLE"))
2120 + port->chan_started = 1;
2124 + printk(KERN_CRIT "HSS-%i: unable to start channelized flow\n",
2129 +static void hss_config_stop_chan(struct port *port)
2133 + if (!port->chan_started)
2136 + memset(&msg, 0, sizeof(msg));
2137 + msg.cmd = CHAN_FLOW_DISABLE;
2138 + msg.hss_port = port->id;
2139 + if (npe_send_message(port->npe, &msg, "CHAN_FLOW_DISABLE")) {
2140 + printk(KERN_CRIT "HSS-%i: unable to stop channelized flow\n",
2144 + hss_config_get_status(port); /* make sure it's halted */
2147 +static void hss_config_start_hdlc(struct port *port)
2151 + memset(&msg, 0, sizeof(msg));
2152 + msg.cmd = PKT_PIPE_FLOW_ENABLE;
2153 + msg.hss_port = port->id;
2155 + if (npe_send_message(port->npe, &msg, "HSS_ENABLE_PKT_PIPE")) {
2156 +		printk(KERN_CRIT "HSS-%i: unable to start packetized flow\n",
2162 +static void hss_config_stop_hdlc(struct port *port)
2166 + memset(&msg, 0, sizeof(msg));
2167 + msg.cmd = PKT_PIPE_FLOW_DISABLE;
2168 + msg.hss_port = port->id;
2169 + if (npe_send_message(port->npe, &msg, "HSS_DISABLE_PKT_PIPE")) {
2170 + printk(KERN_CRIT "HSS-%i: unable to stop packetized flow\n",
2174 + hss_config_get_status(port); /* make sure it's halted */
2177 +static int hss_config_load_firmware(struct port *port)
2181 + if (port->initialized)
2184 + if (!npe_running(port->npe)) {
2186 + if ((err = npe_load_firmware(port->npe, npe_name(port->npe),
2192 + /* HSS main configuration */
2193 + hss_config_set_line(port);
2195 + hss_config_set_frame(port);
2197 + /* HDLC mode configuration */
2198 + memset(&msg, 0, sizeof(msg));
2199 + msg.cmd = PKT_NUM_PIPES_WRITE;
2200 + msg.hss_port = port->id;
2201 + msg.data8a = PKT_NUM_PIPES;
2202 + if (npe_send_message(port->npe, &msg, "HSS_SET_PKT_PIPES"))
2205 + msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
2206 + msg.data8a = PKT_PIPE_FIFO_SIZEW;
2207 + if (npe_send_message(port->npe, &msg, "HSS_SET_PKT_FIFO"))
2210 + msg.cmd = PKT_PIPE_MODE_WRITE;
2211 + msg.data8a = NPE_PKT_MODE_HDLC;
2212 + /* msg.data8b = inv_mask */
2213 + /* msg.data8c = or_mask */
2214 + if (npe_send_message(port->npe, &msg, "HSS_SET_PKT_MODE"))
2217 + msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
2218 + msg.data16a = HDLC_MAX_MRU; /* including CRC */
2219 + if (npe_send_message(port->npe, &msg, "HSS_SET_PKT_RX_SIZE"))
2222 + msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
2223 + msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
2224 + if (npe_send_message(port->npe, &msg, "HSS_SET_PKT_IDLE"))
2227 + /* Channelized operation settings */
2228 + memset(&msg, 0, sizeof(msg));
2229 + msg.cmd = CHAN_TX_BLK_CFG_WRITE;
2230 + msg.hss_port = port->id;
2231 + msg.data8b = (CHAN_TX_LIST_FRAMES & ~7) / 2;
2232 + msg.data8a = msg.data8b / 4;
2233 + msg.data8d = CHAN_TX_LIST_FRAMES - msg.data8b;
2234 + msg.data8c = msg.data8d / 4;
2235 + if (npe_send_message(port->npe, &msg, "CHAN_TX_BLK_CFG_WRITE"))
2238 + memset(&msg, 0, sizeof(msg));
2239 + msg.cmd = CHAN_RX_BUF_CFG_WRITE;
2240 + msg.hss_port = port->id;
2241 + msg.data8a = CHAN_RX_TRIGGER / 8;
2242 + msg.data8b = CHAN_RX_FRAMES;
2243 + if (npe_send_message(port->npe, &msg, "CHAN_RX_BUF_CFG_WRITE"))
2246 + memset(&msg, 0, sizeof(msg));
2247 + msg.cmd = CHAN_TX_BUF_SIZE_WRITE;
2248 + msg.hss_port = port->id;
2249 + msg.data8a = CHAN_TX_LISTS;
2250 + if (npe_send_message(port->npe, &msg, "CHAN_TX_BUF_SIZE_WRITE"))
2253 + port->initialized = 1;
2257 + printk(KERN_CRIT "HSS-%i: unable to start HSS operation\n", port->id);
2261 +/*****************************************************************************
2262 + * packetized (HDLC) operation
2263 + ****************************************************************************/
2265 +static inline void debug_pkt(struct net_device *dev, const char *func,
2266 + u8 *data, int len)
2268 +#if DEBUG_PKT_BYTES
2271 + printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
2272 + for (i = 0; i < len; i++) {
2273 + if (i >= DEBUG_PKT_BYTES)
2275 +		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
2277 +	printk("\n");
2282 +static inline void debug_desc(u32 phys, struct desc *desc)
2285 + printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
2286 + phys, desc->next, desc->buf_len, desc->pkt_len,
2287 + desc->data, desc->status, desc->error_count);
2291 +static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
2298 + { HSS0_PKT_TX0_QUEUE, "TX#0 " },
2299 + { HSS0_PKT_TXDONE_QUEUE, "TX-done#0 " },
2300 + { HSS0_PKT_RX_QUEUE, "RX#0 " },
2301 + { HSS0_PKT_RXFREE0_QUEUE, "RX-free#0 " },
2302 + { HSS1_PKT_TX0_QUEUE, "TX#1 " },
2303 + { HSS1_PKT_TXDONE_QUEUE, "TX-done#1 " },
2304 + { HSS1_PKT_RX_QUEUE, "RX#1 " },
2305 + { HSS1_PKT_RXFREE0_QUEUE, "RX-free#1 " },
2309 + for (i = 0; i < ARRAY_SIZE(names); i++)
2310 + if (names[i].queue == queue)
2313 + printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
2314 + i < ARRAY_SIZE(names) ? names[i].name : "",
2315 + is_get ? "->" : "<-", phys);
2319 +static inline u32 queue_get_entry(unsigned int queue)
2321 + u32 phys = qmgr_get_entry(queue);
2322 + debug_queue(queue, 1, phys);
2326 +static inline int queue_get_desc(unsigned int queue, struct port *port,
2329 + u32 phys, tab_phys, n_desc;
2332 + if (!(phys = queue_get_entry(queue)))
2335 + BUG_ON(phys & 0x1F);
2336 + tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
2337 + tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
2338 + n_desc = (phys - tab_phys) / sizeof(struct desc);
2339 + BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
2340 + debug_desc(phys, &tab[n_desc]);
2341 + BUG_ON(tab[n_desc].next);
2345 +static inline void queue_put_desc(unsigned int queue, u32 phys,
2346 + struct desc *desc)
2348 + debug_queue(queue, 0, phys);
2349 + debug_desc(phys, desc);
2350 + BUG_ON(phys & 0x1F);
2351 + qmgr_put_entry(queue, phys);
2352 + BUG_ON(qmgr_stat_overflow(queue));
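queue_get_desc() and queue_put_desc() round-trip a descriptor between its physical address and its index in the per-port table; the BUG_ON(phys & 0x1F) checks imply descriptors are 32 bytes and 32-byte aligned. A minimal userspace model of that arithmetic (toy address, padded stand-in struct):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct desc {	/* stand-in padded to 32 bytes like the hardware descriptor */
	uint32_t next, buf_len, pkt_len, data, status, error_count, pad[2];
};

int main(void)
{
	uint32_t tab_phys = 0x40000000;	/* hypothetical pool base */
	uint32_t phys = tab_phys + 5 * sizeof(struct desc);

	assert(sizeof(struct desc) == 32);
	assert(!(phys & 0x1F));		/* mirrors BUG_ON(phys & 0x1F) */
	/* mirrors: n_desc = (phys - tab_phys) / sizeof(struct desc) */
	printf("descriptor index = %u\n",
	       (unsigned int)((phys - tab_phys) / sizeof(struct desc)));
	return 0;
}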
2356 +static inline void dma_unmap_tx(struct port *port, struct desc *desc)
2359 + dma_unmap_single(&port->netdev->dev, desc->data,
2360 + desc->buf_len, DMA_TO_DEVICE);
2362 + dma_unmap_single(&port->netdev->dev, desc->data & ~3,
2363 + ALIGN((desc->data & 3) + desc->buf_len, 4),
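The unaligned branch of dma_unmap_tx() reconstructs the original mapping from a data pointer that may carry a 0-3 byte offset (created in hss_hdlc_xmit() below): the mapping started at data & ~3 and spanned ALIGN((data & 3) + buf_len, 4) bytes. A quick standalone check of that math, with ALIGN() written out the way the kernel defines it:

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t data = 0x40001003;	/* hypothetical address, offset 3 */
	uint32_t buf_len = 10;

	printf("unmap at 0x%08X, length %u\n",
	       (unsigned int)(data & ~3u),			/* 0x40001000 */
	       (unsigned int)ALIGN((data & 3) + buf_len, 4));	/* 16 */
	return 0;
}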
2369 +static void hss_hdlc_set_carrier(void *pdev, int carrier)
2371 + struct net_device *netdev = pdev;
2372 + struct port *port = dev_to_port(netdev);
2373 + unsigned long flags;
2375 + spin_lock_irqsave(&npe_lock, flags);
2376 + port->carrier = carrier;
2377 + if (!port->loopback) {
2379 + netif_carrier_on(netdev);
2381 + netif_carrier_off(netdev);
2383 + spin_unlock_irqrestore(&npe_lock, flags);
2386 +static void hss_hdlc_rx_irq(void *pdev)
2388 + struct net_device *dev = pdev;
2389 + struct port *port = dev_to_port(dev);
2392 + printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
2394 + qmgr_disable_irq(queue_ids[port->id].rx);
2395 + netif_rx_schedule(dev, &port->napi);
2398 +static int hss_hdlc_poll(struct napi_struct *napi, int budget)
2400 + struct port *port = container_of(napi, struct port, napi);
2401 + struct net_device *dev = port->netdev;
2402 + unsigned int rxq = queue_ids[port->id].rx;
2403 + unsigned int rxfreeq = queue_ids[port->id].rxfree;
2404 + struct net_device_stats *stats = hdlc_stats(dev);
2408 + printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
2411 + while (received < budget) {
2412 + struct sk_buff *skb;
2413 + struct desc *desc;
2416 + struct sk_buff *temp;
2420 + if ((n = queue_get_desc(rxq, port, 0)) < 0) {
2421 + received = 0; /* No packet received */
2423 + printk(KERN_DEBUG "%s: hss_hdlc_poll"
2424 + " netif_rx_complete\n", dev->name);
2426 + netif_rx_complete(dev, napi);
2427 + qmgr_enable_irq(rxq);
2428 + if (!qmgr_stat_empty(rxq) &&
2429 + netif_rx_reschedule(dev, napi)) {
2431 + printk(KERN_DEBUG "%s: hss_hdlc_poll"
2432 + " netif_rx_reschedule succeeded\n",
2435 + qmgr_disable_irq(rxq);
2439 + printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
2442 + return 0; /* all work done */
2445 + desc = rx_desc_ptr(port, n);
2446 +#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
2447 + if (desc->error_count)
2448 + printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
2449 + " errors %u\n", dev->name, desc->status,
2450 + desc->error_count);
2453 + switch (desc->status) {
2456 + if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
2457 + phys = dma_map_single(&dev->dev, skb->data,
2460 + if (dma_mapping_error(phys)) {
2461 + dev_kfree_skb(skb);
2466 + skb = netdev_alloc_skb(dev, desc->pkt_len);
2469 + stats->rx_dropped++;
2471 + case ERR_HDLC_ALIGN:
2472 + case ERR_HDLC_ABORT:
2473 + stats->rx_frame_errors++;
2474 + stats->rx_errors++;
2476 + case ERR_HDLC_FCS:
2477 + stats->rx_crc_errors++;
2478 + stats->rx_errors++;
2480 + case ERR_HDLC_TOO_LONG:
2481 + stats->rx_length_errors++;
2482 + stats->rx_errors++;
2484 + default: /* FIXME - remove printk */
2485 + printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
2486 + " errors %u\n", dev->name, desc->status,
2487 + desc->error_count);
2488 + stats->rx_errors++;
2492 + /* put the desc back on RX-ready queue */
2493 + desc->buf_len = RX_SIZE;
2494 + desc->pkt_len = desc->status = 0;
2495 + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
2499 + /* process received frame */
2502 + skb = port->rx_buff_tab[n];
2503 + dma_unmap_single(&dev->dev, desc->data,
2504 + RX_SIZE, DMA_FROM_DEVICE);
2506 + dma_sync_single(&dev->dev, desc->data,
2507 + RX_SIZE, DMA_FROM_DEVICE);
2508 + memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
2509 + ALIGN(desc->pkt_len, 4) / 4);
2511 + skb_put(skb, desc->pkt_len);
2513 + debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);
2515 + skb->protocol = hdlc_type_trans(skb, dev);
2516 + dev->last_rx = jiffies;
2517 + stats->rx_packets++;
2518 + stats->rx_bytes += skb->len;
2519 + netif_receive_skb(skb);
2521 + /* put the new buffer on RX-free queue */
2523 + port->rx_buff_tab[n] = temp;
2524 + desc->data = phys;
2526 + desc->buf_len = RX_SIZE;
2527 + desc->pkt_len = 0;
2528 + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
2532 +	printk(KERN_DEBUG "%s: hss_hdlc_poll: end, not all work done\n", dev->name);
2534 + return received; /* not all work done */
2538 +static void hss_hdlc_txdone_irq(void *pdev)
2540 + struct net_device *dev = pdev;
2541 + struct port *port = dev_to_port(dev);
2542 + struct net_device_stats *stats = hdlc_stats(dev);
2546 + printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
2548 + while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
2550 + struct desc *desc;
2553 + desc = tx_desc_ptr(port, n_desc);
2555 + stats->tx_packets++;
2556 + stats->tx_bytes += desc->pkt_len;
2558 + dma_unmap_tx(port, desc);
2560 + printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
2561 + dev->name, port->tx_buff_tab[n_desc]);
2563 + free_buffer_irq(port->tx_buff_tab[n_desc]);
2564 + port->tx_buff_tab[n_desc] = NULL;
2566 + start = qmgr_stat_empty(port->plat->txreadyq);
2567 + queue_put_desc(port->plat->txreadyq,
2568 + tx_desc_phys(port, n_desc), desc);
2571 + printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
2572 + " ready\n", dev->name);
2574 + netif_wake_queue(dev);
2579 +static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
2581 + struct port *port = dev_to_port(dev);
2582 + struct net_device_stats *stats = hdlc_stats(dev);
2583 + unsigned int txreadyq = port->plat->txreadyq;
2584 + int len, offset, bytes, n;
2587 + struct desc *desc;
2590 + printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
2593 + if (unlikely(skb->len > HDLC_MAX_MRU)) {
2594 + dev_kfree_skb(skb);
2595 + stats->tx_errors++;
2596 + return NETDEV_TX_OK;
2599 + debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);
2603 + offset = 0; /* no need to keep alignment */
2607 + offset = (int)skb->data & 3; /* keep 32-bit alignment */
2608 + bytes = ALIGN(offset + len, 4);
2609 + if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
2610 + dev_kfree_skb(skb);
2611 + stats->tx_dropped++;
2612 + return NETDEV_TX_OK;
2614 + memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
2615 + dev_kfree_skb(skb);
2618 + phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
2619 + if (dma_mapping_error(phys)) {
2621 + dev_kfree_skb(skb);
2625 + stats->tx_dropped++;
2626 + return NETDEV_TX_OK;
2629 + n = queue_get_desc(txreadyq, port, 1);
2631 + desc = tx_desc_ptr(port, n);
2634 + port->tx_buff_tab[n] = skb;
2636 + port->tx_buff_tab[n] = mem;
2638 + desc->data = phys + offset;
2639 + desc->buf_len = desc->pkt_len = len;
2642 + queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
2643 + dev->trans_start = jiffies;
2645 + if (qmgr_stat_empty(txreadyq)) {
2647 + printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
2649 + netif_stop_queue(dev);
2650 + /* we could miss TX ready interrupt */
2651 + if (!qmgr_stat_empty(txreadyq)) {
2653 + printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
2656 + netif_wake_queue(dev);
2661 + printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
2663 + return NETDEV_TX_OK;
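On the little-endian path, hss_hdlc_xmit() rounds each frame out to whole 32-bit words (offset = skb->data & 3, bytes = ALIGN(offset + len, 4)) and copies it into a bounce buffer with memcpy_swab32(). That helper is not visible in this excerpt; judging from its uses here and in hss_hdlc_poll(), it copies 32-bit words while reversing their byte order, since the NPE apparently consumes big-endian words. A userspace equivalent under that assumption, offered only as a sketch:

#include <stdint.h>
#include <stdio.h>

/* assumed semantics: copy cnt 32-bit words, byte-swapping each one */
static void memcpy_swab32(uint32_t *dst, const uint32_t *src, int cnt)
{
	while (cnt--) {
		uint32_t v = *src++;

		*dst++ = (v >> 24) | ((v >> 8) & 0x0000FF00) |
			 ((v << 8) & 0x00FF0000) | (v << 24);
	}
}

int main(void)
{
	uint32_t in = 0x11223344, out;

	memcpy_swab32(&out, &in, 1);
	printf("0x%08X -> 0x%08X\n", (unsigned int)in, (unsigned int)out);
	return 0;	/* prints: 0x11223344 -> 0x44332211 */
}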
2667 +static int request_hdlc_queues(struct port *port)
2671 + err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0);
2675 + err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0);
2679 + err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0);
2683 + err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
2687 + err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0);
2693 + qmgr_release_queue(port->plat->txreadyq);
2695 + qmgr_release_queue(queue_ids[port->id].tx);
2697 + qmgr_release_queue(queue_ids[port->id].rx);
2699 + qmgr_release_queue(queue_ids[port->id].rxfree);
2700 + printk(KERN_DEBUG "%s: unable to request hardware queues\n",
2701 + port->netdev->name);
2705 +static void release_hdlc_queues(struct port *port)
2707 + qmgr_release_queue(queue_ids[port->id].rxfree);
2708 + qmgr_release_queue(queue_ids[port->id].rx);
2709 + qmgr_release_queue(queue_ids[port->id].txdone);
2710 + qmgr_release_queue(queue_ids[port->id].tx);
2711 + qmgr_release_queue(port->plat->txreadyq);
2714 +static int init_hdlc_queues(struct port *port)
2719 + if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
2720 + POOL_ALLOC_SIZE, 32, 0)))
2723 + if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
2724 + &port->desc_tab_phys)))
2726 + memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
2727 + memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
2728 + memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
2730 + /* Setup RX buffers */
2731 + for (i = 0; i < RX_DESCS; i++) {
2732 + struct desc *desc = rx_desc_ptr(port, i);
2736 + if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
2738 + data = buff->data;
2740 + if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
2744 + desc->buf_len = RX_SIZE;
2745 + desc->data = dma_map_single(&port->netdev->dev, data,
2746 + RX_SIZE, DMA_FROM_DEVICE);
2747 + if (dma_mapping_error(desc->data)) {
2748 + free_buffer(buff);
2751 + port->rx_buff_tab[i] = buff;
2757 +static void destroy_hdlc_queues(struct port *port)
2761 + if (port->desc_tab) {
2762 + for (i = 0; i < RX_DESCS; i++) {
2763 + struct desc *desc = rx_desc_ptr(port, i);
2764 + buffer_t *buff = port->rx_buff_tab[i];
2766 + dma_unmap_single(&port->netdev->dev,
2767 + desc->data, RX_SIZE,
2769 + free_buffer(buff);
2772 + for (i = 0; i < TX_DESCS; i++) {
2773 + struct desc *desc = tx_desc_ptr(port, i);
2774 + buffer_t *buff = port->tx_buff_tab[i];
2776 + dma_unmap_tx(port, desc);
2777 + free_buffer(buff);
2780 + dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
2781 + port->desc_tab = NULL;
2784 + if (!ports_open && dma_pool) {
2785 + dma_pool_destroy(dma_pool);
2790 +static int hss_hdlc_open(struct net_device *dev)
2792 + struct port *port = dev_to_port(dev);
2793 + unsigned long flags;
2796 + if ((err = hdlc_open(dev)))
2799 + if ((err = request_hdlc_queues(port)))
2800 + goto err_hdlc_close;
2802 + if ((err = init_hdlc_queues(port)))
2803 + goto err_destroy_queues;
2805 + spin_lock_irqsave(&npe_lock, flags);
2807 + if (port->mode == MODE_G704 && port->channels[0] == CHANNEL_HDLC) {
2808 + err = -EBUSY; /* channel #0 is used for G.704 framing */
2811 + if (port->mode != MODE_HDLC)
2812 + for (i = port->frame_size / 8; i < MAX_CHANNELS; i++)
2813 + if (port->channels[i] == CHANNEL_HDLC) {
2814 + err = -ECHRNG; /* frame too short */
2818 + if ((err = hss_config_load_firmware(port)))
2821 + if (!port->chan_open_count && port->plat->open)
2822 + if ((err = port->plat->open(port->id, dev,
2823 + hss_hdlc_set_carrier)))
2826 + if (port->mode == MODE_G704 && !port->chan_open_count)
2827 + if ((err = hss_prepare_chan(port)))
2828 + goto err_plat_close;
2830 + spin_unlock_irqrestore(&npe_lock, flags);
2832 + /* Populate queues with buffers, no failure after this point */
2833 + for (i = 0; i < TX_DESCS; i++)
2834 + queue_put_desc(port->plat->txreadyq,
2835 + tx_desc_phys(port, i), tx_desc_ptr(port, i));
2837 + for (i = 0; i < RX_DESCS; i++)
2838 + queue_put_desc(queue_ids[port->id].rxfree,
2839 + rx_desc_phys(port, i), rx_desc_ptr(port, i));
2841 + napi_enable(&port->napi);
2842 + netif_start_queue(dev);
2844 + qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
2845 + hss_hdlc_rx_irq, dev);
2847 + qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
2848 + hss_hdlc_txdone_irq, dev);
2849 + qmgr_enable_irq(queue_ids[port->id].txdone);
2852 + port->hdlc_open = 1;
2854 + hss_config_set_hdlc_cfg(port);
2855 + hss_config_set_lut(port);
2856 + hss_config_load(port);
2858 + if (port->mode == MODE_G704 && !port->chan_open_count)
2859 + hss_config_start_chan(port);
2861 + hss_config_start_hdlc(port);
2863 +	/* there may already be RX data; the scheduled poll will enable the IRQ */
2864 + netif_rx_schedule(dev, &port->napi);
2868 + if (!port->chan_open_count && port->plat->close)
2869 + port->plat->close(port->id, dev);
2871 + spin_unlock_irqrestore(&npe_lock, flags);
2872 +err_destroy_queues:
2873 + destroy_hdlc_queues(port);
2874 + release_hdlc_queues(port);
2880 +static int hss_hdlc_close(struct net_device *dev)
2882 + struct port *port = dev_to_port(dev);
2883 + unsigned long flags;
2884 + int i, buffs = RX_DESCS; /* allocated RX buffers */
2886 + spin_lock_irqsave(&npe_lock, flags);
2888 + port->hdlc_open = 0;
2889 + qmgr_disable_irq(queue_ids[port->id].rx);
2890 + netif_stop_queue(dev);
2891 + napi_disable(&port->napi);
2893 + hss_config_stop_hdlc(port);
2895 + if (port->mode == MODE_G704 && !port->chan_open_count)
2896 + hss_chan_stop(port);
2898 + while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
2900 + while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
2904 + printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
2905 + " left in NPE\n", dev->name, buffs);
2908 + while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
2909 + buffs--; /* cancel TX */
2913 + while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
2917 + } while (++i < MAX_CLOSE_WAIT);
2920 + printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
2921 + "left in NPE\n", dev->name, buffs);
2924 + printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
2926 + qmgr_disable_irq(queue_ids[port->id].txdone);
2928 + if (!port->chan_open_count && port->plat->close)
2929 + port->plat->close(port->id, dev);
2930 + spin_unlock_irqrestore(&npe_lock, flags);
2932 + destroy_hdlc_queues(port);
2933 + release_hdlc_queues(port);
2939 +static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
2940 + unsigned short parity)
2942 + struct port *port = dev_to_port(dev);
2944 + if (encoding != ENCODING_NRZ)
2948 + case PARITY_CRC16_PR1_CCITT:
2949 + port->hdlc_cfg = 0;
2952 + case PARITY_CRC32_PR1_CCITT:
2953 + port->hdlc_cfg = PKT_HDLC_CRC_32;
2962 +static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2964 + const size_t size = sizeof(sync_serial_settings);
2965 + sync_serial_settings new_line;
2966 + sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
2967 + struct port *port = dev_to_port(dev);
2968 + unsigned long flags;
2971 + if (cmd != SIOCWANDEV)
2972 + return hdlc_ioctl(dev, ifr, cmd);
2974 +	switch (ifr->ifr_settings.type) {
2975 + case IF_GET_IFACE:
2976 + ifr->ifr_settings.type = IF_IFACE_V35;
2977 + if (ifr->ifr_settings.size < size) {
2978 + ifr->ifr_settings.size = size; /* data size wanted */
2981 + memset(&new_line, 0, sizeof(new_line));
2982 + new_line.clock_type = port->clock_type;
2983 + new_line.clock_rate = port->clock_rate;
2984 + new_line.loopback = port->loopback;
2985 + if (copy_to_user(line, &new_line, size))
2988 + if (!port->chan_buf)
2991 + dma_sync_single(&dev->dev, port->chan_rx_buf_phys,
2992 + chan_rx_buf_len(port), DMA_FROM_DEVICE);
2993 + printk(KERN_DEBUG "RX:\n");
2994 + for (i = 0; i < chan_rx_buf_len(port); i++) {
2996 + printk(KERN_DEBUG "%03X ", i);
2997 + printk("%02X%c", chan_rx_buf(port)[i],
2998 + (i + 1) % 32 ? ' ' : '\n');
3002 + printk(KERN_DEBUG "TX:\n");
3003 + for (i = 0; i < /*CHAN_TX_FRAMES * 2*/ chan_tx_buf_len(port)
3004 + + chan_tx_lists_len(port); i++) {
3006 + printk(KERN_DEBUG "%03X ", i);
3007 + printk("%02X%c", chan_tx_buf(port)[i],
3008 + (i + 1) % 32 ? ' ' : '\n');
3011 + port->msg_count = 10;
3014 + case IF_IFACE_SYNC_SERIAL:
3015 + case IF_IFACE_V35:
3016 +		if (!capable(CAP_NET_ADMIN))
3018 + if (copy_from_user(&new_line, line, size))
3021 + clk = new_line.clock_type;
3022 + if (port->plat->set_clock)
3023 + clk = port->plat->set_clock(port->id, clk);
3025 + if (clk != CLOCK_EXT && clk != CLOCK_INT)
3026 + return -EINVAL; /* No such clock setting */
3028 + if (new_line.loopback != 0 && new_line.loopback != 1)
3031 + port->clock_type = clk; /* Update settings */
3032 +		/* FIXME: port->clock_rate = new_line.clock_rate; */
3033 + port->loopback = new_line.loopback;
3035 + spin_lock_irqsave(&npe_lock, flags);
3037 + if (port->chan_open_count || port->hdlc_open) {
3038 + hss_config_set_line(port);
3039 + hss_config_load(port);
3041 + if (port->loopback || port->carrier)
3042 + netif_carrier_on(port->netdev);
3044 + netif_carrier_off(port->netdev);
3045 + spin_unlock_irqrestore(&npe_lock, flags);
3050 + return hdlc_ioctl(dev, ifr, cmd);
3054 +/*****************************************************************************
3055 + * channelized (G.704) operation
3056 + ****************************************************************************/
3058 +static void g704_rx_framer(struct port *port, unsigned int offset)
3060 + u8 *data = chan_rx_buf(port) + sub_offset(offset, CHAN_RX_TRIGGER,
3062 + unsigned int bit, frame, bad_even = 0, bad_odd = 0, cnt;
3063 + unsigned int is_first = port->just_set_offset;
3064 + u8 zeros_even, zeros_odd, ones_even, ones_odd;
3065 + enum alignment aligned;
3067 + port->just_set_offset = 0;
3068 + dma_sync_single(port->dev, port->chan_rx_buf_phys, CHAN_RX_FRAMES,
3071 + /* check if aligned first */
3072 + for (frame = 0; frame < CHAN_RX_TRIGGER &&
3073 + (bad_even <= MAX_CHAN_RX_BAD_SYNC ||
3074 + bad_odd <= MAX_CHAN_RX_BAD_SYNC); frame += 2) {
3075 + u8 ve = data[frame];
3076 + u8 vo = data[frame + 1];
3078 + if ((ve & 0x7F) != 0x1B || !(vo & 0x40))
3081 + if ((vo & 0x7F) != 0x1B || !(ve & 0x40))
3085 + if (bad_even <= MAX_CHAN_RX_BAD_SYNC)
3086 + aligned = EVEN_FIRST;
3087 + else if (bad_odd <= MAX_CHAN_RX_BAD_SYNC)
3088 + aligned = ODD_FIRST;
3090 + aligned = NOT_ALIGNED;
3092 + if (aligned != NOT_ALIGNED) {
3093 + if (aligned == port->aligned)
3094 + return; /* no change */
3095 + if (printk_ratelimit())
3096 + printk(KERN_INFO "HSS-%i: synchronized at %u (%s frame"
3097 + " first)\n", port->id, port->frame_sync_offset,
3098 + aligned == EVEN_FIRST ? "even" : "odd");
3099 + port->aligned = aligned;
3101 + atomic_inc(&port->chan_tx_irq_number);
3102 + wake_up_interruptible(&port->chan_tx_waitq);
3103 + atomic_inc(&port->chan_rx_irq_number);
3104 + wake_up_interruptible(&port->chan_rx_waitq);
3109 + if (port->aligned != NOT_ALIGNED && printk_ratelimit()) {
3110 + printk(KERN_INFO "HSS-%i: lost alignment\n", port->id);
3111 + port->aligned = NOT_ALIGNED;
3113 + for (cnt = 0; cnt < CHAN_RX_FRAMES; cnt++)
3114 + printk("%c%02X%s", cnt == offset ? '>' : ' ',
3115 + chan_rx_buf(port)[cnt],
3116 + (cnt + 1) % 32 ? "" : "\n");
3119 + for (cnt = 0; cnt < MAX_CHAN_DEVICES; cnt++)
3120 + if (port->chan_devices[cnt]) {
3121 + set_bit(TX_ERROR_BIT, &port->chan_devices[cnt]
3123 + set_bit(RX_ERROR_BIT, &port->chan_devices[cnt]
3126 + atomic_inc(&port->chan_tx_irq_number);
3127 + wake_up_interruptible(&port->chan_tx_waitq);
3128 + atomic_inc(&port->chan_rx_irq_number);
3129 + wake_up_interruptible(&port->chan_rx_waitq);
3135 + zeros_even = zeros_odd = 0;
3136 + ones_even = ones_odd = 0xFF;
3137 + for (frame = 0; frame < CHAN_RX_TRIGGER; frame += 2) {
3138 + zeros_even |= data[frame];
3139 + zeros_odd |= data[frame + 1];
3140 + ones_even &= data[frame];
3141 + ones_odd &= data[frame + 1];
3144 + for (bit = 0; bit < 7; bit++) {
3145 + if ((zeros_even & ~0x9B) == 0 && (ones_even & 0x1B) == 0x1B &&
3146 + (ones_odd & 0x40) == 0x40) {
3147 + aligned = EVEN_FIRST; /* maybe */
3150 + if ((zeros_odd & ~0x9B) == 0 && (ones_odd & 0x1B) == 0x1B &&
3151 + (ones_even & 0x40) == 0x40) {
3152 + aligned = ODD_FIRST; /* maybe */
3156 + ones_even = ones_even << 1 | 1;
3158 + ones_odd = ones_odd << 1 | 1;
3161 + port->frame_sync_offset += port->frame_size - bit;
3162 + port->frame_sync_offset %= port->frame_size;
3163 + port->just_set_offset = 1;
3167 + printk(KERN_DEBUG "HSS-%i: trying frame sync at %u\n",
3168 + port->id, port->frame_sync_offset);
3170 + printk(KERN_DEBUG "HSS-%i: found possible frame sync pattern at"
3171 + " %u (%s frame first)\n", port->id,
3172 + port->frame_sync_offset,
3173 + aligned == EVEN_FIRST ? "even" : "odd");
3176 + hss_config_set_rx_frame(port);
3177 + hss_config_load(port);
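The aligned/not-aligned test above inspects timeslot-0 octets pairwise: in a synchronized G.704/E1 stream, alternate frames carry the frame alignment signal (low seven bits equal to 0x1B) and the frames in between carry the NFAS marker bit 0x40. A standalone classifier applying the same per-pair tests (control flow simplified, sample data hand-made):

#include <stdint.h>
#include <stdio.h>

#define MAX_BAD	2	/* stands in for MAX_CHAN_RX_BAD_SYNC */

enum alignment { NOT_ALIGNED, EVEN_FIRST, ODD_FIRST };

static enum alignment classify(const uint8_t *ts0, unsigned int frames)
{
	unsigned int frame, bad_even = 0, bad_odd = 0;

	for (frame = 0; frame + 1 < frames; frame += 2) {
		uint8_t ve = ts0[frame], vo = ts0[frame + 1];

		if ((ve & 0x7F) != 0x1B || !(vo & 0x40))
			bad_even++;	/* not FAS-even / NFAS-odd */
		if ((vo & 0x7F) != 0x1B || !(ve & 0x40))
			bad_odd++;	/* not FAS-odd / NFAS-even */
	}
	if (bad_even <= MAX_BAD)
		return EVEN_FIRST;
	if (bad_odd <= MAX_BAD)
		return ODD_FIRST;
	return NOT_ALIGNED;
}

int main(void)
{
	uint8_t good[8] = { 0x1B, 0x40, 0x9B, 0x40, 0x1B, 0x5F, 0x1B, 0x40 };

	printf("%d\n", classify(good, 8));	/* 1 == EVEN_FIRST */
	return 0;
}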
3180 +static void chan_process_tx_irq(struct chan_device *chan_dev, int offset)
3183 + unsigned int buff_len = CHAN_TX_FRAMES * chan_dev->chan_count;
3184 + unsigned int list_len = CHAN_TX_LIST_FRAMES * chan_dev->chan_count;
3185 + int eaten, last_offset = chan_dev->port->chan_last_tx * list_len;
3187 + offset *= list_len;
3188 + eaten = sub_offset(offset, last_offset, buff_len);
3190 + if (chan_dev->tx_count > eaten + 2 * list_len) {
3191 + /* two pages must be reserved for the transmitter */
3192 + chan_dev->tx_first += eaten;
3193 + chan_dev->tx_first %= buff_len;
3194 + chan_dev->tx_count -= eaten;
3198 +	   1 tx_first (may still be transmitted)
3199 + 2 tx_offset (currently reported by the NPE)
3200 + 3 tx_first + 2 * list_len (free to write here)
3205 + /* printk(KERN_DEBUG "TX buffer underflow\n"); */
3206 + chan_dev->tx_first = sub_offset(offset, list_len, buff_len);
3207 + chan_dev->tx_count = 2 * list_len; /* reserve */
3208 + set_bit(TX_ERROR_BIT, &chan_dev->errors_bitmap);
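sub_offset() is defined earlier in the patch and is not shown in this excerpt; its uses suggest it returns the forward distance from the second position to the first on a ring of the given length. Under that assumption, the underflow branch above restarts the window one list behind the NPE's current position and keeps two lists of frames reserved:

#include <stdio.h>

/* assumed semantics: distance from b forward to a on a ring of size len */
static unsigned int sub_offset(unsigned int a, unsigned int b,
			       unsigned int len)
{
	return (a + len - b) % len;
}

int main(void)
{
	unsigned int buff_len = 64, list_len = 8;
	unsigned int offset = 16;	/* hypothetical NPE position */

	/* mirrors: tx_first = sub_offset(offset, list_len, buff_len) */
	printf("tx_first = %u, tx_count = %u\n",
	       sub_offset(offset, list_len, buff_len), 2 * list_len);
	return 0;	/* prints: tx_first = 8, tx_count = 16 */
}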
3212 +static void chan_process_rx_irq(struct chan_device *chan_dev, int offset)
3215 + unsigned int buff_len = CHAN_RX_FRAMES * chan_dev->chan_count;
3216 + unsigned int trig_len = CHAN_RX_TRIGGER * chan_dev->chan_count;
3217 + int last_offset = chan_dev->port->chan_last_rx * chan_dev->chan_count;
3219 + offset *= chan_dev->chan_count;
3220 + chan_dev->rx_count += sub_offset(offset, last_offset + trig_len,
3221 + buff_len) + trig_len;
3222 + if (chan_dev->rx_count > buff_len - 2 * trig_len) {
3223 + /* two pages - offset[0] and offset[1] are lost - FIXME check */
3224 + /* printk(KERN_DEBUG "RX buffer overflow\n"); */
3225 + chan_dev->rx_first = (offset + 2 * trig_len) % buff_len;
3226 + chan_dev->rx_count = buff_len - 2 * trig_len;
3227 + set_bit(RX_ERROR_BIT, &chan_dev->errors_bitmap);
3231 +static void hss_chan_irq(void *pdev)
3233 + struct port *port = pdev;
3237 + printk(KERN_DEBUG DRV_NAME ": hss_chan_irq\n");
3239 + spin_lock(&npe_lock);
3240 + while ((v = qmgr_get_entry(queue_ids[port->id].chan))) {
3241 + unsigned int first, errors, tx_list, rx_frame;
3245 + errors = (v >> 16) & 0xFF;
3246 + tx_list = (v >> 8) & 0xFF;
3247 + rx_frame = v & 0xFF;
3249 + if (port->msg_count) {
3250 + printk(KERN_DEBUG "chan_irq hss %i jiffies %lu first"
3251 + " 0x%02X errors 0x%02X tx_list 0x%02X rx_frame"
3252 + " 0x%02X\n", port->id, jiffies, first, errors,
3253 + tx_list, rx_frame);
3254 + port->msg_count--;
3257 + BUG_ON(rx_frame % CHAN_RX_TRIGGER);
3258 + BUG_ON(rx_frame >= CHAN_RX_FRAMES);
3259 + BUG_ON(tx_list >= CHAN_TX_LISTS);
3261 + bad = port->mode == MODE_G704 && port->aligned == NOT_ALIGNED;
3262 + if (!bad && tx_list != port->chan_last_tx) {
3263 + if (tx_list != (port->chan_last_tx + 1) % CHAN_TX_LISTS)
3264 + printk(KERN_DEBUG "Skipped an IRQ? Tx last %i"
3265 + " current %i\n", port->chan_last_tx,
3267 + for (i = 0; i < MAX_CHAN_DEVICES; i++) {
3268 + if (!port->chan_devices[i] ||
3269 + !port->chan_devices[i]->open_count)
3271 + chan_process_tx_irq(port->chan_devices[i],
3274 + atomic_inc(&port->chan_tx_irq_number);
3276 + printk(KERN_DEBUG "wakeing up TX jiff %lu\n",
3279 + wake_up_interruptible(&port->chan_tx_waitq);
3282 + if (rx_frame != (port->chan_last_rx + CHAN_RX_TRIGGER) %
3284 + printk(KERN_DEBUG "Skipped an IRQ? Rx last %i"
3285 + " current %i\n", port->chan_last_rx, rx_frame);
3287 + if (port->mode == MODE_G704)
3288 + g704_rx_framer(port, rx_frame);
3291 + (port->mode != MODE_G704 || port->aligned != NOT_ALIGNED)) {
3292 + for (i = 0; i < MAX_CHAN_DEVICES; i++) {
3293 + if (!port->chan_devices[i] ||
3294 + !port->chan_devices[i]->open_count)
3296 + chan_process_rx_irq(port->chan_devices[i],
3299 + atomic_inc(&port->chan_rx_irq_number);
3300 + wake_up_interruptible(&port->chan_rx_waitq);
3302 + port->chan_last_tx = tx_list;
3303 + port->chan_last_rx = rx_frame;
3305 + spin_unlock(&npe_lock);
3309 +static int hss_prepare_chan(struct port *port)
3313 + if ((err = hss_config_load_firmware(port)))
3316 + if ((err = qmgr_request_queue(queue_ids[port->id].chan,
3317 + CHAN_QUEUE_LEN, 0, 0)))
3320 + if (!(port->chan_buf = kmalloc(chan_tx_buf_len(port) +
3321 + chan_tx_lists_len(port) +
3322 + chan_rx_buf_len(port), GFP_KERNEL))) {
3323 + goto release_queue;
3327 + port->chan_tx_buf_phys = dma_map_single(port->dev, chan_tx_buf(port),
3328 + chan_tx_buf_len(port) +
3329 + chan_tx_lists_len(port),
3331 + if (dma_mapping_error(port->chan_tx_buf_phys)) {
3336 + port->chan_rx_buf_phys = dma_map_single(port->dev, chan_rx_buf(port),
3337 + chan_rx_buf_len(port),
3339 + if (dma_mapping_error(port->chan_rx_buf_phys)) {
3344 + qmgr_set_irq(queue_ids[port->id].chan, QUEUE_IRQ_SRC_NOT_EMPTY,
3345 + hss_chan_irq, port);
3346 + qmgr_enable_irq(queue_ids[port->id].chan);
3347 + hss_chan_irq(port);
3351 + dma_unmap_single(port->dev, port->chan_tx_buf_phys,
3352 + chan_tx_buf_len(port) + chan_tx_lists_len(port),
3355 + kfree(port->chan_buf);
3356 + port->chan_buf = NULL;
3358 + qmgr_release_queue(queue_ids[port->id].chan);
3362 +void hss_chan_stop(struct port *port)
3364 + if (!port->chan_open_count && !port->hdlc_open)
3365 + qmgr_disable_irq(queue_ids[port->id].chan);
3367 + hss_config_stop_chan(port);
3368 + hss_config_set_lut(port);
3369 + hss_config_load(port);
3371 + if (!port->chan_open_count && !port->hdlc_open) {
3372 + dma_unmap_single(port->dev, port->chan_tx_buf_phys,
3373 + chan_tx_buf_len(port) +
3374 + chan_tx_lists_len(port), DMA_TO_DEVICE);
3375 + dma_unmap_single(port->dev, port->chan_rx_buf_phys,
3376 + chan_rx_buf_len(port), DMA_FROM_DEVICE);
3377 + kfree(port->chan_buf);
3378 + port->chan_buf = NULL;
3379 + qmgr_release_queue(queue_ids[port->id].chan);
3383 +static int hss_chan_open(struct inode *inode, struct file *file)
3385 + struct chan_device *chan_dev = inode_to_chan_dev(inode);
3386 + struct port *port = chan_dev->port;
3387 + unsigned long flags;
3390 + spin_lock_irqsave(&npe_lock, flags);
3392 + if (chan_dev->open_count) {
3393 + if (chan_dev->excl_open || (file->f_flags & O_EXCL))
3396 + chan_dev->open_count++;
3400 + if (port->mode == MODE_HDLC) {
3405 + if (port->mode == MODE_G704 && port->channels[0] == chan_dev->id) {
3406 + err = -EBUSY; /* channel #0 is used for G.704 signaling */
3409 + for (i = MAX_CHANNELS; i > port->frame_size / 8; i--)
3410 + if (port->channels[i - 1] == chan_dev->id) {
3411 + err = -ECHRNG; /* frame too short */
3415 + chan_dev->rx_first = chan_dev->tx_first = 0;
3416 + chan_dev->rx_count = chan_dev->tx_count = 0;
3417 + clear_bit(TX_ERROR_BIT, &chan_dev->errors_bitmap);
3418 + clear_bit(RX_ERROR_BIT, &chan_dev->errors_bitmap);
3420 + if (!port->chan_open_count && !port->hdlc_open) {
3421 + if (port->plat->open)
3422 + if ((err = port->plat->open(port->id, port->netdev,
3423 + hss_hdlc_set_carrier)))
3425 + if ((err = hss_prepare_chan(port))) {
3426 + if (port->plat->close)
3427 + port->plat->close(port->id, port->netdev);
3432 + hss_config_stop_chan(port);
3433 + chan_dev->open_count++;
3434 + port->chan_open_count++;
3435 +	chan_dev->excl_open = !!(file->f_flags & O_EXCL);
3437 + hss_config_set_lut(port);
3438 + hss_config_load(port);
3439 + hss_config_start_chan(port);
3441 + spin_unlock_irqrestore(&npe_lock, flags);
3445 +static int hss_chan_release(struct inode *inode, struct file *file)
3447 + struct chan_device *chan_dev = inode_to_chan_dev(inode);
3448 + struct port *port = chan_dev->port;
3449 + unsigned long flags;
3451 + spin_lock_irqsave(&npe_lock, flags);
3453 + if (!--chan_dev->open_count) {
3454 + if (!--port->chan_open_count && !port->hdlc_open) {
3455 + hss_chan_stop(port);
3456 + if (port->plat->close)
3457 + port->plat->close(port->id, port->netdev);
3459 + hss_config_stop_chan(port);
3460 + hss_config_set_lut(port);
3461 +			hss_config_set_line(port);
3462 + hss_config_start_chan(port);
3466 + spin_unlock_irqrestore(&npe_lock, flags);
3470 +static ssize_t hss_chan_read(struct file *file, char __user *buf, size_t count,
3473 + struct chan_device *chan_dev = inode_to_chan_dev
3474 + (file->f_path.dentry->d_inode);
3475 + struct port *port = chan_dev->port;
3476 + unsigned long flags;
3478 + int res = 0, loops = 0;
3480 + spin_lock_irqsave(&npe_lock, flags);
3483 + int prev_irq = atomic_read(&port->chan_rx_irq_number);
3485 + if (test_and_clear_bit(RX_ERROR_BIT, &chan_dev->errors_bitmap)
3486 +		    || (port->mode == MODE_G704 && port->aligned == NOT_ALIGNED)) {
3492 + goto out; /* no need to wait */
3494 + if (chan_dev->rx_count)
3497 + spin_unlock_irqrestore(&npe_lock, flags);
3499 + if ((res = wait_event_interruptible
3500 + (port->chan_rx_waitq,
3501 + atomic_read(&port->chan_rx_irq_number) != prev_irq)))
3503 + spin_lock_irqsave(&npe_lock, flags);
3507 + dma_sync_single(port->dev, port->chan_rx_buf_phys,
3508 + chan_rx_buf_len(port), DMA_FROM_DEVICE);
3512 + printk(KERN_DEBUG "ENTRY rx_first %u rx_count %u count %i"
3513 + " last_rx %u loops %i\n", chan_dev->rx_first,
3514 + chan_dev->rx_count, count, port->chan_last_rx, loops);
3516 + rx_buf = chan_rx_buf(port);
3517 + while (chan_dev->rx_count > 0 && res < count) {
3518 + unsigned int chan = chan_dev->rx_first % chan_dev->chan_count;
3519 + unsigned int frame = chan_dev->rx_first / chan_dev->chan_count;
3521 + chan = chan_dev->log_channels[chan];
3522 + if (put_user(rx_buf[chan * CHAN_RX_FRAMES + frame], buf++)) {
3526 + chan_dev->rx_first++;
3527 + chan_dev->rx_first %= CHAN_RX_FRAMES * chan_dev->chan_count;
3528 + chan_dev->rx_count--;
3533 + printk(KERN_DEBUG "EXIT rx_first %u rx_count %u res %i\n",
3534 + chan_dev->rx_first, chan_dev->rx_count, res);
3536 + spin_unlock_irqrestore(&npe_lock, flags);
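hss_chan_read() de-interleaves the caller's stream out of the shared RX buffer: a running index splits into (channel slot, frame) via modulo and division by chan_count, the slot maps through log_channels[] to a physical channel, and the byte lives at rx_buf[chan * CHAN_RX_FRAMES + frame] because every channel owns a contiguous strip of CHAN_RX_FRAMES bytes. A toy de-interleave (two channels, identity log_channels[] mapping assumed):

#include <stdint.h>
#include <stdio.h>

#define CHAN_RX_FRAMES	8	/* toy value, not the driver's */

int main(void)
{
	uint8_t rx_buf[2 * CHAN_RX_FRAMES];
	unsigned int chan_count = 2, idx;

	for (idx = 0; idx < sizeof(rx_buf); idx++)
		rx_buf[idx] = idx;	/* recognizable fill */

	for (idx = 0; idx < 6; idx++) {	/* first 6 bytes, interleaved */
		unsigned int chan = idx % chan_count;
		unsigned int frame = idx / chan_count;

		printf("%u ", rx_buf[chan * CHAN_RX_FRAMES + frame]);
	}
	printf("\n");	/* prints: 0 8 1 9 2 10 */
	return 0;
}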
3540 +static ssize_t hss_chan_write(struct file *file, const char __user *buf,
3541 + size_t count, loff_t *f_pos)
3543 + struct chan_device *chan_dev = inode_to_chan_dev
3544 + (file->f_path.dentry->d_inode);
3545 + struct port *port = chan_dev->port;
3546 + unsigned long flags;
3548 + int res = 0, loops = 0;
3550 + spin_lock_irqsave(&npe_lock, flags);
3552 + int prev_irq = atomic_read(&port->chan_tx_irq_number);
3554 + if (test_and_clear_bit(TX_ERROR_BIT, &chan_dev->errors_bitmap)
3555 +		    || (port->mode == MODE_G704 && port->aligned == NOT_ALIGNED)) {
3561 + goto out; /* no need to wait */
3563 + if (chan_dev->tx_count < CHAN_TX_FRAMES * chan_dev->chan_count)
3566 + spin_unlock_irqrestore(&npe_lock, flags);
3568 + if ((res = wait_event_interruptible
3569 + (port->chan_tx_waitq,
3570 +		     atomic_read(&port->chan_tx_irq_number) != prev_irq)))
3572 + spin_lock_irqsave(&npe_lock, flags);
3578 + printk(KERN_DEBUG "ENTRY TX_first %u tx_count %u count %i"
3579 + " last_tx %u loops %i\n", chan_dev->tx_first,
3580 + chan_dev->tx_count, count, port->chan_last_tx, loops);
3582 + tx_buf = chan_tx_buf(port);
3583 + while (chan_dev->tx_count < CHAN_TX_FRAMES * chan_dev->chan_count &&
3585 + unsigned int tail, chan, frame;
3587 + tail = (chan_dev->tx_first + chan_dev->tx_count) %
3588 + (CHAN_TX_FRAMES * chan_dev->chan_count);
3589 + chan = tail % chan_dev->chan_count;
3590 + frame = tail / chan_dev->chan_count;
3591 + chan = chan_dev->log_channels[chan];
3593 + if (get_user(tx_buf[chan * CHAN_TX_FRAMES + frame], buf++)) {
3594 + printk(KERN_DEBUG "BUG? TX %u %u %u\n",
3595 + tail, chan, frame);
3599 + chan_dev->tx_count++;
3603 + dma_sync_single(port->dev, port->chan_tx_buf_phys,
3604 + chan_tx_buf_len(port), DMA_TO_DEVICE);
3607 + printk(KERN_DEBUG "EXIT TX_first %u tx_count %u res %i\n",
3608 + chan_dev->tx_first, chan_dev->tx_count, res);
3610 + spin_unlock_irqrestore(&npe_lock, flags);
3615 +static unsigned int hss_chan_poll(struct file *file, poll_table *wait)
3617 + struct chan_device *chan_dev = inode_to_chan_dev
3618 + (file->f_path.dentry->d_inode);
3619 + struct port *port = chan_dev->port;
3620 + unsigned long flags;
3621 + unsigned int mask = 0;
3623 + spin_lock_irqsave(&npe_lock, flags);
3624 + poll_wait(file, &port->chan_tx_waitq, wait);
3625 + poll_wait(file, &port->chan_rx_waitq, wait);
3627 + if (chan_dev->tx_count < CHAN_TX_FRAMES * chan_dev->chan_count)
3628 + mask |= POLLOUT | POLLWRNORM;
3629 + if (chan_dev->rx_count)
3630 + mask |= POLLIN | POLLRDNORM;
3631 + spin_unlock_irqrestore(&npe_lock, flags);
3635 +/*****************************************************************************
3636 + * channelized device sysfs attributes
3637 + ****************************************************************************/
3639 +static ssize_t chan_show_chan(struct device *dev, struct device_attribute *attr,
3642 + struct chan_device *chan_dev = dev_get_drvdata(dev);
3644 + return print_channels(chan_dev->port, buf, chan_dev->id);
3647 +static ssize_t chan_set_chan(struct device *dev, struct device_attribute *attr,
3648 + const char *buf, size_t len)
3650 + struct chan_device *chan_dev = dev_get_drvdata(dev);
3651 + struct port *port = chan_dev->port;
3652 + unsigned long flags;
3654 + size_t orig_len = len;
3657 + if (len && buf[len - 1] == '\n')
3660 + if (len != 7 || memcmp(buf, "destroy", 7))
3663 + spin_lock_irqsave(&npe_lock, flags);
3664 + cdev_del(&chan_dev->cdev);
3666 + for (ch = 0; ch < MAX_CHANNELS; ch++)
3667 + if (port->channels[ch] == chan_dev->id)
3668 + port->channels[ch] = CHANNEL_UNUSED;
3669 + port->chan_devices[chan_dev->id] = NULL;
3671 + spin_unlock_irqrestore(&npe_lock, flags);
3673 + if ((err = device_schedule_callback(dev, device_unregister)))
3678 +static struct device_attribute chan_attr =
3679 + __ATTR(channels, 0644, chan_show_chan, chan_set_chan);
3681 +/*****************************************************************************
3682 + * main sysfs attributes
3683 + ****************************************************************************/
3685 +static const struct file_operations chan_fops = {
3686 + .owner = THIS_MODULE,
3687 + .llseek = no_llseek,
3688 + .read = hss_chan_read,
3689 + .write = hss_chan_write,
3690 + .poll = hss_chan_poll,
3691 + .open = hss_chan_open,
3692 + .release = hss_chan_release,
3695 +static ssize_t create_chan(struct device *dev, struct device_attribute *attr,
3696 + const char *buf, size_t len)
3698 + struct port *port = dev_get_drvdata(dev);
3699 + struct chan_device *chan_dev;
3700 + u8 channels[MAX_CHANNELS];
3701 + size_t orig_len = len;
3702 + unsigned long flags;
3703 + unsigned int ch, id;
3706 + if ((err = parse_channels(&buf, &len, channels)) < 1)
3709 + if (!(chan_dev = kzalloc(sizeof(struct chan_device), GFP_KERNEL)))
3712 + spin_lock_irqsave(&npe_lock, flags);
3714 + if (port->mode != MODE_RAW && port->mode != MODE_G704) {
3719 + for (ch = 0; ch < MAX_CHANNELS; ch++)
3720 + if (channels[ch] && port->channels[ch] != CHANNEL_UNUSED) {
3721 + printk(KERN_DEBUG "Channel #%i already in use\n", ch);
3726 + for (id = 0; id < MAX_CHAN_DEVICES; id++)
3727 + if (port->chan_devices[id] == NULL)
3730 + if (id == MAX_CHAN_DEVICES) {
3735 + for (ch = 0; ch < MAX_CHANNELS; ch++)
3739 + minor = port->id * MAX_CHAN_DEVICES + ch;
3740 + chan_dev->id = id;
3741 + chan_dev->port = port;
3742 + chan_dev->dev = device_create(hss_class, dev, MKDEV(chan_major, minor),
3743 + "hss%uch%u", port->id, ch);
3744 + if (IS_ERR(chan_dev->dev)) {
3745 + err = PTR_ERR(chan_dev->dev);
3749 + cdev_init(&chan_dev->cdev, &chan_fops);
3750 + chan_dev->cdev.owner = THIS_MODULE;
3751 + if ((err = cdev_add(&chan_dev->cdev, MKDEV(chan_major, minor), 1)))
3752 + goto destroy_device;
3754 + for (ch = 0; ch < MAX_CHANNELS; ch++)
3756 + port->channels[ch] = id;
3757 + port->chan_devices[id] = chan_dev;
3758 + dev_set_drvdata(chan_dev->dev, chan_dev);
3759 + BUG_ON(device_create_file(chan_dev->dev, &chan_attr));
3761 + spin_unlock_irqrestore(&npe_lock, flags);
3765 + device_unregister(chan_dev->dev);
3768 + spin_unlock_irqrestore(&npe_lock, flags);
3772 +static ssize_t show_hdlc_chan(struct device *dev, struct device_attribute *attr,
3775 + return print_channels(dev_get_drvdata(dev), buf, CHANNEL_HDLC);
3778 +static ssize_t set_hdlc_chan(struct device *dev, struct device_attribute *attr,
3779 + const char *buf, size_t len)
3781 + struct port *port = dev_get_drvdata(dev);
3782 + u8 channels[MAX_CHANNELS];
3783 + size_t orig_len = len;
3784 + unsigned long flags;
3788 + if ((err = parse_channels(&buf, &len, channels)) < 0)
3791 + spin_lock_irqsave(&npe_lock, flags);
3793 + if (port->mode != MODE_RAW && port->mode != MODE_G704) {
3798 + for (ch = 0; ch < MAX_CHANNELS; ch++)
3799 + if (channels[ch] &&
3800 + port->channels[ch] != CHANNEL_UNUSED &&
3801 + port->channels[ch] != CHANNEL_HDLC) {
3802 + printk(KERN_DEBUG "Channel #%i already in use\n", ch);
3807 + for (ch = 0; ch < MAX_CHANNELS; ch++)
3809 + port->channels[ch] = CHANNEL_HDLC;
3810 + else if (port->channels[ch] == CHANNEL_HDLC)
3811 + port->channels[ch] = CHANNEL_UNUSED;
3813 + if (port->chan_open_count || port->hdlc_open) {
3814 + hss_config_set_lut(port);
3815 + hss_config_load(port);
3818 + spin_unlock_irqrestore(&npe_lock, flags);
3822 + spin_unlock_irqrestore(&npe_lock, flags);
3826 +static ssize_t show_clock_type(struct device *dev,
3827 + struct device_attribute *attr, char *buf)
3829 + struct port *port = dev_get_drvdata(dev);
3831 + strcpy(buf, port->clock_type == CLOCK_INT ? "int\n" : "ext\n");
3835 +static ssize_t set_clock_type(struct device *dev, struct device_attribute *attr,
3836 + const char *buf, size_t len)
3838 + struct port *port = dev_get_drvdata(dev);
3839 + size_t orig_len = len;
3840 + unsigned long flags;
3841 + unsigned int clk, err;
3843 + if (len && buf[len - 1] == '\n')
3848 + if (!memcmp(buf, "ext", 3))
3850 + else if (!memcmp(buf, "int", 3))
3855 + spin_lock_irqsave(&npe_lock, flags);
3856 + if (port->plat->set_clock)
3857 + clk = port->plat->set_clock(port->id, clk);
3858 + if (clk != CLOCK_EXT && clk != CLOCK_INT) {
3859 + err = -EINVAL; /* plat->set_clock shouldn't change the state */
3862 + port->clock_type = clk;
3863 + if (port->chan_open_count || port->hdlc_open) {
3864 + hss_config_set_line(port);
3865 + hss_config_load(port);
3867 + spin_unlock_irqrestore(&npe_lock, flags);
3871 + spin_unlock_irqrestore(&npe_lock, flags);
3875 +static ssize_t show_clock_rate(struct device *dev,
3876 + struct device_attribute *attr, char *buf)
3878 + struct port *port = dev_get_drvdata(dev);
3880 + sprintf(buf, "%u\n", port->clock_rate);
3881 + return strlen(buf) + 1;
3884 +static ssize_t set_clock_rate(struct device *dev, struct device_attribute *attr,
3885 + const char *buf, size_t len)
3888 + struct port *port = dev_get_drvdata(dev);
3889 + size_t orig_len = len;
3890 + unsigned long flags;
3891 + unsigned int rate;
3893 + if (len && buf[len - 1] == '\n')
3896 + if (get_number(&buf, &len, &rate, 1, 0xFFFFFFFFu))
3901 + spin_lock_irqsave(&npe_lock, flags);
3902 + port->clock_rate = rate;
3903 + spin_unlock_irqrestore(&npe_lock, flags);
3906 + return -EINVAL; /* FIXME not yet supported */
3909 +static ssize_t show_frame_size(struct device *dev,
3910 + struct device_attribute *attr, char *buf)
3912 + struct port *port = dev_get_drvdata(dev);
3914 + if (port->mode != MODE_RAW && port->mode != MODE_G704)
3917 + sprintf(buf, "%u\n", port->frame_size);
3918 + return strlen(buf) + 1;
3921 +static ssize_t set_frame_size(struct device *dev, struct device_attribute *attr,
3922 + const char *buf, size_t len)
3924 + struct port *port = dev_get_drvdata(dev);
3926 + unsigned long flags;
3927 + unsigned int size;
3929 + if (len && buf[len - 1] == '\n')
3932 + if (get_number(&buf, &len, &size, MIN_FRAME_SIZE, MAX_FRAME_SIZE))
3934 + if (len || size % 8 > 1)
3937 + spin_lock_irqsave(&npe_lock, flags);
3938 + if (port->mode != MODE_RAW && port->mode != MODE_G704)
3940 + else if (!port->chan_open_count && !port->hdlc_open)
3943 + port->frame_size = size;
3944 + port->frame_sync_offset = 0;
3946 + spin_unlock_irqrestore(&npe_lock, flags);
3950 +static ssize_t show_frame_offset(struct device *dev,
3951 + struct device_attribute *attr, char *buf)
3953 + struct port *port = dev_get_drvdata(dev);
3955 + sprintf(buf, "%u\n", port->frame_sync_offset);
3956 + return strlen(buf) + 1;
3959 +static ssize_t set_frame_offset(struct device *dev,
3960 + struct device_attribute *attr,
3961 + const char *buf, size_t len)
3963 + struct port *port = dev_get_drvdata(dev);
3964 + size_t orig_len = len;
3965 + unsigned long flags;
3966 + unsigned int offset;
3968 + if (len && buf[len - 1] == '\n')
3971 + if (get_number(&buf, &len, &offset, 0, port->frame_size - 1))
3976 + spin_lock_irqsave(&npe_lock, flags);
3978 + port->frame_sync_offset = offset;
3979 + if (port->chan_open_count || port->hdlc_open) {
3980 + hss_config_set_rx_frame(port);
3981 + hss_config_load(port);
3984 + spin_unlock_irqrestore(&npe_lock, flags);
3988 +static ssize_t show_loopback(struct device *dev, struct device_attribute *attr,
3991 + struct port *port = dev_get_drvdata(dev);
3993 + sprintf(buf, "%u\n", port->loopback);
3994 + return strlen(buf) + 1;
3997 +static ssize_t set_loopback(struct device *dev, struct device_attribute *attr,
3998 + const char *buf, size_t len)
4000 + struct port *port = dev_get_drvdata(dev);
4001 + size_t orig_len = len;
4002 + unsigned long flags;
4005 + if (len && buf[len - 1] == '\n')
4008 + if (get_number(&buf, &len, &lb, 0, 1))
4013 + spin_lock_irqsave(&npe_lock, flags);
4015 + if (port->loopback != lb) {
4016 + port->loopback = lb;
4017 + if (port->chan_open_count || port->hdlc_open) {
4018 + hss_config_set_core(port);
4019 + hss_config_load(port);
4021 + if (port->loopback || port->carrier)
4022 + netif_carrier_on(port->netdev);
4024 + netif_carrier_off(port->netdev);
4027 + spin_unlock_irqrestore(&npe_lock, flags);
4031 +static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
4034 + struct port *port = dev_get_drvdata(dev);
4036 +	switch (port->mode) {
4038 + strcpy(buf, "raw\n");
4041 + strcpy(buf, "g704\n");
4044 + strcpy(buf, "hdlc\n");
4047 + return strlen(buf) + 1;
4050 +static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
4051 + const char *buf, size_t len)
4053 + struct port *port = dev_get_drvdata(dev);
4055 + unsigned long flags;
4057 + if (len && buf[len - 1] == '\n')
4060 + spin_lock_irqsave(&npe_lock, flags);
4062 + if (port->chan_open_count || port->hdlc_open) {
4064 + } else if (len == 4 && !memcmp(buf, "hdlc", 4))
4065 + port->mode = MODE_HDLC;
4066 + else if (len == 3 && !memcmp(buf, "raw", 3))
4067 + port->mode = MODE_RAW;
4068 + else if (len == 4 && !memcmp(buf, "g704", 4))
4069 + port->mode = MODE_G704;
4073 + spin_unlock_irqrestore(&npe_lock, flags);
4077 +static struct device_attribute hss_attrs[] = {
4078 + __ATTR(create_chan, 0200, NULL, create_chan),
4079 + __ATTR(hdlc_chan, 0644, show_hdlc_chan, set_hdlc_chan),
4080 + __ATTR(clock_type, 0644, show_clock_type, set_clock_type),
4081 + __ATTR(clock_rate, 0644, show_clock_rate, set_clock_rate),
4082 + __ATTR(frame_size, 0644, show_frame_size, set_frame_size),
4083 + __ATTR(frame_offset, 0644, show_frame_offset, set_frame_offset),
4084 + __ATTR(loopback, 0644, show_loopback, set_loopback),
4085 + __ATTR(mode, 0644, show_mode, set_mode),
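All of these attributes are driven by writing short strings from userspace. A hypothetical C user follows; the sysfs path is an assumption built from DRV_NAME and platform id 0, and the channel-list syntax passed to create_chan is a guess, since parse_channels() is not visible in this excerpt:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* hypothetical location of HSS port 0 */
	const char *base = "/sys/bus/platform/devices/ixp4xx_hss.0";
	char path[128];

	snprintf(path, sizeof(path), "%s/mode", base);
	sysfs_write(path, "g704\n");	/* accepted by set_mode() above */
	snprintf(path, sizeof(path), "%s/create_chan", base);
	sysfs_write(path, "1-15\n");	/* guessed syntax, see parse_channels() */
	return 0;
}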
4088 +/*****************************************************************************
4090 + ****************************************************************************/
4092 +static int __devinit hss_init_one(struct platform_device *pdev)
4094 + struct port *port;
4095 + struct net_device *dev;
4096 + hdlc_device *hdlc;
4099 + if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
4101 + platform_set_drvdata(pdev, port);
4102 + port->id = pdev->id;
4104 + if ((port->npe = npe_request(0)) == NULL) {
4109 + port->dev = &pdev->dev;
4110 + port->plat = pdev->dev.platform_data;
4111 + if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
4116 + SET_NETDEV_DEV(dev, &pdev->dev);
4117 + hdlc = dev_to_hdlc(dev);
4118 + hdlc->attach = hss_hdlc_attach;
4119 + hdlc->xmit = hss_hdlc_xmit;
4120 + dev->open = hss_hdlc_open;
4121 + dev->stop = hss_hdlc_close;
4122 + dev->do_ioctl = hss_hdlc_ioctl;
4123 + dev->tx_queue_len = 100;
4124 + port->clock_type = CLOCK_EXT;
4125 + port->clock_rate = 2048000;
4126 + port->frame_size = 256; /* E1 */
4127 + memset(port->channels, CHANNEL_UNUSED, sizeof(port->channels));
4128 + init_waitqueue_head(&port->chan_tx_waitq);
4129 + init_waitqueue_head(&port->chan_rx_waitq);
4130 + netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
4132 + if ((err = register_hdlc_device(dev))) /* HDLC mode by default */
4133 + goto err_free_netdev;
4135 + for (i = 0; i < ARRAY_SIZE(hss_attrs); i++)
4136 + BUG_ON(device_create_file(port->dev, &hss_attrs[i]));
4138 + printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
4144 + npe_release(port->npe);
4145 + platform_set_drvdata(pdev, NULL);
4151 +static int __devexit hss_remove_one(struct platform_device *pdev)
4153 + struct port *port = platform_get_drvdata(pdev);
4156 + for (i = 0; i < ARRAY_SIZE(hss_attrs); i++)
4157 + device_remove_file(port->dev, &hss_attrs[i]);
4159 + unregister_hdlc_device(port->netdev);
4160 + free_netdev(port->netdev);
4161 + npe_release(port->npe);
4162 + platform_set_drvdata(pdev, NULL);
4167 +static struct platform_driver drv = {
4168 + .driver.name = DRV_NAME,
4169 + .probe = hss_init_one,
4170 + .remove = hss_remove_one,
4173 +static int __init hss_init_module(void)
4178 + if ((ixp4xx_read_feature_bits() &
4179 + (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
4180 + (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
4183 + if ((err = alloc_chrdev_region(&rdev, 0, HSS_COUNT * MAX_CHAN_DEVICES,
4187 + spin_lock_init(&npe_lock);
4189 + if (IS_ERR(hss_class = class_create(THIS_MODULE, "hss"))) {
4190 + printk(KERN_ERR "Can't register device class 'hss'\n");
4191 + err = PTR_ERR(hss_class);
4194 + if ((err = platform_driver_register(&drv)))
4195 + goto destroy_class;
4197 + chan_major = MAJOR(rdev);
4201 + class_destroy(hss_class);
4203 + unregister_chrdev_region(MKDEV(chan_major, 0),
4204 + HSS_COUNT * MAX_CHAN_DEVICES);
4208 +static void __exit hss_cleanup_module(void)
4210 + platform_driver_unregister(&drv);
4211 + class_destroy(hss_class);
4212 + unregister_chrdev_region(MKDEV(chan_major, 0),
4213 + HSS_COUNT * MAX_CHAN_DEVICES);
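Character-device numbering follows the layout used in create_chan() above (minor = port->id * MAX_CHAN_DEVICES + the device's first channel), and hss_init_module() reserves HSS_COUNT * MAX_CHAN_DEVICES minors in one region. The arithmetic, with toy values standing in for the two constants defined earlier in the file:

#include <stdio.h>

#define HSS_COUNT		2	/* toy values, not the driver's */
#define MAX_CHAN_DEVICES	32

int main(void)
{
	unsigned int port = 1, first_ch = 5;	/* hypothetical hss1ch5 */

	printf("region size %u minors, minor(hss%uch%u) = %u\n",
	       HSS_COUNT * MAX_CHAN_DEVICES, port, first_ch,
	       port * MAX_CHAN_DEVICES + first_ch);
	return 0;
}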
4216 +MODULE_AUTHOR("Krzysztof Halasa");
4217 +MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
4218 +MODULE_LICENSE("GPL v2");
4219 +MODULE_ALIAS("platform:ixp4xx_hss");
4220 +module_init(hss_init_module);
4221 +module_exit(hss_cleanup_module);