#define TX_RING_SIZE 32
#define TX_QUEUE_LEN 28 /* Limit ring entries actually used. */
-#define TX_TIMEOUT HZ*400
+#define TX_TIMEOUT (HZ * 400)
#define RX_DESCS_SIZE (RX_RING_SIZE * sizeof(struct dma_desc *))
#define RX_SKBS_SIZE (RX_RING_SIZE * sizeof(struct sk_buff *))
t = desc->buf2;
SW_DBG(" buf2 %08X addr=%08X%s\n", desc->buf2,
t & DESC_ADDR_MASK,
- (t & DESC_BUF2_EN) ? " EN" : "" );
+ (t & DESC_BUF2_EN) ? " EN" : "");
t = desc->misc;
if (tx)
status = sw_int_status() & SWITCH_INTS_POLL;
if ((done < limit) && (!status)) {
SW_DBG("disable polling mode for %s\n", dev->name);
- netif_rx_complete(dev, napi);
+ napi_complete(napi);
sw_int_unmask(SWITCH_INTS_POLL);
return 0;
}
sw_dump_intr_mask("poll ints", status);
SW_DBG("enable polling mode for %s\n", dev->name);
sw_int_mask(SWITCH_INTS_POLL);
- netif_rx_schedule(dev, &priv->napi);
+ napi_schedule(&priv->napi);
}
#else
sw_int_ack(status);
- if (status & (SWITCH_INT_RLD | SWITCH_INT_LDF)) {
+ if (status & (SWITCH_INT_RLD | SWITCH_INT_LDF))
adm5120_switch_rx(RX_RING_SIZE);
- }
- if (status & SWITCH_INT_SLD) {
+ if (status & SWITCH_INT_SLD)
adm5120_switch_tx();
- }
#endif
return IRQ_HANDLED;
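
For reference, the NAPI flow the converted hunks follow: the interrupt handler masks the switch poll interrupts and schedules NAPI, and the poll callback calls napi_complete() and unmasks them once it finishes under budget. A minimal sketch, assuming the driver's sw_int_mask()/sw_int_unmask() helpers, the SWITCH_INTS_POLL mask and the adm5120_if_priv context; the adm5120_example_* function names and the RX helper are hypothetical, not the driver's:

/* Sketch only: adm5120_example_* names are hypothetical. */
static int adm5120_example_poll(struct napi_struct *napi, int budget)
{
	int done;

	/* Process up to 'budget' received frames. */
	done = adm5120_example_rx(napi, budget);	/* hypothetical RX helper */
	if (done < budget) {
		/* All pending work handled: leave polling mode, unmask IRQs. */
		napi_complete(napi);
		sw_int_unmask(SWITCH_INTS_POLL);
	}
	return done;
}

static irqreturn_t adm5120_example_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct adm5120_if_priv *priv = netdev_priv(dev);

	/* Mask the poll interrupts and defer the work to the poll callback. */
	sw_int_mask(SWITCH_INTS_POLL);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}
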
{
memset(desc, 0, num * sizeof(*desc));
desc[num-1].buf1 |= DESC_EOR;
- memset(skbl, 0, sizeof(struct skb*)*num);
+ memset(skbl, 0, sizeof(struct sk_buff *) * num);
cur_txl = 0;
dirty_txl = 0;
break;
}
skb_reserve(skbl[i], SKB_RESERVE_LEN);
- adm5120_rx_dma_update(&desc[i], skbl[i], (num-1==i));
+ adm5120_rx_dma_update(&desc[i], skbl[i], (num - 1 == i));
}
cur_rxl = 0;
sw_write_reg(SWITCH_REG_MAC_WT0, t);
- while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD));
+ while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
+ ;
}
static void adm5120_set_vlan(char *matrix)
sw_write_reg(SWITCH_REG_VLAN_G2, val);
/* Now set/update the port vs. device lookup table */
- for (port=0; port<SWITCH_NUM_PORTS; port++) {
- for (vlan_port=0; vlan_port<SWITCH_NUM_PORTS && !(matrix[vlan_port] & (0x00000001 << port)); vlan_port++);
- if (vlan_port <SWITCH_NUM_PORTS)
+ for (port = 0; port < SWITCH_NUM_PORTS; port++) {
+ for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS && !(matrix[vlan_port] & (0x00000001 << port)); vlan_port++)
+ ;
+ if (vlan_port < SWITCH_NUM_PORTS)
adm5120_port[port] = adm5120_devs[vlan_port];
else
adm5120_port[port] = NULL;
adm5120_if_napi_enable(dev);
- err = request_irq(dev->irq, adm5120_switch_irq,
- (IRQF_SHARED | IRQF_DISABLED), dev->name, dev);
+ err = request_irq(dev->irq, adm5120_switch_irq, IRQF_SHARED,
+ dev->name, dev);
if (err) {
SW_ERR("unable to get irq for %s\n", dev->name);
goto err;
data |= DESC_ADDR(skb->data);
desc->misc =
- ((skb->len<ETH_ZLEN?ETH_ZLEN:skb->len) << DESC_PKTLEN_SHIFT) |
+ ((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) |
(0x1 << priv->vlan_no);
desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
static void adm5120_if_tx_timeout(struct net_device *dev)
{
- SW_INFO("TX timeout on %s\n",dev->name);
+ SW_INFO("TX timeout on %s\n", dev->name);
}
static void adm5120_if_set_multicast_list(struct net_device *dev)
/* to the CPU, the Bridge Test Mode has to be activated. */
/* Check if there is any vlan in promisc mode. */
- if (t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
- t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */
- else
+ if (~t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
t |= CPUP_CONF_BTM; /* Enable Bridge Testing Mode */
+ else
+ t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */
sw_write_reg(SWITCH_REG_CPUP_CONF, t);
static int adm5120_if_set_mac_address(struct net_device *dev, void *p)
{
- struct sockaddr *addr = p;
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
adm5120_write_mac(dev);
return 0;
}
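
The eth_mac_addr() core helper now takes over the validation and copy that the removed memcpy() did by hand. A sketch of the equivalent logic, assuming the helper behaves as in kernels of this generation (the real implementation lives in net/ethernet/eth.c):

/* Equivalent logic (sketch), not the in-tree implementation. */
static int example_eth_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}
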
struct adm5120_sw_info info;
struct adm5120_if_priv *priv = netdev_priv(dev);
- switch(cmd) {
+ switch (cmd) {
case SIOCGADMINFO:
info.magic = 0x5120;
info.ports = adm5120_nrdevs;
return 0;
}
+static const struct net_device_ops adm5120sw_netdev_ops = {
+ .ndo_open = adm5120_if_open,
+ .ndo_stop = adm5120_if_stop,
+ .ndo_start_xmit = adm5120_if_hard_start_xmit,
+ .ndo_set_multicast_list = adm5120_if_set_multicast_list,
+ .ndo_do_ioctl = adm5120_if_do_ioctl,
+ .ndo_tx_timeout = adm5120_if_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = adm5120_if_set_mac_address,
+};
+
static struct net_device *adm5120_if_alloc(void)
{
struct net_device *dev;
priv->dev = dev;
dev->irq = ADM5120_IRQ_SWITCH;
- dev->open = adm5120_if_open;
- dev->hard_start_xmit = adm5120_if_hard_start_xmit;
- dev->stop = adm5120_if_stop;
- dev->set_multicast_list = adm5120_if_set_multicast_list;
- dev->do_ioctl = adm5120_if_do_ioctl;
- dev->tx_timeout = adm5120_if_tx_timeout;
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->set_mac_address = adm5120_if_set_mac_address;
+ dev->netdev_ops = &adm5120sw_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_ADM5120_SWITCH_NAPI
netif_napi_add(dev, &priv->napi, adm5120_if_poll, 64);