+static void adm5120_set_bw(char *matrix)
+{
+	unsigned long val;
+
+	/* Ports 0 to 3 are set using the bandwidth control 0 register */
+	val = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
+	sw_write_reg(SWITCH_REG_BW_CNTL0, val);
+
+	/* Ports 4 and 5 are set using the bandwidth control 1 register */
+	val = matrix[4];
+	if (matrix[5] == 1)
+		sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
+	else
+		sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x80000000);
+
+	SW_DBG("D: ctl0 0x%x, ctl1 0x%x\n", sw_read_reg(SWITCH_REG_BW_CNTL0),
+		sw_read_reg(SWITCH_REG_BW_CNTL1));
+}
+
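+/*
+ * Reset the TX ring: zero every descriptor, mark the last one as
+ * end-of-ring and clear the shadow skb array. Note that clearing the
+ * array only forgets the skb pointers; it does not free them.
+ */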
+static void adm5120_switch_tx_ring_reset(struct dma_desc *desc,
+		struct sk_buff **skbl, int num)
+{
+	memset(desc, 0, num * sizeof(*desc));
+	desc[num - 1].buf1 |= DESC_EOR;
+	memset(skbl, 0, sizeof(struct sk_buff *) * num);
+
+	cur_txl = 0;
+	dirty_txl = 0;
+}
+
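+/*
+ * Reset the RX ring: zero every descriptor, then attach a freshly
+ * allocated receive buffer to each slot. Stops early if an allocation
+ * fails, leaving the remaining descriptors zeroed.
+ */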
+static void adm5120_switch_rx_ring_reset(struct dma_desc *desc,
+		struct sk_buff **skbl, int num)
+{
+	int i;
+
+	memset(desc, 0, num * sizeof(*desc));
+	for (i = 0; i < num; i++) {
+		skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
+		if (!skbl[i])
+			break;
+		skb_reserve(skbl[i], SKB_RESERVE_LEN);
+		adm5120_rx_dma_update(&desc[i], skbl[i], (i == num - 1));
+	}
+
+	cur_rxl = 0;
+	dirty_rxl = 0;
+}
+
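+/* Allocate the coherent TX descriptor ring and its shadow skb array. */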
+static int adm5120_switch_tx_ring_alloc(void)
+{
+	int err;
+
+	txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,
+			GFP_ATOMIC);
+	if (!txl_descs) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
+	if (!txl_skbuff) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	/* cleanup of partial allocations is left to adm5120_switch_tx_ring_free() */
+	return err;
+}
+
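+/*
+ * Free the TX ring: drop any still-queued skbs, then release the shadow
+ * array and the descriptor ring. Safe to call after a failed allocation,
+ * as every pointer is checked before use.
+ */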
+static void adm5120_switch_tx_ring_free(void)
+{
+	int i;
+
+	if (txl_skbuff) {
+		for (i = 0; i < TX_RING_SIZE; i++)
+			if (txl_skbuff[i])
+				kfree_skb(txl_skbuff[i]);
+		kfree(txl_skbuff);
+	}
+
+	if (txl_descs)
+		dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
+			txl_descs_dma);
+}
+
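+/*
+ * Allocate the coherent RX descriptor ring, its shadow skb array and one
+ * receive buffer per slot, with headroom reserved in each buffer.
+ */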
+static int adm5120_switch_rx_ring_alloc(void)
+{
+	int err;
+	int i;
+
+	/* init RX ring */
+	rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
+			GFP_ATOMIC);
+	if (!rxl_descs) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
+	if (!rxl_skbuff) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb;
+
+		skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
+		if (!skb) {
+			err = -ENOMEM;
+			goto err;
+		}
+		rxl_skbuff[i] = skb;
+		skb_reserve(skb, SKB_RESERVE_LEN);
+	}
+
+	return 0;
+
+err:
+	/* cleanup of partial allocations is left to adm5120_switch_rx_ring_free() */
+	return err;
+}
+
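+/*
+ * Free the RX ring: release all receive buffers, the shadow array and
+ * the descriptor ring. Safe to call after a failed allocation.
+ */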
+static void adm5120_switch_rx_ring_free(void)
+{
+	int i;
+
+	if (rxl_skbuff) {
+		for (i = 0; i < RX_RING_SIZE; i++)
+			if (rxl_skbuff[i])
+				kfree_skb(rxl_skbuff[i]);
+		kfree(rxl_skbuff);
+	}
+
+	if (rxl_descs)
+		dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
+			rxl_descs_dma);
+}
+
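+/*
+ * Program the MAC address of this interface, together with its VLAN
+ * number, into the switch's address table via the MAC write registers.
+ */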
+static void adm5120_write_mac(struct net_device *dev)
+{
+	struct adm5120_if_priv *priv = netdev_priv(dev);
+	unsigned char *mac = dev->dev_addr;
+	u32 t;
+
+	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
+		(mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
+	sw_write_reg(SWITCH_REG_MAC_WT1, t);
+
+	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
+		MAC_WT0_MAWC | MAC_WT0_WVE | (priv->vlan_no << 3);
+
+	sw_write_reg(SWITCH_REG_MAC_WT0, t);
+
+	/* wait until the write to the address table completes */
+	while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
+		;
+}
+