target/linux/octeon/patches/006-octeon_mgmt_driver.patch
1 Signed-off-by: David Daney <ddaney@caviumnetworks.com>
2 ---
3 arch/mips/include/asm/octeon/cvmx-mdio.h | 577 +++++++++++++++++++++
4 drivers/net/Kconfig | 8 +
5 drivers/net/Makefile | 1 +
6 drivers/net/octeon/Makefile | 11 +
7 drivers/net/octeon/cvmx-mgmt-port.c | 818 ++++++++++++++++++++++++++++++
8 drivers/net/octeon/cvmx-mgmt-port.h | 168 ++++++
9 drivers/net/octeon/octeon-mgmt-port.c | 389 ++++++++++++++
10 7 files changed, 1972 insertions(+), 0 deletions(-)
11 create mode 100644 arch/mips/include/asm/octeon/cvmx-mdio.h
12 create mode 100644 drivers/net/octeon/Makefile
13 create mode 100644 drivers/net/octeon/cvmx-mgmt-port.c
14 create mode 100644 drivers/net/octeon/cvmx-mgmt-port.h
15 create mode 100644 drivers/net/octeon/octeon-mgmt-port.c
16
17 --- /dev/null
18 +++ b/arch/mips/include/asm/octeon/cvmx-mdio.h
19 @@ -0,0 +1,577 @@
20 +/***********************license start***************
21 + * Author: Cavium Networks
22 + *
23 + * Contact: support@caviumnetworks.com
24 + * This file is part of the OCTEON SDK
25 + *
26 + * Copyright (c) 2003-2008 Cavium Networks
27 + *
28 + * This file is free software; you can redistribute it and/or modify
29 + * it under the terms of the GNU General Public License, Version 2, as
30 + * published by the Free Software Foundation.
31 + *
32 + * This file is distributed in the hope that it will be useful, but
33 + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
34 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
35 + * NONINFRINGEMENT. See the GNU General Public License for more
36 + * details.
37 + *
38 + * You should have received a copy of the GNU General Public License
39 + * along with this file; if not, write to the Free Software
40 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
41 + * or visit http://www.gnu.org/licenses/.
42 + *
43 + * This file may also be available under a different license from Cavium.
44 + * Contact Cavium Networks for more information
45 + ***********************license end**************************************/
46 +
47 +/**
48 + *
49 + * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
50 + * clause 22 and clause 45 operations.
51 + *
52 + */
53 +
54 +#ifndef __CVMX_MIO_H__
55 +#define __CVMX_MIO_H__
56 +
57 +#include "cvmx-smix-defs.h"
58 +
59 +/**
60 + * PHY register 0 from the 802.3 spec
61 + */
62 +#define CVMX_MDIO_PHY_REG_CONTROL 0
63 +union cvmx_mdio_phy_reg_control {
64 + uint16_t u16;
65 + struct {
66 + uint16_t reset:1;
67 + uint16_t loopback:1;
68 + uint16_t speed_lsb:1;
69 + uint16_t autoneg_enable:1;
70 + uint16_t power_down:1;
71 + uint16_t isolate:1;
72 + uint16_t restart_autoneg:1;
73 + uint16_t duplex:1;
74 + uint16_t collision_test:1;
75 + uint16_t speed_msb:1;
76 + uint16_t unidirectional_enable:1;
77 + uint16_t reserved_0_4:5;
78 + } s;
79 +};
80 +
81 +/**
82 + * PHY register 1 from the 802.3 spec
83 + */
84 +#define CVMX_MDIO_PHY_REG_STATUS 1
85 +union cvmx_mdio_phy_reg_status {
86 + uint16_t u16;
87 + struct {
88 + uint16_t capable_100base_t4:1;
89 + uint16_t capable_100base_x_full:1;
90 + uint16_t capable_100base_x_half:1;
91 + uint16_t capable_10_full:1;
92 + uint16_t capable_10_half:1;
93 + uint16_t capable_100base_t2_full:1;
94 + uint16_t capable_100base_t2_half:1;
95 + uint16_t capable_extended_status:1;
96 + uint16_t capable_unidirectional:1;
97 + uint16_t capable_mf_preamble_suppression:1;
98 + uint16_t autoneg_complete:1;
99 + uint16_t remote_fault:1;
100 + uint16_t capable_autoneg:1;
101 + uint16_t link_status:1;
102 + uint16_t jabber_detect:1;
103 + uint16_t capable_extended_registers:1;
104 +
105 + } s;
106 +};
107 +
108 +/**
109 + * PHY register 2 from the 802.3 spec
110 + */
111 +#define CVMX_MDIO_PHY_REG_ID1 2
112 +union cvmx_mdio_phy_reg_id1 {
113 + uint16_t u16;
114 + struct {
115 + uint16_t oui_bits_3_18;
116 + } s;
117 +};
118 +
119 +/**
120 + * PHY register 3 from the 802.3 spec
121 + */
122 +#define CVMX_MDIO_PHY_REG_ID2 3
123 +union cvmx_mdio_phy_reg_id2 {
124 + uint16_t u16;
125 + struct {
126 + uint16_t oui_bits_19_24:6;
127 + uint16_t model:6;
128 + uint16_t revision:4;
129 + } s;
130 +};
131 +
132 +/**
133 + * PHY register 4 from the 802.3 spec
134 + */
135 +#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
136 +union cvmx_mdio_phy_reg_autoneg_adver {
137 + uint16_t u16;
138 + struct {
139 + uint16_t next_page:1;
140 + uint16_t reserved_14:1;
141 + uint16_t remote_fault:1;
142 + uint16_t reserved_12:1;
143 + uint16_t asymmetric_pause:1;
144 + uint16_t pause:1;
145 + uint16_t advert_100base_t4:1;
146 + uint16_t advert_100base_tx_full:1;
147 + uint16_t advert_100base_tx_half:1;
148 + uint16_t advert_10base_tx_full:1;
149 + uint16_t advert_10base_tx_half:1;
150 + uint16_t selector:5;
151 + } s;
152 +};
153 +
154 +/**
155 + * PHY register 5 from the 802.3 spec
156 + */
157 +#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
158 +union cvmx_mdio_phy_reg_link_partner_ability {
159 + uint16_t u16;
160 + struct {
161 + uint16_t next_page:1;
162 + uint16_t ack:1;
163 + uint16_t remote_fault:1;
164 + uint16_t reserved_12:1;
165 + uint16_t asymmetric_pause:1;
166 + uint16_t pause:1;
167 + uint16_t advert_100base_t4:1;
168 + uint16_t advert_100base_tx_full:1;
169 + uint16_t advert_100base_tx_half:1;
170 + uint16_t advert_10base_tx_full:1;
171 + uint16_t advert_10base_tx_half:1;
172 + uint16_t selector:5;
173 + } s;
174 +};
175 +
176 +/**
177 + * PHY register 6 from the 802.3 spec
178 + */
179 +#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
180 +union cvmx_mdio_phy_reg_autoneg_expansion {
181 + uint16_t u16;
182 + struct {
183 + uint16_t reserved_5_15:11;
184 + uint16_t parallel_detection_fault:1;
185 + uint16_t link_partner_next_page_capable:1;
186 + uint16_t local_next_page_capable:1;
187 + uint16_t page_received:1;
188 + uint16_t link_partner_autoneg_capable:1;
189 +
190 + } s;
191 +};
192 +
193 +/**
194 + * PHY register 9 from the 802.3 spec
195 + */
196 +#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
197 +union cvmx_mdio_phy_reg_control_1000 {
198 + uint16_t u16;
199 + struct {
200 + uint16_t test_mode:3;
201 + uint16_t manual_master_slave:1;
202 + uint16_t master:1;
203 + uint16_t port_type:1;
204 + uint16_t advert_1000base_t_full:1;
205 + uint16_t advert_1000base_t_half:1;
206 + uint16_t reserved_0_7:8;
207 + } s;
208 +};
209 +
210 +/**
211 + * PHY register 10 from the 802.3 spec
212 + */
213 +#define CVMX_MDIO_PHY_REG_STATUS_1000 10
214 +union cvmx_mdio_phy_reg_status_1000 {
215 + uint16_t u16;
216 + struct {
217 + uint16_t master_slave_fault:1;
218 + uint16_t is_master:1;
219 + uint16_t local_receiver_ok:1;
220 + uint16_t remote_receiver_ok:1;
221 + uint16_t remote_capable_1000base_t_full:1;
222 + uint16_t remote_capable_1000base_t_half:1;
223 + uint16_t reserved_8_9:2;
224 + uint16_t idle_error_count:8;
225 + } s;
226 +};
227 +
228 +/**
229 + * PHY register 15 from the 802.3 spec
230 + */
231 +#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
232 +union cvmx_mdio_phy_reg_extended_status {
233 + uint16_t u16;
234 + struct {
235 + uint16_t capable_1000base_x_full:1;
236 + uint16_t capable_1000base_x_half:1;
237 + uint16_t capable_1000base_t_full:1;
238 + uint16_t capable_1000base_t_half:1;
239 + uint16_t reserved_0_11:12;
240 + } s;
241 +};
242 +
243 +/**
244 + * PHY register 13 from the 802.3 spec
245 + */
246 +#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
247 +union cvmx_mdio_phy_reg_mmd_control {
248 + uint16_t u16;
249 + struct {
250 + uint16_t function:2;
251 + uint16_t reserved_5_13:9;
252 + uint16_t devad:5;
253 + } s;
254 +};
255 +
256 +/**
257 + * PHY register 14 from the 802.3 spec
258 + */
259 +#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
260 +union cvmx_mdio_phy_reg_mmd_address_data {
261 + uint16_t u16;
262 + struct {
263 + uint16_t address_data:16;
264 + } s;
265 +};
266 +
267 +/* Operating request encodings. */
268 +#define MDIO_CLAUSE_22_WRITE 0
269 +#define MDIO_CLAUSE_22_READ 1
270 +
271 +#define MDIO_CLAUSE_45_ADDRESS 0
272 +#define MDIO_CLAUSE_45_WRITE 1
273 +#define MDIO_CLAUSE_45_READ_INC 2
274 +#define MDIO_CLAUSE_45_READ 3
275 +
276 +/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
277 +#define CVMX_MMD_DEVICE_PMA_PMD 1
278 +#define CVMX_MMD_DEVICE_WIS 2
279 +#define CVMX_MMD_DEVICE_PCS 3
280 +#define CVMX_MMD_DEVICE_PHY_XS 4
281 +#define CVMX_MMD_DEVICE_DTE_XS       5
282 +#define CVMX_MMD_DEVICE_TC 6
283 +#define CVMX_MMD_DEVICE_CL22_EXT 29
284 +#define CVMX_MMD_DEVICE_VENDOR_1 30
285 +#define CVMX_MMD_DEVICE_VENDOR_2 31
286 +
287 +/**
288 + * Perform an MII read. This function is used to read PHY
289 + * registers controlling auto negotiation.
290 + *
291 + * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
292 + *                 support multiple buses.
293 + * @phy_id: The MII phy id
294 + * @location: Register location to read
295 + *
296 + * Returns Result from the read or -1 on failure
297 + */
298 +static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
299 +{
300 + union cvmx_smix_cmd smi_cmd;
301 + union cvmx_smix_rd_dat smi_rd;
302 + int timeout = 1000;
303 +
304 + smi_cmd.u64 = 0;
305 + smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
306 + smi_cmd.s.phy_adr = phy_id;
307 + smi_cmd.s.reg_adr = location;
308 + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
309 +
310 + do {
311 + cvmx_wait(1000);
312 + smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
313 + } while (smi_rd.s.pending && timeout--);
314 +
315 + if (smi_rd.s.val)
316 + return smi_rd.s.dat;
317 + else
318 + return -1;
319 +}
320 +
321 +/**
322 + * Perform an MII write. This function is used to write PHY
323 + * registers controlling auto negotiation.
324 + *
325 + * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
326 + *                 support multiple buses.
327 + * @phy_id: The MII phy id
328 + * @location: Register location to write
329 + * @val: Value to write
330 + *
331 + * Returns -1 on error
332 + * 0 on success
333 + */
334 +static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
335 +{
336 + union cvmx_smix_cmd smi_cmd;
337 + union cvmx_smix_wr_dat smi_wr;
338 + int timeout = 1000;
339 +
340 + smi_wr.u64 = 0;
341 + smi_wr.s.dat = val;
342 + cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
343 +
344 + smi_cmd.u64 = 0;
345 + smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
346 + smi_cmd.s.phy_adr = phy_id;
347 + smi_cmd.s.reg_adr = location;
348 + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
349 +
350 + do {
351 + cvmx_wait(1000);
352 + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
353 + } while (smi_wr.s.pending && --timeout);
354 + if (timeout <= 0)
355 + return -1;
356 +
357 + return 0;
358 +}
359 +
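
The two clause 22 helpers above are the building blocks for everything else in this file. As a rough usage sketch (illustrative only, not part of the patch; the helper name and the bus/PHY addresses 0/0 are assumptions, and the headers above are assumed to be included), restarting auto-negotiation through the register 0 union defined earlier could look like this:

	/* Sketch: enable and restart auto-negotiation on the PHY at
	 * address 0 of SMI bus 0, using the clause 22 helpers above. */
	static void example_restart_autoneg(void)
	{
		union cvmx_mdio_phy_reg_control control;
		int reg = cvmx_mdio_read(0, 0, CVMX_MDIO_PHY_REG_CONTROL);

		if (reg < 0)
			return;		/* read failed or timed out */
		control.u16 = reg;
		control.s.autoneg_enable = 1;
		control.s.restart_autoneg = 1;
		cvmx_mdio_write(0, 0, CVMX_MDIO_PHY_REG_CONTROL, control.u16);
	}
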
360 +/**
361 + * Perform an IEEE 802.3 clause 45 MII read using clause 22 operations. This
362 + * function is used to read PHY registers controlling auto negotiation.
363 + *
364 + * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
365 + *                 support multiple buses.
366 + * @phy_id: The MII phy id
367 + * @device: MDIO Manageable Device (MMD) id
368 + * @location: Register location to read
369 + *
370 + * Returns Result from the read or -1 on failure
371 + */
372 +
373 +static inline int cvmx_mdio_45_via_22_read(int bus_id, int phy_id, int device,
374 + int location)
375 +{
376 + union cvmx_mdio_phy_reg_mmd_control mmd_control;
377 +
378 + /*
379 + * a) To Register 13, write the Function field to 00 (address)
380 + * and DEVAD field to the device address value for the
381 + * desired MMD;
382 + */
383 + mmd_control.u16 = 0;
384 + mmd_control.s.function = MDIO_CLAUSE_45_ADDRESS;
385 + mmd_control.s.devad = device;
386 + cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_MMD_CONTROL,
387 + mmd_control.u16);
388 +
389 + /*
390 + * b) To Register 14, write the desired address value to the
391 + * MMD's address register;
392 + */
393 + cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA,
394 + location);
395 +
396 + /*
397 + * c) To Register 13, write the Function field to 01 (Data, no
398 + * post increment) and DEVAD field to the same device
399 + * address value for the desired MMD;
400 + */
401 + mmd_control.u16 = 0;
402 + mmd_control.s.function = MDIO_CLAUSE_45_READ;
403 + mmd_control.s.devad = device;
404 + cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_MMD_CONTROL,
405 + mmd_control.u16);
406 +
407 + /*
408 + * d) From Register 14, read the content of the MMD's selected
409 + * register.
410 + */
411 + return cvmx_mdio_read(bus_id, phy_id,
412 + CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA);
413 +}
414 +
415 +/**
416 + * Perform an IEEE 802.3 clause 45 MII write using clause 22
417 + * operations. This function is used to write PHY registers
418 + * controlling auto negotiation.
419 + *
420 + * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
421 + *                 support multiple buses.
422 + * @phy_id: The MII phy id
423 + * @device: MDIO Manageable Device (MMD) id
424 + * @location: Register location to write
425 + * @val: Value to write
426 + *
427 + * Returns -1 on error
428 + * 0 on success
429 + */
430 +static inline int cvmx_mdio_45_via_22_write(int bus_id, int phy_id, int device,
431 + int location, int val)
432 +{
433 + union cvmx_mdio_phy_reg_mmd_control mmd_control;
434 +
435 + /*
436 + * a) To Register 13, write the Function field to 00 (address)
437 + * and DEVAD field to the device address value for the
438 + * desired MMD;
439 + */
440 + mmd_control.u16 = 0;
441 + mmd_control.s.function = MDIO_CLAUSE_45_ADDRESS;
442 + mmd_control.s.devad = device;
443 + cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_MMD_CONTROL,
444 + mmd_control.u16);
445 +
446 + /*
447 + * b) To Register 14, write the desired address value to the
448 + * MMD's address register;
449 + */
450 + cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA,
451 + location);
452 +
453 + /*
454 + * c) To Register 13, write the Function field to 01 (Data, no
455 + * post increment) and DEVAD field to the same device
456 + * address value for the desired MMD;
457 + */
458 + mmd_control.u16 = 0;
459 + mmd_control.s.function = MDIO_CLAUSE_45_READ;
460 + mmd_control.s.devad = device;
461 + cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_MMD_CONTROL,
462 + mmd_control.u16);
463 +
464 + /*
465 + * d) To Register 14, write the content of the MMD's selected
466 + * register.
467 + */
468 + return cvmx_mdio_write(bus_id, phy_id,
469 + CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA, val);
472 +}
473 +
474 +/**
475 + * Perform an IEEE 802.3 clause 45 MII read. This function is used to read PHY
476 + * registers controlling auto negotiation.
477 + *
478 + * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
479 + *                 support multiple buses.
480 + * @phy_id: The MII phy id
481 + * @device: MDIO Manageable Device (MMD) id
482 + * @location: Register location to read
483 + *
484 + * Returns Result from the read or -1 on failure
485 + */
486 +
487 +static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
488 + int location)
489 +{
490 + union cvmx_smix_cmd smi_cmd;
491 + union cvmx_smix_rd_dat smi_rd;
492 + union cvmx_smix_wr_dat smi_wr;
493 + int timeout = 1000;
494 +
495 + smi_wr.u64 = 0;
496 + smi_wr.s.dat = location;
497 + cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
498 +
499 + smi_cmd.u64 = 0;
500 + smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
501 + smi_cmd.s.phy_adr = phy_id;
502 + smi_cmd.s.reg_adr = device;
503 + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
504 +
505 + do {
506 + cvmx_wait(1000);
507 + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
508 + } while (smi_wr.s.pending && --timeout);
509 + if (timeout <= 0)
510 + return -1;
511 +
512 + smi_cmd.u64 = 0;
513 + smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
514 + smi_cmd.s.phy_adr = phy_id;
515 + smi_cmd.s.reg_adr = device;
516 + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
517 +
518 + do {
519 + cvmx_wait(1000);
520 + smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
521 +	} while (smi_rd.s.pending && --timeout);
522 +
523 +	if (timeout <= 0)
524 + cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
525 + "device %2d register %2d TIME OUT\n",
526 + bus_id, phy_id, device, location);
527 +
528 + if (smi_rd.s.val)
529 + return smi_rd.s.dat;
530 + else {
531 + cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
532 + "device %2d register %2d INVALID READ\n",
533 + bus_id, phy_id, device, location);
534 + return -1;
535 + }
536 +}
537 +
538 +/**
539 + * Perform an IEEE 802.3 clause 45 MII write. This function is used to
540 + * write PHY registers controlling auto negotiation.
541 + *
542 + * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
543 + *                 support multiple buses.
544 + * @phy_id: The MII phy id
545 + * @device: MDIO Manageable Device (MMD) id
546 + * @location: Register location to write
547 + * @val: Value to write
548 + *
549 + * Returns -1 on error
550 + * 0 on success
551 + */
552 +static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
553 + int location, int val)
554 +{
555 + union cvmx_smix_cmd smi_cmd;
556 + union cvmx_smix_wr_dat smi_wr;
557 + int timeout = 1000;
558 +
559 + smi_wr.u64 = 0;
560 + smi_wr.s.dat = location;
561 + cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
562 +
563 + smi_cmd.u64 = 0;
564 + smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
565 + smi_cmd.s.phy_adr = phy_id;
566 + smi_cmd.s.reg_adr = device;
567 + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
568 +
569 + do {
570 + cvmx_wait(1000);
571 + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
572 + } while (smi_wr.s.pending && --timeout);
573 + if (timeout <= 0)
574 + return -1;
575 +
576 + smi_wr.u64 = 0;
577 + smi_wr.s.dat = val;
578 + cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
579 +
580 + smi_cmd.u64 = 0;
581 + smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
582 + smi_cmd.s.phy_adr = phy_id;
583 + smi_cmd.s.reg_adr = device;
584 + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
585 +
586 + do {
587 + cvmx_wait(1000);
588 + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
589 + } while (smi_wr.s.pending && --timeout);
590 + if (timeout <= 0)
591 + return -1;
592 +
593 + return 0;
594 +}
595 +
596 +#endif
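
The MMD identifiers above select which device inside a clause 45 PHY (for example within a XENPAK module) a register access targets. A hedged sketch (illustrative only, not part of the patch; the helper name and the bus/PHY address 0/0 are assumptions) of reading register 0 of the PMA/PMD device:

	/* Sketch: read PMA/PMD register 0 from the clause 45 PHY at
	 * address 0 on SMI bus 0. cvmx_mdio_45_read() returns -1 on a
	 * timeout or an invalid read. */
	static void example_read_pma_pmd(void)
	{
		int val = cvmx_mdio_45_read(0, 0, CVMX_MMD_DEVICE_PMA_PMD, 0);

		if (val >= 0)
			cvmx_dprintf("PMA/PMD register 0: 0x%04x\n", val);
	}
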
597 --- a/drivers/net/Kconfig
598 +++ b/drivers/net/Kconfig
599 @@ -2035,6 +2035,14 @@ config ATL2
600 To compile this driver as a module, choose M here. The module
601 will be called atl2.
602
603 +config OCTEON_MGMT
604 + tristate "OCTEON Management port ethernet driver (CN5XXX)"
605 + depends on CPU_CAVIUM_OCTEON
606 + default y
607 + help
608 + This option enables the ethernet driver for the management port on
609 + CN52XX, CN57XX, CN56XX, CN55XX, and CN54XX chips.
610 +
611 source "drivers/net/fs_enet/Kconfig"
612
613 endif # NET_ETHERNET
614 --- a/drivers/net/Makefile
615 +++ b/drivers/net/Makefile
616 @@ -234,6 +234,7 @@ pasemi_mac_driver-objs := pasemi_mac.o p
617 obj-$(CONFIG_MLX4_CORE) += mlx4/
618 obj-$(CONFIG_ENC28J60) += enc28j60.o
619 obj-$(CONFIG_ETHOC) += ethoc.o
620 +obj-$(CONFIG_OCTEON_MGMT) += octeon/
621
622 obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
623
624 --- /dev/null
625 +++ b/drivers/net/octeon/Makefile
626 @@ -0,0 +1,11 @@
627 +# Makefile for the Cavium OCTEON Ethernet drivers.
628 +#
629 +# This file is subject to the terms and conditions of the GNU General Public
630 +# License. See the file "COPYING" in the main directory of this archive
631 +# for more details.
632 +#
633 +# Copyright (C) 2008 Cavium Networks
634 +
635 +obj-$(CONFIG_OCTEON_MGMT) += octeon_mgmt.o
636 +
637 +octeon_mgmt-objs := octeon-mgmt-port.o cvmx-mgmt-port.o
638 \ No newline at end of file
639 --- /dev/null
640 +++ b/drivers/net/octeon/cvmx-mgmt-port.c
641 @@ -0,0 +1,818 @@
642 +/***********************license start***************
643 + * Author: Cavium Networks
644 + *
645 + * Contact: support@caviumnetworks.com
646 + * This file is part of the OCTEON SDK
647 + *
648 + * Copyright (c) 2003-2008 Cavium Networks
649 + *
650 + * This file is free software; you can redistribute it and/or modify
651 + * it under the terms of the GNU General Public License, Version 2, as
652 + * published by the Free Software Foundation.
653 + *
654 + * This file is distributed in the hope that it will be useful, but
655 + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
656 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
657 + * NONINFRINGEMENT. See the GNU General Public License for more
658 + * details.
659 + *
660 + * You should have received a copy of the GNU General Public License
661 + * along with this file; if not, write to the Free Software
662 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
663 + * or visit http://www.gnu.org/licenses/.
664 + *
665 + * This file may also be available under a different license from Cavium.
666 + * Contact Cavium Networks for more information
667 + ***********************license end**************************************/
668 +
669 +/**
670 + *
671 + * Support functions for managing the MII management port
672 + *
673 + */
674 +
675 +#include <asm/octeon/octeon.h>
676 +#include <asm/octeon/cvmx-spinlock.h>
677 +#include <asm/octeon/cvmx-bootmem.h>
678 +#include <asm/octeon/cvmx-mdio.h>
679 +
680 +#include <asm/octeon/cvmx-mixx-defs.h>
681 +#include <asm/octeon/cvmx-agl-defs.h>
682 +
683 +#include "cvmx-mgmt-port.h"
684 +
685 +#define CVMX_MGMT_PORT_NUM_PORTS 2
686 +/* Number of TX ring buffer entries and buffers */
687 +#define CVMX_MGMT_PORT_NUM_TX_BUFFERS 16
688 +/* Number of RX ring buffer entries and buffers */
689 +#define CVMX_MGMT_PORT_NUM_RX_BUFFERS 128
690 +
691 +#define CVMX_MGMT_PORT_TX_BUFFER_SIZE 12288
692 +#define CVMX_MGMT_PORT_RX_BUFFER_SIZE 1536
693 +
694 +/**
695 + * Format of the TX/RX ring buffer entries
696 + */
697 +union cvmx_mgmt_port_ring_entry {
698 + uint64_t u64;
699 + struct {
700 + uint64_t reserved_62_63:2;
701 + /* Length of the buffer/packet in bytes */
702 + uint64_t len:14;
703 + /* The RX error code */
704 + uint64_t code:8;
705 + /* Physical address of the buffer */
706 + uint64_t addr:40;
707 + } s;
708 +};
709 +
710 +/**
711 + * Per port state required for each mgmt port
712 + */
713 +struct cvmx_mgmt_port_state {
714 + /* Used for exclusive access to this structure */
715 + cvmx_spinlock_t lock;
716 + /* Where the next TX will write in the tx_ring and tx_buffers */
717 + int tx_write_index;
718 + /* Where the next RX will be in the rx_ring and rx_buffers */
719 + int rx_read_index;
720 + /* The SMI/MDIO PHY address */
721 + int phy_id;
722 + /* Our MAC address */
723 + uint64_t mac;
724 + union cvmx_mgmt_port_ring_entry tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS];
725 + union cvmx_mgmt_port_ring_entry rx_ring[CVMX_MGMT_PORT_NUM_RX_BUFFERS];
726 + char tx_buffers[CVMX_MGMT_PORT_NUM_TX_BUFFERS]
727 + [CVMX_MGMT_PORT_TX_BUFFER_SIZE];
728 + char rx_buffers[CVMX_MGMT_PORT_NUM_RX_BUFFERS]
729 + [CVMX_MGMT_PORT_RX_BUFFER_SIZE];
730 +};
731 +
732 +/**
733 + * Pointers to each mgmt port's state
734 + */
735 +struct cvmx_mgmt_port_state *cvmx_mgmt_port_state_ptr;
736 +
737 +/**
738 + * Return the number of management ports supported by this chip
739 + *
740 + * Returns Number of ports
741 + */
742 +int __cvmx_mgmt_port_num_ports(void)
743 +{
744 + if (OCTEON_IS_MODEL(OCTEON_CN56XX))
745 + return 1;
746 + else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
747 + return 2;
748 + else
749 + return 0;
750 +}
751 +
752 +/**
753 + * Called to initialize a management port for use. Multiple calls
754 + * to this function across applications are safe.
755 + *
756 + * @port: Port to initialize
757 + *
758 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
759 + */
760 +enum cvmx_mgmt_port_result cvmx_mgmt_port_initialize(int port)
761 +{
762 + char *alloc_name = "cvmx_mgmt_port";
763 + union cvmx_mixx_oring1 oring1;
764 + union cvmx_mixx_ctl mix_ctl;
765 +
766 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
767 + return CVMX_MGMT_PORT_INVALID_PARAM;
768 +
769 + cvmx_mgmt_port_state_ptr =
770 + cvmx_bootmem_alloc_named(CVMX_MGMT_PORT_NUM_PORTS *
771 + sizeof(struct cvmx_mgmt_port_state), 128,
772 + alloc_name);
773 + if (cvmx_mgmt_port_state_ptr) {
774 + memset(cvmx_mgmt_port_state_ptr, 0,
775 + CVMX_MGMT_PORT_NUM_PORTS *
776 + sizeof(struct cvmx_mgmt_port_state));
777 + } else {
778 + struct cvmx_bootmem_named_block_desc *block_desc =
779 + cvmx_bootmem_find_named_block(alloc_name);
780 + if (block_desc)
781 + cvmx_mgmt_port_state_ptr =
782 + cvmx_phys_to_ptr(block_desc->base_addr);
783 + else {
784 + cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: "
785 + "Unable to get named block %s.\n",
786 + alloc_name);
787 + return CVMX_MGMT_PORT_NO_MEMORY;
788 + }
789 + }
790 +
791 + /*
792 + * Reset the MIX block if the previous user had a different TX
793 + * ring size.
794 + */
795 + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
796 + if (!mix_ctl.s.reset) {
797 + oring1.u64 = cvmx_read_csr(CVMX_MIXX_ORING1(port));
798 + if (oring1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS) {
799 + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
800 + mix_ctl.s.en = 0;
801 + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
802 + do {
803 + mix_ctl.u64 =
804 + cvmx_read_csr(CVMX_MIXX_CTL(port));
805 + } while (mix_ctl.s.busy);
806 + mix_ctl.s.reset = 1;
807 + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
808 + cvmx_read_csr(CVMX_MIXX_CTL(port));
809 + memset(cvmx_mgmt_port_state_ptr + port, 0,
810 + sizeof(struct cvmx_mgmt_port_state));
811 + }
812 + }
813 +
814 + if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0) {
815 + struct cvmx_mgmt_port_state *state =
816 + cvmx_mgmt_port_state_ptr + port;
817 + int i;
818 + union cvmx_mixx_bist mix_bist;
819 + union cvmx_agl_gmx_bist agl_gmx_bist;
820 + union cvmx_mixx_oring1 oring1;
821 + union cvmx_mixx_iring1 iring1;
822 + union cvmx_mixx_ctl mix_ctl;
823 +
824 + /* Make sure BIST passed */
825 + mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(port));
826 + if (mix_bist.u64)
827 + cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: "
828 +				     "Management port MIX failed BIST "
829 + "(0x%016llx)\n",
830 + (unsigned long long)mix_bist.u64);
831 +
832 + agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
833 + if (agl_gmx_bist.u64)
834 + cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: "
835 +				     "Management port AGL failed BIST "
836 + "(0x%016llx)\n",
837 + (unsigned long long)agl_gmx_bist.u64);
838 +
839 + /* Clear all state information */
840 + memset(state, 0, sizeof(*state));
841 +
842 + /* Take the control logic out of reset */
843 + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
844 + mix_ctl.s.reset = 0;
845 + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
846 +
847 + /* Set the PHY address */
848 + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
849 + state->phy_id = -1;
850 + else
851 +			/* Will need to be changed to match the board */
852 + state->phy_id = port;
853 +
854 + /* Create a default MAC address */
855 + state->mac = 0x000000dead000000ull;
856 + state->mac += 0xffffff & CAST64(state);
857 +
858 + /* Setup the TX ring */
859 + for (i = 0; i < CVMX_MGMT_PORT_NUM_TX_BUFFERS; i++) {
860 + state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;
861 + state->tx_ring[i].s.addr =
862 + cvmx_ptr_to_phys(state->tx_buffers[i]);
863 + }
864 +
865 + /* Tell the HW where the TX ring is */
866 + oring1.u64 = 0;
867 + oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring) >> 3;
868 + oring1.s.osize = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
869 + CVMX_SYNCWS;
870 + cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
871 +
872 + /* Setup the RX ring */
873 + for (i = 0; i < CVMX_MGMT_PORT_NUM_RX_BUFFERS; i++) {
874 + /* This size is -8 due to an errata for CN56XX pass 1 */
875 + state->rx_ring[i].s.len =
876 + CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
877 + state->rx_ring[i].s.addr =
878 + cvmx_ptr_to_phys(state->rx_buffers[i]);
879 + }
880 +
881 + /* Tell the HW where the RX ring is */
882 + iring1.u64 = 0;
883 + iring1.s.ibase = cvmx_ptr_to_phys(state->rx_ring) >> 3;
884 + iring1.s.isize = CVMX_MGMT_PORT_NUM_RX_BUFFERS;
885 + CVMX_SYNCWS;
886 + cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
887 + cvmx_write_csr(CVMX_MIXX_IRING2(port),
888 + CVMX_MGMT_PORT_NUM_RX_BUFFERS);
889 +
890 + /* Disable the external input/output */
891 + cvmx_mgmt_port_disable(port);
892 +
893 + /* Set the MAC address filtering up */
894 + cvmx_mgmt_port_set_mac(port, state->mac);
895 +
896 + /*
897 + * Set the default max size to an MTU of 1500 with L2
898 + * and VLAN.
899 + */
900 + cvmx_mgmt_port_set_max_packet_size(port, 1518);
901 +
902 + /*
903 + * Enable the port HW. Packets are not allowed until
904 + * cvmx_mgmt_port_enable() is called.
905 + */
906 + mix_ctl.u64 = 0;
907 + /* Strip the ending CRC */
908 + mix_ctl.s.crc_strip = 1;
909 + /* Enable the port */
910 + mix_ctl.s.en = 1;
911 + /* Arbitration mode */
912 + mix_ctl.s.nbtarb = 0;
913 + /* MII CB-request FIFO programmable high watermark */
914 + mix_ctl.s.mrq_hwm = 1;
915 + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
916 +
917 + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
918 + || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
919 + /*
920 + * Force compensation values, as they are not
921 + * determined properly by HW.
922 + */
923 + union cvmx_agl_gmx_drv_ctl drv_ctl;
924 +
925 + drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
926 + if (port) {
927 + drv_ctl.s.byp_en1 = 1;
928 + drv_ctl.s.nctl1 = 6;
929 + drv_ctl.s.pctl1 = 6;
930 + } else {
931 + drv_ctl.s.byp_en = 1;
932 + drv_ctl.s.nctl = 6;
933 + drv_ctl.s.pctl = 6;
934 + }
935 + cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
936 + }
937 + }
938 + return CVMX_MGMT_PORT_SUCCESS;
939 +}
940 +
941 +/**
942 + * Shutdown a management port. This currently disables packet IO
943 + * but leaves all hardware and buffers. Another application can then
944 + * call initialize() without redoing the hardware setup.
945 + *
946 + * @port: Management port
947 + *
948 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
949 + */
950 +enum cvmx_mgmt_port_result cvmx_mgmt_port_shutdown(int port)
951 +{
952 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
953 + return CVMX_MGMT_PORT_INVALID_PARAM;
954 +
955 +	/* Stop packets from coming in */
956 + cvmx_mgmt_port_disable(port);
957 +
958 + /*
959 +	 * We don't free any memory so the next initialize can reuse
960 + * the HW setup.
961 + */
962 + return CVMX_MGMT_PORT_SUCCESS;
963 +}
964 +
965 +/**
966 + * Enable packet IO on a management port
967 + *
968 + * @port: Management port
969 + *
970 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
971 + */
972 +enum cvmx_mgmt_port_result cvmx_mgmt_port_enable(int port)
973 +{
974 + struct cvmx_mgmt_port_state *state;
975 + union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
976 + union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
977 + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
978 +
979 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
980 + return CVMX_MGMT_PORT_INVALID_PARAM;
981 +
982 + state = cvmx_mgmt_port_state_ptr + port;
983 +
984 + cvmx_spinlock_lock(&state->lock);
985 +
986 + rxx_frm_ctl.u64 = 0;
987 + rxx_frm_ctl.s.pre_align = 1;
988 + /*
989 + * When set, disables the length check for non-min sized pkts
990 + * with padding in the client data.
991 + */
992 + rxx_frm_ctl.s.pad_len = 1;
993 + /* When set, disables the length check for VLAN pkts */
994 + rxx_frm_ctl.s.vlan_len = 1;
995 + /* When set, PREAMBLE checking is less strict */
996 + rxx_frm_ctl.s.pre_free = 1;
997 + /* Control Pause Frames can match station SMAC */
998 + rxx_frm_ctl.s.ctl_smac = 0;
999 +	/* Control Pause Frames can match globally assigned Multicast address */
1000 + rxx_frm_ctl.s.ctl_mcst = 1;
1001 + /* Forward pause information to TX block */
1002 + rxx_frm_ctl.s.ctl_bck = 1;
1003 + /* Drop Control Pause Frames */
1004 + rxx_frm_ctl.s.ctl_drp = 1;
1005 + /* Strip off the preamble */
1006 + rxx_frm_ctl.s.pre_strp = 1;
1007 + /*
1008 + * This port is configured to send PREAMBLE+SFD to begin every
1009 + * frame. GMX checks that the PREAMBLE is sent correctly.
1010 + */
1011 + rxx_frm_ctl.s.pre_chk = 1;
1012 + cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
1013 +
1014 + /* Enable the AGL block */
1015 + agl_gmx_inf_mode.u64 = 0;
1016 + agl_gmx_inf_mode.s.en = 1;
1017 + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1018 +
1019 + /* Configure the port duplex and enables */
1020 + agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
1021 + agl_gmx_prtx.s.tx_en = 1;
1022 + agl_gmx_prtx.s.rx_en = 1;
1023 + if (cvmx_mgmt_port_get_link(port) < 0)
1024 + agl_gmx_prtx.s.duplex = 0;
1025 + else
1026 + agl_gmx_prtx.s.duplex = 1;
1027 + agl_gmx_prtx.s.en = 1;
1028 + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
1029 +
1030 + cvmx_spinlock_unlock(&state->lock);
1031 + return CVMX_MGMT_PORT_SUCCESS;
1032 +}
1033 +
1034 +/**
1035 + * Disable packet IO on a management port
1036 + *
1037 + * @port: Management port
1038 + *
1039 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1040 + */
1041 +enum cvmx_mgmt_port_result cvmx_mgmt_port_disable(int port)
1042 +{
1043 + struct cvmx_mgmt_port_state *state;
1044 + union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
1045 +
1046 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1047 + return CVMX_MGMT_PORT_INVALID_PARAM;
1048 +
1049 + state = cvmx_mgmt_port_state_ptr + port;
1050 +
1051 + cvmx_spinlock_lock(&state->lock);
1052 +
1053 + agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
1054 + agl_gmx_prtx.s.en = 0;
1055 + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
1056 +
1057 + cvmx_spinlock_unlock(&state->lock);
1058 + return CVMX_MGMT_PORT_SUCCESS;
1059 +}
1060 +
1061 +/**
1062 + * Send a packet out the management port. The packet is copied so
1063 + * the input buffer isn't used after this call.
1064 + *
1065 + * @port: Management port
1066 + * @packet_len: Length of the packet to send. It does not include the final CRC
1067 + * @buffer: Packet data
1068 + *
1069 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1070 + */
1071 +enum cvmx_mgmt_port_result cvmx_mgmt_port_send(int port, int packet_len,
1072 + void *buffer)
1073 +{
1074 + struct cvmx_mgmt_port_state *state;
1075 + union cvmx_mixx_oring2 mix_oring2;
1076 +
1077 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1078 + return CVMX_MGMT_PORT_INVALID_PARAM;
1079 +
1080 +	/* Make sure the packet size is valid */
1081 + if ((packet_len < 1) || (packet_len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
1082 + return CVMX_MGMT_PORT_INVALID_PARAM;
1083 +
1084 + if (buffer == NULL)
1085 + return CVMX_MGMT_PORT_INVALID_PARAM;
1086 +
1087 + state = cvmx_mgmt_port_state_ptr + port;
1088 +
1089 + cvmx_spinlock_lock(&state->lock);
1090 +
1091 + mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
1092 + if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1) {
1093 + /* No room for another packet */
1094 + cvmx_spinlock_unlock(&state->lock);
1095 + return CVMX_MGMT_PORT_NO_MEMORY;
1096 + } else {
1097 + /* Copy the packet into the output buffer */
1098 + memcpy(state->tx_buffers[state->tx_write_index], buffer,
1099 + packet_len);
1100 + /* Insert the source MAC */
1101 + memcpy(state->tx_buffers[state->tx_write_index] + 6,
1102 + ((char *)&state->mac) + 2, 6);
1103 + /* Update the TX ring buffer entry size */
1104 + state->tx_ring[state->tx_write_index].s.len = packet_len;
1105 + /* Increment our TX index */
1106 + state->tx_write_index =
1107 + (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
1108 +		/* Ring the doorbell, sending the packet */
1109 + CVMX_SYNCWS;
1110 + cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
1111 + if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
1112 + cvmx_write_csr(CVMX_MIXX_ORCNT(port),
1113 + cvmx_read_csr(CVMX_MIXX_ORCNT(port)));
1114 +
1115 + cvmx_spinlock_unlock(&state->lock);
1116 + return CVMX_MGMT_PORT_SUCCESS;
1117 + }
1118 +}
1119 +
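
cvmx_mgmt_port_send() refuses a packet with CVMX_MGMT_PORT_NO_MEMORY once the outstanding doorbell count reaches the TX ring size, so callers that cannot afford to drop packets have to retry. A hedged sketch (illustrative only, not part of the patch; the helper name, retry count, and delay are arbitrary):

	/* Sketch: retry a send while the TX ring is full, give up on
	 * any other error. */
	static int example_send_with_retry(int port, int len, void *data)
	{
		int tries;

		for (tries = 0; tries < 100; tries++) {
			enum cvmx_mgmt_port_result r =
				cvmx_mgmt_port_send(port, len, data);
			if (r == CVMX_MGMT_PORT_SUCCESS)
				return 0;
			if (r != CVMX_MGMT_PORT_NO_MEMORY)
				return -1;	/* invalid parameter, don't retry */
			cvmx_wait(10000);	/* let the hardware drain the ring */
		}
		return -1;
	}
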
1120 +/**
1121 + * Receive a packet from the management port.
1122 + *
1123 + * @port: Management port
1124 + * @buffer_len: Size of the buffer to receive the packet into
1125 + * @buffer: Buffer to receive the packet into
1126 + *
1127 + * Returns The size of the packet, or a negative error code on failure. Zero
1128 + * means that no packets were available.
1129 + */
1130 +int cvmx_mgmt_port_receive(int port, int buffer_len, void *buffer)
1131 +{
1132 + union cvmx_mixx_ircnt mix_ircnt;
1133 + struct cvmx_mgmt_port_state *state;
1134 + int result;
1135 +
1136 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1137 + return CVMX_MGMT_PORT_INVALID_PARAM;
1138 +
1139 +	/* Make sure the buffer size is valid */
1140 + if (buffer_len < 1)
1141 + return CVMX_MGMT_PORT_INVALID_PARAM;
1142 +
1143 + if (buffer == NULL)
1144 + return CVMX_MGMT_PORT_INVALID_PARAM;
1145 +
1146 + state = cvmx_mgmt_port_state_ptr + port;
1147 +
1148 + cvmx_spinlock_lock(&state->lock);
1149 +
1150 + /* Find out how many RX packets are pending */
1151 + mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
1152 + if (mix_ircnt.s.ircnt) {
1153 + void *source = state->rx_buffers[state->rx_read_index];
1154 + uint64_t *zero_check = source;
1155 + /*
1156 + * CN56XX pass 1 has an errata where packets might
1157 + * start 8 bytes into the buffer instead of at their
1158 +		 * correct location. If the first 8 bytes are zero we
1159 + * assume this has happened.
1160 + */
1161 + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1162 + && (*zero_check == 0))
1163 + source += 8;
1164 + /* Start off with zero bytes received */
1165 + result = 0;
1166 + /*
1167 + * While the completion code signals more data, copy
1168 + * the buffers into the user's data.
1169 + */
1170 + while (state->rx_ring[state->rx_read_index].s.code == 16) {
1171 + /* Only copy what will fit in the user's buffer */
1172 + int length = state->rx_ring[state->rx_read_index].s.len;
1173 + if (length > buffer_len)
1174 + length = buffer_len;
1175 + memcpy(buffer, source, length);
1176 + /*
1177 + * Reduce the size of the buffer to the
1178 + * remaining space. If we run out we will
1179 + * signal an error when the code 15 buffer
1180 + * doesn't fit.
1181 + */
1182 + buffer += length;
1183 + buffer_len -= length;
1184 + result += length;
1185 + /*
1186 + * Update this buffer for reuse in future
1187 + * receives. This size is -8 due to an errata
1188 + * for CN56XX pass 1.
1189 + */
1190 + state->rx_ring[state->rx_read_index].s.code = 0;
1191 + state->rx_ring[state->rx_read_index].s.len =
1192 + CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
1193 + state->rx_read_index =
1194 + (state->rx_read_index +
1195 + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
1196 + /*
1197 + * Zero the beginning of the buffer for use by
1198 + * the errata check.
1199 + */
1200 + *zero_check = 0;
1201 + CVMX_SYNCWS;
1202 + /* Increment the number of RX buffers */
1203 + cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
1204 + source = state->rx_buffers[state->rx_read_index];
1205 + zero_check = source;
1206 + }
1207 +
1208 + /* Check for the final good completion code */
1209 + if (state->rx_ring[state->rx_read_index].s.code == 15) {
1210 + if (buffer_len >=
1211 + state->rx_ring[state->rx_read_index].s.len) {
1212 + int length =
1213 + state->rx_ring[state->rx_read_index].s.len;
1214 + memcpy(buffer, source, length);
1215 + result += length;
1216 + } else {
1217 + /* Not enough room for the packet */
1218 + cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: "
1219 + "Packet (%d) larger than "
1220 + "supplied buffer (%d)\n",
1221 + state->rx_ring[state->rx_read_index].s.len,
1222 + buffer_len);
1223 + result = CVMX_MGMT_PORT_NO_MEMORY;
1224 + }
1225 + } else {
1226 + union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
1227 + cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Receive "
1228 +				     "error code %d. Packet dropped (Len %d)\n",
1229 + state->rx_ring[state->rx_read_index].s.code,
1230 + state->rx_ring[state->rx_read_index].s.len +
1231 + result);
1232 + result = -state->rx_ring[state->rx_read_index].s.code;
1233 +
1234 + /* Check to see if we need to change the duplex. */
1235 + agl_gmx_prtx.u64 =
1236 + cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
1237 + if (cvmx_mgmt_port_get_link(port) < 0)
1238 + agl_gmx_prtx.s.duplex = 0;
1239 + else
1240 + agl_gmx_prtx.s.duplex = 1;
1241 + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
1242 + agl_gmx_prtx.u64);
1243 + }
1244 +
1245 + /*
1246 + * Clean out the ring buffer entry. This size is -8
1247 + * due to an errata for CN56XX pass 1.
1248 + */
1249 + state->rx_ring[state->rx_read_index].s.code = 0;
1250 + state->rx_ring[state->rx_read_index].s.len =
1251 + CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
1252 + state->rx_read_index =
1253 + (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
1254 + /*
1255 + * Zero the beginning of the buffer for use by the
1256 + * errata check.
1257 + */
1258 + *zero_check = 0;
1259 + CVMX_SYNCWS;
1260 + /* Increment the number of RX buffers */
1261 + cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
1262 + /* Decrement the pending RX count */
1263 + cvmx_write_csr(CVMX_MIXX_IRCNT(port), 1);
1264 + } else {
1265 + /* No packets available */
1266 + result = 0;
1267 + }
1268 + cvmx_spinlock_unlock(&state->lock);
1269 + return result;
1270 +}
1271 +
1272 +/**
1273 + * Get the management port link status:
1274 + * 100 = 100Mbps, full duplex
1275 + * 10 = 10Mbps, full duplex
1276 + * 0 = Link down
1277 + *    -10 = 10Mbps, half duplex
1278 + * -100 = 100Mbps, half duplex
1279 + *
1280 + * @port: Management port
1281 + *
1282 + * Returns the encoded link state listed above
1283 + */
1284 +int cvmx_mgmt_port_get_link(int port)
1285 +{
1286 + struct cvmx_mgmt_port_state *state;
1287 + int phy_status;
1288 + int duplex;
1289 +
1290 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1291 + return CVMX_MGMT_PORT_INVALID_PARAM;
1292 +
1293 + state = cvmx_mgmt_port_state_ptr + port;
1294 +
1295 + /* Assume 100Mbps if we don't know the PHY address */
1296 + if (state->phy_id == -1)
1297 + return 100;
1298 +
1299 + /* Read the PHY state */
1300 + phy_status =
1301 + cvmx_mdio_read(state->phy_id >> 8, state->phy_id & 0xff, 17);
1302 +
1303 + /* Only return a link if the PHY has finished auto negotiation
1304 + and set the resolved bit (bit 11) */
1305 + if (!(phy_status & (1 << 11)))
1306 + return 0;
1307 +
1308 +	/* Create a multiplication factor to represent duplex */
1309 + if ((phy_status >> 13) & 1)
1310 + duplex = 1;
1311 + else
1312 + duplex = -1;
1313 +
1314 + /* Speed is encoded on bits 15-14 */
1315 + switch ((phy_status >> 14) & 3) {
1316 + case 0: /* 10 Mbps */
1317 + return 10 * duplex;
1318 + case 1: /* 100 Mbps */
1319 + return 100 * duplex;
1320 + default:
1321 + return 0;
1322 + }
1323 +}
1324 +
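
The encoding used by cvmx_mgmt_port_get_link() packs both pieces of information into one integer: the magnitude is the speed in Mbps and the sign is the duplex. A hedged sketch of decoding it (illustrative only, not part of the patch; the helper name is made up and a valid port is assumed):

	/* Sketch: print the link state for a management port. */
	static void example_print_link(int port)
	{
		int link = cvmx_mgmt_port_get_link(port);

		if (link == 0)
			cvmx_dprintf("Port %d: link down\n", port);
		else
			cvmx_dprintf("Port %d: %d Mbps, %s duplex\n", port,
				     (link < 0) ? -link : link,
				     (link > 0) ? "full" : "half");
	}
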
1325 +/**
1326 + * Set the MAC address for a management port
1327 + *
1328 + * @port: Management port
1329 + * @mac: New MAC address. The lower 6 bytes are used.
1330 + *
1331 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1332 + */
1333 +enum cvmx_mgmt_port_result cvmx_mgmt_port_set_mac(int port, uint64_t mac)
1334 +{
1335 + struct cvmx_mgmt_port_state *state;
1336 + union cvmx_agl_gmx_rxx_adr_ctl agl_gmx_rxx_adr_ctl;
1337 +
1338 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1339 + return CVMX_MGMT_PORT_INVALID_PARAM;
1340 +
1341 + state = cvmx_mgmt_port_state_ptr + port;
1342 +
1343 + cvmx_spinlock_lock(&state->lock);
1344 +
1345 + agl_gmx_rxx_adr_ctl.u64 = 0;
1346 + /* Only accept matching MAC addresses */
1347 + agl_gmx_rxx_adr_ctl.s.cam_mode = 1;
1348 + /* Drop multicast */
1349 + agl_gmx_rxx_adr_ctl.s.mcst = 0;
1350 + /* Allow broadcast */
1351 + agl_gmx_rxx_adr_ctl.s.bcst = 1;
1352 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
1353 +
1354 + /* Only using one of the CAMs */
1355 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), (mac >> 40) & 0xff);
1356 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), (mac >> 32) & 0xff);
1357 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), (mac >> 24) & 0xff);
1358 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), (mac >> 16) & 0xff);
1359 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), (mac >> 8) & 0xff);
1360 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), (mac >> 0) & 0xff);
1361 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
1362 + state->mac = mac;
1363 +
1364 + cvmx_spinlock_unlock(&state->lock);
1365 + return CVMX_MGMT_PORT_SUCCESS;
1366 +}
1367 +
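
The MAC argument is a uint64_t with the address in the low 6 bytes; the function above spreads it across the six CAM registers one byte at a time, most significant byte first. A hedged sketch of packing a conventional 6-byte address into that layout (illustrative only, not part of the patch; the helper name is made up, and the same packing appears later in octeon-mgmt-port.c):

	/* Sketch: convert a 6-byte MAC address into the uint64_t layout
	 * expected by cvmx_mgmt_port_set_mac(). mac_bytes[0] lands in
	 * CAM0 (bits 47:40) and mac_bytes[5] in CAM5 (bits 7:0). */
	static void example_set_mac(int port, const uint8_t mac_bytes[6])
	{
		uint64_t mac = 0;
		int i;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)mac_bytes[i];
		cvmx_mgmt_port_set_mac(port, mac);
	}
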
1368 +/**
1369 + * Get the MAC address for a management port
1370 + *
1371 + * @port: Management port
1372 + *
1373 + * Returns MAC address
1374 + */
1375 +uint64_t cvmx_mgmt_port_get_mac(int port)
1376 +{
1377 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1378 + return CVMX_MGMT_PORT_INVALID_PARAM;
1379 +
1380 + return cvmx_mgmt_port_state_ptr[port].mac;
1381 +}
1382 +
1383 +/**
1384 + * Set the multicast list.
1385 + *
1386 + * @port: Management port
1387 + * @flags: Interface flags
1388 + *
1389 + * Returns
1390 + */
1391 +void cvmx_mgmt_port_set_multicast_list(int port, int flags)
1392 +{
1393 + struct cvmx_mgmt_port_state *state;
1394 + union cvmx_agl_gmx_rxx_adr_ctl agl_gmx_rxx_adr_ctl;
1395 +
1396 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1397 + return;
1398 +
1399 + state = cvmx_mgmt_port_state_ptr + port;
1400 +
1401 + cvmx_spinlock_lock(&state->lock);
1402 +
1403 + agl_gmx_rxx_adr_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port));
1404 +
1405 + /* Allow broadcast MAC addresses */
1406 + if (!agl_gmx_rxx_adr_ctl.s.bcst)
1407 + agl_gmx_rxx_adr_ctl.s.bcst = 1;
1408 +
1409 + if ((flags & CVMX_IFF_ALLMULTI) || (flags & CVMX_IFF_PROMISC))
1410 + /* Force accept multicast packets */
1411 + agl_gmx_rxx_adr_ctl.s.mcst = 2;
1412 + else
1413 + /* Force reject multicast packets */
1414 + agl_gmx_rxx_adr_ctl.s.mcst = 1;
1415 +
1416 + if (flags & CVMX_IFF_PROMISC)
1417 + /*
1418 + * Reject matches if promisc. Since CAM is shut off,
1419 + * should accept everything.
1420 + */
1421 + agl_gmx_rxx_adr_ctl.s.cam_mode = 0;
1422 + else
1423 + /* Filter packets based on the CAM */
1424 + agl_gmx_rxx_adr_ctl.s.cam_mode = 1;
1425 +
1426 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
1427 +
1428 + if (flags & CVMX_IFF_PROMISC)
1429 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
1430 + else
1431 + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
1432 +
1433 + cvmx_spinlock_unlock(&state->lock);
1434 +}
1435 +
1436 +/**
1437 + * Set the maximum packet size allowed in. Size is specified
1438 + * including L2 but without FCS. A normal MTU would correspond
1439 + * to 1514 assuming the standard 14 byte L2 header.
1440 + *
1441 + * @port: Management port
1442 + * @size_without_fcs:
1443 + * Size in bytes without FCS
1444 + */
1445 +void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs)
1446 +{
1447 + struct cvmx_mgmt_port_state *state;
1448 +
1449 + if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
1450 + return;
1451 +
1452 + state = cvmx_mgmt_port_state_ptr + port;
1453 +
1454 + cvmx_spinlock_lock(&state->lock);
1455 + cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
1456 + cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
1457 + (size_without_fcs + 7) & 0xfff8);
1458 + cvmx_spinlock_unlock(&state->lock);
1459 +}
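
As a worked example of the rounding above: the 1518-byte default programmed during initialization gives CVMX_AGL_GMX_RXX_FRM_MAX = 1518 and CVMX_AGL_GMX_RXX_JABBER = (1518 + 7) & 0xfff8 = 1520, i.e. the jabber cutoff is the frame maximum rounded up to the next multiple of 8 bytes.
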
1460 --- /dev/null
1461 +++ b/drivers/net/octeon/cvmx-mgmt-port.h
1462 @@ -0,0 +1,168 @@
1463 +/***********************license start***************
1464 + * Author: Cavium Networks
1465 + *
1466 + * Contact: support@caviumnetworks.com
1467 + * This file is part of the OCTEON SDK
1468 + *
1469 + * Copyright (c) 2003-2008 Cavium Networks
1470 + *
1471 + * This file is free software; you can redistribute it and/or modify
1472 + * it under the terms of the GNU General Public License, Version 2, as
1473 + * published by the Free Software Foundation.
1474 + *
1475 + * This file is distributed in the hope that it will be useful, but
1476 + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
1477 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
1478 + * NONINFRINGEMENT. See the GNU General Public License for more
1479 + * details.
1480 + *
1481 + * You should have received a copy of the GNU General Public License
1482 + * along with this file; if not, write to the Free Software
1483 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1484 + * or visit http://www.gnu.org/licenses/.
1485 + *
1486 + * This file may also be available under a different license from Cavium.
1487 + * Contact Cavium Networks for more information
1488 + ***********************license end**************************************/
1489 +
1490 +/**
1491 + *
1492 + * Support functions for managing the MII management port
1493 + *
1494 + */
1495 +
1496 +#ifndef __CVMX_MGMT_PORT_H__
1497 +#define __CVMX_MGMT_PORT_H__
1498 +
1499 +enum cvmx_mgmt_port_result {
1500 + CVMX_MGMT_PORT_SUCCESS = 0,
1501 + CVMX_MGMT_PORT_NO_MEMORY = -1,
1502 + CVMX_MGMT_PORT_INVALID_PARAM = -2,
1503 +};
1504 +
1505 +/* Enumeration of Net Device interface flags. */
1506 +enum cvmx_mgmt_port_netdevice_flags {
1507 + CVMX_IFF_PROMISC = 0x100, /* receive all packets */
1508 + CVMX_IFF_ALLMULTI = 0x200, /* receive all multicast packets */
1509 +};
1510 +
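
Note that CVMX_IFF_PROMISC (0x100) and CVMX_IFF_ALLMULTI (0x200) match the values of the Linux IFF_PROMISC and IFF_ALLMULTI interface flags, which is what lets octeon-mgmt-port.c below pass dev->flags straight through to cvmx_mgmt_port_set_multicast_list().
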
1511 +/**
1512 + * Called to initialize a management port for use. Multiple calls
1513 + * to this function across applications are safe.
1514 + *
1515 + * @port: Port to initialize
1516 + *
1517 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1518 + */
1519 +extern enum cvmx_mgmt_port_result cvmx_mgmt_port_initialize(int port);
1520 +
1521 +/**
1522 + * Shutdown a management port. This currently disables packet IO
1523 + * but leaves all hardware and buffers. Another application can then
1524 + * call initialize() without redoing the hardware setup.
1525 + *
1526 + * @port: Management port
1527 + *
1528 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1529 + */
1530 +extern enum cvmx_mgmt_port_result cvmx_mgmt_port_shutdown(int port);
1531 +
1532 +/**
1533 + * Enable packet IO on a management port
1534 + *
1535 + * @port: Management port
1536 + *
1537 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1538 + */
1539 +extern enum cvmx_mgmt_port_result cvmx_mgmt_port_enable(int port);
1540 +
1541 +/**
1542 + * Disable packet IO on a management port
1543 + *
1544 + * @port: Management port
1545 + *
1546 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1547 + */
1548 +extern enum cvmx_mgmt_port_result cvmx_mgmt_port_disable(int port);
1549 +
1550 +/**
1551 + * Send a packet out the management port. The packet is copied so
1552 + * the input buffer isn't used after this call.
1553 + *
1554 + * @port: Management port
1555 + * @packet_len: Length of the packet to send. It does not include the final CRC
1556 + * @buffer: Packet data
1557 + *
1558 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1559 + */
1560 +extern enum cvmx_mgmt_port_result cvmx_mgmt_port_send(int port, int packet_len,
1561 + void *buffer);
1562 +
1563 +/**
1564 + * Receive a packet from the management port.
1565 + *
1566 + * @port: Management port
1567 + * @buffer_len: Size of the buffer to receive the packet into
1568 + * @buffer: Buffer to receive the packet into
1569 + *
1570 + * Returns The size of the packet, or a negative error code on failure. Zero
1571 + * means that no packets were available.
1572 + */
1573 +extern int cvmx_mgmt_port_receive(int port, int buffer_len, void *buffer);
1574 +
1575 +/**
1576 + * Get the management port link status:
1577 + * 100 = 100Mbps, full duplex
1578 + * 10 = 10Mbps, full duplex
1579 + * 0 = Link down
1580 + *    -10 = 10Mbps, half duplex
1581 + * -100 = 100Mbps, half duplex
1582 + *
1583 + * @port: Management port
1584 + *
1585 + * Returns the encoded link state listed above
1586 + */
1587 +extern int cvmx_mgmt_port_get_link(int port);
1588 +
1589 +/**
1590 + * Set the MAC address for a management port
1591 + *
1592 + * @port: Management port
1593 + * @mac: New MAC address. The lower 6 bytes are used.
1594 + *
1595 + * Returns CVMX_MGMT_PORT_SUCCESS or an error code
1596 + */
1597 +extern enum cvmx_mgmt_port_result cvmx_mgmt_port_set_mac(int port,
1598 + uint64_t mac);
1599 +
1600 +/**
1601 + * Get the MAC address for a management port
1602 + *
1603 + * @port: Management port
1604 + *
1605 + * Returns MAC address
1606 + */
1607 +extern uint64_t cvmx_mgmt_port_get_mac(int port);
1608 +
1609 +/**
1610 + * Set the multicast list.
1611 + *
1612 + * @port: Management port
1613 + * @flags: Interface flags
1614 + *
1615 + * Returns
1616 + */
1617 +extern void cvmx_mgmt_port_set_multicast_list(int port, int flags);
1618 +
1619 +/**
1620 + * Set the maximum packet size allowed in. Size is specified
1621 + * including L2 but without FCS. A normal MTU would correspond
1622 + * to 1514 assuming the standard 14 byte L2 header.
1623 + *
1624 + * @port: Management port
1625 + * @size_without_fcs:
1626 + * Size in bytes without FCS
1627 + */
1628 +extern void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs);
1629 +
1630 +#endif /* __CVMX_MGMT_PORT_H__ */
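
Taken together, the functions declared above form a small polled API. A hedged sketch of a typical bring-up and busy-poll loop (illustrative only, not part of the patch; the helper name, the MAC value, and the buffer size are made up):

	/* Sketch: bring up management port 0 and echo received frames
	 * back out until a receive error occurs. */
	static void example_mgmt_port_echo(void)
	{
		char packet[1536];
		int len;

		if (cvmx_mgmt_port_initialize(0) != CVMX_MGMT_PORT_SUCCESS)
			return;
		cvmx_mgmt_port_set_mac(0, 0x000000dead000001ull);
		cvmx_mgmt_port_set_max_packet_size(0, 1518);
		cvmx_mgmt_port_enable(0);

		/* cvmx_mgmt_port_receive() returns 0 when nothing is
		 * pending, the packet length on success, and a negative
		 * code on error. */
		while ((len = cvmx_mgmt_port_receive(0, sizeof(packet),
						     packet)) >= 0) {
			if (len > 0)
				cvmx_mgmt_port_send(0, len, packet);
		}
		cvmx_mgmt_port_disable(0);
	}
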
1631 --- /dev/null
1632 +++ b/drivers/net/octeon/octeon-mgmt-port.c
1633 @@ -0,0 +1,389 @@
1634 +/*
1635 + * Octeon Management Port Ethernet Driver
1636 + *
1637 + * This file is subject to the terms and conditions of the GNU General Public
1638 + * License. See the file "COPYING" in the main directory of this archive
1639 + * for more details.
1640 + *
1641 + * Copyright (C) 2007, 2008 Cavium Networks
1642 + */
1643 +#include <linux/module.h>
1644 +#include <linux/kernel.h>
1645 +#include <linux/netdevice.h>
1646 +#include <linux/etherdevice.h>
1647 +#include <linux/ip.h>
1648 +#include <linux/string.h>
1649 +#include <linux/delay.h>
1650 +
1651 +#include <asm/octeon/octeon.h>
1652 +#include <asm/octeon/cvmx-mixx-defs.h>
1653 +#include <asm/octeon/cvmx-agl-defs.h>
1654 +
1655 +#include "cvmx-mgmt-port.h"
1656 +
1657 +static struct net_device *global_dev[2] = { NULL, NULL };
1658 +
1659 +#define DEBUGPRINT(format, ...) do {if (printk_ratelimit()) \
1660 + printk(format, ##__VA_ARGS__); \
1661 + } while (0)
1662 +
1663 +/**
1664 + * This is the definition of the Ethernet driver's private
1665 + * driver state stored in dev->priv.
1666 + */
1667 +struct device_private {
1668 + int port;
1669 + struct net_device_stats stats; /* Device statistics */
1670 +};
1671 +
1672 +
1673 +/**
1674 + * Packet transmit
1675 + *
1676 + * @param skb Packet to send
1677 + * @param dev Device info structure
1678 + * @return Always returns zero
1679 + */
1680 +static int packet_transmit(struct sk_buff *skb, struct net_device *dev)
1681 +{
1682 +	unsigned long flags;
1683 + struct device_private *priv = netdev_priv(dev);
1684 + enum cvmx_mgmt_port_result result;
1685 + local_irq_save(flags);
1686 + result = cvmx_mgmt_port_send(priv->port, skb->len, skb->data);
1687 + local_irq_restore(flags);
1688 + if (result == CVMX_MGMT_PORT_SUCCESS) {
1689 + priv->stats.tx_packets++;
1690 + priv->stats.tx_bytes += skb->len;
1691 + } else {
1692 + /* DEBUGPRINT("ERROR: cvmx_mgmt_port_send() failed with %d\n",
1693 + result);
1694 + */
1695 + priv->stats.tx_dropped++;
1696 + }
1697 + dev_kfree_skb(skb);
1698 + return 0;
1699 +}
1700 +
1701 +
1702 +/**
1703 + * Interrupt handler. The interrupt occurs whenever the MIX
1704 + * hardware has received packets pending for this port.
1705 + *
1706 + * @param cpl    Interrupt number
1707 + * @param dev_id Pointer to the net_device this interrupt
1708 + *               is for
1709 + * @return IRQ_HANDLED
1710 + */
1711 +static irqreturn_t do_interrupt(int cpl, void *dev_id)
1712 +{
1713 +	unsigned long flags;
1714 + struct sk_buff *skb;
1715 + int result;
1716 + char packet[2048];
1717 + struct net_device *dev = (struct net_device *) dev_id;
1718 + struct device_private *priv = netdev_priv(dev);
1719 +
1720 + do {
1721 + local_irq_save(flags);
1722 + result = cvmx_mgmt_port_receive(priv->port, sizeof(packet),
1723 + packet);
1724 + local_irq_restore(flags);
1725 +
1726 + /* Silently drop packets if we aren't up */
1727 + if ((dev->flags & IFF_UP) == 0)
1728 + continue;
1729 +
1730 + if (result > 0) {
1731 + skb = dev_alloc_skb(result);
1732 + if (skb) {
1733 + memcpy(skb_put(skb, result), packet, result);
1734 + skb->protocol = eth_type_trans(skb, dev);
1735 + skb->dev = dev;
1736 + skb->ip_summed = CHECKSUM_NONE;
1737 + priv->stats.rx_bytes += skb->len;
1738 + priv->stats.rx_packets++;
1739 + netif_rx(skb);
1740 + } else {
1741 + DEBUGPRINT("%s: Failed to allocate skbuff, "
1742 + "packet dropped\n",
1743 + dev->name);
1744 + priv->stats.rx_dropped++;
1745 + }
1746 + } else if (result < 0) {
1747 + DEBUGPRINT("%s: Receive error code %d, packet "
1748 + "dropped\n",
1749 + dev->name, result);
1750 + priv->stats.rx_errors++;
1751 + }
1752 + } while (result != 0);
1753 +
1754 + /* Clear any pending interrupts */
1755 + cvmx_write_csr(CVMX_MIXX_ISR(priv->port),
1756 + cvmx_read_csr(CVMX_MIXX_ISR(priv->port)));
1757 + cvmx_read_csr(CVMX_MIXX_ISR(priv->port));
1758 +
1759 + return IRQ_HANDLED;
1760 +}
1761 +
1762 +
1763 +#ifdef CONFIG_NET_POLL_CONTROLLER
1764 +/**
1765 + * This is called when the kernel needs to manually poll the
1766 + * device, for example from netpoll/netconsole. For Octeon,
1767 + * this simply calls the interrupt handler for the supplied
1768 + * device.
1769 + *
1770 + * @param dev Device to poll
1771 + */
1772 +static void device_poll_controller(struct net_device *dev)
1773 +{
1774 + do_interrupt(0, dev);
1775 +}
1776 +#endif
1777 +
1778 +
1779 +/**
1780 + * Open a device for use. Device should be able to send and
1781 + * receive packets after this is called.
1782 + *
1783 + * @param dev Device to bring up
1784 + * @return Zero on success
1785 + */
1786 +static int device_open(struct net_device *dev)
1787 +{
1788 + /* Clear the statistics whenever the interface is brought up */
1789 + struct device_private *priv = netdev_priv(dev);
1790 + memset(&priv->stats, 0, sizeof(priv->stats));
1791 + cvmx_mgmt_port_enable(priv->port);
1792 + return 0;
1793 +}
1794 +
1795 +
1796 +/**
1797 + * Stop an ethernet device. No more packets should be
1798 + * received from this device.
1799 + *
1800 + * @param dev Device to bring down
1801 + * @return Zero on success
1802 + */
1803 +static int device_close(struct net_device *dev)
1804 +{
1805 + struct device_private *priv = netdev_priv(dev);
1806 + cvmx_mgmt_port_disable(priv->port);
1807 + return 0;
1808 +}
1809 +
1810 +
1811 +/**
1812 + * Get the low level ethernet statistics
1813 + *
1814 + * @param dev Device to get the statistics from
1815 + * @return Pointer to the statistics
1816 + */
1817 +static struct net_device_stats *device_get_stats(struct net_device *dev)
1818 +{
1819 + struct device_private *priv = netdev_priv(dev);
1820 + return &priv->stats;
1821 +}
1822 +
1823 +/**
1824 + * Set the multicast list by passing the device flags to the low level port code.
1825 + *
1826 + * @param dev Device to work on
1827 + */
1828 +static void ethernet_mgmt_port_set_multicast_list(struct net_device *dev)
1829 +{
1830 + struct device_private *priv = netdev_priv(dev);
1831 + int port = priv->port;
1832 + int num_ports;
1833 + if (OCTEON_IS_MODEL(OCTEON_CN52XX))
1834 + num_ports = 2;
1835 + else
1836 + num_ports = 1;
1837 + if (port < num_ports)
1838 + cvmx_mgmt_port_set_multicast_list(port, dev->flags);
1839 +}
1840 +
1841 +/**
1842 + * Set the hardware MAC address for a management port device
1843 + *
1844 + * @param dev Device to change the MAC address for
1845 + * @param addr Address structure to change it to. The MAC address is at addr + 2.
1846 + * @return Zero on success
1847 + */
1848 +static int ethernet_mgmt_port_set_mac_address(struct net_device *dev,
1849 + void *addr)
1850 +{
1851 + struct device_private *priv = netdev_priv(dev);
1852 + union cvmx_agl_gmx_prtx_cfg agl_gmx_cfg;
1853 + int port = priv->port;
1854 + int num_ports;
1855 +
1856 + if (OCTEON_IS_MODEL(OCTEON_CN52XX))
1857 + num_ports = 2;
1858 + else
1859 + num_ports = 1;
1860 +
1861 + memcpy(dev->dev_addr, addr + 2, 6);
1862 +
1863 + if (port < num_ports) {
1864 + int i;
1865 + uint8_t *ptr = addr;
1866 + uint64_t mac = 0;
1867 + for (i = 0; i < 6; i++)
1868 + mac = (mac<<8) | (uint64_t)(ptr[i+2]);
1869 +
1870 + agl_gmx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
1871 + cvmx_mgmt_port_set_mac(port, mac);
1872 + ethernet_mgmt_port_set_multicast_list(dev);
1873 + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_cfg.u64);
1874 + }
1875 + return 0;
1876 +}
1877 +
1878 +/**
1879 + * Per network device initialization
1880 + *
1881 + * @param dev Device to initialize
1882 + * @return Zero on success
1883 + */
1884 +static int device_init(struct net_device *dev)
1885 +{
1886 + struct device_private *priv = netdev_priv(dev);
1887 + uint64_t mac = cvmx_mgmt_port_get_mac(priv->port);
1888 +
1889 + dev->hard_start_xmit = packet_transmit;
1890 + dev->get_stats = device_get_stats;
1891 + dev->open = device_open;
1892 + dev->stop = device_close;
1893 +#ifdef CONFIG_NET_POLL_CONTROLLER
1894 + dev->poll_controller = device_poll_controller;
1895 +#endif
1896 + dev->dev_addr[0] = (mac >> 40) & 0xff;
1897 + dev->dev_addr[1] = (mac >> 32) & 0xff;
1898 + dev->dev_addr[2] = (mac >> 24) & 0xff;
1899 + dev->dev_addr[3] = (mac >> 16) & 0xff;
1900 + dev->dev_addr[4] = (mac >> 8) & 0xff;
1901 + dev->dev_addr[5] = (mac >> 0) & 0xff;
1902 + return 0;
1903 +}
1904 +
1905 +
1906 +/**
1907 + * Module/ driver initialization. Creates the linux network
1908 + * devices.
1909 + *
1910 + * @return Zero on success
1911 + */
1912 +static int __init ethernet_mgmt_port_init(void)
1913 +{
1914 + struct net_device *dev;
1915 + struct device_private *priv;
1916 + union cvmx_mixx_irhwm mix_irhwm;
1917 + union cvmx_mixx_intena mix_intena;
1918 + int num_ports;
1919 + int port;
1920 +
1921 + if (!OCTEON_IS_MODEL(OCTEON_CN56XX) && !OCTEON_IS_MODEL(OCTEON_CN52XX))
1922 + return 0;
1923 +
1924 + if (OCTEON_IS_MODEL(OCTEON_CN52XX))
1925 + num_ports = 2;
1926 + else
1927 + num_ports = 1;
1928 +
1929 +	pr_info("Octeon management port ethernet driver\n");
1930 +
1931 + for (port = 0; port < num_ports; port++) {
1932 + if (cvmx_mgmt_port_initialize(port) != CVMX_MGMT_PORT_SUCCESS) {
1933 + pr_err("ERROR: cvmx_mgmt_port_initialize(%d) "
1934 + "failed\n", port);
1935 + return -1;
1936 + }
1937 +
1938 + /* Setup is complete, create the virtual ethernet devices */
1939 + dev = alloc_etherdev(sizeof(struct device_private));
1940 + if (dev == NULL) {
1941 + pr_err("ERROR: Failed to allocate ethernet device\n");
1942 + return -1;
1943 + }
1944 +
1945 + dev->init = device_init;
1946 + strcpy(dev->name, "mgmt%d");
1947 +
1948 + /* Initialize the device private structure. */
1949 + priv = netdev_priv(dev);
1950 + memset(priv, 0, sizeof(struct device_private));
1951 + priv->port = port;
1952 +
1953 + if (register_netdev(dev) < 0) {
1954 + pr_err("ERROR: Failed to register ethernet device\n");
1955 +			free_netdev(dev);
1956 + return -1;
1957 + }
1958 +
1959 + /* Clear any pending interrupts */
1960 + cvmx_write_csr(CVMX_MIXX_ISR(priv->port),
1961 + cvmx_read_csr(CVMX_MIXX_ISR(priv->port)));
1962 +
1963 +		/* Register an IRQ handler to receive interrupts */
1964 + dev->irq =
1965 + (priv->port == 0) ? OCTEON_IRQ_MII0 : OCTEON_IRQ_MII1;
1966 + if (request_irq(dev->irq, do_interrupt, IRQF_SHARED, dev->name,
1967 + dev))
1968 + pr_err("ethernet-mgmt: Failed to assign "
1969 + "interrupt %d\n", dev->irq);
1970 +
1971 + /* Interrupt every single RX packet */
1972 + mix_irhwm.u64 = 0;
1973 + mix_irhwm.s.irhwm = 0;
1974 + cvmx_write_csr(CVMX_MIXX_IRHWM(priv->port), mix_irhwm.u64);
1975 +
1976 + /* Enable receive interrupts */
1977 + mix_intena.u64 = 0;
1978 + mix_intena.s.ithena = 1;
1979 + cvmx_write_csr(CVMX_MIXX_INTENA(priv->port), mix_intena.u64);
1980 +
1981 + global_dev[priv->port] = dev;
1982 +
1983 + dev->set_mac_address = ethernet_mgmt_port_set_mac_address;
1984 + dev->set_multicast_list = ethernet_mgmt_port_set_multicast_list;
1985 + }
1986 + return 0;
1987 +}
1988 +
1989 +
1990 +/**
1991 + * Module / driver shutdown
1992 + *
1993 + * @return Zero on success
1994 + */
1995 +static void __exit ethernet_mgmt_port_cleanup(void)
1996 +{
1997 + int port;
1998 + for (port = 0; port < 2; port++) {
1999 + if (global_dev[port]) {
2000 + struct device_private *priv =
2001 + netdev_priv(global_dev[port]);
2002 + /* Disable interrupt */
2003 + cvmx_write_csr(CVMX_MIXX_IRHWM(priv->port), 0);
2004 + cvmx_write_csr(CVMX_MIXX_INTENA(priv->port), 0);
2005 + cvmx_mgmt_port_shutdown(priv->port);
2006 +
2007 + /* Free the interrupt handler */
2008 + free_irq(global_dev[port]->irq, global_dev[port]);
2009 +
2010 + /* Free the ethernet devices */
2011 + unregister_netdev(global_dev[port]);
2012 +			free_netdev(global_dev[port]);
2013 + global_dev[port] = NULL;
2014 + }
2015 + }
2016 +}
2017 +
2018 +MODULE_LICENSE("GPL");
2019 +MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
2020 +MODULE_DESCRIPTION("Cavium Networks Octeon management port ethernet driver.");
2021 +module_init(ethernet_mgmt_port_init);
2022 +module_exit(ethernet_mgmt_port_cleanup);
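
For reference: the patch treats the management port MAC address as a 48-bit integer. device_init() unpacks the value returned by cvmx_mgmt_port_get_mac() into dev->dev_addr, and ethernet_mgmt_port_set_mac_address() packs the six bytes that start at offset 2 of the struct sockaddr it receives (right after the 2-byte sa_family field) back into that form for cvmx_mgmt_port_set_mac(). The standalone userspace sketch below only illustrates that byte order and is not part of the patch; mac_from_sockaddr() is a hypothetical helper name used here for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Pack the six address bytes found at offset 2 of a struct sockaddr
 * into a 48-bit value, most significant byte first, mirroring the
 * loop in ethernet_mgmt_port_set_mac_address() above. */
static uint64_t mac_from_sockaddr(const struct sockaddr *addr)
{
	const uint8_t *ptr = (const uint8_t *)addr;
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++)
		mac = (mac << 8) | (uint64_t)ptr[i + 2];
	return mac;
}

int main(void)
{
	struct sockaddr sa;
	/* Locally administered example address, not a real device MAC */
	const uint8_t example[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
	uint64_t mac;
	int i;

	memset(&sa, 0, sizeof(sa));
	memcpy(sa.sa_data, example, 6);

	mac = mac_from_sockaddr(&sa);
	printf("packed:   %012llx\n", (unsigned long long)mac);

	/* Unpack the same way device_init() fills dev->dev_addr */
	printf("unpacked:");
	for (i = 0; i < 6; i++)
		printf(" %02x", (unsigned int)((mac >> (40 - 8 * i)) & 0xff));
	printf("\n");
	return 0;
}

Running this prints 020000123456 followed by the same six bytes, matching the shifts used in device_init().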