xref: /linux/drivers/net/ethernet/ti/netcp_ethss.c (revision c0e297dc61f8d4453e07afbea1fa8d0e67cd4a34)
1 /*
2  * Keystone GBE and XGBE subsystem code
3  *
4  * Copyright (C) 2014 Texas Instruments Incorporated
5  * Authors:	Sandeep Nair <sandeep_n@ti.com>
6  *		Sandeep Paulraj <s-paulraj@ti.com>
7  *		Cyril Chemparathy <cyril@ti.com>
8  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *		Wingman Kwok <w-kwok2@ti.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation version 2.
14  *
15  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16  * kind, whether express or implied; without even the implied warranty
17  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  */
20 
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/of_mdio.h>
24 #include <linux/of_address.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ethtool.h>
27 
28 #include "cpsw_ale.h"
29 #include "netcp.h"
30 
31 #define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
32 #define NETCP_DRIVER_VERSION		"v1.0"
33 
34 #define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
35 #define GBE_MAJOR_VERSION(reg)		((reg >> 8) & 0x7)
36 #define GBE_MINOR_VERSION(reg)		(reg & 0xff)
37 #define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
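/* Field layout of the id_ver register as decoded by the macros above
 * (derived from the shifts/masks): identifier in bits 31:16, RTL version
 * in bits 15:11, major version in bits 10:8, minor version in bits 7:0.
 */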
38 
39 /* 1G Ethernet SS defines */
40 #define GBE_MODULE_NAME			"netcp-gbe"
41 #define GBE_SS_VERSION_14		0x4ed21104
42 
43 #define GBE_SS_REG_INDEX		0
44 #define GBE_SGMII34_REG_INDEX		1
45 #define GBE_SM_REG_INDEX		2
46 /* offset relative to base of GBE_SS_REG_INDEX */
47 #define GBE13_SGMII_MODULE_OFFSET	0x100
48 /* offset relative to base of GBE_SM_REG_INDEX */
49 #define GBE13_HOST_PORT_OFFSET		0x34
50 #define GBE13_SLAVE_PORT_OFFSET		0x60
51 #define GBE13_EMAC_OFFSET		0x100
52 #define GBE13_SLAVE_PORT2_OFFSET	0x200
53 #define GBE13_HW_STATS_OFFSET		0x300
54 #define GBE13_ALE_OFFSET		0x600
55 #define GBE13_HOST_PORT_NUM		0
56 #define GBE13_NUM_ALE_ENTRIES		1024
57 
58 /* 1G Ethernet NU SS defines */
59 #define GBENU_MODULE_NAME		"netcp-gbenu"
60 #define GBE_SS_ID_NU			0x4ee6
61 #define GBE_SS_ID_2U			0x4ee8
62 
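/* IS_SS_ID_MU() matches both the NU and 2U flavours of the newer 1G
 * subsystem, while IS_SS_ID_NU() matches the NU flavour only.
 */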
63 #define IS_SS_ID_MU(d) \
64 	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
65 	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
66 
67 #define IS_SS_ID_NU(d) \
68 	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
69 
70 #define GBENU_SS_REG_INDEX		0
71 #define GBENU_SM_REG_INDEX		1
72 #define GBENU_SGMII_MODULE_OFFSET	0x100
73 #define GBENU_HOST_PORT_OFFSET		0x1000
74 #define GBENU_SLAVE_PORT_OFFSET		0x2000
75 #define GBENU_EMAC_OFFSET		0x2330
76 #define GBENU_HW_STATS_OFFSET		0x1a000
77 #define GBENU_ALE_OFFSET		0x1e000
78 #define GBENU_HOST_PORT_NUM		0
79 #define GBENU_NUM_ALE_ENTRIES		1024
80 
81 /* 10G Ethernet SS defines */
82 #define XGBE_MODULE_NAME		"netcp-xgbe"
83 #define XGBE_SS_VERSION_10		0x4ee42100
84 
85 #define XGBE_SS_REG_INDEX		0
86 #define XGBE_SM_REG_INDEX		1
87 #define XGBE_SERDES_REG_INDEX		2
88 
89 /* offset relative to base of XGBE_SS_REG_INDEX */
90 #define XGBE10_SGMII_MODULE_OFFSET	0x100
91 /* offset relative to base of XGBE_SM_REG_INDEX */
92 #define XGBE10_HOST_PORT_OFFSET		0x34
93 #define XGBE10_SLAVE_PORT_OFFSET	0x64
94 #define XGBE10_EMAC_OFFSET		0x400
95 #define XGBE10_ALE_OFFSET		0x700
96 #define XGBE10_HW_STATS_OFFSET		0x800
97 #define XGBE10_HOST_PORT_NUM		0
98 #define XGBE10_NUM_ALE_ENTRIES		1024
99 
100 #define	GBE_TIMER_INTERVAL			(HZ / 2)
101 
102 /* Soft reset register values */
103 #define SOFT_RESET_MASK				BIT(0)
104 #define SOFT_RESET				BIT(0)
105 #define DEVICE_EMACSL_RESET_POLL_COUNT		100
106 #define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
107 
108 #define MACSL_RX_ENABLE_CSF			BIT(23)
109 #define MACSL_ENABLE_EXT_CTL			BIT(18)
110 #define MACSL_XGMII_ENABLE			BIT(13)
111 #define MACSL_XGIG_MODE				BIT(8)
112 #define MACSL_GIG_MODE				BIT(7)
113 #define MACSL_GMII_ENABLE			BIT(5)
114 #define MACSL_FULLDUPLEX			BIT(0)
115 
116 #define GBE_CTL_P0_ENABLE			BIT(2)
117 #define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
118 #define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
119 #define GBE_STATS_CD_SEL			BIT(28)
120 
121 #define GBE_PORT_MASK(x)			(BIT(x) - 1)
122 #define GBE_MASK_NO_PORTS			0
123 
124 #define GBE_DEF_1G_MAC_CONTROL					\
125 		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
126 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
127 
128 #define GBE_DEF_10G_MAC_CONTROL				\
129 		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
130 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
131 
132 #define GBE_STATSA_MODULE			0
133 #define GBE_STATSB_MODULE			1
134 #define GBE_STATSC_MODULE			2
135 #define GBE_STATSD_MODULE			3
136 
137 #define GBENU_STATS0_MODULE			0
138 #define GBENU_STATS1_MODULE			1
139 #define GBENU_STATS2_MODULE			2
140 #define GBENU_STATS3_MODULE			3
141 #define GBENU_STATS4_MODULE			4
142 #define GBENU_STATS5_MODULE			5
143 #define GBENU_STATS6_MODULE			6
144 #define GBENU_STATS7_MODULE			7
145 #define GBENU_STATS8_MODULE			8
146 
147 #define XGBE_STATS0_MODULE			0
148 #define XGBE_STATS1_MODULE			1
149 #define XGBE_STATS2_MODULE			2
150 
151 /* s: 0-based slave_port */
152 #define SGMII_BASE(s) \
153 	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
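/* Slave ports 0-1 have their SGMII registers in sgmii_port_regs while
 * slave ports 2-3 use sgmii_port34_regs; SGMII_BASE() selects the right
 * block for a 0-based slave port (used on the 1.4 subsystem, see
 * netcp_ethss_update_link_state()).
 */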
154 
155 #define GBE_TX_QUEUE				648
156 #define	GBE_TXHOOK_ORDER			0
157 #define GBE_DEFAULT_ALE_AGEOUT			30
158 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
159 #define NETCP_LINK_STATE_INVALID		-1
160 
161 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
162 		offsetof(struct gbe##_##rb, rn)
163 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
164 		offsetof(struct gbenu##_##rb, rn)
165 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
166 		offsetof(struct xgbe##_##rb, rn)
167 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
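/* The GBE, GBENU and XGBE variants lay out the same logical registers at
 * different offsets.  The *_SET_REG_OFS() macros record the variant's
 * offsetof() values in the matching *_ofs structure (presumably during
 * device setup, which is outside this excerpt), and GBE_REG_ADDR() adds
 * the recorded offset to the mapped register base.
 *
 * Illustrative example (not taken verbatim from this file):
 *	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
 *	...
 *	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
 */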
168 
169 #define HOST_TX_PRI_MAP_DEFAULT			0x00000000
170 
171 struct xgbe_ss_regs {
172 	u32	id_ver;
173 	u32	synce_count;
174 	u32	synce_mux;
175 	u32	control;
176 };
177 
178 struct xgbe_switch_regs {
179 	u32	id_ver;
180 	u32	control;
181 	u32	emcontrol;
182 	u32	stat_port_en;
183 	u32	ptype;
184 	u32	soft_idle;
185 	u32	thru_rate;
186 	u32	gap_thresh;
187 	u32	tx_start_wds;
188 	u32	flow_control;
189 	u32	cppi_thresh;
190 };
191 
192 struct xgbe_port_regs {
193 	u32	blk_cnt;
194 	u32	port_vlan;
195 	u32	tx_pri_map;
196 	u32	sa_lo;
197 	u32	sa_hi;
198 	u32	ts_ctl;
199 	u32	ts_seq_ltype;
200 	u32	ts_vlan;
201 	u32	ts_ctl_ltype2;
202 	u32	ts_ctl2;
203 	u32	control;
204 };
205 
206 struct xgbe_host_port_regs {
207 	u32	blk_cnt;
208 	u32	port_vlan;
209 	u32	tx_pri_map;
210 	u32	src_id;
211 	u32	rx_pri_map;
212 	u32	rx_maxlen;
213 };
214 
215 struct xgbe_emac_regs {
216 	u32	id_ver;
217 	u32	mac_control;
218 	u32	mac_status;
219 	u32	soft_reset;
220 	u32	rx_maxlen;
221 	u32	__reserved_0;
222 	u32	rx_pause;
223 	u32	tx_pause;
224 	u32	em_control;
225 	u32	__reserved_1;
226 	u32	tx_gap;
227 	u32	rsvd[4];
228 };
229 
230 struct xgbe_host_hw_stats {
231 	u32	rx_good_frames;
232 	u32	rx_broadcast_frames;
233 	u32	rx_multicast_frames;
234 	u32	__rsvd_0[3];
235 	u32	rx_oversized_frames;
236 	u32	__rsvd_1;
237 	u32	rx_undersized_frames;
238 	u32	__rsvd_2;
239 	u32	overrun_type4;
240 	u32	overrun_type5;
241 	u32	rx_bytes;
242 	u32	tx_good_frames;
243 	u32	tx_broadcast_frames;
244 	u32	tx_multicast_frames;
245 	u32	__rsvd_3[9];
246 	u32	tx_bytes;
247 	u32	tx_64byte_frames;
248 	u32	tx_65_to_127byte_frames;
249 	u32	tx_128_to_255byte_frames;
250 	u32	tx_256_to_511byte_frames;
251 	u32	tx_512_to_1023byte_frames;
252 	u32	tx_1024byte_frames;
253 	u32	net_bytes;
254 	u32	rx_sof_overruns;
255 	u32	rx_mof_overruns;
256 	u32	rx_dma_overruns;
257 };
258 
259 struct xgbe_hw_stats {
260 	u32	rx_good_frames;
261 	u32	rx_broadcast_frames;
262 	u32	rx_multicast_frames;
263 	u32	rx_pause_frames;
264 	u32	rx_crc_errors;
265 	u32	rx_align_code_errors;
266 	u32	rx_oversized_frames;
267 	u32	rx_jabber_frames;
268 	u32	rx_undersized_frames;
269 	u32	rx_fragments;
270 	u32	overrun_type4;
271 	u32	overrun_type5;
272 	u32	rx_bytes;
273 	u32	tx_good_frames;
274 	u32	tx_broadcast_frames;
275 	u32	tx_multicast_frames;
276 	u32	tx_pause_frames;
277 	u32	tx_deferred_frames;
278 	u32	tx_collision_frames;
279 	u32	tx_single_coll_frames;
280 	u32	tx_mult_coll_frames;
281 	u32	tx_excessive_collisions;
282 	u32	tx_late_collisions;
283 	u32	tx_underrun;
284 	u32	tx_carrier_sense_errors;
285 	u32	tx_bytes;
286 	u32	tx_64byte_frames;
287 	u32	tx_65_to_127byte_frames;
288 	u32	tx_128_to_255byte_frames;
289 	u32	tx_256_to_511byte_frames;
290 	u32	tx_512_to_1023byte_frames;
291 	u32	tx_1024byte_frames;
292 	u32	net_bytes;
293 	u32	rx_sof_overruns;
294 	u32	rx_mof_overruns;
295 	u32	rx_dma_overruns;
296 };
297 
298 #define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
299 
300 struct gbenu_ss_regs {
301 	u32	id_ver;
302 	u32	synce_count;		/* NU */
303 	u32	synce_mux;		/* NU */
304 	u32	control;		/* 2U */
305 	u32	__rsvd_0[2];		/* 2U */
306 	u32	rgmii_status;		/* 2U */
307 	u32	ss_status;		/* 2U */
308 };
309 
310 struct gbenu_switch_regs {
311 	u32	id_ver;
312 	u32	control;
313 	u32	__rsvd_0[2];
314 	u32	emcontrol;
315 	u32	stat_port_en;
316 	u32	ptype;			/* NU */
317 	u32	soft_idle;
318 	u32	thru_rate;		/* NU */
319 	u32	gap_thresh;		/* NU */
320 	u32	tx_start_wds;		/* NU */
321 	u32	eee_prescale;		/* 2U */
322 	u32	tx_g_oflow_thresh_set;	/* NU */
323 	u32	tx_g_oflow_thresh_clr;	/* NU */
324 	u32	tx_g_buf_thresh_set_l;	/* NU */
325 	u32	tx_g_buf_thresh_set_h;	/* NU */
326 	u32	tx_g_buf_thresh_clr_l;	/* NU */
327 	u32	tx_g_buf_thresh_clr_h;	/* NU */
328 };
329 
330 struct gbenu_port_regs {
331 	u32	__rsvd_0;
332 	u32	control;
333 	u32	max_blks;		/* 2U */
334 	u32	mem_align1;
335 	u32	blk_cnt;
336 	u32	port_vlan;
337 	u32	tx_pri_map;		/* NU */
338 	u32	pri_ctl;		/* 2U */
339 	u32	rx_pri_map;
340 	u32	rx_maxlen;
341 	u32	tx_blks_pri;		/* NU */
342 	u32	__rsvd_1;
343 	u32	idle2lpi;		/* 2U */
344 	u32	lpi2idle;		/* 2U */
345 	u32	eee_status;		/* 2U */
346 	u32	__rsvd_2;
347 	u32	__rsvd_3[176];		/* NU: more to add */
348 	u32	__rsvd_4[2];
349 	u32	sa_lo;
350 	u32	sa_hi;
351 	u32	ts_ctl;
352 	u32	ts_seq_ltype;
353 	u32	ts_vlan;
354 	u32	ts_ctl_ltype2;
355 	u32	ts_ctl2;
356 };
357 
358 struct gbenu_host_port_regs {
359 	u32	__rsvd_0;
360 	u32	control;
361 	u32	flow_id_offset;		/* 2U */
362 	u32	__rsvd_1;
363 	u32	blk_cnt;
364 	u32	port_vlan;
365 	u32	tx_pri_map;		/* NU */
366 	u32	pri_ctl;
367 	u32	rx_pri_map;
368 	u32	rx_maxlen;
369 	u32	tx_blks_pri;		/* NU */
370 	u32	__rsvd_2;
371 	u32	idle2lpi;		/* 2U */
372 	u32	lpi2wake;		/* 2U */
373 	u32	eee_status;		/* 2U */
374 	u32	__rsvd_3;
375 	u32	__rsvd_4[184];		/* NU */
376 	u32	host_blks_pri;		/* NU */
377 };
378 
379 struct gbenu_emac_regs {
380 	u32	mac_control;
381 	u32	mac_status;
382 	u32	soft_reset;
383 	u32	boff_test;
384 	u32	rx_pause;
385 	u32	__rsvd_0[11];		/* NU */
386 	u32	tx_pause;
387 	u32	__rsvd_1[11];		/* NU */
388 	u32	em_control;
389 	u32	tx_gap;
390 };
391 
392 /* Some hw stat regs are applicable to the slave ports only; this is
393  * handled by the gbenu_et_stats table.  Also, some counters exist only
394  * on SS version NU and others only on 2U.
395  */
396 struct gbenu_hw_stats {
397 	u32	rx_good_frames;
398 	u32	rx_broadcast_frames;
399 	u32	rx_multicast_frames;
400 	u32	rx_pause_frames;		/* slave */
401 	u32	rx_crc_errors;
402 	u32	rx_align_code_errors;		/* slave */
403 	u32	rx_oversized_frames;
404 	u32	rx_jabber_frames;		/* slave */
405 	u32	rx_undersized_frames;
406 	u32	rx_fragments;			/* slave */
407 	u32	ale_drop;
408 	u32	ale_overrun_drop;
409 	u32	rx_bytes;
410 	u32	tx_good_frames;
411 	u32	tx_broadcast_frames;
412 	u32	tx_multicast_frames;
413 	u32	tx_pause_frames;		/* slave */
414 	u32	tx_deferred_frames;		/* slave */
415 	u32	tx_collision_frames;		/* slave */
416 	u32	tx_single_coll_frames;		/* slave */
417 	u32	tx_mult_coll_frames;		/* slave */
418 	u32	tx_excessive_collisions;	/* slave */
419 	u32	tx_late_collisions;		/* slave */
420 	u32	rx_ipg_error;			/* slave 10G only */
421 	u32	tx_carrier_sense_errors;	/* slave */
422 	u32	tx_bytes;
423 	u32	tx_64B_frames;
424 	u32	tx_65_to_127B_frames;
425 	u32	tx_128_to_255B_frames;
426 	u32	tx_256_to_511B_frames;
427 	u32	tx_512_to_1023B_frames;
428 	u32	tx_1024B_frames;
429 	u32	net_bytes;
430 	u32	rx_bottom_fifo_drop;
431 	u32	rx_port_mask_drop;
432 	u32	rx_top_fifo_drop;
433 	u32	ale_rate_limit_drop;
434 	u32	ale_vid_ingress_drop;
435 	u32	ale_da_eq_sa_drop;
436 	u32	__rsvd_0[3];
437 	u32	ale_unknown_ucast;
438 	u32	ale_unknown_ucast_bytes;
439 	u32	ale_unknown_mcast;
440 	u32	ale_unknown_mcast_bytes;
441 	u32	ale_unknown_bcast;
442 	u32	ale_unknown_bcast_bytes;
443 	u32	ale_pol_match;
444 	u32	ale_pol_match_red;		/* NU */
445 	u32	ale_pol_match_yellow;		/* NU */
446 	u32	__rsvd_1[44];
447 	u32	tx_mem_protect_err;
448 	/* following NU only */
449 	u32	tx_pri0;
450 	u32	tx_pri1;
451 	u32	tx_pri2;
452 	u32	tx_pri3;
453 	u32	tx_pri4;
454 	u32	tx_pri5;
455 	u32	tx_pri6;
456 	u32	tx_pri7;
457 	u32	tx_pri0_bcnt;
458 	u32	tx_pri1_bcnt;
459 	u32	tx_pri2_bcnt;
460 	u32	tx_pri3_bcnt;
461 	u32	tx_pri4_bcnt;
462 	u32	tx_pri5_bcnt;
463 	u32	tx_pri6_bcnt;
464 	u32	tx_pri7_bcnt;
465 	u32	tx_pri0_drop;
466 	u32	tx_pri1_drop;
467 	u32	tx_pri2_drop;
468 	u32	tx_pri3_drop;
469 	u32	tx_pri4_drop;
470 	u32	tx_pri5_drop;
471 	u32	tx_pri6_drop;
472 	u32	tx_pri7_drop;
473 	u32	tx_pri0_drop_bcnt;
474 	u32	tx_pri1_drop_bcnt;
475 	u32	tx_pri2_drop_bcnt;
476 	u32	tx_pri3_drop_bcnt;
477 	u32	tx_pri4_drop_bcnt;
478 	u32	tx_pri5_drop_bcnt;
479 	u32	tx_pri6_drop_bcnt;
480 	u32	tx_pri7_drop_bcnt;
481 };
482 
483 #define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
484 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
485 
486 struct gbe_ss_regs {
487 	u32	id_ver;
488 	u32	synce_count;
489 	u32	synce_mux;
490 };
491 
492 struct gbe_ss_regs_ofs {
493 	u16	id_ver;
494 	u16	control;
495 };
496 
497 struct gbe_switch_regs {
498 	u32	id_ver;
499 	u32	control;
500 	u32	soft_reset;
501 	u32	stat_port_en;
502 	u32	ptype;
503 	u32	soft_idle;
504 	u32	thru_rate;
505 	u32	gap_thresh;
506 	u32	tx_start_wds;
507 	u32	flow_control;
508 };
509 
510 struct gbe_switch_regs_ofs {
511 	u16	id_ver;
512 	u16	control;
513 	u16	soft_reset;
514 	u16	emcontrol;
515 	u16	stat_port_en;
516 	u16	ptype;
517 	u16	flow_control;
518 };
519 
520 struct gbe_port_regs {
521 	u32	max_blks;
522 	u32	blk_cnt;
523 	u32	port_vlan;
524 	u32	tx_pri_map;
525 	u32	sa_lo;
526 	u32	sa_hi;
527 	u32	ts_ctl;
528 	u32	ts_seq_ltype;
529 	u32	ts_vlan;
530 	u32	ts_ctl_ltype2;
531 	u32	ts_ctl2;
532 };
533 
534 struct gbe_port_regs_ofs {
535 	u16	port_vlan;
536 	u16	tx_pri_map;
537 	u16	sa_lo;
538 	u16	sa_hi;
539 	u16	ts_ctl;
540 	u16	ts_seq_ltype;
541 	u16	ts_vlan;
542 	u16	ts_ctl_ltype2;
543 	u16	ts_ctl2;
544 	u16	rx_maxlen;	/* 2U, NU */
545 };
546 
547 struct gbe_host_port_regs {
548 	u32	src_id;
549 	u32	port_vlan;
550 	u32	rx_pri_map;
551 	u32	rx_maxlen;
552 };
553 
554 struct gbe_host_port_regs_ofs {
555 	u16	port_vlan;
556 	u16	tx_pri_map;
557 	u16	rx_maxlen;
558 };
559 
560 struct gbe_emac_regs {
561 	u32	id_ver;
562 	u32	mac_control;
563 	u32	mac_status;
564 	u32	soft_reset;
565 	u32	rx_maxlen;
566 	u32	__reserved_0;
567 	u32	rx_pause;
568 	u32	tx_pause;
569 	u32	__reserved_1;
570 	u32	rx_pri_map;
571 	u32	rsvd[6];
572 };
573 
574 struct gbe_emac_regs_ofs {
575 	u16	mac_control;
576 	u16	soft_reset;
577 	u16	rx_maxlen;
578 };
579 
580 struct gbe_hw_stats {
581 	u32	rx_good_frames;
582 	u32	rx_broadcast_frames;
583 	u32	rx_multicast_frames;
584 	u32	rx_pause_frames;
585 	u32	rx_crc_errors;
586 	u32	rx_align_code_errors;
587 	u32	rx_oversized_frames;
588 	u32	rx_jabber_frames;
589 	u32	rx_undersized_frames;
590 	u32	rx_fragments;
591 	u32	__pad_0[2];
592 	u32	rx_bytes;
593 	u32	tx_good_frames;
594 	u32	tx_broadcast_frames;
595 	u32	tx_multicast_frames;
596 	u32	tx_pause_frames;
597 	u32	tx_deferred_frames;
598 	u32	tx_collision_frames;
599 	u32	tx_single_coll_frames;
600 	u32	tx_mult_coll_frames;
601 	u32	tx_excessive_collisions;
602 	u32	tx_late_collisions;
603 	u32	tx_underrun;
604 	u32	tx_carrier_sense_errors;
605 	u32	tx_bytes;
606 	u32	tx_64byte_frames;
607 	u32	tx_65_to_127byte_frames;
608 	u32	tx_128_to_255byte_frames;
609 	u32	tx_256_to_511byte_frames;
610 	u32	tx_512_to_1023byte_frames;
611 	u32	tx_1024byte_frames;
612 	u32	net_bytes;
613 	u32	rx_sof_overruns;
614 	u32	rx_mof_overruns;
615 	u32	rx_dma_overruns;
616 };
617 
618 #define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
619 #define GBE_MAX_HW_STAT_MODS			9
620 #define GBE_HW_STATS_REG_MAP_SZ			0x100
621 
622 struct gbe_slave {
623 	void __iomem			*port_regs;
624 	void __iomem			*emac_regs;
625 	struct gbe_port_regs_ofs	port_regs_ofs;
626 	struct gbe_emac_regs_ofs	emac_regs_ofs;
627 	int				slave_num; /* 0 based logical number */
628 	int				port_num;  /* actual port number */
629 	atomic_t			link_state;
630 	bool				open;
631 	struct phy_device		*phy;
632 	u32				link_interface;
633 	u32				mac_control;
634 	u8				phy_port_t;
635 	struct device_node		*phy_node;
636 	struct list_head		slave_list;
637 };
638 
639 struct gbe_priv {
640 	struct device			*dev;
641 	struct netcp_device		*netcp_device;
642 	struct timer_list		timer;
643 	u32				num_slaves;
644 	u32				ale_entries;
645 	u32				ale_ports;
646 	bool				enable_ale;
647 	u8				max_num_slaves;
648 	u8				max_num_ports; /* max_num_slaves + 1 */
649 	struct netcp_tx_pipe		tx_pipe;
650 
651 	int				host_port;
652 	u32				rx_packet_max;
653 	u32				ss_version;
654 	u32				stats_en_mask;
655 
656 	void __iomem			*ss_regs;
657 	void __iomem			*switch_regs;
658 	void __iomem			*host_port_regs;
659 	void __iomem			*ale_reg;
660 	void __iomem			*sgmii_port_regs;
661 	void __iomem			*sgmii_port34_regs;
662 	void __iomem			*xgbe_serdes_regs;
663 	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
664 
665 	struct gbe_ss_regs_ofs		ss_regs_ofs;
666 	struct gbe_switch_regs_ofs	switch_regs_ofs;
667 	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
668 
669 	struct cpsw_ale			*ale;
670 	unsigned int			tx_queue_id;
671 	const char			*dma_chan_name;
672 
673 	struct list_head		gbe_intf_head;
674 	struct list_head		secondary_slaves;
675 	struct net_device		*dummy_ndev;
676 
677 	u64				*hw_stats;
678 	const struct netcp_ethtool_stat *et_stats;
679 	int				num_et_stats;
680 	/*  Lock for updating the hwstats */
681 	spinlock_t			hw_stats_lock;
682 };
683 
684 struct gbe_intf {
685 	struct net_device	*ndev;
686 	struct device		*dev;
687 	struct gbe_priv		*gbe_dev;
688 	struct netcp_tx_pipe	tx_pipe;
689 	struct gbe_slave	*slave;
690 	struct list_head	gbe_intf_list;
691 	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
692 };
693 
694 static struct netcp_module gbe_module;
695 static struct netcp_module xgbe_module;
696 
697 /* Statistic management */
698 struct netcp_ethtool_stat {
699 	char desc[ETH_GSTRING_LEN];
700 	int type;
701 	u32 size;
702 	int offset;
703 };
704 
705 #define GBE_STATSA_INFO(field)						\
706 {									\
707 	"GBE_A:"#field, GBE_STATSA_MODULE,				\
708 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
709 	offsetof(struct gbe_hw_stats, field)				\
710 }
711 
712 #define GBE_STATSB_INFO(field)						\
713 {									\
714 	"GBE_B:"#field, GBE_STATSB_MODULE,				\
715 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
716 	offsetof(struct gbe_hw_stats, field)				\
717 }
718 
719 #define GBE_STATSC_INFO(field)						\
720 {									\
721 	"GBE_C:"#field, GBE_STATSC_MODULE,				\
722 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
723 	offsetof(struct gbe_hw_stats, field)				\
724 }
725 
726 #define GBE_STATSD_INFO(field)						\
727 {									\
728 	"GBE_D:"#field, GBE_STATSD_MODULE,				\
729 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
730 	offsetof(struct gbe_hw_stats, field)				\
731 }
732 
733 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
734 	/* GBE module A */
735 	GBE_STATSA_INFO(rx_good_frames),
736 	GBE_STATSA_INFO(rx_broadcast_frames),
737 	GBE_STATSA_INFO(rx_multicast_frames),
738 	GBE_STATSA_INFO(rx_pause_frames),
739 	GBE_STATSA_INFO(rx_crc_errors),
740 	GBE_STATSA_INFO(rx_align_code_errors),
741 	GBE_STATSA_INFO(rx_oversized_frames),
742 	GBE_STATSA_INFO(rx_jabber_frames),
743 	GBE_STATSA_INFO(rx_undersized_frames),
744 	GBE_STATSA_INFO(rx_fragments),
745 	GBE_STATSA_INFO(rx_bytes),
746 	GBE_STATSA_INFO(tx_good_frames),
747 	GBE_STATSA_INFO(tx_broadcast_frames),
748 	GBE_STATSA_INFO(tx_multicast_frames),
749 	GBE_STATSA_INFO(tx_pause_frames),
750 	GBE_STATSA_INFO(tx_deferred_frames),
751 	GBE_STATSA_INFO(tx_collision_frames),
752 	GBE_STATSA_INFO(tx_single_coll_frames),
753 	GBE_STATSA_INFO(tx_mult_coll_frames),
754 	GBE_STATSA_INFO(tx_excessive_collisions),
755 	GBE_STATSA_INFO(tx_late_collisions),
756 	GBE_STATSA_INFO(tx_underrun),
757 	GBE_STATSA_INFO(tx_carrier_sense_errors),
758 	GBE_STATSA_INFO(tx_bytes),
759 	GBE_STATSA_INFO(tx_64byte_frames),
760 	GBE_STATSA_INFO(tx_65_to_127byte_frames),
761 	GBE_STATSA_INFO(tx_128_to_255byte_frames),
762 	GBE_STATSA_INFO(tx_256_to_511byte_frames),
763 	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
764 	GBE_STATSA_INFO(tx_1024byte_frames),
765 	GBE_STATSA_INFO(net_bytes),
766 	GBE_STATSA_INFO(rx_sof_overruns),
767 	GBE_STATSA_INFO(rx_mof_overruns),
768 	GBE_STATSA_INFO(rx_dma_overruns),
769 	/* GBE module B */
770 	GBE_STATSB_INFO(rx_good_frames),
771 	GBE_STATSB_INFO(rx_broadcast_frames),
772 	GBE_STATSB_INFO(rx_multicast_frames),
773 	GBE_STATSB_INFO(rx_pause_frames),
774 	GBE_STATSB_INFO(rx_crc_errors),
775 	GBE_STATSB_INFO(rx_align_code_errors),
776 	GBE_STATSB_INFO(rx_oversized_frames),
777 	GBE_STATSB_INFO(rx_jabber_frames),
778 	GBE_STATSB_INFO(rx_undersized_frames),
779 	GBE_STATSB_INFO(rx_fragments),
780 	GBE_STATSB_INFO(rx_bytes),
781 	GBE_STATSB_INFO(tx_good_frames),
782 	GBE_STATSB_INFO(tx_broadcast_frames),
783 	GBE_STATSB_INFO(tx_multicast_frames),
784 	GBE_STATSB_INFO(tx_pause_frames),
785 	GBE_STATSB_INFO(tx_deferred_frames),
786 	GBE_STATSB_INFO(tx_collision_frames),
787 	GBE_STATSB_INFO(tx_single_coll_frames),
788 	GBE_STATSB_INFO(tx_mult_coll_frames),
789 	GBE_STATSB_INFO(tx_excessive_collisions),
790 	GBE_STATSB_INFO(tx_late_collisions),
791 	GBE_STATSB_INFO(tx_underrun),
792 	GBE_STATSB_INFO(tx_carrier_sense_errors),
793 	GBE_STATSB_INFO(tx_bytes),
794 	GBE_STATSB_INFO(tx_64byte_frames),
795 	GBE_STATSB_INFO(tx_65_to_127byte_frames),
796 	GBE_STATSB_INFO(tx_128_to_255byte_frames),
797 	GBE_STATSB_INFO(tx_256_to_511byte_frames),
798 	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
799 	GBE_STATSB_INFO(tx_1024byte_frames),
800 	GBE_STATSB_INFO(net_bytes),
801 	GBE_STATSB_INFO(rx_sof_overruns),
802 	GBE_STATSB_INFO(rx_mof_overruns),
803 	GBE_STATSB_INFO(rx_dma_overruns),
804 	/* GBE module C */
805 	GBE_STATSC_INFO(rx_good_frames),
806 	GBE_STATSC_INFO(rx_broadcast_frames),
807 	GBE_STATSC_INFO(rx_multicast_frames),
808 	GBE_STATSC_INFO(rx_pause_frames),
809 	GBE_STATSC_INFO(rx_crc_errors),
810 	GBE_STATSC_INFO(rx_align_code_errors),
811 	GBE_STATSC_INFO(rx_oversized_frames),
812 	GBE_STATSC_INFO(rx_jabber_frames),
813 	GBE_STATSC_INFO(rx_undersized_frames),
814 	GBE_STATSC_INFO(rx_fragments),
815 	GBE_STATSC_INFO(rx_bytes),
816 	GBE_STATSC_INFO(tx_good_frames),
817 	GBE_STATSC_INFO(tx_broadcast_frames),
818 	GBE_STATSC_INFO(tx_multicast_frames),
819 	GBE_STATSC_INFO(tx_pause_frames),
820 	GBE_STATSC_INFO(tx_deferred_frames),
821 	GBE_STATSC_INFO(tx_collision_frames),
822 	GBE_STATSC_INFO(tx_single_coll_frames),
823 	GBE_STATSC_INFO(tx_mult_coll_frames),
824 	GBE_STATSC_INFO(tx_excessive_collisions),
825 	GBE_STATSC_INFO(tx_late_collisions),
826 	GBE_STATSC_INFO(tx_underrun),
827 	GBE_STATSC_INFO(tx_carrier_sense_errors),
828 	GBE_STATSC_INFO(tx_bytes),
829 	GBE_STATSC_INFO(tx_64byte_frames),
830 	GBE_STATSC_INFO(tx_65_to_127byte_frames),
831 	GBE_STATSC_INFO(tx_128_to_255byte_frames),
832 	GBE_STATSC_INFO(tx_256_to_511byte_frames),
833 	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
834 	GBE_STATSC_INFO(tx_1024byte_frames),
835 	GBE_STATSC_INFO(net_bytes),
836 	GBE_STATSC_INFO(rx_sof_overruns),
837 	GBE_STATSC_INFO(rx_mof_overruns),
838 	GBE_STATSC_INFO(rx_dma_overruns),
839 	/* GBE module D */
840 	GBE_STATSD_INFO(rx_good_frames),
841 	GBE_STATSD_INFO(rx_broadcast_frames),
842 	GBE_STATSD_INFO(rx_multicast_frames),
843 	GBE_STATSD_INFO(rx_pause_frames),
844 	GBE_STATSD_INFO(rx_crc_errors),
845 	GBE_STATSD_INFO(rx_align_code_errors),
846 	GBE_STATSD_INFO(rx_oversized_frames),
847 	GBE_STATSD_INFO(rx_jabber_frames),
848 	GBE_STATSD_INFO(rx_undersized_frames),
849 	GBE_STATSD_INFO(rx_fragments),
850 	GBE_STATSD_INFO(rx_bytes),
851 	GBE_STATSD_INFO(tx_good_frames),
852 	GBE_STATSD_INFO(tx_broadcast_frames),
853 	GBE_STATSD_INFO(tx_multicast_frames),
854 	GBE_STATSD_INFO(tx_pause_frames),
855 	GBE_STATSD_INFO(tx_deferred_frames),
856 	GBE_STATSD_INFO(tx_collision_frames),
857 	GBE_STATSD_INFO(tx_single_coll_frames),
858 	GBE_STATSD_INFO(tx_mult_coll_frames),
859 	GBE_STATSD_INFO(tx_excessive_collisions),
860 	GBE_STATSD_INFO(tx_late_collisions),
861 	GBE_STATSD_INFO(tx_underrun),
862 	GBE_STATSD_INFO(tx_carrier_sense_errors),
863 	GBE_STATSD_INFO(tx_bytes),
864 	GBE_STATSD_INFO(tx_64byte_frames),
865 	GBE_STATSD_INFO(tx_65_to_127byte_frames),
866 	GBE_STATSD_INFO(tx_128_to_255byte_frames),
867 	GBE_STATSD_INFO(tx_256_to_511byte_frames),
868 	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
869 	GBE_STATSD_INFO(tx_1024byte_frames),
870 	GBE_STATSD_INFO(net_bytes),
871 	GBE_STATSD_INFO(rx_sof_overruns),
872 	GBE_STATSD_INFO(rx_mof_overruns),
873 	GBE_STATSD_INFO(rx_dma_overruns),
874 };
875 
876 /* Number of entries in the GBENU_STATS_HOST group below */
877 #define GBENU_ET_STATS_HOST_SIZE	33
878 
879 #define GBENU_STATS_HOST(field)					\
880 {								\
881 	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
882 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
883 	offsetof(struct gbenu_hw_stats, field)			\
884 }
885 
886 /* Number of entries in each per-port GBENU_STATS_Pn group below */
887 #define GBENU_ET_STATS_PORT_SIZE	46
888 
889 #define GBENU_STATS_P1(field)					\
890 {								\
891 	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
892 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
893 	offsetof(struct gbenu_hw_stats, field)			\
894 }
895 
896 #define GBENU_STATS_P2(field)					\
897 {								\
898 	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
899 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
900 	offsetof(struct gbenu_hw_stats, field)			\
901 }
902 
903 #define GBENU_STATS_P3(field)					\
904 {								\
905 	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
906 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
907 	offsetof(struct gbenu_hw_stats, field)			\
908 }
909 
910 #define GBENU_STATS_P4(field)					\
911 {								\
912 	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
913 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
914 	offsetof(struct gbenu_hw_stats, field)			\
915 }
916 
917 #define GBENU_STATS_P5(field)					\
918 {								\
919 	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
920 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
921 	offsetof(struct gbenu_hw_stats, field)			\
922 }
923 
924 #define GBENU_STATS_P6(field)					\
925 {								\
926 	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
927 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
928 	offsetof(struct gbenu_hw_stats, field)			\
929 }
930 
931 #define GBENU_STATS_P7(field)					\
932 {								\
933 	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
934 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
935 	offsetof(struct gbenu_hw_stats, field)			\
936 }
937 
938 #define GBENU_STATS_P8(field)					\
939 {								\
940 	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
941 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
942 	offsetof(struct gbenu_hw_stats, field)			\
943 }
944 
945 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
946 	/* GBENU Host Module */
947 	GBENU_STATS_HOST(rx_good_frames),
948 	GBENU_STATS_HOST(rx_broadcast_frames),
949 	GBENU_STATS_HOST(rx_multicast_frames),
950 	GBENU_STATS_HOST(rx_crc_errors),
951 	GBENU_STATS_HOST(rx_oversized_frames),
952 	GBENU_STATS_HOST(rx_undersized_frames),
953 	GBENU_STATS_HOST(ale_drop),
954 	GBENU_STATS_HOST(ale_overrun_drop),
955 	GBENU_STATS_HOST(rx_bytes),
956 	GBENU_STATS_HOST(tx_good_frames),
957 	GBENU_STATS_HOST(tx_broadcast_frames),
958 	GBENU_STATS_HOST(tx_multicast_frames),
959 	GBENU_STATS_HOST(tx_bytes),
960 	GBENU_STATS_HOST(tx_64B_frames),
961 	GBENU_STATS_HOST(tx_65_to_127B_frames),
962 	GBENU_STATS_HOST(tx_128_to_255B_frames),
963 	GBENU_STATS_HOST(tx_256_to_511B_frames),
964 	GBENU_STATS_HOST(tx_512_to_1023B_frames),
965 	GBENU_STATS_HOST(tx_1024B_frames),
966 	GBENU_STATS_HOST(net_bytes),
967 	GBENU_STATS_HOST(rx_bottom_fifo_drop),
968 	GBENU_STATS_HOST(rx_port_mask_drop),
969 	GBENU_STATS_HOST(rx_top_fifo_drop),
970 	GBENU_STATS_HOST(ale_rate_limit_drop),
971 	GBENU_STATS_HOST(ale_vid_ingress_drop),
972 	GBENU_STATS_HOST(ale_da_eq_sa_drop),
973 	GBENU_STATS_HOST(ale_unknown_ucast),
974 	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
975 	GBENU_STATS_HOST(ale_unknown_mcast),
976 	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
977 	GBENU_STATS_HOST(ale_unknown_bcast),
978 	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
979 	GBENU_STATS_HOST(tx_mem_protect_err),
980 	/* GBENU Module 1 */
981 	GBENU_STATS_P1(rx_good_frames),
982 	GBENU_STATS_P1(rx_broadcast_frames),
983 	GBENU_STATS_P1(rx_multicast_frames),
984 	GBENU_STATS_P1(rx_pause_frames),
985 	GBENU_STATS_P1(rx_crc_errors),
986 	GBENU_STATS_P1(rx_align_code_errors),
987 	GBENU_STATS_P1(rx_oversized_frames),
988 	GBENU_STATS_P1(rx_jabber_frames),
989 	GBENU_STATS_P1(rx_undersized_frames),
990 	GBENU_STATS_P1(rx_fragments),
991 	GBENU_STATS_P1(ale_drop),
992 	GBENU_STATS_P1(ale_overrun_drop),
993 	GBENU_STATS_P1(rx_bytes),
994 	GBENU_STATS_P1(tx_good_frames),
995 	GBENU_STATS_P1(tx_broadcast_frames),
996 	GBENU_STATS_P1(tx_multicast_frames),
997 	GBENU_STATS_P1(tx_pause_frames),
998 	GBENU_STATS_P1(tx_deferred_frames),
999 	GBENU_STATS_P1(tx_collision_frames),
1000 	GBENU_STATS_P1(tx_single_coll_frames),
1001 	GBENU_STATS_P1(tx_mult_coll_frames),
1002 	GBENU_STATS_P1(tx_excessive_collisions),
1003 	GBENU_STATS_P1(tx_late_collisions),
1004 	GBENU_STATS_P1(rx_ipg_error),
1005 	GBENU_STATS_P1(tx_carrier_sense_errors),
1006 	GBENU_STATS_P1(tx_bytes),
1007 	GBENU_STATS_P1(tx_64B_frames),
1008 	GBENU_STATS_P1(tx_65_to_127B_frames),
1009 	GBENU_STATS_P1(tx_128_to_255B_frames),
1010 	GBENU_STATS_P1(tx_256_to_511B_frames),
1011 	GBENU_STATS_P1(tx_512_to_1023B_frames),
1012 	GBENU_STATS_P1(tx_1024B_frames),
1013 	GBENU_STATS_P1(net_bytes),
1014 	GBENU_STATS_P1(rx_bottom_fifo_drop),
1015 	GBENU_STATS_P1(rx_port_mask_drop),
1016 	GBENU_STATS_P1(rx_top_fifo_drop),
1017 	GBENU_STATS_P1(ale_rate_limit_drop),
1018 	GBENU_STATS_P1(ale_vid_ingress_drop),
1019 	GBENU_STATS_P1(ale_da_eq_sa_drop),
1020 	GBENU_STATS_P1(ale_unknown_ucast),
1021 	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1022 	GBENU_STATS_P1(ale_unknown_mcast),
1023 	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1024 	GBENU_STATS_P1(ale_unknown_bcast),
1025 	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1026 	GBENU_STATS_P1(tx_mem_protect_err),
1027 	/* GBENU Module 2 */
1028 	GBENU_STATS_P2(rx_good_frames),
1029 	GBENU_STATS_P2(rx_broadcast_frames),
1030 	GBENU_STATS_P2(rx_multicast_frames),
1031 	GBENU_STATS_P2(rx_pause_frames),
1032 	GBENU_STATS_P2(rx_crc_errors),
1033 	GBENU_STATS_P2(rx_align_code_errors),
1034 	GBENU_STATS_P2(rx_oversized_frames),
1035 	GBENU_STATS_P2(rx_jabber_frames),
1036 	GBENU_STATS_P2(rx_undersized_frames),
1037 	GBENU_STATS_P2(rx_fragments),
1038 	GBENU_STATS_P2(ale_drop),
1039 	GBENU_STATS_P2(ale_overrun_drop),
1040 	GBENU_STATS_P2(rx_bytes),
1041 	GBENU_STATS_P2(tx_good_frames),
1042 	GBENU_STATS_P2(tx_broadcast_frames),
1043 	GBENU_STATS_P2(tx_multicast_frames),
1044 	GBENU_STATS_P2(tx_pause_frames),
1045 	GBENU_STATS_P2(tx_deferred_frames),
1046 	GBENU_STATS_P2(tx_collision_frames),
1047 	GBENU_STATS_P2(tx_single_coll_frames),
1048 	GBENU_STATS_P2(tx_mult_coll_frames),
1049 	GBENU_STATS_P2(tx_excessive_collisions),
1050 	GBENU_STATS_P2(tx_late_collisions),
1051 	GBENU_STATS_P2(rx_ipg_error),
1052 	GBENU_STATS_P2(tx_carrier_sense_errors),
1053 	GBENU_STATS_P2(tx_bytes),
1054 	GBENU_STATS_P2(tx_64B_frames),
1055 	GBENU_STATS_P2(tx_65_to_127B_frames),
1056 	GBENU_STATS_P2(tx_128_to_255B_frames),
1057 	GBENU_STATS_P2(tx_256_to_511B_frames),
1058 	GBENU_STATS_P2(tx_512_to_1023B_frames),
1059 	GBENU_STATS_P2(tx_1024B_frames),
1060 	GBENU_STATS_P2(net_bytes),
1061 	GBENU_STATS_P2(rx_bottom_fifo_drop),
1062 	GBENU_STATS_P2(rx_port_mask_drop),
1063 	GBENU_STATS_P2(rx_top_fifo_drop),
1064 	GBENU_STATS_P2(ale_rate_limit_drop),
1065 	GBENU_STATS_P2(ale_vid_ingress_drop),
1066 	GBENU_STATS_P2(ale_da_eq_sa_drop),
1067 	GBENU_STATS_P2(ale_unknown_ucast),
1068 	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1069 	GBENU_STATS_P2(ale_unknown_mcast),
1070 	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1071 	GBENU_STATS_P2(ale_unknown_bcast),
1072 	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1073 	GBENU_STATS_P2(tx_mem_protect_err),
1074 	/* GBENU Module 3 */
1075 	GBENU_STATS_P3(rx_good_frames),
1076 	GBENU_STATS_P3(rx_broadcast_frames),
1077 	GBENU_STATS_P3(rx_multicast_frames),
1078 	GBENU_STATS_P3(rx_pause_frames),
1079 	GBENU_STATS_P3(rx_crc_errors),
1080 	GBENU_STATS_P3(rx_align_code_errors),
1081 	GBENU_STATS_P3(rx_oversized_frames),
1082 	GBENU_STATS_P3(rx_jabber_frames),
1083 	GBENU_STATS_P3(rx_undersized_frames),
1084 	GBENU_STATS_P3(rx_fragments),
1085 	GBENU_STATS_P3(ale_drop),
1086 	GBENU_STATS_P3(ale_overrun_drop),
1087 	GBENU_STATS_P3(rx_bytes),
1088 	GBENU_STATS_P3(tx_good_frames),
1089 	GBENU_STATS_P3(tx_broadcast_frames),
1090 	GBENU_STATS_P3(tx_multicast_frames),
1091 	GBENU_STATS_P3(tx_pause_frames),
1092 	GBENU_STATS_P3(tx_deferred_frames),
1093 	GBENU_STATS_P3(tx_collision_frames),
1094 	GBENU_STATS_P3(tx_single_coll_frames),
1095 	GBENU_STATS_P3(tx_mult_coll_frames),
1096 	GBENU_STATS_P3(tx_excessive_collisions),
1097 	GBENU_STATS_P3(tx_late_collisions),
1098 	GBENU_STATS_P3(rx_ipg_error),
1099 	GBENU_STATS_P3(tx_carrier_sense_errors),
1100 	GBENU_STATS_P3(tx_bytes),
1101 	GBENU_STATS_P3(tx_64B_frames),
1102 	GBENU_STATS_P3(tx_65_to_127B_frames),
1103 	GBENU_STATS_P3(tx_128_to_255B_frames),
1104 	GBENU_STATS_P3(tx_256_to_511B_frames),
1105 	GBENU_STATS_P3(tx_512_to_1023B_frames),
1106 	GBENU_STATS_P3(tx_1024B_frames),
1107 	GBENU_STATS_P3(net_bytes),
1108 	GBENU_STATS_P3(rx_bottom_fifo_drop),
1109 	GBENU_STATS_P3(rx_port_mask_drop),
1110 	GBENU_STATS_P3(rx_top_fifo_drop),
1111 	GBENU_STATS_P3(ale_rate_limit_drop),
1112 	GBENU_STATS_P3(ale_vid_ingress_drop),
1113 	GBENU_STATS_P3(ale_da_eq_sa_drop),
1114 	GBENU_STATS_P3(ale_unknown_ucast),
1115 	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1116 	GBENU_STATS_P3(ale_unknown_mcast),
1117 	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1118 	GBENU_STATS_P3(ale_unknown_bcast),
1119 	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1120 	GBENU_STATS_P3(tx_mem_protect_err),
1121 	/* GBENU Module 4 */
1122 	GBENU_STATS_P4(rx_good_frames),
1123 	GBENU_STATS_P4(rx_broadcast_frames),
1124 	GBENU_STATS_P4(rx_multicast_frames),
1125 	GBENU_STATS_P4(rx_pause_frames),
1126 	GBENU_STATS_P4(rx_crc_errors),
1127 	GBENU_STATS_P4(rx_align_code_errors),
1128 	GBENU_STATS_P4(rx_oversized_frames),
1129 	GBENU_STATS_P4(rx_jabber_frames),
1130 	GBENU_STATS_P4(rx_undersized_frames),
1131 	GBENU_STATS_P4(rx_fragments),
1132 	GBENU_STATS_P4(ale_drop),
1133 	GBENU_STATS_P4(ale_overrun_drop),
1134 	GBENU_STATS_P4(rx_bytes),
1135 	GBENU_STATS_P4(tx_good_frames),
1136 	GBENU_STATS_P4(tx_broadcast_frames),
1137 	GBENU_STATS_P4(tx_multicast_frames),
1138 	GBENU_STATS_P4(tx_pause_frames),
1139 	GBENU_STATS_P4(tx_deferred_frames),
1140 	GBENU_STATS_P4(tx_collision_frames),
1141 	GBENU_STATS_P4(tx_single_coll_frames),
1142 	GBENU_STATS_P4(tx_mult_coll_frames),
1143 	GBENU_STATS_P4(tx_excessive_collisions),
1144 	GBENU_STATS_P4(tx_late_collisions),
1145 	GBENU_STATS_P4(rx_ipg_error),
1146 	GBENU_STATS_P4(tx_carrier_sense_errors),
1147 	GBENU_STATS_P4(tx_bytes),
1148 	GBENU_STATS_P4(tx_64B_frames),
1149 	GBENU_STATS_P4(tx_65_to_127B_frames),
1150 	GBENU_STATS_P4(tx_128_to_255B_frames),
1151 	GBENU_STATS_P4(tx_256_to_511B_frames),
1152 	GBENU_STATS_P4(tx_512_to_1023B_frames),
1153 	GBENU_STATS_P4(tx_1024B_frames),
1154 	GBENU_STATS_P4(net_bytes),
1155 	GBENU_STATS_P4(rx_bottom_fifo_drop),
1156 	GBENU_STATS_P4(rx_port_mask_drop),
1157 	GBENU_STATS_P4(rx_top_fifo_drop),
1158 	GBENU_STATS_P4(ale_rate_limit_drop),
1159 	GBENU_STATS_P4(ale_vid_ingress_drop),
1160 	GBENU_STATS_P4(ale_da_eq_sa_drop),
1161 	GBENU_STATS_P4(ale_unknown_ucast),
1162 	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1163 	GBENU_STATS_P4(ale_unknown_mcast),
1164 	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1165 	GBENU_STATS_P4(ale_unknown_bcast),
1166 	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1167 	GBENU_STATS_P4(tx_mem_protect_err),
1168 	/* GBENU Module 5 */
1169 	GBENU_STATS_P5(rx_good_frames),
1170 	GBENU_STATS_P5(rx_broadcast_frames),
1171 	GBENU_STATS_P5(rx_multicast_frames),
1172 	GBENU_STATS_P5(rx_pause_frames),
1173 	GBENU_STATS_P5(rx_crc_errors),
1174 	GBENU_STATS_P5(rx_align_code_errors),
1175 	GBENU_STATS_P5(rx_oversized_frames),
1176 	GBENU_STATS_P5(rx_jabber_frames),
1177 	GBENU_STATS_P5(rx_undersized_frames),
1178 	GBENU_STATS_P5(rx_fragments),
1179 	GBENU_STATS_P5(ale_drop),
1180 	GBENU_STATS_P5(ale_overrun_drop),
1181 	GBENU_STATS_P5(rx_bytes),
1182 	GBENU_STATS_P5(tx_good_frames),
1183 	GBENU_STATS_P5(tx_broadcast_frames),
1184 	GBENU_STATS_P5(tx_multicast_frames),
1185 	GBENU_STATS_P5(tx_pause_frames),
1186 	GBENU_STATS_P5(tx_deferred_frames),
1187 	GBENU_STATS_P5(tx_collision_frames),
1188 	GBENU_STATS_P5(tx_single_coll_frames),
1189 	GBENU_STATS_P5(tx_mult_coll_frames),
1190 	GBENU_STATS_P5(tx_excessive_collisions),
1191 	GBENU_STATS_P5(tx_late_collisions),
1192 	GBENU_STATS_P5(rx_ipg_error),
1193 	GBENU_STATS_P5(tx_carrier_sense_errors),
1194 	GBENU_STATS_P5(tx_bytes),
1195 	GBENU_STATS_P5(tx_64B_frames),
1196 	GBENU_STATS_P5(tx_65_to_127B_frames),
1197 	GBENU_STATS_P5(tx_128_to_255B_frames),
1198 	GBENU_STATS_P5(tx_256_to_511B_frames),
1199 	GBENU_STATS_P5(tx_512_to_1023B_frames),
1200 	GBENU_STATS_P5(tx_1024B_frames),
1201 	GBENU_STATS_P5(net_bytes),
1202 	GBENU_STATS_P5(rx_bottom_fifo_drop),
1203 	GBENU_STATS_P5(rx_port_mask_drop),
1204 	GBENU_STATS_P5(rx_top_fifo_drop),
1205 	GBENU_STATS_P5(ale_rate_limit_drop),
1206 	GBENU_STATS_P5(ale_vid_ingress_drop),
1207 	GBENU_STATS_P5(ale_da_eq_sa_drop),
1208 	GBENU_STATS_P5(ale_unknown_ucast),
1209 	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1210 	GBENU_STATS_P5(ale_unknown_mcast),
1211 	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1212 	GBENU_STATS_P5(ale_unknown_bcast),
1213 	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1214 	GBENU_STATS_P5(tx_mem_protect_err),
1215 	/* GBENU Module 6 */
1216 	GBENU_STATS_P6(rx_good_frames),
1217 	GBENU_STATS_P6(rx_broadcast_frames),
1218 	GBENU_STATS_P6(rx_multicast_frames),
1219 	GBENU_STATS_P6(rx_pause_frames),
1220 	GBENU_STATS_P6(rx_crc_errors),
1221 	GBENU_STATS_P6(rx_align_code_errors),
1222 	GBENU_STATS_P6(rx_oversized_frames),
1223 	GBENU_STATS_P6(rx_jabber_frames),
1224 	GBENU_STATS_P6(rx_undersized_frames),
1225 	GBENU_STATS_P6(rx_fragments),
1226 	GBENU_STATS_P6(ale_drop),
1227 	GBENU_STATS_P6(ale_overrun_drop),
1228 	GBENU_STATS_P6(rx_bytes),
1229 	GBENU_STATS_P6(tx_good_frames),
1230 	GBENU_STATS_P6(tx_broadcast_frames),
1231 	GBENU_STATS_P6(tx_multicast_frames),
1232 	GBENU_STATS_P6(tx_pause_frames),
1233 	GBENU_STATS_P6(tx_deferred_frames),
1234 	GBENU_STATS_P6(tx_collision_frames),
1235 	GBENU_STATS_P6(tx_single_coll_frames),
1236 	GBENU_STATS_P6(tx_mult_coll_frames),
1237 	GBENU_STATS_P6(tx_excessive_collisions),
1238 	GBENU_STATS_P6(tx_late_collisions),
1239 	GBENU_STATS_P6(rx_ipg_error),
1240 	GBENU_STATS_P6(tx_carrier_sense_errors),
1241 	GBENU_STATS_P6(tx_bytes),
1242 	GBENU_STATS_P6(tx_64B_frames),
1243 	GBENU_STATS_P6(tx_65_to_127B_frames),
1244 	GBENU_STATS_P6(tx_128_to_255B_frames),
1245 	GBENU_STATS_P6(tx_256_to_511B_frames),
1246 	GBENU_STATS_P6(tx_512_to_1023B_frames),
1247 	GBENU_STATS_P6(tx_1024B_frames),
1248 	GBENU_STATS_P6(net_bytes),
1249 	GBENU_STATS_P6(rx_bottom_fifo_drop),
1250 	GBENU_STATS_P6(rx_port_mask_drop),
1251 	GBENU_STATS_P6(rx_top_fifo_drop),
1252 	GBENU_STATS_P6(ale_rate_limit_drop),
1253 	GBENU_STATS_P6(ale_vid_ingress_drop),
1254 	GBENU_STATS_P6(ale_da_eq_sa_drop),
1255 	GBENU_STATS_P6(ale_unknown_ucast),
1256 	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1257 	GBENU_STATS_P6(ale_unknown_mcast),
1258 	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1259 	GBENU_STATS_P6(ale_unknown_bcast),
1260 	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1261 	GBENU_STATS_P6(tx_mem_protect_err),
1262 	/* GBENU Module 7 */
1263 	GBENU_STATS_P7(rx_good_frames),
1264 	GBENU_STATS_P7(rx_broadcast_frames),
1265 	GBENU_STATS_P7(rx_multicast_frames),
1266 	GBENU_STATS_P7(rx_pause_frames),
1267 	GBENU_STATS_P7(rx_crc_errors),
1268 	GBENU_STATS_P7(rx_align_code_errors),
1269 	GBENU_STATS_P7(rx_oversized_frames),
1270 	GBENU_STATS_P7(rx_jabber_frames),
1271 	GBENU_STATS_P7(rx_undersized_frames),
1272 	GBENU_STATS_P7(rx_fragments),
1273 	GBENU_STATS_P7(ale_drop),
1274 	GBENU_STATS_P7(ale_overrun_drop),
1275 	GBENU_STATS_P7(rx_bytes),
1276 	GBENU_STATS_P7(tx_good_frames),
1277 	GBENU_STATS_P7(tx_broadcast_frames),
1278 	GBENU_STATS_P7(tx_multicast_frames),
1279 	GBENU_STATS_P7(tx_pause_frames),
1280 	GBENU_STATS_P7(tx_deferred_frames),
1281 	GBENU_STATS_P7(tx_collision_frames),
1282 	GBENU_STATS_P7(tx_single_coll_frames),
1283 	GBENU_STATS_P7(tx_mult_coll_frames),
1284 	GBENU_STATS_P7(tx_excessive_collisions),
1285 	GBENU_STATS_P7(tx_late_collisions),
1286 	GBENU_STATS_P7(rx_ipg_error),
1287 	GBENU_STATS_P7(tx_carrier_sense_errors),
1288 	GBENU_STATS_P7(tx_bytes),
1289 	GBENU_STATS_P7(tx_64B_frames),
1290 	GBENU_STATS_P7(tx_65_to_127B_frames),
1291 	GBENU_STATS_P7(tx_128_to_255B_frames),
1292 	GBENU_STATS_P7(tx_256_to_511B_frames),
1293 	GBENU_STATS_P7(tx_512_to_1023B_frames),
1294 	GBENU_STATS_P7(tx_1024B_frames),
1295 	GBENU_STATS_P7(net_bytes),
1296 	GBENU_STATS_P7(rx_bottom_fifo_drop),
1297 	GBENU_STATS_P7(rx_port_mask_drop),
1298 	GBENU_STATS_P7(rx_top_fifo_drop),
1299 	GBENU_STATS_P7(ale_rate_limit_drop),
1300 	GBENU_STATS_P7(ale_vid_ingress_drop),
1301 	GBENU_STATS_P7(ale_da_eq_sa_drop),
1302 	GBENU_STATS_P7(ale_unknown_ucast),
1303 	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1304 	GBENU_STATS_P7(ale_unknown_mcast),
1305 	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1306 	GBENU_STATS_P7(ale_unknown_bcast),
1307 	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1308 	GBENU_STATS_P7(tx_mem_protect_err),
1309 	/* GBENU Module 8 */
1310 	GBENU_STATS_P8(rx_good_frames),
1311 	GBENU_STATS_P8(rx_broadcast_frames),
1312 	GBENU_STATS_P8(rx_multicast_frames),
1313 	GBENU_STATS_P8(rx_pause_frames),
1314 	GBENU_STATS_P8(rx_crc_errors),
1315 	GBENU_STATS_P8(rx_align_code_errors),
1316 	GBENU_STATS_P8(rx_oversized_frames),
1317 	GBENU_STATS_P8(rx_jabber_frames),
1318 	GBENU_STATS_P8(rx_undersized_frames),
1319 	GBENU_STATS_P8(rx_fragments),
1320 	GBENU_STATS_P8(ale_drop),
1321 	GBENU_STATS_P8(ale_overrun_drop),
1322 	GBENU_STATS_P8(rx_bytes),
1323 	GBENU_STATS_P8(tx_good_frames),
1324 	GBENU_STATS_P8(tx_broadcast_frames),
1325 	GBENU_STATS_P8(tx_multicast_frames),
1326 	GBENU_STATS_P8(tx_pause_frames),
1327 	GBENU_STATS_P8(tx_deferred_frames),
1328 	GBENU_STATS_P8(tx_collision_frames),
1329 	GBENU_STATS_P8(tx_single_coll_frames),
1330 	GBENU_STATS_P8(tx_mult_coll_frames),
1331 	GBENU_STATS_P8(tx_excessive_collisions),
1332 	GBENU_STATS_P8(tx_late_collisions),
1333 	GBENU_STATS_P8(rx_ipg_error),
1334 	GBENU_STATS_P8(tx_carrier_sense_errors),
1335 	GBENU_STATS_P8(tx_bytes),
1336 	GBENU_STATS_P8(tx_64B_frames),
1337 	GBENU_STATS_P8(tx_65_to_127B_frames),
1338 	GBENU_STATS_P8(tx_128_to_255B_frames),
1339 	GBENU_STATS_P8(tx_256_to_511B_frames),
1340 	GBENU_STATS_P8(tx_512_to_1023B_frames),
1341 	GBENU_STATS_P8(tx_1024B_frames),
1342 	GBENU_STATS_P8(net_bytes),
1343 	GBENU_STATS_P8(rx_bottom_fifo_drop),
1344 	GBENU_STATS_P8(rx_port_mask_drop),
1345 	GBENU_STATS_P8(rx_top_fifo_drop),
1346 	GBENU_STATS_P8(ale_rate_limit_drop),
1347 	GBENU_STATS_P8(ale_vid_ingress_drop),
1348 	GBENU_STATS_P8(ale_da_eq_sa_drop),
1349 	GBENU_STATS_P8(ale_unknown_ucast),
1350 	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1351 	GBENU_STATS_P8(ale_unknown_mcast),
1352 	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1353 	GBENU_STATS_P8(ale_unknown_bcast),
1354 	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1355 	GBENU_STATS_P8(tx_mem_protect_err),
1356 };
1357 
1358 #define XGBE_STATS0_INFO(field)				\
1359 {							\
1360 	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1361 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1362 	offsetof(struct xgbe_hw_stats, field)		\
1363 }
1364 
1365 #define XGBE_STATS1_INFO(field)				\
1366 {							\
1367 	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1368 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1369 	offsetof(struct xgbe_hw_stats, field)		\
1370 }
1371 
1372 #define XGBE_STATS2_INFO(field)				\
1373 {							\
1374 	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1375 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1376 	offsetof(struct xgbe_hw_stats, field)		\
1377 }
1378 
1379 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1380 	/* XGBE module 0 */
1381 	XGBE_STATS0_INFO(rx_good_frames),
1382 	XGBE_STATS0_INFO(rx_broadcast_frames),
1383 	XGBE_STATS0_INFO(rx_multicast_frames),
1384 	XGBE_STATS0_INFO(rx_oversized_frames),
1385 	XGBE_STATS0_INFO(rx_undersized_frames),
1386 	XGBE_STATS0_INFO(overrun_type4),
1387 	XGBE_STATS0_INFO(overrun_type5),
1388 	XGBE_STATS0_INFO(rx_bytes),
1389 	XGBE_STATS0_INFO(tx_good_frames),
1390 	XGBE_STATS0_INFO(tx_broadcast_frames),
1391 	XGBE_STATS0_INFO(tx_multicast_frames),
1392 	XGBE_STATS0_INFO(tx_bytes),
1393 	XGBE_STATS0_INFO(tx_64byte_frames),
1394 	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1395 	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1396 	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1397 	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1398 	XGBE_STATS0_INFO(tx_1024byte_frames),
1399 	XGBE_STATS0_INFO(net_bytes),
1400 	XGBE_STATS0_INFO(rx_sof_overruns),
1401 	XGBE_STATS0_INFO(rx_mof_overruns),
1402 	XGBE_STATS0_INFO(rx_dma_overruns),
1403 	/* XGBE module 1 */
1404 	XGBE_STATS1_INFO(rx_good_frames),
1405 	XGBE_STATS1_INFO(rx_broadcast_frames),
1406 	XGBE_STATS1_INFO(rx_multicast_frames),
1407 	XGBE_STATS1_INFO(rx_pause_frames),
1408 	XGBE_STATS1_INFO(rx_crc_errors),
1409 	XGBE_STATS1_INFO(rx_align_code_errors),
1410 	XGBE_STATS1_INFO(rx_oversized_frames),
1411 	XGBE_STATS1_INFO(rx_jabber_frames),
1412 	XGBE_STATS1_INFO(rx_undersized_frames),
1413 	XGBE_STATS1_INFO(rx_fragments),
1414 	XGBE_STATS1_INFO(overrun_type4),
1415 	XGBE_STATS1_INFO(overrun_type5),
1416 	XGBE_STATS1_INFO(rx_bytes),
1417 	XGBE_STATS1_INFO(tx_good_frames),
1418 	XGBE_STATS1_INFO(tx_broadcast_frames),
1419 	XGBE_STATS1_INFO(tx_multicast_frames),
1420 	XGBE_STATS1_INFO(tx_pause_frames),
1421 	XGBE_STATS1_INFO(tx_deferred_frames),
1422 	XGBE_STATS1_INFO(tx_collision_frames),
1423 	XGBE_STATS1_INFO(tx_single_coll_frames),
1424 	XGBE_STATS1_INFO(tx_mult_coll_frames),
1425 	XGBE_STATS1_INFO(tx_excessive_collisions),
1426 	XGBE_STATS1_INFO(tx_late_collisions),
1427 	XGBE_STATS1_INFO(tx_underrun),
1428 	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1429 	XGBE_STATS1_INFO(tx_bytes),
1430 	XGBE_STATS1_INFO(tx_64byte_frames),
1431 	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1432 	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1433 	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1434 	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1435 	XGBE_STATS1_INFO(tx_1024byte_frames),
1436 	XGBE_STATS1_INFO(net_bytes),
1437 	XGBE_STATS1_INFO(rx_sof_overruns),
1438 	XGBE_STATS1_INFO(rx_mof_overruns),
1439 	XGBE_STATS1_INFO(rx_dma_overruns),
1440 	/* XGBE module 2 */
1441 	XGBE_STATS2_INFO(rx_good_frames),
1442 	XGBE_STATS2_INFO(rx_broadcast_frames),
1443 	XGBE_STATS2_INFO(rx_multicast_frames),
1444 	XGBE_STATS2_INFO(rx_pause_frames),
1445 	XGBE_STATS2_INFO(rx_crc_errors),
1446 	XGBE_STATS2_INFO(rx_align_code_errors),
1447 	XGBE_STATS2_INFO(rx_oversized_frames),
1448 	XGBE_STATS2_INFO(rx_jabber_frames),
1449 	XGBE_STATS2_INFO(rx_undersized_frames),
1450 	XGBE_STATS2_INFO(rx_fragments),
1451 	XGBE_STATS2_INFO(overrun_type4),
1452 	XGBE_STATS2_INFO(overrun_type5),
1453 	XGBE_STATS2_INFO(rx_bytes),
1454 	XGBE_STATS2_INFO(tx_good_frames),
1455 	XGBE_STATS2_INFO(tx_broadcast_frames),
1456 	XGBE_STATS2_INFO(tx_multicast_frames),
1457 	XGBE_STATS2_INFO(tx_pause_frames),
1458 	XGBE_STATS2_INFO(tx_deferred_frames),
1459 	XGBE_STATS2_INFO(tx_collision_frames),
1460 	XGBE_STATS2_INFO(tx_single_coll_frames),
1461 	XGBE_STATS2_INFO(tx_mult_coll_frames),
1462 	XGBE_STATS2_INFO(tx_excessive_collisions),
1463 	XGBE_STATS2_INFO(tx_late_collisions),
1464 	XGBE_STATS2_INFO(tx_underrun),
1465 	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1466 	XGBE_STATS2_INFO(tx_bytes),
1467 	XGBE_STATS2_INFO(tx_64byte_frames),
1468 	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1469 	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1470 	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1471 	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1472 	XGBE_STATS2_INFO(tx_1024byte_frames),
1473 	XGBE_STATS2_INFO(net_bytes),
1474 	XGBE_STATS2_INFO(rx_sof_overruns),
1475 	XGBE_STATS2_INFO(rx_mof_overruns),
1476 	XGBE_STATS2_INFO(rx_dma_overruns),
1477 };
1478 
1479 #define for_each_intf(i, priv) \
1480 	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1481 
1482 #define for_each_sec_slave(slave, priv) \
1483 	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1484 
1485 #define first_sec_slave(priv)					\
1486 	list_first_entry(&priv->secondary_slaves, \
1487 			struct gbe_slave, slave_list)
1488 
1489 static void keystone_get_drvinfo(struct net_device *ndev,
1490 				 struct ethtool_drvinfo *info)
1491 {
1492 	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1493 	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1494 }
1495 
1496 static u32 keystone_get_msglevel(struct net_device *ndev)
1497 {
1498 	struct netcp_intf *netcp = netdev_priv(ndev);
1499 
1500 	return netcp->msg_enable;
1501 }
1502 
1503 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1504 {
1505 	struct netcp_intf *netcp = netdev_priv(ndev);
1506 
1507 	netcp->msg_enable = value;
1508 }
1509 
1510 static void keystone_get_stat_strings(struct net_device *ndev,
1511 				      uint32_t stringset, uint8_t *data)
1512 {
1513 	struct netcp_intf *netcp = netdev_priv(ndev);
1514 	struct gbe_intf *gbe_intf;
1515 	struct gbe_priv *gbe_dev;
1516 	int i;
1517 
1518 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1519 	if (!gbe_intf)
1520 		return;
1521 	gbe_dev = gbe_intf->gbe_dev;
1522 
1523 	switch (stringset) {
1524 	case ETH_SS_STATS:
1525 		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1526 			memcpy(data, gbe_dev->et_stats[i].desc,
1527 			       ETH_GSTRING_LEN);
1528 			data += ETH_GSTRING_LEN;
1529 		}
1530 		break;
1531 	case ETH_SS_TEST:
1532 		break;
1533 	}
1534 }
1535 
1536 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1537 {
1538 	struct netcp_intf *netcp = netdev_priv(ndev);
1539 	struct gbe_intf *gbe_intf;
1540 	struct gbe_priv *gbe_dev;
1541 
1542 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1543 	if (!gbe_intf)
1544 		return -EINVAL;
1545 	gbe_dev = gbe_intf->gbe_dev;
1546 
1547 	switch (stringset) {
1548 	case ETH_SS_TEST:
1549 		return 0;
1550 	case ETH_SS_STATS:
1551 		return gbe_dev->num_et_stats;
1552 	default:
1553 		return -EINVAL;
1554 	}
1555 }
1556 
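/* Accumulate the hardware statistics counters into the 64-bit software
 * counters in gbe_dev->hw_stats and, when data is non-NULL, copy the
 * accumulated values out for ethtool.  Each counter is written back with
 * the value just read so the write-to-decrement hardware clears exactly
 * the amount that has been consumed.
 */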
1557 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1558 {
1559 	void __iomem *base = NULL;
1560 	u32  __iomem *p;
1561 	u32 tmp = 0;
1562 	int i;
1563 
1564 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1565 		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
1566 		p = base + gbe_dev->et_stats[i].offset;
1567 		tmp = readl(p);
1568 		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
1569 		if (data)
1570 			data[i] = gbe_dev->hw_stats[i];
1571 		/* write-to-decrement:
1572 		 * new register value = old register value - write value
1573 		 */
1574 		writel(tmp, p);
1575 	}
1576 }
1577 
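/* On subsystem version 1.4 only two statistics windows are visible at a
 * time: the GBE_STATS_CD_SEL bit in stat_port_en selects whether modules
 * A/B or C/D are currently mapped, so the counters are collected in two
 * passes of half the entries each.
 */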
1578 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1579 {
1580 	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
1581 	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
1582 	u64 *hw_stats = &gbe_dev->hw_stats[0];
1583 	void __iomem *base = NULL;
1584 	u32  __iomem *p;
1585 	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
1586 	int i, j, pair;
1587 
1588 	for (pair = 0; pair < 2; pair++) {
1589 		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1590 
1591 		if (pair == 0)
1592 			val &= ~GBE_STATS_CD_SEL;
1593 		else
1594 			val |= GBE_STATS_CD_SEL;
1595 
1596 		/* make the stat modules visible */
1597 		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1598 
1599 		for (i = 0; i < pair_size; i++) {
1600 			j = pair * pair_size + i;
1601 			switch (gbe_dev->et_stats[j].type) {
1602 			case GBE_STATSA_MODULE:
1603 			case GBE_STATSC_MODULE:
1604 				base = gbe_statsa;
1605 				break;
1606 			case GBE_STATSB_MODULE:
1607 			case GBE_STATSD_MODULE:
1608 				base = gbe_statsb;
1609 				break;
1610 			}
1611 
1612 			p = base + gbe_dev->et_stats[j].offset;
1613 			tmp = readl(p);
1614 			hw_stats[j] += tmp;
1615 			if (data)
1616 				data[j] = hw_stats[j];
1617 			/* write-to-decrement:
1618 			 * new register value = old register value - write value
1619 			 */
1620 			writel(tmp, p);
1621 		}
1622 	}
1623 }
1624 
1625 static void keystone_get_ethtool_stats(struct net_device *ndev,
1626 				       struct ethtool_stats *stats,
1627 				       uint64_t *data)
1628 {
1629 	struct netcp_intf *netcp = netdev_priv(ndev);
1630 	struct gbe_intf *gbe_intf;
1631 	struct gbe_priv *gbe_dev;
1632 
1633 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1634 	if (!gbe_intf)
1635 		return;
1636 
1637 	gbe_dev = gbe_intf->gbe_dev;
1638 	spin_lock_bh(&gbe_dev->hw_stats_lock);
1639 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1640 		gbe_update_stats_ver14(gbe_dev, data);
1641 	else
1642 		gbe_update_stats(gbe_dev, data);
1643 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1644 }
1645 
1646 static int keystone_get_settings(struct net_device *ndev,
1647 				 struct ethtool_cmd *cmd)
1648 {
1649 	struct netcp_intf *netcp = netdev_priv(ndev);
1650 	struct phy_device *phy = ndev->phydev;
1651 	struct gbe_intf *gbe_intf;
1652 	int ret;
1653 
1654 	if (!phy)
1655 		return -EINVAL;
1656 
1657 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1658 	if (!gbe_intf)
1659 		return -EINVAL;
1660 
1661 	if (!gbe_intf->slave)
1662 		return -EINVAL;
1663 
1664 	ret = phy_ethtool_gset(phy, cmd);
1665 	if (!ret)
1666 		cmd->port = gbe_intf->slave->phy_port_t;
1667 
1668 	return ret;
1669 }
1670 
1671 static int keystone_set_settings(struct net_device *ndev,
1672 				 struct ethtool_cmd *cmd)
1673 {
1674 	struct netcp_intf *netcp = netdev_priv(ndev);
1675 	struct phy_device *phy = ndev->phydev;
1676 	struct gbe_intf *gbe_intf;
1677 	u32 features = cmd->advertising & cmd->supported;
1678 
1679 	if (!phy)
1680 		return -EINVAL;
1681 
1682 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1683 	if (!gbe_intf)
1684 		return -EINVAL;
1685 
1686 	if (!gbe_intf->slave)
1687 		return -EINVAL;
1688 
1689 	if (cmd->port != gbe_intf->slave->phy_port_t) {
1690 		if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
1691 			return -EINVAL;
1692 
1693 		if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
1694 			return -EINVAL;
1695 
1696 		if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
1697 			return -EINVAL;
1698 
1699 		if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
1700 			return -EINVAL;
1701 
1702 		if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1703 			return -EINVAL;
1704 	}
1705 
1706 	gbe_intf->slave->phy_port_t = cmd->port;
1707 	return phy_ethtool_sset(phy, cmd);
1708 }
1709 
1710 static const struct ethtool_ops keystone_ethtool_ops = {
1711 	.get_drvinfo		= keystone_get_drvinfo,
1712 	.get_link		= ethtool_op_get_link,
1713 	.get_msglevel		= keystone_get_msglevel,
1714 	.set_msglevel		= keystone_set_msglevel,
1715 	.get_strings		= keystone_get_stat_strings,
1716 	.get_sset_count		= keystone_get_sset_count,
1717 	.get_ethtool_stats	= keystone_get_ethtool_stats,
1718 	.get_settings		= keystone_get_settings,
1719 	.set_settings		= keystone_set_settings,
1720 };
1721 
1722 #define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
1723 			 ((mac)[2] << 16) | ((mac)[3] << 24))
1724 #define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
1725 
1726 static void gbe_set_slave_mac(struct gbe_slave *slave,
1727 			      struct gbe_intf *gbe_intf)
1728 {
1729 	struct net_device *ndev = gbe_intf->ndev;
1730 
1731 	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1732 	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1733 }
1734 
1735 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1736 {
1737 	if (priv->host_port == 0)
1738 		return slave_num + 1;
1739 
1740 	return slave_num;
1741 }
1742 
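/* Apply a link state change to a slave port: on link up, program mac_control
 * for the PHY speed and set the ALE port to forwarding; on link down, clear
 * mac_control and disable the port.  The carrier is toggled here only for
 * non MAC-PHY link interfaces.
 */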
1743 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1744 					  struct net_device *ndev,
1745 					  struct gbe_slave *slave,
1746 					  int up)
1747 {
1748 	struct phy_device *phy = slave->phy;
1749 	u32 mac_control = 0;
1750 
1751 	if (up) {
1752 		mac_control = slave->mac_control;
1753 		if (phy && (phy->speed == SPEED_1000)) {
1754 			mac_control |= MACSL_GIG_MODE;
1755 			mac_control &= ~MACSL_XGIG_MODE;
1756 		} else if (phy && (phy->speed == SPEED_10000)) {
1757 			mac_control |= MACSL_XGIG_MODE;
1758 			mac_control &= ~MACSL_GIG_MODE;
1759 		}
1760 
1761 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1762 						 mac_control));
1763 
1764 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1765 				     ALE_PORT_STATE,
1766 				     ALE_PORT_STATE_FORWARD);
1767 
1768 		if (ndev && slave->open &&
1769 		    slave->link_interface != SGMII_LINK_MAC_PHY &&
1770 		    slave->link_interface != XGMII_LINK_MAC_PHY)
1771 			netif_carrier_on(ndev);
1772 	} else {
1773 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1774 						 mac_control));
1775 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1776 				     ALE_PORT_STATE,
1777 				     ALE_PORT_STATE_DISABLE);
1778 		if (ndev &&
1779 		    slave->link_interface != SGMII_LINK_MAC_PHY &&
1780 		    slave->link_interface != XGMII_LINK_MAC_PHY)
1781 			netif_carrier_off(ndev);
1782 	}
1783 
1784 	if (phy)
1785 		phy_print_status(phy);
1786 }
1787 
1788 static bool gbe_phy_link_status(struct gbe_slave *slave)
1789 {
1790 	return !slave->phy || slave->phy->link;
1791 }
1792 
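/* Combine the PHY and SGMII link status of an open slave and, if the
 * resulting state changed, apply it via netcp_ethss_link_state_action().
 */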
1793 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1794 					  struct gbe_slave *slave,
1795 					  struct net_device *ndev)
1796 {
1797 	int sp = slave->slave_num;
1798 	int phy_link_state, sgmii_link_state = 1, link_state;
1799 
1800 	if (!slave->open)
1801 		return;
1802 
1803 	if (!SLAVE_LINK_IS_XGMII(slave)) {
1804 		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1805 			sgmii_link_state =
1806 				netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
1807 		else
1808 			sgmii_link_state =
1809 				netcp_sgmii_get_port_link(
1810 						gbe_dev->sgmii_port_regs, sp);
1811 	}
1812 
1813 	phy_link_state = gbe_phy_link_status(slave);
1814 	link_state = phy_link_state & sgmii_link_state;
1815 
1816 	if (atomic_xchg(&slave->link_state, link_state) != link_state)
1817 		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
1818 					      link_state);
1819 }
1820 
1821 static void xgbe_adjust_link(struct net_device *ndev)
1822 {
1823 	struct netcp_intf *netcp = netdev_priv(ndev);
1824 	struct gbe_intf *gbe_intf;
1825 
1826 	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1827 	if (!gbe_intf)
1828 		return;
1829 
1830 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1831 				      ndev);
1832 }
1833 
1834 static void gbe_adjust_link(struct net_device *ndev)
1835 {
1836 	struct netcp_intf *netcp = netdev_priv(ndev);
1837 	struct gbe_intf *gbe_intf;
1838 
1839 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1840 	if (!gbe_intf)
1841 		return;
1842 
1843 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1844 				      ndev);
1845 }
1846 
1847 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
1848 {
1849 	struct gbe_priv *gbe_dev = netdev_priv(ndev);
1850 	struct gbe_slave *slave;
1851 
1852 	for_each_sec_slave(slave, gbe_dev)
1853 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1854 }
1855 
1856 /* Reset EMAC
1857  * Soft reset is set and polled until clear, or until a timeout occurs
1858  */
1859 static int gbe_port_reset(struct gbe_slave *slave)
1860 {
1861 	u32 i, v;
1862 
1863 	/* Set the soft reset bit */
1864 	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
1865 
1866 	/* Wait for the bit to clear */
1867 	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
1868 		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
1869 		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
1870 			return 0;
1871 	}
1872 
1873 	/* Timeout on the reset */
1874 	return GMACSL_RET_WARN_RESET_INCOMPLETE;
1875 }
1876 
1877 /* Configure EMAC */
1878 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1879 			    int max_rx_len)
1880 {
1881 	void __iomem *rx_maxlen_reg;
1882 	u32 xgmii_mode;
1883 
1884 	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
1885 		max_rx_len = NETCP_MAX_FRAME_SIZE;
1886 
1887 	/* Enable correct MII mode at SS level */
1888 	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
1889 	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
1890 		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
1891 		xgmii_mode |= (1 << slave->slave_num);
1892 		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
1893 	}
1894 
1895 	if (IS_SS_ID_MU(gbe_dev))
1896 		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
1897 	else
1898 		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
1899 
1900 	writel(max_rx_len, rx_maxlen_reg);
1901 	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1902 }
1903 
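/* Assert or de-assert the SGMII soft reset for a slave port.  On GBE 1.4,
 * slaves 2 and 3 use the separate port 3/4 SGMII register block.
 */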
1904 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
1905 			      struct gbe_slave *slave, bool set)
1906 {
1907 	void __iomem *sgmii_port_regs;
1908 
1909 	if (SLAVE_LINK_IS_XGMII(slave))
1910 		return;
1911 
1912 	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1913 		sgmii_port_regs = priv->sgmii_port34_regs;
1914 	else
1915 		sgmii_port_regs = priv->sgmii_port_regs;
1916 
1917 	netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
1918 }
1919 
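/* Quiesce a slave port: hold its SGMII in reset, soft-reset the EMAC, stop
 * ALE forwarding, drop the broadcast entry and disconnect any attached PHY.
 */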
1920 static void gbe_slave_stop(struct gbe_intf *intf)
1921 {
1922 	struct gbe_priv *gbe_dev = intf->gbe_dev;
1923 	struct gbe_slave *slave = intf->slave;
1924 
1925 	gbe_sgmii_rtreset(gbe_dev, slave, true);
1926 	gbe_port_reset(slave);
1927 	/* Disable forwarding */
1928 	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1929 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1930 	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
1931 			   1 << slave->port_num, 0, 0);
1932 
1933 	if (!slave->phy)
1934 		return;
1935 
1936 	phy_stop(slave->phy);
1937 	phy_disconnect(slave->phy);
1938 	slave->phy = NULL;
1939 }
1940 
1941 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
1942 {
1943 	void __iomem *sgmii_port_regs;
1944 
1945 	sgmii_port_regs = priv->sgmii_port_regs;
1946 	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1947 		sgmii_port_regs = priv->sgmii_port34_regs;
1948 
1949 	if (!SLAVE_LINK_IS_XGMII(slave)) {
1950 		netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
1951 		netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
1952 				   slave->link_interface);
1953 	}
1954 }
1955 
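/* Bring up a slave port: configure SGMII, reset and configure the EMAC,
 * program the MAC address, enable ALE forwarding and, for MAC-PHY link
 * interfaces, connect and start the PHY.
 */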
1956 static int gbe_slave_open(struct gbe_intf *gbe_intf)
1957 {
1958 	struct gbe_priv *priv = gbe_intf->gbe_dev;
1959 	struct gbe_slave *slave = gbe_intf->slave;
1960 	phy_interface_t phy_mode;
1961 	bool has_phy = false;
1962 
1963 	void (*hndlr)(struct net_device *) = gbe_adjust_link;
1964 
1965 	gbe_sgmii_config(priv, slave);
1966 	gbe_port_reset(slave);
1967 	gbe_sgmii_rtreset(priv, slave, false);
1968 	gbe_port_config(priv, slave, priv->rx_packet_max);
1969 	gbe_set_slave_mac(slave, gbe_intf);
1970 	/* enable forwarding */
1971 	cpsw_ale_control_set(priv->ale, slave->port_num,
1972 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1973 	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
1974 			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
1975 
1976 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1977 		has_phy = true;
1978 		phy_mode = PHY_INTERFACE_MODE_SGMII;
1979 		slave->phy_port_t = PORT_MII;
1980 	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
1981 		has_phy = true;
1982 		phy_mode = PHY_INTERFACE_MODE_NA;
1983 		slave->phy_port_t = PORT_FIBRE;
1984 	}
1985 
1986 	if (has_phy) {
1987 		if (priv->ss_version == XGBE_SS_VERSION_10)
1988 			hndlr = xgbe_adjust_link;
1989 
1990 		slave->phy = of_phy_connect(gbe_intf->ndev,
1991 					    slave->phy_node,
1992 					    hndlr, 0,
1993 					    phy_mode);
1994 		if (!slave->phy) {
1995 			dev_err(priv->dev, "phy not found on slave %d\n",
1996 				slave->slave_num);
1997 			return -ENODEV;
1998 		}
1999 		dev_dbg(priv->dev, "phy found: %s\n",
2000 			dev_name(&slave->phy->dev));
2001 		phy_start(slave->phy);
2002 		phy_read_status(slave->phy);
2003 	}
2004 	return 0;
2005 }
2006 
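/* Configure the host (CPPI) port: RX max length, ALE start, ALE bypass vs
 * switching mode, and the default VLAN/flood port masks.
 */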
2007 static void gbe_init_host_port(struct gbe_priv *priv)
2008 {
2009 	int bypass_en = 1;
2010 
2011 	/* Host Tx Pri */
2012 	if (IS_SS_ID_NU(priv))
2013 		writel(HOST_TX_PRI_MAP_DEFAULT,
2014 		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2015 
2016 	/* Max length register */
2017 	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2018 						  rx_maxlen));
2019 
2020 	cpsw_ale_start(priv->ale);
2021 
2022 	if (priv->enable_ale)
2023 		bypass_en = 0;
2024 
2025 	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2026 
2027 	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2028 
2029 	cpsw_ale_control_set(priv->ale, priv->host_port,
2030 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2031 
2032 	cpsw_ale_control_set(priv->ale, 0,
2033 			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2034 			     GBE_PORT_MASK(priv->ale_ports));
2035 
2036 	cpsw_ale_control_set(priv->ale, 0,
2037 			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2038 			     GBE_PORT_MASK(priv->ale_ports - 1));
2039 
2040 	cpsw_ale_control_set(priv->ale, 0,
2041 			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2042 			     GBE_PORT_MASK(priv->ale_ports));
2043 
2044 	cpsw_ale_control_set(priv->ale, 0,
2045 			     ALE_PORT_UNTAGGED_EGRESS,
2046 			     GBE_PORT_MASK(priv->ale_ports));
2047 }
2048 
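/* Add an ALE multicast entry for all switch ports, untagged and for each
 * VLAN currently active on the interface.
 */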
2049 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2050 {
2051 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2052 	u16 vlan_id;
2053 
2054 	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2055 			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2056 			   ALE_MCAST_FWD_2);
2057 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2058 		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2059 				   GBE_PORT_MASK(gbe_dev->ale_ports),
2060 				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2061 	}
2062 }
2063 
2064 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2065 {
2066 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2067 	u16 vlan_id;
2068 
2069 	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2070 
2071 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2072 		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2073 				   ALE_VLAN, vlan_id);
2074 }
2075 
2076 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2077 {
2078 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2079 	u16 vlan_id;
2080 
2081 	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2082 
2083 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2084 		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2085 	}
2086 }
2087 
2088 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2089 {
2090 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2091 	u16 vlan_id;
2092 
2093 	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2094 
2095 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2096 		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2097 				   ALE_VLAN, vlan_id);
2098 	}
2099 }
2100 
2101 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2102 {
2103 	struct gbe_intf *gbe_intf = intf_priv;
2104 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2105 
2106 	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2107 		naddr->addr, naddr->type);
2108 
2109 	switch (naddr->type) {
2110 	case ADDR_MCAST:
2111 	case ADDR_BCAST:
2112 		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2113 		break;
2114 	case ADDR_UCAST:
2115 	case ADDR_DEV:
2116 		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2117 		break;
2118 	case ADDR_ANY:
2119 		/* nothing to do for promiscuous */
2120 	default:
2121 		break;
2122 	}
2123 
2124 	return 0;
2125 }
2126 
2127 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2128 {
2129 	struct gbe_intf *gbe_intf = intf_priv;
2130 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2131 
2132 	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2133 		naddr->addr, naddr->type);
2134 
2135 	switch (naddr->type) {
2136 	case ADDR_MCAST:
2137 	case ADDR_BCAST:
2138 		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2139 		break;
2140 	case ADDR_UCAST:
2141 	case ADDR_DEV:
2142 		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2143 		break;
2144 	case ADDR_ANY:
2145 		/* nothing to do for promiscuous */
2146 	default:
2147 		break;
2148 	}
2149 
2150 	return 0;
2151 }
2152 
2153 static int gbe_add_vid(void *intf_priv, int vid)
2154 {
2155 	struct gbe_intf *gbe_intf = intf_priv;
2156 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2157 
2158 	set_bit(vid, gbe_intf->active_vlans);
2159 
2160 	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2161 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2162 			  GBE_MASK_NO_PORTS,
2163 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2164 			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2165 
2166 	return 0;
2167 }
2168 
2169 static int gbe_del_vid(void *intf_priv, int vid)
2170 {
2171 	struct gbe_intf *gbe_intf = intf_priv;
2172 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2173 
2174 	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2175 	clear_bit(vid, gbe_intf->active_vlans);
2176 	return 0;
2177 }
2178 
2179 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2180 {
2181 	struct gbe_intf *gbe_intf = intf_priv;
2182 	struct phy_device *phy = gbe_intf->slave->phy;
2183 	int ret = -EOPNOTSUPP;
2184 
2185 	if (phy)
2186 		ret = phy_mii_ioctl(phy, req, cmd);
2187 
2188 	return ret;
2189 }
2190 
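/* Periodic housekeeping (every GBE_TIMER_INTERVAL): refresh the link state
 * of all open interfaces and secondary slaves, and fold the hardware
 * counters into the 64-bit software accumulators.
 */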
2191 static void netcp_ethss_timer(unsigned long arg)
2192 {
2193 	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
2194 	struct gbe_intf *gbe_intf;
2195 	struct gbe_slave *slave;
2196 
2197 	/* Check & update SGMII link state of interfaces */
2198 	for_each_intf(gbe_intf, gbe_dev) {
2199 		if (!gbe_intf->slave->open)
2200 			continue;
2201 		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2202 					      gbe_intf->ndev);
2203 	}
2204 
2205 	/* Check & update SGMII link state of secondary ports */
2206 	for_each_sec_slave(slave, gbe_dev) {
2207 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2208 	}
2209 
2210 	spin_lock_bh(&gbe_dev->hw_stats_lock);
2211 
2212 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2213 		gbe_update_stats_ver14(gbe_dev, NULL);
2214 	else
2215 		gbe_update_stats(gbe_dev, NULL);
2216 
2217 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
2218 
2219 	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2220 	add_timer(&gbe_dev->timer);
2221 }
2222 
2223 static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
2224 {
2225 	struct gbe_intf *gbe_intf = data;
2226 
2227 	p_info->tx_pipe = &gbe_intf->tx_pipe;
2228 	return 0;
2229 }
2230 
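/* netcp interface open: set up the TX pipe destination port, program the
 * global switch control and statistics enables, open the slave port and
 * register the TX hook that steers packets into this interface's pipe.
 */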
2231 static int gbe_open(void *intf_priv, struct net_device *ndev)
2232 {
2233 	struct gbe_intf *gbe_intf = intf_priv;
2234 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2235 	struct netcp_intf *netcp = netdev_priv(ndev);
2236 	struct gbe_slave *slave = gbe_intf->slave;
2237 	int port_num = slave->port_num;
2238 	u32 reg;
2239 	int ret;
2240 
2241 	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2242 	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2243 		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2244 		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2245 
2246 	/* For 10G and on NetCP 1.5, the destination port goes in the tag info */
2247 	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
2248 		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2249 
2250 	if (gbe_dev->enable_ale)
2251 		gbe_intf->tx_pipe.switch_to_port = 0;
2252 	else
2253 		gbe_intf->tx_pipe.switch_to_port = port_num;
2254 
2255 	dev_dbg(gbe_dev->dev,
2256 		"opened TX channel %s: %p with switch-to-port %d, flags %d\n",
2257 		gbe_intf->tx_pipe.dma_chan_name,
2258 		gbe_intf->tx_pipe.dma_channel,
2259 		gbe_intf->tx_pipe.switch_to_port,
2260 		gbe_intf->tx_pipe.flags);
2261 
2262 	gbe_slave_stop(gbe_intf);
2263 
2264 	/* disable priority elevation and enable statistics on all ports */
2265 	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2266 
2267 	/* Control register */
2268 	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2269 
2270 	/* All statistics enabled and STAT AB visible by default */
2271 	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2272 						    stat_port_en));
2273 
2274 	ret = gbe_slave_open(gbe_intf);
2275 	if (ret)
2276 		goto fail;
2277 
2278 	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2279 			      gbe_intf);
2280 
2281 	slave->open = true;
2282 	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2283 	return 0;
2284 
2285 fail:
2286 	gbe_slave_stop(gbe_intf);
2287 	return ret;
2288 }
2289 
2290 static int gbe_close(void *intf_priv, struct net_device *ndev)
2291 {
2292 	struct gbe_intf *gbe_intf = intf_priv;
2293 	struct netcp_intf *netcp = netdev_priv(ndev);
2294 
2295 	gbe_slave_stop(gbe_intf);
2296 	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2297 				gbe_intf);
2298 
2299 	gbe_intf->slave->open = false;
2300 	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2301 	return 0;
2302 }
2303 
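/* Parse a slave's device tree node (slave-port, link-interface, phy-handle)
 * and derive its port/EMAC register addresses and register offsets for the
 * detected subsystem version.
 */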
2304 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2305 		      struct device_node *node)
2306 {
2307 	int port_reg_num;
2308 	u32 port_reg_ofs, emac_reg_ofs;
2309 	u32 port_reg_blk_sz, emac_reg_blk_sz;
2310 
2311 	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2312 		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2313 		return -EINVAL;
2314 	}
2315 
2316 	if (of_property_read_u32(node, "link-interface",
2317 				 &slave->link_interface)) {
2318 		dev_warn(gbe_dev->dev,
2319 			 "missing link-interface value defaulting to 1G mac-phy link\n");
2320 		slave->link_interface = SGMII_LINK_MAC_PHY;
2321 	}
2322 
2323 	slave->open = false;
2324 	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
2325 	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2326 
2327 	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2328 		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2329 	else
2330 		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
2331 
2332 	/* EMAC register blocks are contiguous; the port register blocks are not */
2333 	port_reg_num = slave->slave_num;
2334 	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2335 		if (slave->slave_num > 1) {
2336 			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2337 			port_reg_num -= 2;
2338 		} else {
2339 			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2340 		}
2341 		emac_reg_ofs = GBE13_EMAC_OFFSET;
2342 		port_reg_blk_sz = 0x30;
2343 		emac_reg_blk_sz = 0x40;
2344 	} else if (IS_SS_ID_MU(gbe_dev)) {
2345 		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2346 		emac_reg_ofs = GBENU_EMAC_OFFSET;
2347 		port_reg_blk_sz = 0x1000;
2348 		emac_reg_blk_sz = 0x1000;
2349 	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2350 		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
2351 		emac_reg_ofs = XGBE10_EMAC_OFFSET;
2352 		port_reg_blk_sz = 0x30;
2353 		emac_reg_blk_sz = 0x40;
2354 	} else {
2355 		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2356 			gbe_dev->ss_version);
2357 		return -EINVAL;
2358 	}
2359 
2360 	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
2361 				(port_reg_blk_sz * port_reg_num);
2362 	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
2363 				(emac_reg_blk_sz * slave->slave_num);
2364 
2365 	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2366 		/* Initialize slave port register offsets */
2367 		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2368 		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2369 		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2370 		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2371 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2372 		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2373 		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2374 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2375 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2376 
2377 		/* Initialize EMAC register offsets */
2378 		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2379 		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2380 		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2381 
2382 	} else if (IS_SS_ID_MU(gbe_dev)) {
2383 		/* Initialize slave port register offsets */
2384 		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
2385 		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
2386 		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
2387 		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
2388 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
2389 		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2390 		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
2391 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2392 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
2393 		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
2394 
2395 		/* Initialize EMAC register offsets */
2396 		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
2397 		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
2398 
2399 	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2400 		/* Initialize slave port register offsets */
2401 		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
2402 		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2403 		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
2404 		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
2405 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2406 		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2407 		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2408 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2409 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2410 
2411 		/* Initialize EMAC register offsets */
2412 		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
2413 		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2414 		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2415 	}
2416 
2417 	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
2418 	return 0;
2419 }
2420 
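/* Initialize "secondary" slave ports that have no netdev of their own:
 * configure and start each port, and attach any MAC-PHY links to a dummy
 * netdev so that of_phy_connect() has a netdev to attach to.
 */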
2421 static void init_secondary_ports(struct gbe_priv *gbe_dev,
2422 				 struct device_node *node)
2423 {
2424 	struct device *dev = gbe_dev->dev;
2425 	phy_interface_t phy_mode;
2426 	struct gbe_priv **priv;
2427 	struct device_node *port;
2428 	struct gbe_slave *slave;
2429 	bool mac_phy_link = false;
2430 
2431 	for_each_child_of_node(node, port) {
2432 		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
2433 		if (!slave) {
2434 			dev_err(dev,
2435 				"memory alloc failed for secondary port(%s), skipping...\n",
2436 				port->name);
2437 			continue;
2438 		}
2439 
2440 		if (init_slave(gbe_dev, slave, port)) {
2441 			dev_err(dev,
2442 				"Failed to initialize secondary port(%s), skipping...\n",
2443 				port->name);
2444 			devm_kfree(dev, slave);
2445 			continue;
2446 		}
2447 
2448 		gbe_sgmii_config(gbe_dev, slave);
2449 		gbe_port_reset(slave);
2450 		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
2451 		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
2452 		gbe_dev->num_slaves++;
2453 		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
2454 		    (slave->link_interface == XGMII_LINK_MAC_PHY))
2455 			mac_phy_link = true;
2456 
2457 		slave->open = true;
2458 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2459 			break;
2460 	}
2461 
2462 	/* of_phy_connect() is needed only for MAC-PHY interface */
2463 	if (!mac_phy_link)
2464 		return;
2465 
2466 	/* Allocate dummy netdev device for attaching to phy device */
2467 	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
2468 					NET_NAME_UNKNOWN, ether_setup);
2469 	if (!gbe_dev->dummy_ndev) {
2470 		dev_err(dev,
2471 			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
2472 		return;
2473 	}
2474 	priv = netdev_priv(gbe_dev->dummy_ndev);
2475 	*priv = gbe_dev;
2476 
2477 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2478 		phy_mode = PHY_INTERFACE_MODE_SGMII;
2479 		slave->phy_port_t = PORT_MII;
2480 	} else {
2481 		phy_mode = PHY_INTERFACE_MODE_NA;
2482 		slave->phy_port_t = PORT_FIBRE;
2483 	}
2484 
2485 	for_each_sec_slave(slave, gbe_dev) {
2486 		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2487 		    (slave->link_interface != XGMII_LINK_MAC_PHY))
2488 			continue;
2489 		slave->phy =
2490 			of_phy_connect(gbe_dev->dummy_ndev,
2491 				       slave->phy_node,
2492 				       gbe_adjust_link_sec_slaves,
2493 				       0, phy_mode);
2494 		if (!slave->phy) {
2495 			dev_err(dev, "phy not found for slave %d\n",
2496 				slave->slave_num);
2497 			slave->phy = NULL;
2498 		} else {
2499 			dev_dbg(dev, "phy found: %s\n",
2500 				dev_name(&slave->phy->dev));
2501 			phy_start(slave->phy);
2502 			phy_read_status(slave->phy);
2503 		}
2504 	}
2505 }
2506 
2507 static void free_secondary_ports(struct gbe_priv *gbe_dev)
2508 {
2509 	struct gbe_slave *slave;
2510 
2511 	while (!list_empty(&gbe_dev->secondary_slaves)) {
2512 		slave = first_sec_slave(gbe_dev);
2513 
2514 		if (slave->phy)
2515 			phy_disconnect(slave->phy);
2516 		list_del(&slave->slave_list);
2517 	}
2518 	if (gbe_dev->dummy_ndev)
2519 		free_netdev(gbe_dev->dummy_ndev);
2520 }
2521 
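/* Map the 10G subsystem, switch-module and SerDes register regions and set
 * up the XGBE-specific offsets, statistics tables and ALE parameters.
 */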
2522 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
2523 				 struct device_node *node)
2524 {
2525 	struct resource res;
2526 	void __iomem *regs;
2527 	int ret, i;
2528 
2529 	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
2530 	if (ret) {
2531 		dev_err(gbe_dev->dev,
2532 			"Can't translate xgbe node(%s) ss address at index %d\n",
2533 			node->name, XGBE_SS_REG_INDEX);
2534 		return ret;
2535 	}
2536 
2537 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2538 	if (IS_ERR(regs)) {
2539 		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
2540 		return PTR_ERR(regs);
2541 	}
2542 	gbe_dev->ss_regs = regs;
2543 
2544 	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
2545 	if (ret) {
2546 		dev_err(gbe_dev->dev,
2547 			"Can't translate xgbe node(%s) sm address at index %d\n",
2548 			node->name, XGBE_SM_REG_INDEX);
2549 		return ret;
2550 	}
2551 
2552 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2553 	if (IS_ERR(regs)) {
2554 		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
2555 		return PTR_ERR(regs);
2556 	}
2557 	gbe_dev->switch_regs = regs;
2558 
2559 	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
2560 	if (ret) {
2561 		dev_err(gbe_dev->dev,
2562 			"Can't translate xgbe node(%s) serdes address at index %d\n",
2563 			node->name, XGBE_SERDES_REG_INDEX);
2564 		return ret;
2565 	}
2566 
2567 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2568 	if (IS_ERR(regs)) {
2569 		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
2570 		return PTR_ERR(regs);
2571 	}
2572 	gbe_dev->xgbe_serdes_regs = regs;
2573 
2574 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2575 				  XGBE10_NUM_STAT_ENTRIES *
2576 				  (gbe_dev->max_num_ports) * sizeof(u64),
2577 				  GFP_KERNEL);
2578 	if (!gbe_dev->hw_stats) {
2579 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2580 		return -ENOMEM;
2581 	}
2582 
2583 	gbe_dev->ss_version = XGBE_SS_VERSION_10;
2584 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
2585 					XGBE10_SGMII_MODULE_OFFSET;
2586 	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
2587 
2588 	for (i = 0; i < gbe_dev->max_num_ports; i++)
2589 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2590 			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
2591 
2592 	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
2593 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2594 	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
2595 	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
2596 	gbe_dev->et_stats = xgbe10_et_stats;
2597 	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
2598 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
2599 
2600 	/* Subsystem registers */
2601 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2602 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
2603 
2604 	/* Switch module registers */
2605 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2606 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2607 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2608 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2609 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2610 
2611 	/* Host port registers */
2612 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2613 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2614 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2615 	return 0;
2616 }
2617 
2618 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
2619 				    struct device_node *node)
2620 {
2621 	struct resource res;
2622 	void __iomem *regs;
2623 	int ret;
2624 
2625 	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
2626 	if (ret) {
2627 		dev_err(gbe_dev->dev,
2628 			"Can't translate gbe node(%s) ss address at index %d\n",
2629 			node->name, GBE_SS_REG_INDEX);
2630 		return ret;
2631 	}
2632 
2633 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2634 	if (IS_ERR(regs)) {
2635 		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
2636 		return PTR_ERR(regs);
2637 	}
2638 	gbe_dev->ss_regs = regs;
2639 	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
2640 	return 0;
2641 }
2642 
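/* Map the GBE 1.4 SGMII port 3/4 and switch-module register regions and set
 * up the version 1.4 specific offsets, statistics tables and ALE parameters.
 */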
2643 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
2644 				struct device_node *node)
2645 {
2646 	struct resource res;
2647 	void __iomem *regs;
2648 	int i, ret;
2649 
2650 	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
2651 	if (ret) {
2652 		dev_err(gbe_dev->dev,
2653 			"Can't translate gbe node(%s) address at index %d\n",
2654 			node->name, GBE_SGMII34_REG_INDEX);
2655 		return ret;
2656 	}
2657 
2658 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2659 	if (IS_ERR(regs)) {
2660 		dev_err(gbe_dev->dev,
2661 			"Failed to map gbe sgmii port34 register base\n");
2662 		return PTR_ERR(regs);
2663 	}
2664 	gbe_dev->sgmii_port34_regs = regs;
2665 
2666 	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
2667 	if (ret) {
2668 		dev_err(gbe_dev->dev,
2669 			"Can't translate gbe node(%s) address at index %d\n",
2670 			node->name, GBE_SM_REG_INDEX);
2671 		return ret;
2672 	}
2673 
2674 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2675 	if (IS_ERR(regs)) {
2676 		dev_err(gbe_dev->dev,
2677 			"Failed to map gbe switch module register base\n");
2678 		return PTR_ERR(regs);
2679 	}
2680 	gbe_dev->switch_regs = regs;
2681 
2682 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2683 					  GBE13_NUM_HW_STAT_ENTRIES *
2684 					  gbe_dev->max_num_slaves * sizeof(u64),
2685 					  GFP_KERNEL);
2686 	if (!gbe_dev->hw_stats) {
2687 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2688 		return -ENOMEM;
2689 	}
2690 
2691 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
2692 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
2693 
2694 	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
2695 		gbe_dev->hw_stats_regs[i] =
2696 			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
2697 			(GBE_HW_STATS_REG_MAP_SZ * i);
2698 	}
2699 
2700 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
2701 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2702 	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
2703 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2704 	gbe_dev->et_stats = gbe13_et_stats;
2705 	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
2706 	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
2707 
2708 	/* Subsystem registers */
2709 	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2710 
2711 	/* Switch module registers */
2712 	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2713 	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2714 	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
2715 	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2716 	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2717 	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2718 
2719 	/* Host port registers */
2720 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2721 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2722 	return 0;
2723 }
2724 
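/* Map the GBENU (NetCP 1.5) switch-module registers and set up the NU/2U
 * specific offsets, statistics sizes and ALE parameters.
 */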
2725 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2726 				struct device_node *node)
2727 {
2728 	struct resource res;
2729 	void __iomem *regs;
2730 	int i, ret;
2731 
2732 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2733 				  GBENU_NUM_HW_STAT_ENTRIES *
2734 				  (gbe_dev->max_num_ports) * sizeof(u64),
2735 				  GFP_KERNEL);
2736 	if (!gbe_dev->hw_stats) {
2737 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2738 		return -ENOMEM;
2739 	}
2740 
2741 	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
2742 	if (ret) {
2743 		dev_err(gbe_dev->dev,
2744 			"Can't translate gbenu node(%s) address at index %d\n",
2745 			node->name, GBENU_SM_REG_INDEX);
2746 		return ret;
2747 	}
2748 
2749 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2750 	if (IS_ERR(regs)) {
2751 		dev_err(gbe_dev->dev,
2752 			"Failed to map gbenu switch module register base\n");
2753 		return PTR_ERR(regs);
2754 	}
2755 	gbe_dev->switch_regs = regs;
2756 
2757 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2758 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
2759 
2760 	for (i = 0; i < (gbe_dev->max_num_ports); i++)
2761 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2762 			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
2763 
2764 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
2765 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2766 	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
2767 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2768 	gbe_dev->et_stats = gbenu_et_stats;
2769 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
2770 
2771 	if (IS_SS_ID_NU(gbe_dev))
2772 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2773 			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
2774 	else
2775 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2776 					GBENU_ET_STATS_PORT_SIZE;
2777 
2778 	/* Subsystem registers */
2779 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2780 
2781 	/* Switch module registers */
2782 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2783 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
2784 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2785 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2786 
2787 	/* Host port registers */
2788 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2789 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2790 
2791 	/* For NU only.  2U does not need tx_pri_map.
2792 	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
2793 	 * while 2U has only 1 such thread
2794 	 */
2795 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2796 	return 0;
2797 }
2798 
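/* netcp module probe: size the device from its compatible string, map the
 * registers for the detected subsystem, set up the TX pipe, parse the
 * interface and secondary-slave-port nodes, create the ALE engine,
 * initialize the host port and start the link/statistics timer.
 */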
2799 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2800 		     struct device_node *node, void **inst_priv)
2801 {
2802 	struct device_node *interfaces, *interface;
2803 	struct device_node *secondary_ports;
2804 	struct cpsw_ale_params ale_params;
2805 	struct gbe_priv *gbe_dev;
2806 	u32 slave_num;
2807 	int ret = 0;
2808 
2809 	if (!node) {
2810 		dev_err(dev, "device tree info unavailable\n");
2811 		return -ENODEV;
2812 	}
2813 
2814 	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
2815 	if (!gbe_dev)
2816 		return -ENOMEM;
2817 
2818 	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
2819 	    of_device_is_compatible(node, "ti,netcp-gbe")) {
2820 		gbe_dev->max_num_slaves = 4;
2821 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
2822 		gbe_dev->max_num_slaves = 8;
2823 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
2824 		gbe_dev->max_num_slaves = 1;
2825 	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
2826 		gbe_dev->max_num_slaves = 2;
2827 	} else {
2828 		dev_err(dev, "device tree node for unknown device\n");
2829 		return -EINVAL;
2830 	}
2831 	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
2832 
2833 	gbe_dev->dev = dev;
2834 	gbe_dev->netcp_device = netcp_device;
2835 	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
2836 
2837 	/* init the hw stats lock */
2838 	spin_lock_init(&gbe_dev->hw_stats_lock);
2839 
2840 	if (of_find_property(node, "enable-ale", NULL)) {
2841 		gbe_dev->enable_ale = true;
2842 		dev_info(dev, "ALE enabled\n");
2843 	} else {
2844 		gbe_dev->enable_ale = false;
2845 		dev_dbg(dev, "ALE bypass enabled\n");
2846 	}
2847 
2848 	ret = of_property_read_u32(node, "tx-queue",
2849 				   &gbe_dev->tx_queue_id);
2850 	if (ret < 0) {
2851 		dev_err(dev, "missing \"tx-queue\" parameter\n");
2852 		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
2853 	}
2854 
2855 	ret = of_property_read_string(node, "tx-channel",
2856 				      &gbe_dev->dma_chan_name);
2857 	if (ret < 0) {
2858 		dev_err(dev, "missing \"tx-channel\" parameter\n");
2859 		return -EINVAL;
2860 	}
2861 
2862 	if (!strcmp(node->name, "gbe")) {
2863 		ret = get_gbe_resource_version(gbe_dev, node);
2864 		if (ret)
2865 			return ret;
2866 
2867 		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2868 
2869 		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2870 			ret = set_gbe_ethss14_priv(gbe_dev, node);
2871 		else if (IS_SS_ID_MU(gbe_dev))
2872 			ret = set_gbenu_ethss_priv(gbe_dev, node);
2873 		else
2874 			ret = -ENODEV;
2875 
2876 	} else if (!strcmp(node->name, "xgbe")) {
2877 		ret = set_xgbe_ethss10_priv(gbe_dev, node);
2878 		if (ret)
2879 			return ret;
2880 		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2881 					     gbe_dev->ss_regs);
2882 	} else {
2883 		dev_err(dev, "unknown GBE node(%s)\n", node->name);
2884 		ret = -ENODEV;
2885 	}
2886 
2887 	if (ret)
2888 		return ret;
2889 
2890 	interfaces = of_get_child_by_name(node, "interfaces");
2891 	if (!interfaces)
2892 		dev_err(dev, "could not find interfaces\n");
2893 
2894 	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2895 				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2896 	if (ret)
2897 		return ret;
2898 
2899 	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2900 	if (ret)
2901 		return ret;
2902 
2903 	/* Create network interfaces */
2904 	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
2905 	for_each_child_of_node(interfaces, interface) {
2906 		ret = of_property_read_u32(interface, "slave-port", &slave_num);
2907 		if (ret) {
2908 			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
2909 				interface->name);
2910 			continue;
2911 		}
2912 		gbe_dev->num_slaves++;
2913 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2914 			break;
2915 	}
2916 	of_node_put(interfaces);
2917 
2918 	if (!gbe_dev->num_slaves)
2919 		dev_warn(dev, "No network interface configured\n");
2920 
2921 	/* Initialize Secondary slave ports */
2922 	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
2923 	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
2924 	if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
2925 		init_secondary_ports(gbe_dev, secondary_ports);
2926 	of_node_put(secondary_ports);
2927 
2928 	if (!gbe_dev->num_slaves) {
2929 		dev_err(dev,
2930 			"No network interface or secondary ports configured\n");
2931 		ret = -ENODEV;
2932 		goto free_sec_ports;
2933 	}
2934 
2935 	memset(&ale_params, 0, sizeof(ale_params));
2936 	ale_params.dev		= gbe_dev->dev;
2937 	ale_params.ale_regs	= gbe_dev->ale_reg;
2938 	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
2939 	ale_params.ale_entries	= gbe_dev->ale_entries;
2940 	ale_params.ale_ports	= gbe_dev->ale_ports;
2941 
2942 	gbe_dev->ale = cpsw_ale_create(&ale_params);
2943 	if (!gbe_dev->ale) {
2944 		dev_err(gbe_dev->dev, "error initializing ale engine\n");
2945 		ret = -ENODEV;
2946 		goto free_sec_ports;
2947 	} else {
2948 		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2949 	}
2950 
2951 	/* initialize host port */
2952 	gbe_init_host_port(gbe_dev);
2953 
2954 	init_timer(&gbe_dev->timer);
2955 	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
2956 	gbe_dev->timer.function = netcp_ethss_timer;
2957 	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
2958 	add_timer(&gbe_dev->timer);
2959 	*inst_priv = gbe_dev;
2960 	return 0;
2961 
2962 free_sec_ports:
2963 	free_secondary_ports(gbe_dev);
2964 	return ret;
2965 }
2966 
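/* Attach a netcp interface to a slave port: allocate the per-interface
 * state, parse the slave's device tree node and install the keystone
 * ethtool operations on the netdev.
 */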
2967 static int gbe_attach(void *inst_priv, struct net_device *ndev,
2968 		      struct device_node *node, void **intf_priv)
2969 {
2970 	struct gbe_priv *gbe_dev = inst_priv;
2971 	struct gbe_intf *gbe_intf;
2972 	int ret;
2973 
2974 	if (!node) {
2975 		dev_err(gbe_dev->dev, "interface node not available\n");
2976 		return -ENODEV;
2977 	}
2978 
2979 	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
2980 	if (!gbe_intf)
2981 		return -ENOMEM;
2982 
2983 	gbe_intf->ndev = ndev;
2984 	gbe_intf->dev = gbe_dev->dev;
2985 	gbe_intf->gbe_dev = gbe_dev;
2986 
2987 	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
2988 					sizeof(*gbe_intf->slave),
2989 					GFP_KERNEL);
2990 	if (!gbe_intf->slave) {
2991 		ret = -ENOMEM;
2992 		goto fail;
2993 	}
2994 
2995 	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
2996 		ret = -ENODEV;
2997 		goto fail;
2998 	}
2999 
3000 	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3001 	ndev->ethtool_ops = &keystone_ethtool_ops;
3002 	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3003 	*intf_priv = gbe_intf;
3004 	return 0;
3005 
3006 fail:
3007 	if (gbe_intf->slave)
3008 		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3009 	if (gbe_intf)
3010 		devm_kfree(gbe_dev->dev, gbe_intf);
3011 	return ret;
3012 }
3013 
3014 static int gbe_release(void *intf_priv)
3015 {
3016 	struct gbe_intf *gbe_intf = intf_priv;
3017 
3018 	gbe_intf->ndev->ethtool_ops = NULL;
3019 	list_del(&gbe_intf->gbe_intf_list);
3020 	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3021 	devm_kfree(gbe_intf->dev, gbe_intf);
3022 	return 0;
3023 }
3024 
3025 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3026 {
3027 	struct gbe_priv *gbe_dev = inst_priv;
3028 
3029 	del_timer_sync(&gbe_dev->timer);
3030 	cpsw_ale_stop(gbe_dev->ale);
3031 	cpsw_ale_destroy(gbe_dev->ale);
3032 	netcp_txpipe_close(&gbe_dev->tx_pipe);
3033 	free_secondary_ports(gbe_dev);
3034 
3035 	if (!list_empty(&gbe_dev->gbe_intf_head))
3036 		dev_alert(gbe_dev->dev,
3037 			  "unreleased ethss interfaces present\n");
3038 
3039 	return 0;
3040 }
3041 
3042 static struct netcp_module gbe_module = {
3043 	.name		= GBE_MODULE_NAME,
3044 	.owner		= THIS_MODULE,
3045 	.primary	= true,
3046 	.probe		= gbe_probe,
3047 	.open		= gbe_open,
3048 	.close		= gbe_close,
3049 	.remove		= gbe_remove,
3050 	.attach		= gbe_attach,
3051 	.release	= gbe_release,
3052 	.add_addr	= gbe_add_addr,
3053 	.del_addr	= gbe_del_addr,
3054 	.add_vid	= gbe_add_vid,
3055 	.del_vid	= gbe_del_vid,
3056 	.ioctl		= gbe_ioctl,
3057 };
3058 
3059 static struct netcp_module xgbe_module = {
3060 	.name		= XGBE_MODULE_NAME,
3061 	.owner		= THIS_MODULE,
3062 	.primary	= true,
3063 	.probe		= gbe_probe,
3064 	.open		= gbe_open,
3065 	.close		= gbe_close,
3066 	.remove		= gbe_remove,
3067 	.attach		= gbe_attach,
3068 	.release	= gbe_release,
3069 	.add_addr	= gbe_add_addr,
3070 	.del_addr	= gbe_del_addr,
3071 	.add_vid	= gbe_add_vid,
3072 	.del_vid	= gbe_del_vid,
3073 	.ioctl		= gbe_ioctl,
3074 };
3075 
3076 static int __init keystone_gbe_init(void)
3077 {
3078 	int ret;
3079 
3080 	ret = netcp_register_module(&gbe_module);
3081 	if (ret)
3082 		return ret;
3083 
3084 	ret = netcp_register_module(&xgbe_module);
3085 	if (ret)
3086 		return ret;
3087 
3088 	return 0;
3089 }
3090 module_init(keystone_gbe_init);
3091 
3092 static void __exit keystone_gbe_exit(void)
3093 {
3094 	netcp_unregister_module(&gbe_module);
3095 	netcp_unregister_module(&xgbe_module);
3096 }
3097 module_exit(keystone_gbe_exit);
3098 
3099 MODULE_LICENSE("GPL v2");
3100 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3101 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
3102