xref: /linux/drivers/net/ethernet/ti/netcp_ethss.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1 /*
2  * Keystone GBE and XGBE subsystem code
3  *
4  * Copyright (C) 2014 Texas Instruments Incorporated
5  * Authors:	Sandeep Nair <sandeep_n@ti.com>
6  *		Sandeep Paulraj <s-paulraj@ti.com>
7  *		Cyril Chemparathy <cyril@ti.com>
8  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *		Wingman Kwok <w-kwok2@ti.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation version 2.
14  *
15  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16  * kind, whether express or implied; without even the implied warranty
17  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  */
20 
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/of_mdio.h>
24 #include <linux/of_address.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ptp_classify.h>
27 #include <linux/net_tstamp.h>
28 #include <linux/ethtool.h>
29 
30 #include "cpsw_ale.h"
31 #include "netcp.h"
32 #include "cpts.h"
33 
34 #define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
35 #define NETCP_DRIVER_VERSION		"v1.0"
36 
37 #define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
38 #define GBE_MAJOR_VERSION(reg)		((reg >> 8) & 0x7)
39 #define GBE_MINOR_VERSION(reg)		(reg & 0xff)
40 #define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
41 
42 /* 1G Ethernet SS defines */
43 #define GBE_MODULE_NAME			"netcp-gbe"
44 #define GBE_SS_VERSION_14		0x4ed21104
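/* Example decode of an id_ver value with the macros above:
 * GBE_SS_VERSION_14 (0x4ed21104) gives GBE_IDENT() = 0x4ed2,
 * GBE_MAJOR_VERSION() = 1, GBE_RTL_VERSION() = 2, GBE_MINOR_VERSION() = 4,
 * i.e. subsystem version 1.4.
 */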
45 
46 #define GBE_SS_REG_INDEX		0
47 #define GBE_SGMII34_REG_INDEX		1
48 #define GBE_SM_REG_INDEX		2
49 /* offset relative to base of GBE_SS_REG_INDEX */
50 #define GBE13_SGMII_MODULE_OFFSET	0x100
51 /* offset relative to base of GBE_SM_REG_INDEX */
52 #define GBE13_HOST_PORT_OFFSET		0x34
53 #define GBE13_SLAVE_PORT_OFFSET		0x60
54 #define GBE13_EMAC_OFFSET		0x100
55 #define GBE13_SLAVE_PORT2_OFFSET	0x200
56 #define GBE13_HW_STATS_OFFSET		0x300
57 #define GBE13_CPTS_OFFSET		0x500
58 #define GBE13_ALE_OFFSET		0x600
59 #define GBE13_HOST_PORT_NUM		0
60 #define GBE13_NUM_ALE_ENTRIES		1024
61 
62 /* 1G Ethernet NU SS defines */
63 #define GBENU_MODULE_NAME		"netcp-gbenu"
64 #define GBE_SS_ID_NU			0x4ee6
65 #define GBE_SS_ID_2U			0x4ee8
66 
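/* IS_SS_ID_MU() matches both the NU and the 2U flavours of the subsystem */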
67 #define IS_SS_ID_MU(d) \
68 	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
69 	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
70 
71 #define IS_SS_ID_NU(d) \
72 	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
73 
74 #define GBENU_SS_REG_INDEX		0
75 #define GBENU_SM_REG_INDEX		1
76 #define GBENU_SGMII_MODULE_OFFSET	0x100
77 #define GBENU_HOST_PORT_OFFSET		0x1000
78 #define GBENU_SLAVE_PORT_OFFSET		0x2000
79 #define GBENU_EMAC_OFFSET		0x2330
80 #define GBENU_HW_STATS_OFFSET		0x1a000
81 #define GBENU_CPTS_OFFSET		0x1d000
82 #define GBENU_ALE_OFFSET		0x1e000
83 #define GBENU_HOST_PORT_NUM		0
84 #define GBENU_SGMII_MODULE_SIZE		0x100
85 
86 /* 10G Ethernet SS defines */
87 #define XGBE_MODULE_NAME		"netcp-xgbe"
88 #define XGBE_SS_VERSION_10		0x4ee42100
89 
90 #define XGBE_SS_REG_INDEX		0
91 #define XGBE_SM_REG_INDEX		1
92 #define XGBE_SERDES_REG_INDEX		2
93 
94 /* offset relative to base of XGBE_SS_REG_INDEX */
95 #define XGBE10_SGMII_MODULE_OFFSET	0x100
96 #define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
97 /* offset relative to base of XGBE_SM_REG_INDEX */
98 #define XGBE10_HOST_PORT_OFFSET		0x34
99 #define XGBE10_SLAVE_PORT_OFFSET	0x64
100 #define XGBE10_EMAC_OFFSET		0x400
101 #define XGBE10_CPTS_OFFSET		0x600
102 #define XGBE10_ALE_OFFSET		0x700
103 #define XGBE10_HW_STATS_OFFSET		0x800
104 #define XGBE10_HOST_PORT_NUM		0
105 #define XGBE10_NUM_ALE_ENTRIES		2048
106 
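/* period of the driver's gbe_priv.timer, which periodically refreshes
 * link state and the hw statistics
 */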
107 #define	GBE_TIMER_INTERVAL			(HZ / 2)
108 
109 /* Soft reset register values */
110 #define SOFT_RESET_MASK				BIT(0)
111 #define SOFT_RESET				BIT(0)
112 #define DEVICE_EMACSL_RESET_POLL_COUNT		100
113 #define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
114 
115 #define MACSL_RX_ENABLE_CSF			BIT(23)
116 #define MACSL_ENABLE_EXT_CTL			BIT(18)
117 #define MACSL_XGMII_ENABLE			BIT(13)
118 #define MACSL_XGIG_MODE				BIT(8)
119 #define MACSL_GIG_MODE				BIT(7)
120 #define MACSL_GMII_ENABLE			BIT(5)
121 #define MACSL_FULLDUPLEX			BIT(0)
122 
123 #define GBE_CTL_P0_ENABLE			BIT(2)
124 #define ETH_SW_CTL_P0_TX_CRC_REMOVE		BIT(13)
125 #define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
126 #define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
127 #define GBE_STATS_CD_SEL			BIT(28)
128 
129 #define GBE_PORT_MASK(x)			(BIT(x) - 1)
130 #define GBE_MASK_NO_PORTS			0
131 
132 #define GBE_DEF_1G_MAC_CONTROL					\
133 		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
134 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
135 
136 #define GBE_DEF_10G_MAC_CONTROL				\
137 		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
138 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
139 
140 #define GBE_STATSA_MODULE			0
141 #define GBE_STATSB_MODULE			1
142 #define GBE_STATSC_MODULE			2
143 #define GBE_STATSD_MODULE			3
144 
145 #define GBENU_STATS0_MODULE			0
146 #define GBENU_STATS1_MODULE			1
147 #define GBENU_STATS2_MODULE			2
148 #define GBENU_STATS3_MODULE			3
149 #define GBENU_STATS4_MODULE			4
150 #define GBENU_STATS5_MODULE			5
151 #define GBENU_STATS6_MODULE			6
152 #define GBENU_STATS7_MODULE			7
153 #define GBENU_STATS8_MODULE			8
154 
155 #define XGBE_STATS0_MODULE			0
156 #define XGBE_STATS1_MODULE			1
157 #define XGBE_STATS2_MODULE			2
158 
159 /* s: 0-based slave_port */
160 #define SGMII_BASE(d, s) \
161 	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
162 
163 #define GBE_TX_QUEUE				648
164 #define	GBE_TXHOOK_ORDER			0
165 #define	GBE_RXHOOK_ORDER			0
166 #define GBE_DEFAULT_ALE_AGEOUT			30
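/* XGMII link_interface values (see netcp.h) sort above the SGMII ones,
 * hence the >= test in SLAVE_LINK_IS_XGMII() below
 */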
167 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
168 #define NETCP_LINK_STATE_INVALID		-1
169 
170 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
171 		offsetof(struct gbe##_##rb, rn)
172 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
173 		offsetof(struct gbenu##_##rb, rn)
174 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
175 		offsetof(struct xgbe##_##rb, rn)
176 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
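/* For example, GBE_SET_REG_OFS(gbe_dev, switch_regs, control) expands to
 *
 *	gbe_dev->switch_regs_ofs.control =
 *		offsetof(struct gbe_switch_regs, control);
 *
 * and GBE_REG_ADDR(gbe_dev, switch_regs, control) then yields
 * gbe_dev->switch_regs + gbe_dev->switch_regs_ofs.control, so the same
 * accessors work across the GBE, GBENU and XGBE register layouts.
 */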
177 
178 #define HOST_TX_PRI_MAP_DEFAULT			0x00000000
179 
180 #if IS_ENABLED(CONFIG_TI_CPTS)
181 /* Px_TS_CTL register fields */
182 #define TS_RX_ANX_F_EN				BIT(0)
183 #define TS_RX_VLAN_LT1_EN			BIT(1)
184 #define TS_RX_VLAN_LT2_EN			BIT(2)
185 #define TS_RX_ANX_D_EN				BIT(3)
186 #define TS_TX_ANX_F_EN				BIT(4)
187 #define TS_TX_VLAN_LT1_EN			BIT(5)
188 #define TS_TX_VLAN_LT2_EN			BIT(6)
189 #define TS_TX_ANX_D_EN				BIT(7)
190 #define TS_LT2_EN				BIT(8)
191 #define TS_RX_ANX_E_EN				BIT(9)
192 #define TS_TX_ANX_E_EN				BIT(10)
193 #define TS_MSG_TYPE_EN_SHIFT			16
194 #define TS_MSG_TYPE_EN_MASK			0xffff
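/* ANX_D/E/F above follow the IEEE 1588-2008 transport annexes:
 * Annex D = PTP over UDP/IPv4, Annex E = PTP over UDP/IPv6,
 * Annex F = PTP over IEEE 802.3 (layer 2).
 */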
195 
196 /* Px_TS_SEQ_LTYPE register fields */
197 #define TS_SEQ_ID_OFS_SHIFT			16
198 #define TS_SEQ_ID_OFS_MASK			0x3f
199 
200 /* Px_TS_CTL_LTYPE2 register fields */
201 #define TS_107					BIT(16)
202 #define TS_129					BIT(17)
203 #define TS_130					BIT(18)
204 #define TS_131					BIT(19)
205 #define TS_132					BIT(20)
206 #define TS_319					BIT(21)
207 #define TS_320					BIT(22)
208 #define TS_TTL_NONZERO				BIT(23)
209 #define TS_UNI_EN				BIT(24)
210 #define TS_UNI_EN_SHIFT				24
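/* TS_107 and TS_129..TS_132 match the PTP multicast addresses 224.0.0.107
 * and 224.0.1.129 - 224.0.1.132, while TS_319/TS_320 match UDP ports
 * 319 (event) and 320 (general messages).
 */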
211 
212 #define TS_TX_ANX_ALL_EN	 \
213 	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
214 
215 #define TS_RX_ANX_ALL_EN	 \
216 	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
217 
218 #define TS_CTL_DST_PORT				TS_319
219 #define TS_CTL_DST_PORT_SHIFT			21
220 
221 #define TS_CTL_MADDR_ALL	\
222 	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
223 
224 #define TS_CTL_MADDR_SHIFT			16
225 
226 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
227 #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
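/* These are IEEE 1588 messageType IDs 0-3, i.e. mask 0xf, suitable for the
 * TS_MSG_TYPE_EN field of Px_TS_CTL.
 */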
228 #endif /* CONFIG_TI_CPTS */
229 
230 struct xgbe_ss_regs {
231 	u32	id_ver;
232 	u32	synce_count;
233 	u32	synce_mux;
234 	u32	control;
235 };
236 
237 struct xgbe_switch_regs {
238 	u32	id_ver;
239 	u32	control;
240 	u32	emcontrol;
241 	u32	stat_port_en;
242 	u32	ptype;
243 	u32	soft_idle;
244 	u32	thru_rate;
245 	u32	gap_thresh;
246 	u32	tx_start_wds;
247 	u32	flow_control;
248 	u32	cppi_thresh;
249 };
250 
251 struct xgbe_port_regs {
252 	u32	blk_cnt;
253 	u32	port_vlan;
254 	u32	tx_pri_map;
255 	u32	sa_lo;
256 	u32	sa_hi;
257 	u32	ts_ctl;
258 	u32	ts_seq_ltype;
259 	u32	ts_vlan;
260 	u32	ts_ctl_ltype2;
261 	u32	ts_ctl2;
262 	u32	control;
263 };
264 
265 struct xgbe_host_port_regs {
266 	u32	blk_cnt;
267 	u32	port_vlan;
268 	u32	tx_pri_map;
269 	u32	src_id;
270 	u32	rx_pri_map;
271 	u32	rx_maxlen;
272 };
273 
274 struct xgbe_emac_regs {
275 	u32	id_ver;
276 	u32	mac_control;
277 	u32	mac_status;
278 	u32	soft_reset;
279 	u32	rx_maxlen;
280 	u32	__reserved_0;
281 	u32	rx_pause;
282 	u32	tx_pause;
283 	u32	em_control;
284 	u32	__reserved_1;
285 	u32	tx_gap;
286 	u32	rsvd[4];
287 };
288 
289 struct xgbe_host_hw_stats {
290 	u32	rx_good_frames;
291 	u32	rx_broadcast_frames;
292 	u32	rx_multicast_frames;
293 	u32	__rsvd_0[3];
294 	u32	rx_oversized_frames;
295 	u32	__rsvd_1;
296 	u32	rx_undersized_frames;
297 	u32	__rsvd_2;
298 	u32	overrun_type4;
299 	u32	overrun_type5;
300 	u32	rx_bytes;
301 	u32	tx_good_frames;
302 	u32	tx_broadcast_frames;
303 	u32	tx_multicast_frames;
304 	u32	__rsvd_3[9];
305 	u32	tx_bytes;
306 	u32	tx_64byte_frames;
307 	u32	tx_65_to_127byte_frames;
308 	u32	tx_128_to_255byte_frames;
309 	u32	tx_256_to_511byte_frames;
310 	u32	tx_512_to_1023byte_frames;
311 	u32	tx_1024byte_frames;
312 	u32	net_bytes;
313 	u32	rx_sof_overruns;
314 	u32	rx_mof_overruns;
315 	u32	rx_dma_overruns;
316 };
317 
318 struct xgbe_hw_stats {
319 	u32	rx_good_frames;
320 	u32	rx_broadcast_frames;
321 	u32	rx_multicast_frames;
322 	u32	rx_pause_frames;
323 	u32	rx_crc_errors;
324 	u32	rx_align_code_errors;
325 	u32	rx_oversized_frames;
326 	u32	rx_jabber_frames;
327 	u32	rx_undersized_frames;
328 	u32	rx_fragments;
329 	u32	overrun_type4;
330 	u32	overrun_type5;
331 	u32	rx_bytes;
332 	u32	tx_good_frames;
333 	u32	tx_broadcast_frames;
334 	u32	tx_multicast_frames;
335 	u32	tx_pause_frames;
336 	u32	tx_deferred_frames;
337 	u32	tx_collision_frames;
338 	u32	tx_single_coll_frames;
339 	u32	tx_mult_coll_frames;
340 	u32	tx_excessive_collisions;
341 	u32	tx_late_collisions;
342 	u32	tx_underrun;
343 	u32	tx_carrier_sense_errors;
344 	u32	tx_bytes;
345 	u32	tx_64byte_frames;
346 	u32	tx_65_to_127byte_frames;
347 	u32	tx_128_to_255byte_frames;
348 	u32	tx_256_to_511byte_frames;
349 	u32	tx_512_to_1023byte_frames;
350 	u32	tx_1024byte_frames;
351 	u32	net_bytes;
352 	u32	rx_sof_overruns;
353 	u32	rx_mof_overruns;
354 	u32	rx_dma_overruns;
355 };
356 
357 struct gbenu_ss_regs {
358 	u32	id_ver;
359 	u32	synce_count;		/* NU */
360 	u32	synce_mux;		/* NU */
361 	u32	control;		/* 2U */
362 	u32	__rsvd_0[2];		/* 2U */
363 	u32	rgmii_status;		/* 2U */
364 	u32	ss_status;		/* 2U */
365 };
366 
367 struct gbenu_switch_regs {
368 	u32	id_ver;
369 	u32	control;
370 	u32	__rsvd_0[2];
371 	u32	emcontrol;
372 	u32	stat_port_en;
373 	u32	ptype;			/* NU */
374 	u32	soft_idle;
375 	u32	thru_rate;		/* NU */
376 	u32	gap_thresh;		/* NU */
377 	u32	tx_start_wds;		/* NU */
378 	u32	eee_prescale;		/* 2U */
379 	u32	tx_g_oflow_thresh_set;	/* NU */
380 	u32	tx_g_oflow_thresh_clr;	/* NU */
381 	u32	tx_g_buf_thresh_set_l;	/* NU */
382 	u32	tx_g_buf_thresh_set_h;	/* NU */
383 	u32	tx_g_buf_thresh_clr_l;	/* NU */
384 	u32	tx_g_buf_thresh_clr_h;	/* NU */
385 };
386 
387 struct gbenu_port_regs {
388 	u32	__rsvd_0;
389 	u32	control;
390 	u32	max_blks;		/* 2U */
391 	u32	mem_align1;
392 	u32	blk_cnt;
393 	u32	port_vlan;
394 	u32	tx_pri_map;		/* NU */
395 	u32	pri_ctl;		/* 2U */
396 	u32	rx_pri_map;
397 	u32	rx_maxlen;
398 	u32	tx_blks_pri;		/* NU */
399 	u32	__rsvd_1;
400 	u32	idle2lpi;		/* 2U */
401 	u32	lpi2idle;		/* 2U */
402 	u32	eee_status;		/* 2U */
403 	u32	__rsvd_2;
404 	u32	__rsvd_3[176];		/* NU: more to add */
405 	u32	__rsvd_4[2];
406 	u32	sa_lo;
407 	u32	sa_hi;
408 	u32	ts_ctl;
409 	u32	ts_seq_ltype;
410 	u32	ts_vlan;
411 	u32	ts_ctl_ltype2;
412 	u32	ts_ctl2;
413 };
414 
415 struct gbenu_host_port_regs {
416 	u32	__rsvd_0;
417 	u32	control;
418 	u32	flow_id_offset;		/* 2U */
419 	u32	__rsvd_1;
420 	u32	blk_cnt;
421 	u32	port_vlan;
422 	u32	tx_pri_map;		/* NU */
423 	u32	pri_ctl;
424 	u32	rx_pri_map;
425 	u32	rx_maxlen;
426 	u32	tx_blks_pri;		/* NU */
427 	u32	__rsvd_2;
428 	u32	idle2lpi;		/* 2U */
429 	u32	lpi2wake;		/* 2U */
430 	u32	eee_status;		/* 2U */
431 	u32	__rsvd_3;
432 	u32	__rsvd_4[184];		/* NU */
433 	u32	host_blks_pri;		/* NU */
434 };
435 
436 struct gbenu_emac_regs {
437 	u32	mac_control;
438 	u32	mac_status;
439 	u32	soft_reset;
440 	u32	boff_test;
441 	u32	rx_pause;
442 	u32	__rsvd_0[11];		/* NU */
443 	u32	tx_pause;
444 	u32	__rsvd_1[11];		/* NU */
445 	u32	em_control;
446 	u32	tx_gap;
447 };
448 
449 /* Some hw stat regs are applicable to the slave ports only;
450  * this is handled through the gbenu_et_stats struct.  Also, some
451  * stats apply only to SS version NU and some only to 2U.
452  */
453 struct gbenu_hw_stats {
454 	u32	rx_good_frames;
455 	u32	rx_broadcast_frames;
456 	u32	rx_multicast_frames;
457 	u32	rx_pause_frames;		/* slave */
458 	u32	rx_crc_errors;
459 	u32	rx_align_code_errors;		/* slave */
460 	u32	rx_oversized_frames;
461 	u32	rx_jabber_frames;		/* slave */
462 	u32	rx_undersized_frames;
463 	u32	rx_fragments;			/* slave */
464 	u32	ale_drop;
465 	u32	ale_overrun_drop;
466 	u32	rx_bytes;
467 	u32	tx_good_frames;
468 	u32	tx_broadcast_frames;
469 	u32	tx_multicast_frames;
470 	u32	tx_pause_frames;		/* slave */
471 	u32	tx_deferred_frames;		/* slave */
472 	u32	tx_collision_frames;		/* slave */
473 	u32	tx_single_coll_frames;		/* slave */
474 	u32	tx_mult_coll_frames;		/* slave */
475 	u32	tx_excessive_collisions;	/* slave */
476 	u32	tx_late_collisions;		/* slave */
477 	u32	rx_ipg_error;			/* slave 10G only */
478 	u32	tx_carrier_sense_errors;	/* slave */
479 	u32	tx_bytes;
480 	u32	tx_64B_frames;
481 	u32	tx_65_to_127B_frames;
482 	u32	tx_128_to_255B_frames;
483 	u32	tx_256_to_511B_frames;
484 	u32	tx_512_to_1023B_frames;
485 	u32	tx_1024B_frames;
486 	u32	net_bytes;
487 	u32	rx_bottom_fifo_drop;
488 	u32	rx_port_mask_drop;
489 	u32	rx_top_fifo_drop;
490 	u32	ale_rate_limit_drop;
491 	u32	ale_vid_ingress_drop;
492 	u32	ale_da_eq_sa_drop;
493 	u32	__rsvd_0[3];
494 	u32	ale_unknown_ucast;
495 	u32	ale_unknown_ucast_bytes;
496 	u32	ale_unknown_mcast;
497 	u32	ale_unknown_mcast_bytes;
498 	u32	ale_unknown_bcast;
499 	u32	ale_unknown_bcast_bytes;
500 	u32	ale_pol_match;
501 	u32	ale_pol_match_red;		/* NU */
502 	u32	ale_pol_match_yellow;		/* NU */
503 	u32	__rsvd_1[44];
504 	u32	tx_mem_protect_err;
505 	/* following NU only */
506 	u32	tx_pri0;
507 	u32	tx_pri1;
508 	u32	tx_pri2;
509 	u32	tx_pri3;
510 	u32	tx_pri4;
511 	u32	tx_pri5;
512 	u32	tx_pri6;
513 	u32	tx_pri7;
514 	u32	tx_pri0_bcnt;
515 	u32	tx_pri1_bcnt;
516 	u32	tx_pri2_bcnt;
517 	u32	tx_pri3_bcnt;
518 	u32	tx_pri4_bcnt;
519 	u32	tx_pri5_bcnt;
520 	u32	tx_pri6_bcnt;
521 	u32	tx_pri7_bcnt;
522 	u32	tx_pri0_drop;
523 	u32	tx_pri1_drop;
524 	u32	tx_pri2_drop;
525 	u32	tx_pri3_drop;
526 	u32	tx_pri4_drop;
527 	u32	tx_pri5_drop;
528 	u32	tx_pri6_drop;
529 	u32	tx_pri7_drop;
530 	u32	tx_pri0_drop_bcnt;
531 	u32	tx_pri1_drop_bcnt;
532 	u32	tx_pri2_drop_bcnt;
533 	u32	tx_pri3_drop_bcnt;
534 	u32	tx_pri4_drop_bcnt;
535 	u32	tx_pri5_drop_bcnt;
536 	u32	tx_pri6_drop_bcnt;
537 	u32	tx_pri7_drop_bcnt;
538 };
539 
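/* struct gbenu_hw_stats above covers 128 u32 counters, i.e. exactly one
 * 0x200-byte per-module stats register map as defined below.
 */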
540 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
541 
542 struct gbe_ss_regs {
543 	u32	id_ver;
544 	u32	synce_count;
545 	u32	synce_mux;
546 };
547 
548 struct gbe_ss_regs_ofs {
549 	u16	id_ver;
550 	u16	control;
551 };
552 
553 struct gbe_switch_regs {
554 	u32	id_ver;
555 	u32	control;
556 	u32	soft_reset;
557 	u32	stat_port_en;
558 	u32	ptype;
559 	u32	soft_idle;
560 	u32	thru_rate;
561 	u32	gap_thresh;
562 	u32	tx_start_wds;
563 	u32	flow_control;
564 };
565 
566 struct gbe_switch_regs_ofs {
567 	u16	id_ver;
568 	u16	control;
569 	u16	soft_reset;
570 	u16	emcontrol;
571 	u16	stat_port_en;
572 	u16	ptype;
573 	u16	flow_control;
574 };
575 
576 struct gbe_port_regs {
577 	u32	max_blks;
578 	u32	blk_cnt;
579 	u32	port_vlan;
580 	u32	tx_pri_map;
581 	u32	sa_lo;
582 	u32	sa_hi;
583 	u32	ts_ctl;
584 	u32	ts_seq_ltype;
585 	u32	ts_vlan;
586 	u32	ts_ctl_ltype2;
587 	u32	ts_ctl2;
588 };
589 
590 struct gbe_port_regs_ofs {
591 	u16	port_vlan;
592 	u16	tx_pri_map;
593 	u16	sa_lo;
594 	u16	sa_hi;
595 	u16	ts_ctl;
596 	u16	ts_seq_ltype;
597 	u16	ts_vlan;
598 	u16	ts_ctl_ltype2;
599 	u16	ts_ctl2;
600 	u16	rx_maxlen;	/* 2U, NU */
601 };
602 
603 struct gbe_host_port_regs {
604 	u32	src_id;
605 	u32	port_vlan;
606 	u32	rx_pri_map;
607 	u32	rx_maxlen;
608 };
609 
610 struct gbe_host_port_regs_ofs {
611 	u16	port_vlan;
612 	u16	tx_pri_map;
613 	u16	rx_maxlen;
614 };
615 
616 struct gbe_emac_regs {
617 	u32	id_ver;
618 	u32	mac_control;
619 	u32	mac_status;
620 	u32	soft_reset;
621 	u32	rx_maxlen;
622 	u32	__reserved_0;
623 	u32	rx_pause;
624 	u32	tx_pause;
625 	u32	__reserved_1;
626 	u32	rx_pri_map;
627 	u32	rsvd[6];
628 };
629 
630 struct gbe_emac_regs_ofs {
631 	u16	mac_control;
632 	u16	soft_reset;
633 	u16	rx_maxlen;
634 };
635 
636 struct gbe_hw_stats {
637 	u32	rx_good_frames;
638 	u32	rx_broadcast_frames;
639 	u32	rx_multicast_frames;
640 	u32	rx_pause_frames;
641 	u32	rx_crc_errors;
642 	u32	rx_align_code_errors;
643 	u32	rx_oversized_frames;
644 	u32	rx_jabber_frames;
645 	u32	rx_undersized_frames;
646 	u32	rx_fragments;
647 	u32	__pad_0[2];
648 	u32	rx_bytes;
649 	u32	tx_good_frames;
650 	u32	tx_broadcast_frames;
651 	u32	tx_multicast_frames;
652 	u32	tx_pause_frames;
653 	u32	tx_deferred_frames;
654 	u32	tx_collision_frames;
655 	u32	tx_single_coll_frames;
656 	u32	tx_mult_coll_frames;
657 	u32	tx_excessive_collisions;
658 	u32	tx_late_collisions;
659 	u32	tx_underrun;
660 	u32	tx_carrier_sense_errors;
661 	u32	tx_bytes;
662 	u32	tx_64byte_frames;
663 	u32	tx_65_to_127byte_frames;
664 	u32	tx_128_to_255byte_frames;
665 	u32	tx_256_to_511byte_frames;
666 	u32	tx_512_to_1023byte_frames;
667 	u32	tx_1024byte_frames;
668 	u32	net_bytes;
669 	u32	rx_sof_overruns;
670 	u32	rx_mof_overruns;
671 	u32	rx_dma_overruns;
672 };
673 
674 #define GBE_MAX_HW_STAT_MODS			9
675 #define GBE_HW_STATS_REG_MAP_SZ			0x100
676 
677 struct ts_ctl {
678 	int     uni;
679 	u8      dst_port_map;
680 	u8      maddr_map;
681 	u8      ts_mcast_type;
682 };
683 
684 struct gbe_slave {
685 	void __iomem			*port_regs;
686 	void __iomem			*emac_regs;
687 	struct gbe_port_regs_ofs	port_regs_ofs;
688 	struct gbe_emac_regs_ofs	emac_regs_ofs;
689 	int				slave_num; /* 0 based logical number */
690 	int				port_num;  /* actual port number */
691 	atomic_t			link_state;
692 	bool				open;
693 	struct phy_device		*phy;
694 	u32				link_interface;
695 	u32				mac_control;
696 	u8				phy_port_t;
697 	struct device_node		*phy_node;
698 	struct ts_ctl                   ts_ctl;
699 	struct list_head		slave_list;
700 };
701 
702 struct gbe_priv {
703 	struct device			*dev;
704 	struct netcp_device		*netcp_device;
705 	struct timer_list		timer;
706 	u32				num_slaves;
707 	u32				ale_entries;
708 	u32				ale_ports;
709 	bool				enable_ale;
710 	u8				max_num_slaves;
711 	u8				max_num_ports; /* max_num_slaves + 1 */
712 	u8				num_stats_mods;
713 	struct netcp_tx_pipe		tx_pipe;
714 
715 	int				host_port;
716 	u32				rx_packet_max;
717 	u32				ss_version;
718 	u32				stats_en_mask;
719 
720 	void __iomem			*ss_regs;
721 	void __iomem			*switch_regs;
722 	void __iomem			*host_port_regs;
723 	void __iomem			*ale_reg;
724 	void __iomem                    *cpts_reg;
725 	void __iomem			*sgmii_port_regs;
726 	void __iomem			*sgmii_port34_regs;
727 	void __iomem			*xgbe_serdes_regs;
728 	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
729 
730 	struct gbe_ss_regs_ofs		ss_regs_ofs;
731 	struct gbe_switch_regs_ofs	switch_regs_ofs;
732 	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
733 
734 	struct cpsw_ale			*ale;
735 	unsigned int			tx_queue_id;
736 	const char			*dma_chan_name;
737 
738 	struct list_head		gbe_intf_head;
739 	struct list_head		secondary_slaves;
740 	struct net_device		*dummy_ndev;
741 
742 	u64				*hw_stats;
743 	u32				*hw_stats_prev;
744 	const struct netcp_ethtool_stat *et_stats;
745 	int				num_et_stats;
746 	/*  Lock for updating the hwstats */
747 	spinlock_t			hw_stats_lock;
748 
749 	int                             cpts_registered;
750 	struct cpts                     *cpts;
751 };
752 
753 struct gbe_intf {
754 	struct net_device	*ndev;
755 	struct device		*dev;
756 	struct gbe_priv		*gbe_dev;
757 	struct netcp_tx_pipe	tx_pipe;
758 	struct gbe_slave	*slave;
759 	struct list_head	gbe_intf_list;
760 	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
761 };
762 
763 static struct netcp_module gbe_module;
764 static struct netcp_module xgbe_module;
765 
766 /* Statistic management */
767 struct netcp_ethtool_stat {
768 	char desc[ETH_GSTRING_LEN];
769 	int type;
770 	u32 size;
771 	int offset;
772 };
773 
774 #define GBE_STATSA_INFO(field)						\
775 {									\
776 	"GBE_A:"#field, GBE_STATSA_MODULE,				\
777 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
778 	offsetof(struct gbe_hw_stats, field)				\
779 }
780 
781 #define GBE_STATSB_INFO(field)						\
782 {									\
783 	"GBE_B:"#field, GBE_STATSB_MODULE,				\
784 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
785 	offsetof(struct gbe_hw_stats, field)				\
786 }
787 
788 #define GBE_STATSC_INFO(field)						\
789 {									\
790 	"GBE_C:"#field, GBE_STATSC_MODULE,				\
791 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
792 	offsetof(struct gbe_hw_stats, field)				\
793 }
794 
795 #define GBE_STATSD_INFO(field)						\
796 {									\
797 	"GBE_D:"#field, GBE_STATSD_MODULE,				\
798 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
799 	offsetof(struct gbe_hw_stats, field)				\
800 }
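/* Each entry records the ethtool string, the stats module the counter
 * lives in (netcp_ethtool_stat.type) and the counter's size and offset
 * within that module's hw stats block.  For example,
 * GBE_STATSA_INFO(rx_crc_errors) expands to
 *
 *	{ "GBE_A:rx_crc_errors", GBE_STATSA_MODULE, 4, 0x10 }
 */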
801 
802 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
803 	/* GBE module A */
804 	GBE_STATSA_INFO(rx_good_frames),
805 	GBE_STATSA_INFO(rx_broadcast_frames),
806 	GBE_STATSA_INFO(rx_multicast_frames),
807 	GBE_STATSA_INFO(rx_pause_frames),
808 	GBE_STATSA_INFO(rx_crc_errors),
809 	GBE_STATSA_INFO(rx_align_code_errors),
810 	GBE_STATSA_INFO(rx_oversized_frames),
811 	GBE_STATSA_INFO(rx_jabber_frames),
812 	GBE_STATSA_INFO(rx_undersized_frames),
813 	GBE_STATSA_INFO(rx_fragments),
814 	GBE_STATSA_INFO(rx_bytes),
815 	GBE_STATSA_INFO(tx_good_frames),
816 	GBE_STATSA_INFO(tx_broadcast_frames),
817 	GBE_STATSA_INFO(tx_multicast_frames),
818 	GBE_STATSA_INFO(tx_pause_frames),
819 	GBE_STATSA_INFO(tx_deferred_frames),
820 	GBE_STATSA_INFO(tx_collision_frames),
821 	GBE_STATSA_INFO(tx_single_coll_frames),
822 	GBE_STATSA_INFO(tx_mult_coll_frames),
823 	GBE_STATSA_INFO(tx_excessive_collisions),
824 	GBE_STATSA_INFO(tx_late_collisions),
825 	GBE_STATSA_INFO(tx_underrun),
826 	GBE_STATSA_INFO(tx_carrier_sense_errors),
827 	GBE_STATSA_INFO(tx_bytes),
828 	GBE_STATSA_INFO(tx_64byte_frames),
829 	GBE_STATSA_INFO(tx_65_to_127byte_frames),
830 	GBE_STATSA_INFO(tx_128_to_255byte_frames),
831 	GBE_STATSA_INFO(tx_256_to_511byte_frames),
832 	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
833 	GBE_STATSA_INFO(tx_1024byte_frames),
834 	GBE_STATSA_INFO(net_bytes),
835 	GBE_STATSA_INFO(rx_sof_overruns),
836 	GBE_STATSA_INFO(rx_mof_overruns),
837 	GBE_STATSA_INFO(rx_dma_overruns),
838 	/* GBE module B */
839 	GBE_STATSB_INFO(rx_good_frames),
840 	GBE_STATSB_INFO(rx_broadcast_frames),
841 	GBE_STATSB_INFO(rx_multicast_frames),
842 	GBE_STATSB_INFO(rx_pause_frames),
843 	GBE_STATSB_INFO(rx_crc_errors),
844 	GBE_STATSB_INFO(rx_align_code_errors),
845 	GBE_STATSB_INFO(rx_oversized_frames),
846 	GBE_STATSB_INFO(rx_jabber_frames),
847 	GBE_STATSB_INFO(rx_undersized_frames),
848 	GBE_STATSB_INFO(rx_fragments),
849 	GBE_STATSB_INFO(rx_bytes),
850 	GBE_STATSB_INFO(tx_good_frames),
851 	GBE_STATSB_INFO(tx_broadcast_frames),
852 	GBE_STATSB_INFO(tx_multicast_frames),
853 	GBE_STATSB_INFO(tx_pause_frames),
854 	GBE_STATSB_INFO(tx_deferred_frames),
855 	GBE_STATSB_INFO(tx_collision_frames),
856 	GBE_STATSB_INFO(tx_single_coll_frames),
857 	GBE_STATSB_INFO(tx_mult_coll_frames),
858 	GBE_STATSB_INFO(tx_excessive_collisions),
859 	GBE_STATSB_INFO(tx_late_collisions),
860 	GBE_STATSB_INFO(tx_underrun),
861 	GBE_STATSB_INFO(tx_carrier_sense_errors),
862 	GBE_STATSB_INFO(tx_bytes),
863 	GBE_STATSB_INFO(tx_64byte_frames),
864 	GBE_STATSB_INFO(tx_65_to_127byte_frames),
865 	GBE_STATSB_INFO(tx_128_to_255byte_frames),
866 	GBE_STATSB_INFO(tx_256_to_511byte_frames),
867 	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
868 	GBE_STATSB_INFO(tx_1024byte_frames),
869 	GBE_STATSB_INFO(net_bytes),
870 	GBE_STATSB_INFO(rx_sof_overruns),
871 	GBE_STATSB_INFO(rx_mof_overruns),
872 	GBE_STATSB_INFO(rx_dma_overruns),
873 	/* GBE module C */
874 	GBE_STATSC_INFO(rx_good_frames),
875 	GBE_STATSC_INFO(rx_broadcast_frames),
876 	GBE_STATSC_INFO(rx_multicast_frames),
877 	GBE_STATSC_INFO(rx_pause_frames),
878 	GBE_STATSC_INFO(rx_crc_errors),
879 	GBE_STATSC_INFO(rx_align_code_errors),
880 	GBE_STATSC_INFO(rx_oversized_frames),
881 	GBE_STATSC_INFO(rx_jabber_frames),
882 	GBE_STATSC_INFO(rx_undersized_frames),
883 	GBE_STATSC_INFO(rx_fragments),
884 	GBE_STATSC_INFO(rx_bytes),
885 	GBE_STATSC_INFO(tx_good_frames),
886 	GBE_STATSC_INFO(tx_broadcast_frames),
887 	GBE_STATSC_INFO(tx_multicast_frames),
888 	GBE_STATSC_INFO(tx_pause_frames),
889 	GBE_STATSC_INFO(tx_deferred_frames),
890 	GBE_STATSC_INFO(tx_collision_frames),
891 	GBE_STATSC_INFO(tx_single_coll_frames),
892 	GBE_STATSC_INFO(tx_mult_coll_frames),
893 	GBE_STATSC_INFO(tx_excessive_collisions),
894 	GBE_STATSC_INFO(tx_late_collisions),
895 	GBE_STATSC_INFO(tx_underrun),
896 	GBE_STATSC_INFO(tx_carrier_sense_errors),
897 	GBE_STATSC_INFO(tx_bytes),
898 	GBE_STATSC_INFO(tx_64byte_frames),
899 	GBE_STATSC_INFO(tx_65_to_127byte_frames),
900 	GBE_STATSC_INFO(tx_128_to_255byte_frames),
901 	GBE_STATSC_INFO(tx_256_to_511byte_frames),
902 	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
903 	GBE_STATSC_INFO(tx_1024byte_frames),
904 	GBE_STATSC_INFO(net_bytes),
905 	GBE_STATSC_INFO(rx_sof_overruns),
906 	GBE_STATSC_INFO(rx_mof_overruns),
907 	GBE_STATSC_INFO(rx_dma_overruns),
908 	/* GBE module D */
909 	GBE_STATSD_INFO(rx_good_frames),
910 	GBE_STATSD_INFO(rx_broadcast_frames),
911 	GBE_STATSD_INFO(rx_multicast_frames),
912 	GBE_STATSD_INFO(rx_pause_frames),
913 	GBE_STATSD_INFO(rx_crc_errors),
914 	GBE_STATSD_INFO(rx_align_code_errors),
915 	GBE_STATSD_INFO(rx_oversized_frames),
916 	GBE_STATSD_INFO(rx_jabber_frames),
917 	GBE_STATSD_INFO(rx_undersized_frames),
918 	GBE_STATSD_INFO(rx_fragments),
919 	GBE_STATSD_INFO(rx_bytes),
920 	GBE_STATSD_INFO(tx_good_frames),
921 	GBE_STATSD_INFO(tx_broadcast_frames),
922 	GBE_STATSD_INFO(tx_multicast_frames),
923 	GBE_STATSD_INFO(tx_pause_frames),
924 	GBE_STATSD_INFO(tx_deferred_frames),
925 	GBE_STATSD_INFO(tx_collision_frames),
926 	GBE_STATSD_INFO(tx_single_coll_frames),
927 	GBE_STATSD_INFO(tx_mult_coll_frames),
928 	GBE_STATSD_INFO(tx_excessive_collisions),
929 	GBE_STATSD_INFO(tx_late_collisions),
930 	GBE_STATSD_INFO(tx_underrun),
931 	GBE_STATSD_INFO(tx_carrier_sense_errors),
932 	GBE_STATSD_INFO(tx_bytes),
933 	GBE_STATSD_INFO(tx_64byte_frames),
934 	GBE_STATSD_INFO(tx_65_to_127byte_frames),
935 	GBE_STATSD_INFO(tx_128_to_255byte_frames),
936 	GBE_STATSD_INFO(tx_256_to_511byte_frames),
937 	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
938 	GBE_STATSD_INFO(tx_1024byte_frames),
939 	GBE_STATSD_INFO(net_bytes),
940 	GBE_STATSD_INFO(rx_sof_overruns),
941 	GBE_STATSD_INFO(rx_mof_overruns),
942 	GBE_STATSD_INFO(rx_dma_overruns),
943 };
944 
945 /* Number of GBENU_STATS_HOST() entries in gbenu_et_stats[] below */
946 #define GBENU_ET_STATS_HOST_SIZE	52
947 
948 #define GBENU_STATS_HOST(field)					\
949 {								\
950 	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
951 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
952 	offsetof(struct gbenu_hw_stats, field)			\
953 }
954 
955 /* Number of GBENU_STATS_Px() entries per slave port in gbenu_et_stats[] below */
956 #define GBENU_ET_STATS_PORT_SIZE	65
957 
958 #define GBENU_STATS_P1(field)					\
959 {								\
960 	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
961 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
962 	offsetof(struct gbenu_hw_stats, field)			\
963 }
964 
965 #define GBENU_STATS_P2(field)					\
966 {								\
967 	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
968 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
969 	offsetof(struct gbenu_hw_stats, field)			\
970 }
971 
972 #define GBENU_STATS_P3(field)					\
973 {								\
974 	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
975 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
976 	offsetof(struct gbenu_hw_stats, field)			\
977 }
978 
979 #define GBENU_STATS_P4(field)					\
980 {								\
981 	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
982 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
983 	offsetof(struct gbenu_hw_stats, field)			\
984 }
985 
986 #define GBENU_STATS_P5(field)					\
987 {								\
988 	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
989 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
990 	offsetof(struct gbenu_hw_stats, field)			\
991 }
992 
993 #define GBENU_STATS_P6(field)					\
994 {								\
995 	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
996 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
997 	offsetof(struct gbenu_hw_stats, field)			\
998 }
999 
1000 #define GBENU_STATS_P7(field)					\
1001 {								\
1002 	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
1003 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1004 	offsetof(struct gbenu_hw_stats, field)			\
1005 }
1006 
1007 #define GBENU_STATS_P8(field)					\
1008 {								\
1009 	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
1010 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1011 	offsetof(struct gbenu_hw_stats, field)			\
1012 }
1013 
1014 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1015 	/* GBENU Host Module */
1016 	GBENU_STATS_HOST(rx_good_frames),
1017 	GBENU_STATS_HOST(rx_broadcast_frames),
1018 	GBENU_STATS_HOST(rx_multicast_frames),
1019 	GBENU_STATS_HOST(rx_crc_errors),
1020 	GBENU_STATS_HOST(rx_oversized_frames),
1021 	GBENU_STATS_HOST(rx_undersized_frames),
1022 	GBENU_STATS_HOST(ale_drop),
1023 	GBENU_STATS_HOST(ale_overrun_drop),
1024 	GBENU_STATS_HOST(rx_bytes),
1025 	GBENU_STATS_HOST(tx_good_frames),
1026 	GBENU_STATS_HOST(tx_broadcast_frames),
1027 	GBENU_STATS_HOST(tx_multicast_frames),
1028 	GBENU_STATS_HOST(tx_bytes),
1029 	GBENU_STATS_HOST(tx_64B_frames),
1030 	GBENU_STATS_HOST(tx_65_to_127B_frames),
1031 	GBENU_STATS_HOST(tx_128_to_255B_frames),
1032 	GBENU_STATS_HOST(tx_256_to_511B_frames),
1033 	GBENU_STATS_HOST(tx_512_to_1023B_frames),
1034 	GBENU_STATS_HOST(tx_1024B_frames),
1035 	GBENU_STATS_HOST(net_bytes),
1036 	GBENU_STATS_HOST(rx_bottom_fifo_drop),
1037 	GBENU_STATS_HOST(rx_port_mask_drop),
1038 	GBENU_STATS_HOST(rx_top_fifo_drop),
1039 	GBENU_STATS_HOST(ale_rate_limit_drop),
1040 	GBENU_STATS_HOST(ale_vid_ingress_drop),
1041 	GBENU_STATS_HOST(ale_da_eq_sa_drop),
1042 	GBENU_STATS_HOST(ale_unknown_ucast),
1043 	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1044 	GBENU_STATS_HOST(ale_unknown_mcast),
1045 	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1046 	GBENU_STATS_HOST(ale_unknown_bcast),
1047 	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1048 	GBENU_STATS_HOST(ale_pol_match),
1049 	GBENU_STATS_HOST(ale_pol_match_red),
1050 	GBENU_STATS_HOST(ale_pol_match_yellow),
1051 	GBENU_STATS_HOST(tx_mem_protect_err),
1052 	GBENU_STATS_HOST(tx_pri0_drop),
1053 	GBENU_STATS_HOST(tx_pri1_drop),
1054 	GBENU_STATS_HOST(tx_pri2_drop),
1055 	GBENU_STATS_HOST(tx_pri3_drop),
1056 	GBENU_STATS_HOST(tx_pri4_drop),
1057 	GBENU_STATS_HOST(tx_pri5_drop),
1058 	GBENU_STATS_HOST(tx_pri6_drop),
1059 	GBENU_STATS_HOST(tx_pri7_drop),
1060 	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1061 	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1062 	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1063 	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1064 	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1065 	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1066 	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1067 	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1068 	/* GBENU Module 1 */
1069 	GBENU_STATS_P1(rx_good_frames),
1070 	GBENU_STATS_P1(rx_broadcast_frames),
1071 	GBENU_STATS_P1(rx_multicast_frames),
1072 	GBENU_STATS_P1(rx_pause_frames),
1073 	GBENU_STATS_P1(rx_crc_errors),
1074 	GBENU_STATS_P1(rx_align_code_errors),
1075 	GBENU_STATS_P1(rx_oversized_frames),
1076 	GBENU_STATS_P1(rx_jabber_frames),
1077 	GBENU_STATS_P1(rx_undersized_frames),
1078 	GBENU_STATS_P1(rx_fragments),
1079 	GBENU_STATS_P1(ale_drop),
1080 	GBENU_STATS_P1(ale_overrun_drop),
1081 	GBENU_STATS_P1(rx_bytes),
1082 	GBENU_STATS_P1(tx_good_frames),
1083 	GBENU_STATS_P1(tx_broadcast_frames),
1084 	GBENU_STATS_P1(tx_multicast_frames),
1085 	GBENU_STATS_P1(tx_pause_frames),
1086 	GBENU_STATS_P1(tx_deferred_frames),
1087 	GBENU_STATS_P1(tx_collision_frames),
1088 	GBENU_STATS_P1(tx_single_coll_frames),
1089 	GBENU_STATS_P1(tx_mult_coll_frames),
1090 	GBENU_STATS_P1(tx_excessive_collisions),
1091 	GBENU_STATS_P1(tx_late_collisions),
1092 	GBENU_STATS_P1(rx_ipg_error),
1093 	GBENU_STATS_P1(tx_carrier_sense_errors),
1094 	GBENU_STATS_P1(tx_bytes),
1095 	GBENU_STATS_P1(tx_64B_frames),
1096 	GBENU_STATS_P1(tx_65_to_127B_frames),
1097 	GBENU_STATS_P1(tx_128_to_255B_frames),
1098 	GBENU_STATS_P1(tx_256_to_511B_frames),
1099 	GBENU_STATS_P1(tx_512_to_1023B_frames),
1100 	GBENU_STATS_P1(tx_1024B_frames),
1101 	GBENU_STATS_P1(net_bytes),
1102 	GBENU_STATS_P1(rx_bottom_fifo_drop),
1103 	GBENU_STATS_P1(rx_port_mask_drop),
1104 	GBENU_STATS_P1(rx_top_fifo_drop),
1105 	GBENU_STATS_P1(ale_rate_limit_drop),
1106 	GBENU_STATS_P1(ale_vid_ingress_drop),
1107 	GBENU_STATS_P1(ale_da_eq_sa_drop),
1108 	GBENU_STATS_P1(ale_unknown_ucast),
1109 	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1110 	GBENU_STATS_P1(ale_unknown_mcast),
1111 	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1112 	GBENU_STATS_P1(ale_unknown_bcast),
1113 	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1114 	GBENU_STATS_P1(ale_pol_match),
1115 	GBENU_STATS_P1(ale_pol_match_red),
1116 	GBENU_STATS_P1(ale_pol_match_yellow),
1117 	GBENU_STATS_P1(tx_mem_protect_err),
1118 	GBENU_STATS_P1(tx_pri0_drop),
1119 	GBENU_STATS_P1(tx_pri1_drop),
1120 	GBENU_STATS_P1(tx_pri2_drop),
1121 	GBENU_STATS_P1(tx_pri3_drop),
1122 	GBENU_STATS_P1(tx_pri4_drop),
1123 	GBENU_STATS_P1(tx_pri5_drop),
1124 	GBENU_STATS_P1(tx_pri6_drop),
1125 	GBENU_STATS_P1(tx_pri7_drop),
1126 	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1127 	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1128 	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1129 	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1130 	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1131 	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1132 	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1133 	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1134 	/* GBENU Module 2 */
1135 	GBENU_STATS_P2(rx_good_frames),
1136 	GBENU_STATS_P2(rx_broadcast_frames),
1137 	GBENU_STATS_P2(rx_multicast_frames),
1138 	GBENU_STATS_P2(rx_pause_frames),
1139 	GBENU_STATS_P2(rx_crc_errors),
1140 	GBENU_STATS_P2(rx_align_code_errors),
1141 	GBENU_STATS_P2(rx_oversized_frames),
1142 	GBENU_STATS_P2(rx_jabber_frames),
1143 	GBENU_STATS_P2(rx_undersized_frames),
1144 	GBENU_STATS_P2(rx_fragments),
1145 	GBENU_STATS_P2(ale_drop),
1146 	GBENU_STATS_P2(ale_overrun_drop),
1147 	GBENU_STATS_P2(rx_bytes),
1148 	GBENU_STATS_P2(tx_good_frames),
1149 	GBENU_STATS_P2(tx_broadcast_frames),
1150 	GBENU_STATS_P2(tx_multicast_frames),
1151 	GBENU_STATS_P2(tx_pause_frames),
1152 	GBENU_STATS_P2(tx_deferred_frames),
1153 	GBENU_STATS_P2(tx_collision_frames),
1154 	GBENU_STATS_P2(tx_single_coll_frames),
1155 	GBENU_STATS_P2(tx_mult_coll_frames),
1156 	GBENU_STATS_P2(tx_excessive_collisions),
1157 	GBENU_STATS_P2(tx_late_collisions),
1158 	GBENU_STATS_P2(rx_ipg_error),
1159 	GBENU_STATS_P2(tx_carrier_sense_errors),
1160 	GBENU_STATS_P2(tx_bytes),
1161 	GBENU_STATS_P2(tx_64B_frames),
1162 	GBENU_STATS_P2(tx_65_to_127B_frames),
1163 	GBENU_STATS_P2(tx_128_to_255B_frames),
1164 	GBENU_STATS_P2(tx_256_to_511B_frames),
1165 	GBENU_STATS_P2(tx_512_to_1023B_frames),
1166 	GBENU_STATS_P2(tx_1024B_frames),
1167 	GBENU_STATS_P2(net_bytes),
1168 	GBENU_STATS_P2(rx_bottom_fifo_drop),
1169 	GBENU_STATS_P2(rx_port_mask_drop),
1170 	GBENU_STATS_P2(rx_top_fifo_drop),
1171 	GBENU_STATS_P2(ale_rate_limit_drop),
1172 	GBENU_STATS_P2(ale_vid_ingress_drop),
1173 	GBENU_STATS_P2(ale_da_eq_sa_drop),
1174 	GBENU_STATS_P2(ale_unknown_ucast),
1175 	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1176 	GBENU_STATS_P2(ale_unknown_mcast),
1177 	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1178 	GBENU_STATS_P2(ale_unknown_bcast),
1179 	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1180 	GBENU_STATS_P2(ale_pol_match),
1181 	GBENU_STATS_P2(ale_pol_match_red),
1182 	GBENU_STATS_P2(ale_pol_match_yellow),
1183 	GBENU_STATS_P2(tx_mem_protect_err),
1184 	GBENU_STATS_P2(tx_pri0_drop),
1185 	GBENU_STATS_P2(tx_pri1_drop),
1186 	GBENU_STATS_P2(tx_pri2_drop),
1187 	GBENU_STATS_P2(tx_pri3_drop),
1188 	GBENU_STATS_P2(tx_pri4_drop),
1189 	GBENU_STATS_P2(tx_pri5_drop),
1190 	GBENU_STATS_P2(tx_pri6_drop),
1191 	GBENU_STATS_P2(tx_pri7_drop),
1192 	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1193 	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1194 	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1195 	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1196 	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1197 	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1198 	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1199 	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1200 	/* GBENU Module 3 */
1201 	GBENU_STATS_P3(rx_good_frames),
1202 	GBENU_STATS_P3(rx_broadcast_frames),
1203 	GBENU_STATS_P3(rx_multicast_frames),
1204 	GBENU_STATS_P3(rx_pause_frames),
1205 	GBENU_STATS_P3(rx_crc_errors),
1206 	GBENU_STATS_P3(rx_align_code_errors),
1207 	GBENU_STATS_P3(rx_oversized_frames),
1208 	GBENU_STATS_P3(rx_jabber_frames),
1209 	GBENU_STATS_P3(rx_undersized_frames),
1210 	GBENU_STATS_P3(rx_fragments),
1211 	GBENU_STATS_P3(ale_drop),
1212 	GBENU_STATS_P3(ale_overrun_drop),
1213 	GBENU_STATS_P3(rx_bytes),
1214 	GBENU_STATS_P3(tx_good_frames),
1215 	GBENU_STATS_P3(tx_broadcast_frames),
1216 	GBENU_STATS_P3(tx_multicast_frames),
1217 	GBENU_STATS_P3(tx_pause_frames),
1218 	GBENU_STATS_P3(tx_deferred_frames),
1219 	GBENU_STATS_P3(tx_collision_frames),
1220 	GBENU_STATS_P3(tx_single_coll_frames),
1221 	GBENU_STATS_P3(tx_mult_coll_frames),
1222 	GBENU_STATS_P3(tx_excessive_collisions),
1223 	GBENU_STATS_P3(tx_late_collisions),
1224 	GBENU_STATS_P3(rx_ipg_error),
1225 	GBENU_STATS_P3(tx_carrier_sense_errors),
1226 	GBENU_STATS_P3(tx_bytes),
1227 	GBENU_STATS_P3(tx_64B_frames),
1228 	GBENU_STATS_P3(tx_65_to_127B_frames),
1229 	GBENU_STATS_P3(tx_128_to_255B_frames),
1230 	GBENU_STATS_P3(tx_256_to_511B_frames),
1231 	GBENU_STATS_P3(tx_512_to_1023B_frames),
1232 	GBENU_STATS_P3(tx_1024B_frames),
1233 	GBENU_STATS_P3(net_bytes),
1234 	GBENU_STATS_P3(rx_bottom_fifo_drop),
1235 	GBENU_STATS_P3(rx_port_mask_drop),
1236 	GBENU_STATS_P3(rx_top_fifo_drop),
1237 	GBENU_STATS_P3(ale_rate_limit_drop),
1238 	GBENU_STATS_P3(ale_vid_ingress_drop),
1239 	GBENU_STATS_P3(ale_da_eq_sa_drop),
1240 	GBENU_STATS_P3(ale_unknown_ucast),
1241 	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1242 	GBENU_STATS_P3(ale_unknown_mcast),
1243 	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1244 	GBENU_STATS_P3(ale_unknown_bcast),
1245 	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1246 	GBENU_STATS_P3(ale_pol_match),
1247 	GBENU_STATS_P3(ale_pol_match_red),
1248 	GBENU_STATS_P3(ale_pol_match_yellow),
1249 	GBENU_STATS_P3(tx_mem_protect_err),
1250 	GBENU_STATS_P3(tx_pri0_drop),
1251 	GBENU_STATS_P3(tx_pri1_drop),
1252 	GBENU_STATS_P3(tx_pri2_drop),
1253 	GBENU_STATS_P3(tx_pri3_drop),
1254 	GBENU_STATS_P3(tx_pri4_drop),
1255 	GBENU_STATS_P3(tx_pri5_drop),
1256 	GBENU_STATS_P3(tx_pri6_drop),
1257 	GBENU_STATS_P3(tx_pri7_drop),
1258 	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1259 	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1260 	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1261 	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1262 	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1263 	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1264 	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1265 	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1266 	/* GBENU Module 4 */
1267 	GBENU_STATS_P4(rx_good_frames),
1268 	GBENU_STATS_P4(rx_broadcast_frames),
1269 	GBENU_STATS_P4(rx_multicast_frames),
1270 	GBENU_STATS_P4(rx_pause_frames),
1271 	GBENU_STATS_P4(rx_crc_errors),
1272 	GBENU_STATS_P4(rx_align_code_errors),
1273 	GBENU_STATS_P4(rx_oversized_frames),
1274 	GBENU_STATS_P4(rx_jabber_frames),
1275 	GBENU_STATS_P4(rx_undersized_frames),
1276 	GBENU_STATS_P4(rx_fragments),
1277 	GBENU_STATS_P4(ale_drop),
1278 	GBENU_STATS_P4(ale_overrun_drop),
1279 	GBENU_STATS_P4(rx_bytes),
1280 	GBENU_STATS_P4(tx_good_frames),
1281 	GBENU_STATS_P4(tx_broadcast_frames),
1282 	GBENU_STATS_P4(tx_multicast_frames),
1283 	GBENU_STATS_P4(tx_pause_frames),
1284 	GBENU_STATS_P4(tx_deferred_frames),
1285 	GBENU_STATS_P4(tx_collision_frames),
1286 	GBENU_STATS_P4(tx_single_coll_frames),
1287 	GBENU_STATS_P4(tx_mult_coll_frames),
1288 	GBENU_STATS_P4(tx_excessive_collisions),
1289 	GBENU_STATS_P4(tx_late_collisions),
1290 	GBENU_STATS_P4(rx_ipg_error),
1291 	GBENU_STATS_P4(tx_carrier_sense_errors),
1292 	GBENU_STATS_P4(tx_bytes),
1293 	GBENU_STATS_P4(tx_64B_frames),
1294 	GBENU_STATS_P4(tx_65_to_127B_frames),
1295 	GBENU_STATS_P4(tx_128_to_255B_frames),
1296 	GBENU_STATS_P4(tx_256_to_511B_frames),
1297 	GBENU_STATS_P4(tx_512_to_1023B_frames),
1298 	GBENU_STATS_P4(tx_1024B_frames),
1299 	GBENU_STATS_P4(net_bytes),
1300 	GBENU_STATS_P4(rx_bottom_fifo_drop),
1301 	GBENU_STATS_P4(rx_port_mask_drop),
1302 	GBENU_STATS_P4(rx_top_fifo_drop),
1303 	GBENU_STATS_P4(ale_rate_limit_drop),
1304 	GBENU_STATS_P4(ale_vid_ingress_drop),
1305 	GBENU_STATS_P4(ale_da_eq_sa_drop),
1306 	GBENU_STATS_P4(ale_unknown_ucast),
1307 	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1308 	GBENU_STATS_P4(ale_unknown_mcast),
1309 	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1310 	GBENU_STATS_P4(ale_unknown_bcast),
1311 	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1312 	GBENU_STATS_P4(ale_pol_match),
1313 	GBENU_STATS_P4(ale_pol_match_red),
1314 	GBENU_STATS_P4(ale_pol_match_yellow),
1315 	GBENU_STATS_P4(tx_mem_protect_err),
1316 	GBENU_STATS_P4(tx_pri0_drop),
1317 	GBENU_STATS_P4(tx_pri1_drop),
1318 	GBENU_STATS_P4(tx_pri2_drop),
1319 	GBENU_STATS_P4(tx_pri3_drop),
1320 	GBENU_STATS_P4(tx_pri4_drop),
1321 	GBENU_STATS_P4(tx_pri5_drop),
1322 	GBENU_STATS_P4(tx_pri6_drop),
1323 	GBENU_STATS_P4(tx_pri7_drop),
1324 	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1325 	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1326 	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1327 	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1328 	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1329 	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1330 	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1331 	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1332 	/* GBENU Module 5 */
1333 	GBENU_STATS_P5(rx_good_frames),
1334 	GBENU_STATS_P5(rx_broadcast_frames),
1335 	GBENU_STATS_P5(rx_multicast_frames),
1336 	GBENU_STATS_P5(rx_pause_frames),
1337 	GBENU_STATS_P5(rx_crc_errors),
1338 	GBENU_STATS_P5(rx_align_code_errors),
1339 	GBENU_STATS_P5(rx_oversized_frames),
1340 	GBENU_STATS_P5(rx_jabber_frames),
1341 	GBENU_STATS_P5(rx_undersized_frames),
1342 	GBENU_STATS_P5(rx_fragments),
1343 	GBENU_STATS_P5(ale_drop),
1344 	GBENU_STATS_P5(ale_overrun_drop),
1345 	GBENU_STATS_P5(rx_bytes),
1346 	GBENU_STATS_P5(tx_good_frames),
1347 	GBENU_STATS_P5(tx_broadcast_frames),
1348 	GBENU_STATS_P5(tx_multicast_frames),
1349 	GBENU_STATS_P5(tx_pause_frames),
1350 	GBENU_STATS_P5(tx_deferred_frames),
1351 	GBENU_STATS_P5(tx_collision_frames),
1352 	GBENU_STATS_P5(tx_single_coll_frames),
1353 	GBENU_STATS_P5(tx_mult_coll_frames),
1354 	GBENU_STATS_P5(tx_excessive_collisions),
1355 	GBENU_STATS_P5(tx_late_collisions),
1356 	GBENU_STATS_P5(rx_ipg_error),
1357 	GBENU_STATS_P5(tx_carrier_sense_errors),
1358 	GBENU_STATS_P5(tx_bytes),
1359 	GBENU_STATS_P5(tx_64B_frames),
1360 	GBENU_STATS_P5(tx_65_to_127B_frames),
1361 	GBENU_STATS_P5(tx_128_to_255B_frames),
1362 	GBENU_STATS_P5(tx_256_to_511B_frames),
1363 	GBENU_STATS_P5(tx_512_to_1023B_frames),
1364 	GBENU_STATS_P5(tx_1024B_frames),
1365 	GBENU_STATS_P5(net_bytes),
1366 	GBENU_STATS_P5(rx_bottom_fifo_drop),
1367 	GBENU_STATS_P5(rx_port_mask_drop),
1368 	GBENU_STATS_P5(rx_top_fifo_drop),
1369 	GBENU_STATS_P5(ale_rate_limit_drop),
1370 	GBENU_STATS_P5(ale_vid_ingress_drop),
1371 	GBENU_STATS_P5(ale_da_eq_sa_drop),
1372 	GBENU_STATS_P5(ale_unknown_ucast),
1373 	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1374 	GBENU_STATS_P5(ale_unknown_mcast),
1375 	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1376 	GBENU_STATS_P5(ale_unknown_bcast),
1377 	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1378 	GBENU_STATS_P5(ale_pol_match),
1379 	GBENU_STATS_P5(ale_pol_match_red),
1380 	GBENU_STATS_P5(ale_pol_match_yellow),
1381 	GBENU_STATS_P5(tx_mem_protect_err),
1382 	GBENU_STATS_P5(tx_pri0_drop),
1383 	GBENU_STATS_P5(tx_pri1_drop),
1384 	GBENU_STATS_P5(tx_pri2_drop),
1385 	GBENU_STATS_P5(tx_pri3_drop),
1386 	GBENU_STATS_P5(tx_pri4_drop),
1387 	GBENU_STATS_P5(tx_pri5_drop),
1388 	GBENU_STATS_P5(tx_pri6_drop),
1389 	GBENU_STATS_P5(tx_pri7_drop),
1390 	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1391 	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1392 	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1393 	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1394 	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1395 	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1396 	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1397 	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1398 	/* GBENU Module 6 */
1399 	GBENU_STATS_P6(rx_good_frames),
1400 	GBENU_STATS_P6(rx_broadcast_frames),
1401 	GBENU_STATS_P6(rx_multicast_frames),
1402 	GBENU_STATS_P6(rx_pause_frames),
1403 	GBENU_STATS_P6(rx_crc_errors),
1404 	GBENU_STATS_P6(rx_align_code_errors),
1405 	GBENU_STATS_P6(rx_oversized_frames),
1406 	GBENU_STATS_P6(rx_jabber_frames),
1407 	GBENU_STATS_P6(rx_undersized_frames),
1408 	GBENU_STATS_P6(rx_fragments),
1409 	GBENU_STATS_P6(ale_drop),
1410 	GBENU_STATS_P6(ale_overrun_drop),
1411 	GBENU_STATS_P6(rx_bytes),
1412 	GBENU_STATS_P6(tx_good_frames),
1413 	GBENU_STATS_P6(tx_broadcast_frames),
1414 	GBENU_STATS_P6(tx_multicast_frames),
1415 	GBENU_STATS_P6(tx_pause_frames),
1416 	GBENU_STATS_P6(tx_deferred_frames),
1417 	GBENU_STATS_P6(tx_collision_frames),
1418 	GBENU_STATS_P6(tx_single_coll_frames),
1419 	GBENU_STATS_P6(tx_mult_coll_frames),
1420 	GBENU_STATS_P6(tx_excessive_collisions),
1421 	GBENU_STATS_P6(tx_late_collisions),
1422 	GBENU_STATS_P6(rx_ipg_error),
1423 	GBENU_STATS_P6(tx_carrier_sense_errors),
1424 	GBENU_STATS_P6(tx_bytes),
1425 	GBENU_STATS_P6(tx_64B_frames),
1426 	GBENU_STATS_P6(tx_65_to_127B_frames),
1427 	GBENU_STATS_P6(tx_128_to_255B_frames),
1428 	GBENU_STATS_P6(tx_256_to_511B_frames),
1429 	GBENU_STATS_P6(tx_512_to_1023B_frames),
1430 	GBENU_STATS_P6(tx_1024B_frames),
1431 	GBENU_STATS_P6(net_bytes),
1432 	GBENU_STATS_P6(rx_bottom_fifo_drop),
1433 	GBENU_STATS_P6(rx_port_mask_drop),
1434 	GBENU_STATS_P6(rx_top_fifo_drop),
1435 	GBENU_STATS_P6(ale_rate_limit_drop),
1436 	GBENU_STATS_P6(ale_vid_ingress_drop),
1437 	GBENU_STATS_P6(ale_da_eq_sa_drop),
1438 	GBENU_STATS_P6(ale_unknown_ucast),
1439 	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1440 	GBENU_STATS_P6(ale_unknown_mcast),
1441 	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1442 	GBENU_STATS_P6(ale_unknown_bcast),
1443 	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1444 	GBENU_STATS_P6(ale_pol_match),
1445 	GBENU_STATS_P6(ale_pol_match_red),
1446 	GBENU_STATS_P6(ale_pol_match_yellow),
1447 	GBENU_STATS_P6(tx_mem_protect_err),
1448 	GBENU_STATS_P6(tx_pri0_drop),
1449 	GBENU_STATS_P6(tx_pri1_drop),
1450 	GBENU_STATS_P6(tx_pri2_drop),
1451 	GBENU_STATS_P6(tx_pri3_drop),
1452 	GBENU_STATS_P6(tx_pri4_drop),
1453 	GBENU_STATS_P6(tx_pri5_drop),
1454 	GBENU_STATS_P6(tx_pri6_drop),
1455 	GBENU_STATS_P6(tx_pri7_drop),
1456 	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1457 	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1458 	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1459 	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1460 	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1461 	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1462 	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1463 	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1464 	/* GBENU Module 7 */
1465 	GBENU_STATS_P7(rx_good_frames),
1466 	GBENU_STATS_P7(rx_broadcast_frames),
1467 	GBENU_STATS_P7(rx_multicast_frames),
1468 	GBENU_STATS_P7(rx_pause_frames),
1469 	GBENU_STATS_P7(rx_crc_errors),
1470 	GBENU_STATS_P7(rx_align_code_errors),
1471 	GBENU_STATS_P7(rx_oversized_frames),
1472 	GBENU_STATS_P7(rx_jabber_frames),
1473 	GBENU_STATS_P7(rx_undersized_frames),
1474 	GBENU_STATS_P7(rx_fragments),
1475 	GBENU_STATS_P7(ale_drop),
1476 	GBENU_STATS_P7(ale_overrun_drop),
1477 	GBENU_STATS_P7(rx_bytes),
1478 	GBENU_STATS_P7(tx_good_frames),
1479 	GBENU_STATS_P7(tx_broadcast_frames),
1480 	GBENU_STATS_P7(tx_multicast_frames),
1481 	GBENU_STATS_P7(tx_pause_frames),
1482 	GBENU_STATS_P7(tx_deferred_frames),
1483 	GBENU_STATS_P7(tx_collision_frames),
1484 	GBENU_STATS_P7(tx_single_coll_frames),
1485 	GBENU_STATS_P7(tx_mult_coll_frames),
1486 	GBENU_STATS_P7(tx_excessive_collisions),
1487 	GBENU_STATS_P7(tx_late_collisions),
1488 	GBENU_STATS_P7(rx_ipg_error),
1489 	GBENU_STATS_P7(tx_carrier_sense_errors),
1490 	GBENU_STATS_P7(tx_bytes),
1491 	GBENU_STATS_P7(tx_64B_frames),
1492 	GBENU_STATS_P7(tx_65_to_127B_frames),
1493 	GBENU_STATS_P7(tx_128_to_255B_frames),
1494 	GBENU_STATS_P7(tx_256_to_511B_frames),
1495 	GBENU_STATS_P7(tx_512_to_1023B_frames),
1496 	GBENU_STATS_P7(tx_1024B_frames),
1497 	GBENU_STATS_P7(net_bytes),
1498 	GBENU_STATS_P7(rx_bottom_fifo_drop),
1499 	GBENU_STATS_P7(rx_port_mask_drop),
1500 	GBENU_STATS_P7(rx_top_fifo_drop),
1501 	GBENU_STATS_P7(ale_rate_limit_drop),
1502 	GBENU_STATS_P7(ale_vid_ingress_drop),
1503 	GBENU_STATS_P7(ale_da_eq_sa_drop),
1504 	GBENU_STATS_P7(ale_unknown_ucast),
1505 	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1506 	GBENU_STATS_P7(ale_unknown_mcast),
1507 	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1508 	GBENU_STATS_P7(ale_unknown_bcast),
1509 	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1510 	GBENU_STATS_P7(ale_pol_match),
1511 	GBENU_STATS_P7(ale_pol_match_red),
1512 	GBENU_STATS_P7(ale_pol_match_yellow),
1513 	GBENU_STATS_P7(tx_mem_protect_err),
1514 	GBENU_STATS_P7(tx_pri0_drop),
1515 	GBENU_STATS_P7(tx_pri1_drop),
1516 	GBENU_STATS_P7(tx_pri2_drop),
1517 	GBENU_STATS_P7(tx_pri3_drop),
1518 	GBENU_STATS_P7(tx_pri4_drop),
1519 	GBENU_STATS_P7(tx_pri5_drop),
1520 	GBENU_STATS_P7(tx_pri6_drop),
1521 	GBENU_STATS_P7(tx_pri7_drop),
1522 	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1523 	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1524 	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1525 	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1526 	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1527 	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1528 	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1529 	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1530 	/* GBENU Module 8 */
1531 	GBENU_STATS_P8(rx_good_frames),
1532 	GBENU_STATS_P8(rx_broadcast_frames),
1533 	GBENU_STATS_P8(rx_multicast_frames),
1534 	GBENU_STATS_P8(rx_pause_frames),
1535 	GBENU_STATS_P8(rx_crc_errors),
1536 	GBENU_STATS_P8(rx_align_code_errors),
1537 	GBENU_STATS_P8(rx_oversized_frames),
1538 	GBENU_STATS_P8(rx_jabber_frames),
1539 	GBENU_STATS_P8(rx_undersized_frames),
1540 	GBENU_STATS_P8(rx_fragments),
1541 	GBENU_STATS_P8(ale_drop),
1542 	GBENU_STATS_P8(ale_overrun_drop),
1543 	GBENU_STATS_P8(rx_bytes),
1544 	GBENU_STATS_P8(tx_good_frames),
1545 	GBENU_STATS_P8(tx_broadcast_frames),
1546 	GBENU_STATS_P8(tx_multicast_frames),
1547 	GBENU_STATS_P8(tx_pause_frames),
1548 	GBENU_STATS_P8(tx_deferred_frames),
1549 	GBENU_STATS_P8(tx_collision_frames),
1550 	GBENU_STATS_P8(tx_single_coll_frames),
1551 	GBENU_STATS_P8(tx_mult_coll_frames),
1552 	GBENU_STATS_P8(tx_excessive_collisions),
1553 	GBENU_STATS_P8(tx_late_collisions),
1554 	GBENU_STATS_P8(rx_ipg_error),
1555 	GBENU_STATS_P8(tx_carrier_sense_errors),
1556 	GBENU_STATS_P8(tx_bytes),
1557 	GBENU_STATS_P8(tx_64B_frames),
1558 	GBENU_STATS_P8(tx_65_to_127B_frames),
1559 	GBENU_STATS_P8(tx_128_to_255B_frames),
1560 	GBENU_STATS_P8(tx_256_to_511B_frames),
1561 	GBENU_STATS_P8(tx_512_to_1023B_frames),
1562 	GBENU_STATS_P8(tx_1024B_frames),
1563 	GBENU_STATS_P8(net_bytes),
1564 	GBENU_STATS_P8(rx_bottom_fifo_drop),
1565 	GBENU_STATS_P8(rx_port_mask_drop),
1566 	GBENU_STATS_P8(rx_top_fifo_drop),
1567 	GBENU_STATS_P8(ale_rate_limit_drop),
1568 	GBENU_STATS_P8(ale_vid_ingress_drop),
1569 	GBENU_STATS_P8(ale_da_eq_sa_drop),
1570 	GBENU_STATS_P8(ale_unknown_ucast),
1571 	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1572 	GBENU_STATS_P8(ale_unknown_mcast),
1573 	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1574 	GBENU_STATS_P8(ale_unknown_bcast),
1575 	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1576 	GBENU_STATS_P8(ale_pol_match),
1577 	GBENU_STATS_P8(ale_pol_match_red),
1578 	GBENU_STATS_P8(ale_pol_match_yellow),
1579 	GBENU_STATS_P8(tx_mem_protect_err),
1580 	GBENU_STATS_P8(tx_pri0_drop),
1581 	GBENU_STATS_P8(tx_pri1_drop),
1582 	GBENU_STATS_P8(tx_pri2_drop),
1583 	GBENU_STATS_P8(tx_pri3_drop),
1584 	GBENU_STATS_P8(tx_pri4_drop),
1585 	GBENU_STATS_P8(tx_pri5_drop),
1586 	GBENU_STATS_P8(tx_pri6_drop),
1587 	GBENU_STATS_P8(tx_pri7_drop),
1588 	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1589 	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1590 	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1591 	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1592 	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1593 	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1594 	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1595 	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1596 };
1597 
1598 #define XGBE_STATS0_INFO(field)				\
1599 {							\
1600 	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1601 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1602 	offsetof(struct xgbe_hw_stats, field)		\
1603 }
1604 
1605 #define XGBE_STATS1_INFO(field)				\
1606 {							\
1607 	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1608 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1609 	offsetof(struct xgbe_hw_stats, field)		\
1610 }
1611 
1612 #define XGBE_STATS2_INFO(field)				\
1613 {							\
1614 	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1615 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1616 	offsetof(struct xgbe_hw_stats, field)		\
1617 }
1618 
1619 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1620 	/* GBE module 0 */
1621 	XGBE_STATS0_INFO(rx_good_frames),
1622 	XGBE_STATS0_INFO(rx_broadcast_frames),
1623 	XGBE_STATS0_INFO(rx_multicast_frames),
1624 	XGBE_STATS0_INFO(rx_oversized_frames),
1625 	XGBE_STATS0_INFO(rx_undersized_frames),
1626 	XGBE_STATS0_INFO(overrun_type4),
1627 	XGBE_STATS0_INFO(overrun_type5),
1628 	XGBE_STATS0_INFO(rx_bytes),
1629 	XGBE_STATS0_INFO(tx_good_frames),
1630 	XGBE_STATS0_INFO(tx_broadcast_frames),
1631 	XGBE_STATS0_INFO(tx_multicast_frames),
1632 	XGBE_STATS0_INFO(tx_bytes),
1633 	XGBE_STATS0_INFO(tx_64byte_frames),
1634 	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1635 	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1636 	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1637 	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1638 	XGBE_STATS0_INFO(tx_1024byte_frames),
1639 	XGBE_STATS0_INFO(net_bytes),
1640 	XGBE_STATS0_INFO(rx_sof_overruns),
1641 	XGBE_STATS0_INFO(rx_mof_overruns),
1642 	XGBE_STATS0_INFO(rx_dma_overruns),
1643 	/* XGBE module 1 */
1644 	XGBE_STATS1_INFO(rx_good_frames),
1645 	XGBE_STATS1_INFO(rx_broadcast_frames),
1646 	XGBE_STATS1_INFO(rx_multicast_frames),
1647 	XGBE_STATS1_INFO(rx_pause_frames),
1648 	XGBE_STATS1_INFO(rx_crc_errors),
1649 	XGBE_STATS1_INFO(rx_align_code_errors),
1650 	XGBE_STATS1_INFO(rx_oversized_frames),
1651 	XGBE_STATS1_INFO(rx_jabber_frames),
1652 	XGBE_STATS1_INFO(rx_undersized_frames),
1653 	XGBE_STATS1_INFO(rx_fragments),
1654 	XGBE_STATS1_INFO(overrun_type4),
1655 	XGBE_STATS1_INFO(overrun_type5),
1656 	XGBE_STATS1_INFO(rx_bytes),
1657 	XGBE_STATS1_INFO(tx_good_frames),
1658 	XGBE_STATS1_INFO(tx_broadcast_frames),
1659 	XGBE_STATS1_INFO(tx_multicast_frames),
1660 	XGBE_STATS1_INFO(tx_pause_frames),
1661 	XGBE_STATS1_INFO(tx_deferred_frames),
1662 	XGBE_STATS1_INFO(tx_collision_frames),
1663 	XGBE_STATS1_INFO(tx_single_coll_frames),
1664 	XGBE_STATS1_INFO(tx_mult_coll_frames),
1665 	XGBE_STATS1_INFO(tx_excessive_collisions),
1666 	XGBE_STATS1_INFO(tx_late_collisions),
1667 	XGBE_STATS1_INFO(tx_underrun),
1668 	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1669 	XGBE_STATS1_INFO(tx_bytes),
1670 	XGBE_STATS1_INFO(tx_64byte_frames),
1671 	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1672 	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1673 	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1674 	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1675 	XGBE_STATS1_INFO(tx_1024byte_frames),
1676 	XGBE_STATS1_INFO(net_bytes),
1677 	XGBE_STATS1_INFO(rx_sof_overruns),
1678 	XGBE_STATS1_INFO(rx_mof_overruns),
1679 	XGBE_STATS1_INFO(rx_dma_overruns),
1680 	/* XGBE module 2 */
1681 	XGBE_STATS2_INFO(rx_good_frames),
1682 	XGBE_STATS2_INFO(rx_broadcast_frames),
1683 	XGBE_STATS2_INFO(rx_multicast_frames),
1684 	XGBE_STATS2_INFO(rx_pause_frames),
1685 	XGBE_STATS2_INFO(rx_crc_errors),
1686 	XGBE_STATS2_INFO(rx_align_code_errors),
1687 	XGBE_STATS2_INFO(rx_oversized_frames),
1688 	XGBE_STATS2_INFO(rx_jabber_frames),
1689 	XGBE_STATS2_INFO(rx_undersized_frames),
1690 	XGBE_STATS2_INFO(rx_fragments),
1691 	XGBE_STATS2_INFO(overrun_type4),
1692 	XGBE_STATS2_INFO(overrun_type5),
1693 	XGBE_STATS2_INFO(rx_bytes),
1694 	XGBE_STATS2_INFO(tx_good_frames),
1695 	XGBE_STATS2_INFO(tx_broadcast_frames),
1696 	XGBE_STATS2_INFO(tx_multicast_frames),
1697 	XGBE_STATS2_INFO(tx_pause_frames),
1698 	XGBE_STATS2_INFO(tx_deferred_frames),
1699 	XGBE_STATS2_INFO(tx_collision_frames),
1700 	XGBE_STATS2_INFO(tx_single_coll_frames),
1701 	XGBE_STATS2_INFO(tx_mult_coll_frames),
1702 	XGBE_STATS2_INFO(tx_excessive_collisions),
1703 	XGBE_STATS2_INFO(tx_late_collisions),
1704 	XGBE_STATS2_INFO(tx_underrun),
1705 	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1706 	XGBE_STATS2_INFO(tx_bytes),
1707 	XGBE_STATS2_INFO(tx_64byte_frames),
1708 	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1709 	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1710 	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1711 	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1712 	XGBE_STATS2_INFO(tx_1024byte_frames),
1713 	XGBE_STATS2_INFO(net_bytes),
1714 	XGBE_STATS2_INFO(rx_sof_overruns),
1715 	XGBE_STATS2_INFO(rx_mof_overruns),
1716 	XGBE_STATS2_INFO(rx_dma_overruns),
1717 };
1718 
1719 #define for_each_intf(i, priv) \
1720 	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1721 
1722 #define for_each_sec_slave(slave, priv) \
1723 	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1724 
1725 #define first_sec_slave(priv)					\
1726 	list_first_entry(&priv->secondary_slaves, \
1727 			struct gbe_slave, slave_list)
1728 
1729 static void keystone_get_drvinfo(struct net_device *ndev,
1730 				 struct ethtool_drvinfo *info)
1731 {
1732 	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1733 	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1734 }
1735 
1736 static u32 keystone_get_msglevel(struct net_device *ndev)
1737 {
1738 	struct netcp_intf *netcp = netdev_priv(ndev);
1739 
1740 	return netcp->msg_enable;
1741 }
1742 
1743 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1744 {
1745 	struct netcp_intf *netcp = netdev_priv(ndev);
1746 
1747 	netcp->msg_enable = value;
1748 }
1749 
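/* An ethtool caller may be on either a 1G (gbe) or a 10G (xgbe)
 * interface; look up the interface data in the gbe module first and
 * fall back to the xgbe module.
 */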
1750 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1751 {
1752 	struct gbe_intf *gbe_intf;
1753 
1754 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1755 	if (!gbe_intf)
1756 		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1757 
1758 	return gbe_intf;
1759 }
1760 
1761 static void keystone_get_stat_strings(struct net_device *ndev,
1762 				      uint32_t stringset, uint8_t *data)
1763 {
1764 	struct netcp_intf *netcp = netdev_priv(ndev);
1765 	struct gbe_intf *gbe_intf;
1766 	struct gbe_priv *gbe_dev;
1767 	int i;
1768 
1769 	gbe_intf = keystone_get_intf_data(netcp);
1770 	if (!gbe_intf)
1771 		return;
1772 	gbe_dev = gbe_intf->gbe_dev;
1773 
1774 	switch (stringset) {
1775 	case ETH_SS_STATS:
1776 		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1777 			memcpy(data, gbe_dev->et_stats[i].desc,
1778 			       ETH_GSTRING_LEN);
1779 			data += ETH_GSTRING_LEN;
1780 		}
1781 		break;
1782 	case ETH_SS_TEST:
1783 		break;
1784 	}
1785 }
1786 
1787 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1788 {
1789 	struct netcp_intf *netcp = netdev_priv(ndev);
1790 	struct gbe_intf *gbe_intf;
1791 	struct gbe_priv *gbe_dev;
1792 
1793 	gbe_intf = keystone_get_intf_data(netcp);
1794 	if (!gbe_intf)
1795 		return -EINVAL;
1796 	gbe_dev = gbe_intf->gbe_dev;
1797 
1798 	switch (stringset) {
1799 	case ETH_SS_TEST:
1800 		return 0;
1801 	case ETH_SS_STATS:
1802 		return gbe_dev->num_et_stats;
1803 	default:
1804 		return -EINVAL;
1805 	}
1806 }
1807 
1808 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1809 {
1810 	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1811 	u32  __iomem *p_stats_entry;
1812 	int i;
1813 
1814 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1815 		if (gbe_dev->et_stats[i].type == stats_mod) {
1816 			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1817 			gbe_dev->hw_stats[i] = 0;
1818 			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1819 		}
1820 	}
1821 }
1822 
1823 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1824 					     int et_stats_entry)
1825 {
1826 	void __iomem *base = NULL;
1827 	u32  __iomem *p_stats_entry;
1828 	u32 curr, delta;
1829 
1830 	/* The hw_stats_regs pointers are already
1831 	 * properly set to point to the right base:
1832 	 */
1833 	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1834 	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1835 	curr = readl(p_stats_entry);
1836 	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1837 	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1838 	gbe_dev->hw_stats[et_stats_entry] += delta;
1839 }
1840 
1841 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1842 {
1843 	int i;
1844 
1845 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1846 		gbe_update_hw_stats_entry(gbe_dev, i);
1847 
1848 		if (data)
1849 			data[i] = gbe_dev->hw_stats[i];
1850 	}
1851 }
1852 
1853 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1854 					       int stats_mod)
1855 {
1856 	u32 val;
1857 
1858 	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1859 
1860 	switch (stats_mod) {
1861 	case GBE_STATSA_MODULE:
1862 	case GBE_STATSB_MODULE:
1863 		val &= ~GBE_STATS_CD_SEL;
1864 		break;
1865 	case GBE_STATSC_MODULE:
1866 	case GBE_STATSD_MODULE:
1867 		val |= GBE_STATS_CD_SEL;
1868 		break;
1869 	default:
1870 		return;
1871 	}
1872 
1873 	/* make the stat module visible */
1874 	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1875 }
1876 
1877 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1878 {
1879 	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1880 	gbe_reset_mod_stats(gbe_dev, stats_mod);
1881 }
1882 
1883 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1884 {
1885 	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1886 	int et_entry, j, pair;
1887 
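	/* Only one pair of stats modules (A/B or C/D) is visible at a time
	 * on ver14 hardware, so read the table in two passes, switching
	 * the visible pair in between.
	 */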
1888 	for (pair = 0; pair < 2; pair++) {
1889 		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1890 						      GBE_STATSC_MODULE :
1891 						      GBE_STATSA_MODULE));
1892 
1893 		for (j = 0; j < half_num_et_stats; j++) {
1894 			et_entry = pair * half_num_et_stats + j;
1895 			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1896 
1897 			if (data)
1898 				data[et_entry] = gbe_dev->hw_stats[et_entry];
1899 		}
1900 	}
1901 }
1902 
1903 static void keystone_get_ethtool_stats(struct net_device *ndev,
1904 				       struct ethtool_stats *stats,
1905 				       uint64_t *data)
1906 {
1907 	struct netcp_intf *netcp = netdev_priv(ndev);
1908 	struct gbe_intf *gbe_intf;
1909 	struct gbe_priv *gbe_dev;
1910 
1911 	gbe_intf = keystone_get_intf_data(netcp);
1912 	if (!gbe_intf)
1913 		return;
1914 
1915 	gbe_dev = gbe_intf->gbe_dev;
1916 	spin_lock_bh(&gbe_dev->hw_stats_lock);
1917 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1918 		gbe_update_stats_ver14(gbe_dev, data);
1919 	else
1920 		gbe_update_stats(gbe_dev, data);
1921 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1922 }
1923 
1924 static int keystone_get_link_ksettings(struct net_device *ndev,
1925 				       struct ethtool_link_ksettings *cmd)
1926 {
1927 	struct netcp_intf *netcp = netdev_priv(ndev);
1928 	struct phy_device *phy = ndev->phydev;
1929 	struct gbe_intf *gbe_intf;
1930 	int ret;
1931 
1932 	if (!phy)
1933 		return -EINVAL;
1934 
1935 	gbe_intf = keystone_get_intf_data(netcp);
1936 	if (!gbe_intf)
1937 		return -EINVAL;
1938 
1939 	if (!gbe_intf->slave)
1940 		return -EINVAL;
1941 
1942 	ret = phy_ethtool_ksettings_get(phy, cmd);
1943 	if (!ret)
1944 		cmd->base.port = gbe_intf->slave->phy_port_t;
1945 
1946 	return ret;
1947 }
1948 
1949 static int keystone_set_link_ksettings(struct net_device *ndev,
1950 				       const struct ethtool_link_ksettings *cmd)
1951 {
1952 	struct netcp_intf *netcp = netdev_priv(ndev);
1953 	struct phy_device *phy = ndev->phydev;
1954 	struct gbe_intf *gbe_intf;
1955 	u8 port = cmd->base.port;
1956 	u32 advertising, supported;
1957 	u32 features;
1958 
1959 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1960 						cmd->link_modes.advertising);
1961 	ethtool_convert_link_mode_to_legacy_u32(&supported,
1962 						cmd->link_modes.supported);
1963 	features = advertising & supported;
1964 
1965 	if (!phy)
1966 		return -EINVAL;
1967 
1968 	gbe_intf = keystone_get_intf_data(netcp);
1969 	if (!gbe_intf)
1970 		return -EINVAL;
1971 
1972 	if (!gbe_intf->slave)
1973 		return -EINVAL;
1974 
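	/* A different port type may be selected only if the PHY both
	 * advertises and supports it.
	 */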
1975 	if (port != gbe_intf->slave->phy_port_t) {
1976 		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1977 			return -EINVAL;
1978 
1979 		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1980 			return -EINVAL;
1981 
1982 		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1983 			return -EINVAL;
1984 
1985 		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1986 			return -EINVAL;
1987 
1988 		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1989 			return -EINVAL;
1990 	}
1991 
1992 	gbe_intf->slave->phy_port_t = port;
1993 	return phy_ethtool_ksettings_set(phy, cmd);
1994 }
1995 
1996 #if IS_ENABLED(CONFIG_TI_CPTS)
1997 static int keystone_get_ts_info(struct net_device *ndev,
1998 				struct ethtool_ts_info *info)
1999 {
2000 	struct netcp_intf *netcp = netdev_priv(ndev);
2001 	struct gbe_intf *gbe_intf;
2002 
2003 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2004 	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2005 		return -EINVAL;
2006 
2007 	info->so_timestamping =
2008 		SOF_TIMESTAMPING_TX_HARDWARE |
2009 		SOF_TIMESTAMPING_TX_SOFTWARE |
2010 		SOF_TIMESTAMPING_RX_HARDWARE |
2011 		SOF_TIMESTAMPING_RX_SOFTWARE |
2012 		SOF_TIMESTAMPING_SOFTWARE |
2013 		SOF_TIMESTAMPING_RAW_HARDWARE;
2014 	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2015 	info->tx_types =
2016 		(1 << HWTSTAMP_TX_OFF) |
2017 		(1 << HWTSTAMP_TX_ON);
2018 	info->rx_filters =
2019 		(1 << HWTSTAMP_FILTER_NONE) |
2020 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2021 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2022 	return 0;
2023 }
2024 #else
2025 static int keystone_get_ts_info(struct net_device *ndev,
2026 				struct ethtool_ts_info *info)
2027 {
2028 	info->so_timestamping =
2029 		SOF_TIMESTAMPING_TX_SOFTWARE |
2030 		SOF_TIMESTAMPING_RX_SOFTWARE |
2031 		SOF_TIMESTAMPING_SOFTWARE;
2032 	info->phc_index = -1;
2033 	info->tx_types = 0;
2034 	info->rx_filters = 0;
2035 	return 0;
2036 }
2037 #endif /* CONFIG_TI_CPTS */
2038 
2039 static const struct ethtool_ops keystone_ethtool_ops = {
2040 	.get_drvinfo		= keystone_get_drvinfo,
2041 	.get_link		= ethtool_op_get_link,
2042 	.get_msglevel		= keystone_get_msglevel,
2043 	.set_msglevel		= keystone_set_msglevel,
2044 	.get_strings		= keystone_get_stat_strings,
2045 	.get_sset_count		= keystone_get_sset_count,
2046 	.get_ethtool_stats	= keystone_get_ethtool_stats,
2047 	.get_link_ksettings	= keystone_get_link_ksettings,
2048 	.set_link_ksettings	= keystone_set_link_ksettings,
2049 	.get_ts_info		= keystone_get_ts_info,
2050 };
2051 
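/* Pack a 6-byte MAC address into the 32-bit sa_hi (bytes 0-3) and
 * 16-bit sa_lo (bytes 4-5) register values, least significant byte first.
 */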
2052 #define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
2053 			 ((mac)[2] << 16) | ((mac)[3] << 24))
2054 #define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
2055 
2056 static void gbe_set_slave_mac(struct gbe_slave *slave,
2057 			      struct gbe_intf *gbe_intf)
2058 {
2059 	struct net_device *ndev = gbe_intf->ndev;
2060 
2061 	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2062 	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2063 }
2064 
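/* Slave numbers from the device tree are zero-based; when the host
 * occupies switch port 0, slave N maps to switch port N + 1.
 */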
2065 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2066 {
2067 	if (priv->host_port == 0)
2068 		return slave_num + 1;
2069 
2070 	return slave_num;
2071 }
2072 
2073 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2074 					  struct net_device *ndev,
2075 					  struct gbe_slave *slave,
2076 					  int up)
2077 {
2078 	struct phy_device *phy = slave->phy;
2079 	u32 mac_control = 0;
2080 
2081 	if (up) {
2082 		mac_control = slave->mac_control;
2083 		if (phy && (phy->speed == SPEED_1000)) {
2084 			mac_control |= MACSL_GIG_MODE;
2085 			mac_control &= ~MACSL_XGIG_MODE;
2086 		} else if (phy && (phy->speed == SPEED_10000)) {
2087 			mac_control |= MACSL_XGIG_MODE;
2088 			mac_control &= ~MACSL_GIG_MODE;
2089 		}
2090 
2091 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2092 						 mac_control));
2093 
2094 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2095 				     ALE_PORT_STATE,
2096 				     ALE_PORT_STATE_FORWARD);
2097 
2098 		if (ndev && slave->open &&
2099 		    slave->link_interface != SGMII_LINK_MAC_PHY &&
2100 		    slave->link_interface != XGMII_LINK_MAC_PHY)
2101 			netif_carrier_on(ndev);
2102 	} else {
2103 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2104 						 mac_control));
2105 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2106 				     ALE_PORT_STATE,
2107 				     ALE_PORT_STATE_DISABLE);
2108 		if (ndev &&
2109 		    slave->link_interface != SGMII_LINK_MAC_PHY &&
2110 		    slave->link_interface != XGMII_LINK_MAC_PHY)
2111 			netif_carrier_off(ndev);
2112 	}
2113 
2114 	if (phy)
2115 		phy_print_status(phy);
2116 }
2117 
2118 static bool gbe_phy_link_status(struct gbe_slave *slave)
2119 {
2120 	return !slave->phy || slave->phy->link;
2121 }
2122 
2123 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2124 					  struct gbe_slave *slave,
2125 					  struct net_device *ndev)
2126 {
2127 	int sp = slave->slave_num;
2128 	int phy_link_state, sgmii_link_state = 1, link_state;
2129 
2130 	if (!slave->open)
2131 		return;
2132 
2133 	if (!SLAVE_LINK_IS_XGMII(slave)) {
2134 		sgmii_link_state =
2135 			netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2136 	}
2137 
2138 	phy_link_state = gbe_phy_link_status(slave);
2139 	link_state = phy_link_state & sgmii_link_state;
2140 
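	/* Act only when the combined PHY + SGMII link state changes */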
2141 	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2142 		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2143 					      link_state);
2144 }
2145 
2146 static void xgbe_adjust_link(struct net_device *ndev)
2147 {
2148 	struct netcp_intf *netcp = netdev_priv(ndev);
2149 	struct gbe_intf *gbe_intf;
2150 
2151 	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2152 	if (!gbe_intf)
2153 		return;
2154 
2155 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2156 				      ndev);
2157 }
2158 
2159 static void gbe_adjust_link(struct net_device *ndev)
2160 {
2161 	struct netcp_intf *netcp = netdev_priv(ndev);
2162 	struct gbe_intf *gbe_intf;
2163 
2164 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2165 	if (!gbe_intf)
2166 		return;
2167 
2168 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2169 				      ndev);
2170 }
2171 
2172 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2173 {
2174 	struct gbe_priv *gbe_dev = netdev_priv(ndev);
2175 	struct gbe_slave *slave;
2176 
2177 	for_each_sec_slave(slave, gbe_dev)
2178 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2179 }
2180 
2181 /* Reset EMAC
2182  * Soft reset is set and polled until clear, or until a timeout occurs
2183  */
2184 static int gbe_port_reset(struct gbe_slave *slave)
2185 {
2186 	u32 i, v;
2187 
2188 	/* Set the soft reset bit */
2189 	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2190 
2191 	/* Wait for the bit to clear */
2192 	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2193 		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2194 		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2195 			return 0;
2196 	}
2197 
2198 	/* Timeout on the reset */
2199 	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2200 }
2201 
2202 /* Configure EMAC */
2203 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2204 			    int max_rx_len)
2205 {
2206 	void __iomem *rx_maxlen_reg;
2207 	u32 xgmii_mode;
2208 
2209 	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2210 		max_rx_len = NETCP_MAX_FRAME_SIZE;
2211 
2212 	/* Enable correct MII mode at SS level */
2213 	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
2214 	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2215 		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2216 		xgmii_mode |= (1 << slave->slave_num);
2217 		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2218 	}
2219 
2220 	if (IS_SS_ID_MU(gbe_dev))
2221 		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2222 	else
2223 		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2224 
2225 	writel(max_rx_len, rx_maxlen_reg);
2226 	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2227 }
2228 
2229 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2230 			      struct gbe_slave *slave, bool set)
2231 {
2232 	if (SLAVE_LINK_IS_XGMII(slave))
2233 		return;
2234 
2235 	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2236 			    slave->slave_num, set);
2237 }
2238 
2239 static void gbe_slave_stop(struct gbe_intf *intf)
2240 {
2241 	struct gbe_priv *gbe_dev = intf->gbe_dev;
2242 	struct gbe_slave *slave = intf->slave;
2243 
2244 	gbe_sgmii_rtreset(gbe_dev, slave, true);
2245 	gbe_port_reset(slave);
2246 	/* Disable forwarding */
2247 	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2248 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2249 	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2250 			   1 << slave->port_num, 0, 0);
2251 
2252 	if (!slave->phy)
2253 		return;
2254 
2255 	phy_stop(slave->phy);
2256 	phy_disconnect(slave->phy);
2257 	slave->phy = NULL;
2258 }
2259 
2260 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2261 {
2262 	if (SLAVE_LINK_IS_XGMII(slave))
2263 		return;
2264 
2265 	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2266 	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2267 			   slave->link_interface);
2268 }
2269 
2270 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2271 {
2272 	struct gbe_priv *priv = gbe_intf->gbe_dev;
2273 	struct gbe_slave *slave = gbe_intf->slave;
2274 	phy_interface_t phy_mode;
2275 	bool has_phy = false;
2276 
2277 	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2278 
2279 	gbe_sgmii_config(priv, slave);
2280 	gbe_port_reset(slave);
2281 	gbe_sgmii_rtreset(priv, slave, false);
2282 	gbe_port_config(priv, slave, priv->rx_packet_max);
2283 	gbe_set_slave_mac(slave, gbe_intf);
2284 	/* enable forwarding */
2285 	cpsw_ale_control_set(priv->ale, slave->port_num,
2286 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2287 	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2288 			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2289 
2290 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2291 		has_phy = true;
2292 		phy_mode = PHY_INTERFACE_MODE_SGMII;
2293 		slave->phy_port_t = PORT_MII;
2294 	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2295 		has_phy = true;
2296 		phy_mode = PHY_INTERFACE_MODE_NA;
2297 		slave->phy_port_t = PORT_FIBRE;
2298 	}
2299 
2300 	if (has_phy) {
2301 		if (priv->ss_version == XGBE_SS_VERSION_10)
2302 			hndlr = xgbe_adjust_link;
2303 
2304 		slave->phy = of_phy_connect(gbe_intf->ndev,
2305 					    slave->phy_node,
2306 					    hndlr, 0,
2307 					    phy_mode);
2308 		if (!slave->phy) {
2309 			dev_err(priv->dev, "phy not found on slave %d\n",
2310 				slave->slave_num);
2311 			return -ENODEV;
2312 		}
2313 		dev_dbg(priv->dev, "phy found: id is: %s\n",
2314 			phydev_name(slave->phy));
2315 		phy_start(slave->phy);
2316 	}
2317 	return 0;
2318 }
2319 
2320 static void gbe_init_host_port(struct gbe_priv *priv)
2321 {
2322 	int bypass_en = 1;
2323 
2324 	/* Host Tx Pri */
2325 	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2326 		writel(HOST_TX_PRI_MAP_DEFAULT,
2327 		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2328 
2329 	/* Max length register */
2330 	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2331 						  rx_maxlen));
2332 
2333 	cpsw_ale_start(priv->ale);
2334 
2335 	if (priv->enable_ale)
2336 		bypass_en = 0;
2337 
2338 	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2339 
2340 	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2341 
2342 	cpsw_ale_control_set(priv->ale, priv->host_port,
2343 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2344 
2345 	cpsw_ale_control_set(priv->ale, 0,
2346 			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2347 			     GBE_PORT_MASK(priv->ale_ports));
2348 
2349 	cpsw_ale_control_set(priv->ale, 0,
2350 			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2351 			     GBE_PORT_MASK(priv->ale_ports - 1));
2352 
2353 	cpsw_ale_control_set(priv->ale, 0,
2354 			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2355 			     GBE_PORT_MASK(priv->ale_ports));
2356 
2357 	cpsw_ale_control_set(priv->ale, 0,
2358 			     ALE_PORT_UNTAGGED_EGRESS,
2359 			     GBE_PORT_MASK(priv->ale_ports));
2360 }
2361 
2362 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2363 {
2364 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2365 	u16 vlan_id;
2366 
2367 	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2368 			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2369 			   ALE_MCAST_FWD_2);
2370 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2371 		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2372 				   GBE_PORT_MASK(gbe_dev->ale_ports),
2373 				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2374 	}
2375 }
2376 
2377 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2378 {
2379 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2380 	u16 vlan_id;
2381 
2382 	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2383 
2384 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2385 		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2386 				   ALE_VLAN, vlan_id);
2387 }
2388 
2389 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2390 {
2391 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2392 	u16 vlan_id;
2393 
2394 	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2395 
2396 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2397 		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2398 	}
2399 }
2400 
2401 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2402 {
2403 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2404 	u16 vlan_id;
2405 
2406 	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2407 
2408 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2409 		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2410 				   ALE_VLAN, vlan_id);
2411 	}
2412 }
2413 
2414 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2415 {
2416 	struct gbe_intf *gbe_intf = intf_priv;
2417 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2418 
2419 	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2420 		naddr->addr, naddr->type);
2421 
2422 	switch (naddr->type) {
2423 	case ADDR_MCAST:
2424 	case ADDR_BCAST:
2425 		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2426 		break;
2427 	case ADDR_UCAST:
2428 	case ADDR_DEV:
2429 		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2430 		break;
2431 	case ADDR_ANY:
2432 		/* nothing to do for promiscuous */
2433 	default:
2434 		break;
2435 	}
2436 
2437 	return 0;
2438 }
2439 
2440 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2441 {
2442 	struct gbe_intf *gbe_intf = intf_priv;
2443 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2444 
2445 	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2446 		naddr->addr, naddr->type);
2447 
2448 	switch (naddr->type) {
2449 	case ADDR_MCAST:
2450 	case ADDR_BCAST:
2451 		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2452 		break;
2453 	case ADDR_UCAST:
2454 	case ADDR_DEV:
2455 		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2456 		break;
2457 	case ADDR_ANY:
2458 		/* nothing to do for promiscuous */
2459 	default:
2460 		break;
2461 	}
2462 
2463 	return 0;
2464 }
2465 
2466 static int gbe_add_vid(void *intf_priv, int vid)
2467 {
2468 	struct gbe_intf *gbe_intf = intf_priv;
2469 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2470 
2471 	set_bit(vid, gbe_intf->active_vlans);
2472 
2473 	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2474 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2475 			  GBE_MASK_NO_PORTS,
2476 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2477 			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2478 
2479 	return 0;
2480 }
2481 
2482 static int gbe_del_vid(void *intf_priv, int vid)
2483 {
2484 	struct gbe_intf *gbe_intf = intf_priv;
2485 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2486 
2487 	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2488 	clear_bit(vid, gbe_intf->active_vlans);
2489 	return 0;
2490 }
2491 
2492 #if IS_ENABLED(CONFIG_TI_CPTS)
2493 #define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
2494 #define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
2495 
2496 static void gbe_txtstamp(void *context, struct sk_buff *skb)
2497 {
2498 	struct gbe_intf *gbe_intf = context;
2499 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2500 
2501 	cpts_tx_timestamp(gbe_dev->cpts, skb);
2502 }
2503 
2504 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2505 			      const struct netcp_packet *p_info)
2506 {
2507 	struct sk_buff *skb = p_info->skb;
2508 	unsigned int class = ptp_classify_raw(skb);
2509 
2510 	if (class == PTP_CLASS_NONE)
2511 		return false;
2512 
2513 	switch (class) {
2514 	case PTP_CLASS_V1_IPV4:
2515 	case PTP_CLASS_V1_IPV6:
2516 	case PTP_CLASS_V2_IPV4:
2517 	case PTP_CLASS_V2_IPV6:
2518 	case PTP_CLASS_V2_L2:
2519 	case (PTP_CLASS_V2_VLAN | PTP_CLASS_L2):
2520 	case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV4):
2521 	case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV6):
2522 		return true;
2523 	}
2524 
2525 	return false;
2526 }
2527 
2528 static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2529 				 struct netcp_packet *p_info)
2530 {
2531 	struct phy_device *phydev = p_info->skb->dev->phydev;
2532 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2533 
2534 	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2535 	    !cpts_is_tx_enabled(gbe_dev->cpts))
2536 		return 0;
2537 
2538 	/* If phy has the txtstamp api, assume it will do it.
2539 	 * We mark it here because skb_tx_timestamp() is called
2540 	 * after all the txhooks are called.
2541 	 */
2542 	if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
2543 		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2544 		return 0;
2545 	}
2546 
2547 	if (gbe_need_txtstamp(gbe_intf, p_info)) {
2548 		p_info->txtstamp = gbe_txtstamp;
2549 		p_info->ts_context = (void *)gbe_intf;
2550 		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2551 	}
2552 
2553 	return 0;
2554 }
2555 
2556 static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2557 {
2558 	struct phy_device *phydev = p_info->skb->dev->phydev;
2559 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2560 
2561 	if (p_info->rxtstamp_complete)
2562 		return 0;
2563 
2564 	if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
2565 		p_info->rxtstamp_complete = true;
2566 		return 0;
2567 	}
2568 
2569 	cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2570 	p_info->rxtstamp_complete = true;
2571 
2572 	return 0;
2573 }
2574 
2575 static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2576 {
2577 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2578 	struct cpts *cpts = gbe_dev->cpts;
2579 	struct hwtstamp_config cfg;
2580 
2581 	if (!cpts)
2582 		return -EOPNOTSUPP;
2583 
2584 	cfg.flags = 0;
2585 	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
2586 		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2587 	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
2588 			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
2589 
2590 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2591 }
2592 
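/* Program the per-port timestamping registers (ts_ctl, ts_seq_ltype,
 * ts_ctl_ltype2) to match the current CPTS rx/tx enable state.
 */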
2593 static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2594 {
2595 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2596 	struct gbe_slave *slave = gbe_intf->slave;
2597 	u32 ts_en, seq_id, ctl;
2598 
2599 	if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
2600 	    !cpts_is_tx_enabled(gbe_dev->cpts)) {
2601 		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2602 		return;
2603 	}
2604 
2605 	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2606 	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2607 	ctl = ETH_P_1588 | TS_TTL_NONZERO |
2608 		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2609 		(slave->ts_ctl.uni ?  TS_UNI_EN :
2610 			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2611 
2612 	if (cpts_is_tx_enabled(gbe_dev->cpts))
2613 		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2614 
2615 	if (cpts_is_rx_enabled(gbe_dev->cpts))
2616 		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2617 
2618 	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2619 	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2620 	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2621 }
2622 
2623 static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2624 {
2625 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2626 	struct cpts *cpts = gbe_dev->cpts;
2627 	struct hwtstamp_config cfg;
2628 
2629 	if (!cpts)
2630 		return -EOPNOTSUPP;
2631 
2632 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2633 		return -EFAULT;
2634 
2635 	/* reserved for future extensions */
2636 	if (cfg.flags)
2637 		return -EINVAL;
2638 
2639 	switch (cfg.tx_type) {
2640 	case HWTSTAMP_TX_OFF:
2641 		cpts_tx_enable(cpts, 0);
2642 		break;
2643 	case HWTSTAMP_TX_ON:
2644 		cpts_tx_enable(cpts, 1);
2645 		break;
2646 	default:
2647 		return -ERANGE;
2648 	}
2649 
2650 	switch (cfg.rx_filter) {
2651 	case HWTSTAMP_FILTER_NONE:
2652 		cpts_rx_enable(cpts, 0);
2653 		break;
2654 	case HWTSTAMP_FILTER_ALL:
2655 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2656 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2657 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2658 		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
2659 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2660 		break;
2661 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2662 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2663 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2664 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2665 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2666 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2667 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2668 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2669 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2670 		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
2671 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2672 		break;
2673 	default:
2674 		return -ERANGE;
2675 	}
2676 
2677 	gbe_hwtstamp(gbe_intf);
2678 
2679 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2680 }
2681 
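/* CPTS registration is reference counted across interfaces: only the
 * first user registers the clock, later users just bump the count.
 */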
2682 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2683 {
2684 	if (!gbe_dev->cpts)
2685 		return;
2686 
2687 	if (gbe_dev->cpts_registered > 0)
2688 		goto done;
2689 
2690 	if (cpts_register(gbe_dev->cpts)) {
2691 		dev_err(gbe_dev->dev, "error registering cpts device\n");
2692 		return;
2693 	}
2694 
2695 done:
2696 	++gbe_dev->cpts_registered;
2697 }
2698 
2699 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2700 {
2701 	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2702 		return;
2703 
2704 	if (--gbe_dev->cpts_registered)
2705 		return;
2706 
2707 	cpts_unregister(gbe_dev->cpts);
2708 }
2709 #else
2710 static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2711 					struct netcp_packet *p_info)
2712 {
2713 	return 0;
2714 }
2715 
2716 static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2717 			       struct netcp_packet *p_info)
2718 {
2719 	return 0;
2720 }
2721 
2722 static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2723 			       struct ifreq *ifr, int cmd)
2724 {
2725 	return -EOPNOTSUPP;
2726 }
2727 
2728 static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2729 {
2730 }
2731 
2732 static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2733 {
2734 }
2735 
2736 static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2737 {
2738 	return -EOPNOTSUPP;
2739 }
2740 
2741 static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2742 {
2743 	return -EOPNOTSUPP;
2744 }
2745 #endif /* CONFIG_TI_CPTS */
2746 
2747 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2748 {
2749 	struct gbe_intf *gbe_intf = intf_priv;
2750 	struct phy_device *phy = gbe_intf->slave->phy;
2751 
2752 	if (!phy || !phy->drv || !phy->drv->hwtstamp) {
2753 		switch (cmd) {
2754 		case SIOCGHWTSTAMP:
2755 			return gbe_hwtstamp_get(gbe_intf, req);
2756 		case SIOCSHWTSTAMP:
2757 			return gbe_hwtstamp_set(gbe_intf, req);
2758 		}
2759 	}
2760 
2761 	if (phy)
2762 		return phy_mii_ioctl(phy, req, cmd);
2763 
2764 	return -EOPNOTSUPP;
2765 }
2766 
2767 static void netcp_ethss_timer(unsigned long arg)
2768 {
2769 	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
2770 	struct gbe_intf *gbe_intf;
2771 	struct gbe_slave *slave;
2772 
2773 	/* Check & update SGMII link state of interfaces */
2774 	for_each_intf(gbe_intf, gbe_dev) {
2775 		if (!gbe_intf->slave->open)
2776 			continue;
2777 		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2778 					      gbe_intf->ndev);
2779 	}
2780 
2781 	/* Check & update SGMII link state of secondary ports */
2782 	for_each_sec_slave(slave, gbe_dev) {
2783 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2784 	}
2785 
2786 	/* Timer callbacks run in BH context; no need to block BHs here */
2787 	spin_lock(&gbe_dev->hw_stats_lock);
2788 
2789 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2790 		gbe_update_stats_ver14(gbe_dev, NULL);
2791 	else
2792 		gbe_update_stats(gbe_dev, NULL);
2793 
2794 	spin_unlock(&gbe_dev->hw_stats_lock);
2795 
2796 	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2797 	add_timer(&gbe_dev->timer);
2798 }
2799 
2800 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2801 {
2802 	struct gbe_intf *gbe_intf = data;
2803 
2804 	p_info->tx_pipe = &gbe_intf->tx_pipe;
2805 
2806 	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2807 }
2808 
2809 static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2810 {
2811 	struct gbe_intf *gbe_intf = data;
2812 
2813 	return gbe_rxtstamp(gbe_intf, p_info);
2814 }
2815 
2816 static int gbe_open(void *intf_priv, struct net_device *ndev)
2817 {
2818 	struct gbe_intf *gbe_intf = intf_priv;
2819 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2820 	struct netcp_intf *netcp = netdev_priv(ndev);
2821 	struct gbe_slave *slave = gbe_intf->slave;
2822 	int port_num = slave->port_num;
2823 	u32 reg, val;
2824 	int ret;
2825 
2826 	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2827 	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d), GBE identification value 0x%x\n",
2828 		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2829 		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2830 
2831 	/* For 10G and on NetCP 1.5, use directed to port */
2832 	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
2833 		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2834 
2835 	if (gbe_dev->enable_ale)
2836 		gbe_intf->tx_pipe.switch_to_port = 0;
2837 	else
2838 		gbe_intf->tx_pipe.switch_to_port = port_num;
2839 
2840 	dev_dbg(gbe_dev->dev,
2841 		"opened TX channel %s: %p with to port %d, flags %d\n",
2842 		gbe_intf->tx_pipe.dma_chan_name,
2843 		gbe_intf->tx_pipe.dma_channel,
2844 		gbe_intf->tx_pipe.switch_to_port,
2845 		gbe_intf->tx_pipe.flags);
2846 
2847 	gbe_slave_stop(gbe_intf);
2848 
2849 	/* disable priority elevation and enable statistics on all ports */
2850 	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2851 
2852 	/* Control register */
2853 	val = GBE_CTL_P0_ENABLE;
2854 	if (IS_SS_ID_MU(gbe_dev)) {
2855 		val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2856 		netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2857 	}
2858 	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2859 
2860 	/* All statistics enabled and STAT AB visible by default */
2861 	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2862 						    stat_port_en));
2863 
2864 	ret = gbe_slave_open(gbe_intf);
2865 	if (ret)
2866 		goto fail;
2867 
2868 	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2869 	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2870 
2871 	slave->open = true;
2872 	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2873 
2874 	gbe_register_cpts(gbe_dev);
2875 
2876 	return 0;
2877 
2878 fail:
2879 	gbe_slave_stop(gbe_intf);
2880 	return ret;
2881 }
2882 
2883 static int gbe_close(void *intf_priv, struct net_device *ndev)
2884 {
2885 	struct gbe_intf *gbe_intf = intf_priv;
2886 	struct netcp_intf *netcp = netdev_priv(ndev);
2887 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2888 
2889 	gbe_unregister_cpts(gbe_dev);
2890 
2891 	gbe_slave_stop(gbe_intf);
2892 
2893 	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2894 	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2895 
2896 	gbe_intf->slave->open = false;
2897 	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2898 	return 0;
2899 }
2900 
2901 #if IS_ENABLED(CONFIG_TI_CPTS)
2902 static void init_slave_ts_ctl(struct gbe_slave *slave)
2903 {
2904 	slave->ts_ctl.uni = 1;
2905 	slave->ts_ctl.dst_port_map =
2906 		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2907 	slave->ts_ctl.maddr_map =
2908 		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2909 }
2910 
2911 #else
2912 static void init_slave_ts_ctl(struct gbe_slave *slave)
2913 {
2914 }
2915 #endif /* CONFIG_TI_CPTS */
2916 
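/* Parse one slave port sub-node.  The properties read below come from
 * the device tree; an illustrative node (values are examples only):
 *
 *	slave-port	= <0>;
 *	link-interface	= <1>;
 *	phy-handle	= <&ethphy0>;
 */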
2917 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2918 		      struct device_node *node)
2919 {
2920 	int port_reg_num;
2921 	u32 port_reg_ofs, emac_reg_ofs;
2922 	u32 port_reg_blk_sz, emac_reg_blk_sz;
2923 
2924 	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2925 		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2926 		return -EINVAL;
2927 	}
2928 
2929 	if (of_property_read_u32(node, "link-interface",
2930 				 &slave->link_interface)) {
2931 		dev_warn(gbe_dev->dev,
2932 			 "missing link-interface value, defaulting to 1G mac-phy link\n");
2933 		slave->link_interface = SGMII_LINK_MAC_PHY;
2934 	}
2935 
2936 	slave->open = false;
2937 	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
2938 	    (slave->link_interface == XGMII_LINK_MAC_PHY))
2939 		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
2940 	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2941 
2942 	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2943 		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2944 	else
2945 		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
2946 
2947 	/* EMAC register blocks are contiguous; port register blocks are not */
2948 	port_reg_num = slave->slave_num;
2949 	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2950 		if (slave->slave_num > 1) {
2951 			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2952 			port_reg_num -= 2;
2953 		} else {
2954 			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2955 		}
2956 		emac_reg_ofs = GBE13_EMAC_OFFSET;
2957 		port_reg_blk_sz = 0x30;
2958 		emac_reg_blk_sz = 0x40;
2959 	} else if (IS_SS_ID_MU(gbe_dev)) {
2960 		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2961 		emac_reg_ofs = GBENU_EMAC_OFFSET;
2962 		port_reg_blk_sz = 0x1000;
2963 		emac_reg_blk_sz = 0x1000;
2964 	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2965 		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
2966 		emac_reg_ofs = XGBE10_EMAC_OFFSET;
2967 		port_reg_blk_sz = 0x30;
2968 		emac_reg_blk_sz = 0x40;
2969 	} else {
2970 		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2971 			gbe_dev->ss_version);
2972 		return -EINVAL;
2973 	}
2974 
2975 	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
2976 				(port_reg_blk_sz * port_reg_num);
2977 	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
2978 				(emac_reg_blk_sz * slave->slave_num);
2979 
2980 	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2981 		/* Initialize  slave port register offsets */
2982 		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2983 		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2984 		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2985 		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2986 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2987 		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2988 		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2989 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2990 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2991 
2992 		/* Initialize EMAC register offsets */
2993 		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2994 		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2995 		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2996 
2997 	} else if (IS_SS_ID_MU(gbe_dev)) {
2998 		/* Initialize  slave port register offsets */
2999 		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3000 		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3001 		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3002 		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3003 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3004 		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3005 		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3006 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3007 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3008 		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3009 
3010 		/* Initialize EMAC register offsets */
3011 		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3012 		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3013 
3014 	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
3015 		/* Initialize  slave port register offsets */
3016 		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3017 		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3018 		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3019 		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3020 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3021 		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3022 		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3023 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3024 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3025 
3026 		/* Initialize EMAC register offsets */
3027 		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3028 		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3029 		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3030 	}
3031 
3032 	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3033 
3034 	init_slave_ts_ctl(slave);
3035 	return 0;
3036 }
3037 
3038 static void init_secondary_ports(struct gbe_priv *gbe_dev,
3039 				 struct device_node *node)
3040 {
3041 	struct device *dev = gbe_dev->dev;
3042 	phy_interface_t phy_mode;
3043 	struct gbe_priv **priv;
3044 	struct device_node *port;
3045 	struct gbe_slave *slave;
3046 	bool mac_phy_link = false;
3047 
3048 	for_each_child_of_node(node, port) {
3049 		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3050 		if (!slave) {
3051 			dev_err(dev,
3052 				"memory alloc failed for secondary port(%s), skipping...\n",
3053 				port->name);
3054 			continue;
3055 		}
3056 
3057 		if (init_slave(gbe_dev, slave, port)) {
3058 			dev_err(dev,
3059 				"Failed to initialize secondary port(%s), skipping...\n",
3060 				port->name);
3061 			devm_kfree(dev, slave);
3062 			continue;
3063 		}
3064 
3065 		gbe_sgmii_config(gbe_dev, slave);
3066 		gbe_port_reset(slave);
3067 		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3068 		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3069 		gbe_dev->num_slaves++;
3070 		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3071 		    (slave->link_interface == XGMII_LINK_MAC_PHY))
3072 			mac_phy_link = true;
3073 
3074 		slave->open = true;
3075 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3076 			of_node_put(port);
3077 			break;
3078 		}
3079 	}
3080 
3081 	/* of_phy_connect() is needed only for MAC-PHY interface */
3082 	if (!mac_phy_link)
3083 		return;
3084 
3085 	/* Allocate dummy netdev device for attaching to phy device */
3086 	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3087 					NET_NAME_UNKNOWN, ether_setup);
3088 	if (!gbe_dev->dummy_ndev) {
3089 		dev_err(dev,
3090 			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3091 		return;
3092 	}
3093 	priv = netdev_priv(gbe_dev->dummy_ndev);
3094 	*priv = gbe_dev;
3095 
3096 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3097 		phy_mode = PHY_INTERFACE_MODE_SGMII;
3098 		slave->phy_port_t = PORT_MII;
3099 	} else {
3100 		phy_mode = PHY_INTERFACE_MODE_NA;
3101 		slave->phy_port_t = PORT_FIBRE;
3102 	}
3103 
3104 	for_each_sec_slave(slave, gbe_dev) {
3105 		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3106 		    (slave->link_interface != XGMII_LINK_MAC_PHY))
3107 			continue;
3108 		slave->phy =
3109 			of_phy_connect(gbe_dev->dummy_ndev,
3110 				       slave->phy_node,
3111 				       gbe_adjust_link_sec_slaves,
3112 				       0, phy_mode);
3113 		if (!slave->phy) {
3114 			dev_err(dev, "phy not found for slave %d\n",
3115 				slave->slave_num);
3116 			slave->phy = NULL;
3117 		} else {
3118 			dev_dbg(dev, "phy found: id is: %s\n",
3119 				phydev_name(slave->phy));
3120 			phy_start(slave->phy);
3121 		}
3122 	}
3123 }
3124 
3125 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3126 {
3127 	struct gbe_slave *slave;
3128 
3129 	while (!list_empty(&gbe_dev->secondary_slaves)) {
3130 		slave = first_sec_slave(gbe_dev);
3131 
3132 		if (slave->phy)
3133 			phy_disconnect(slave->phy);
3134 		list_del(&slave->slave_list);
3135 	}
3136 	if (gbe_dev->dummy_ndev)
3137 		free_netdev(gbe_dev->dummy_ndev);
3138 }
3139 
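/* Map the three XGBE register ranges (subsystem, switch module and
 * SerDes) and fill in the 10G-specific register offsets and ethtool
 * stats tables.
 */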
3140 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3141 				 struct device_node *node)
3142 {
3143 	struct resource res;
3144 	void __iomem *regs;
3145 	int ret, i;
3146 
3147 	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3148 	if (ret) {
3149 		dev_err(gbe_dev->dev,
3150 			"Can't xlate xgbe of node(%s) ss address at %d\n",
3151 			node->name, XGBE_SS_REG_INDEX);
3152 		return ret;
3153 	}
3154 
3155 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3156 	if (IS_ERR(regs)) {
3157 		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3158 		return PTR_ERR(regs);
3159 	}
3160 	gbe_dev->ss_regs = regs;
3161 
3162 	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3163 	if (ret) {
3164 		dev_err(gbe_dev->dev,
3165 			"Can't xlate xgbe of node(%s) sm address at %d\n",
3166 			node->name, XGBE_SM_REG_INDEX);
3167 		return ret;
3168 	}
3169 
3170 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3171 	if (IS_ERR(regs)) {
3172 		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3173 		return PTR_ERR(regs);
3174 	}
3175 	gbe_dev->switch_regs = regs;
3176 
3177 	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3178 	if (ret) {
3179 		dev_err(gbe_dev->dev,
3180 			"Can't xlate xgbe serdes of node(%s) address at %d\n",
3181 			node->name, XGBE_SERDES_REG_INDEX);
3182 		return ret;
3183 	}
3184 
3185 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3186 	if (IS_ERR(regs)) {
3187 		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3188 		return PTR_ERR(regs);
3189 	}
3190 	gbe_dev->xgbe_serdes_regs = regs;
3191 
3192 	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3193 	gbe_dev->et_stats = xgbe10_et_stats;
3194 	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3195 
3196 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3197 					 gbe_dev->num_et_stats * sizeof(u64),
3198 					 GFP_KERNEL);
3199 	if (!gbe_dev->hw_stats) {
3200 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3201 		return -ENOMEM;
3202 	}
3203 
3204 	gbe_dev->hw_stats_prev =
3205 		devm_kzalloc(gbe_dev->dev,
3206 			     gbe_dev->num_et_stats * sizeof(u32),
3207 			     GFP_KERNEL);
3208 	if (!gbe_dev->hw_stats_prev) {
3209 		dev_err(gbe_dev->dev,
3210 			"hw_stats_prev memory allocation failed\n");
3211 		return -ENOMEM;
3212 	}
3213 
3214 	gbe_dev->ss_version = XGBE_SS_VERSION_10;
3215 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3216 					XGBE10_SGMII_MODULE_OFFSET;
3217 	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3218 
3219 	for (i = 0; i < gbe_dev->max_num_ports; i++)
3220 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3221 			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3222 
3223 	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3224 	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3225 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3226 	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3227 	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
3228 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3229 
3230 	/* Subsystem registers */
3231 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3232 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3233 
3234 	/* Switch module registers */
3235 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3236 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3237 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3238 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3239 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3240 
3241 	/* Host port registers */
3242 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3243 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3244 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3245 	return 0;
3246 }
3247 
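/* Map the GBE subsystem registers and read the version/ID register at
 * offset 0; the probe uses it to pick the matching per-variant setup.
 */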
3248 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3249 				    struct device_node *node)
3250 {
3251 	struct resource res;
3252 	void __iomem *regs;
3253 	int ret;
3254 
3255 	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3256 	if (ret) {
3257 		dev_err(gbe_dev->dev,
3258 			"Can't translate of node(%s) of gbe ss address at %d\n",
3259 			node->name, GBE_SS_REG_INDEX);
3260 		return ret;
3261 	}
3262 
3263 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3264 	if (IS_ERR(regs)) {
3265 		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3266 		return PTR_ERR(regs);
3267 	}
3268 	gbe_dev->ss_regs = regs;
3269 	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3270 	return 0;
3271 }
3272 
3273 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3274 				struct device_node *node)
3275 {
3276 	struct resource res;
3277 	void __iomem *regs;
3278 	int i, ret;
3279 
3280 	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3281 	if (ret) {
3282 		dev_err(gbe_dev->dev,
3283 			"Can't translate of gbe node(%s) address at index %d\n",
3284 			node->name, GBE_SGMII34_REG_INDEX);
3285 		return ret;
3286 	}
3287 
3288 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3289 	if (IS_ERR(regs)) {
3290 		dev_err(gbe_dev->dev,
3291 			"Failed to map gbe sgmii port34 register base\n");
3292 		return PTR_ERR(regs);
3293 	}
3294 	gbe_dev->sgmii_port34_regs = regs;
3295 
3296 	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3297 	if (ret) {
3298 		dev_err(gbe_dev->dev,
3299 			"Can't translate of gbe node(%s) address at index %d\n",
3300 			node->name, GBE_SM_REG_INDEX);
3301 		return ret;
3302 	}
3303 
3304 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3305 	if (IS_ERR(regs)) {
3306 		dev_err(gbe_dev->dev,
3307 			"Failed to map gbe switch module register base\n");
3308 		return PTR_ERR(regs);
3309 	}
3310 	gbe_dev->switch_regs = regs;
3311 
3312 	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3313 	gbe_dev->et_stats = gbe13_et_stats;
3314 	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3315 
3316 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3317 					 gbe_dev->num_et_stats * sizeof(u64),
3318 					 GFP_KERNEL);
3319 	if (!gbe_dev->hw_stats) {
3320 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3321 		return -ENOMEM;
3322 	}
3323 
3324 	gbe_dev->hw_stats_prev =
3325 		devm_kzalloc(gbe_dev->dev,
3326 			     gbe_dev->num_et_stats * sizeof(u32),
3327 			     GFP_KERNEL);
3328 	if (!gbe_dev->hw_stats_prev) {
3329 		dev_err(gbe_dev->dev,
3330 			"hw_stats_prev memory allocation failed\n");
3331 		return -ENOMEM;
3332 	}
3333 
3334 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3335 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3336 
3337 	/* K2HK has only 2 hw stats modules visible at a time, so
3338 	 * modules 0 & 2 point to one base and
3339 	 * modules 1 & 3 point to the other base
3340 	 */
3341 	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3342 		gbe_dev->hw_stats_regs[i] =
3343 			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3344 			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3345 	}
3346 
3347 	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3348 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3349 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3350 	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3351 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3352 	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3353 
3354 	/* Subsystem registers */
3355 	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3356 
3357 	/* Switch module registers */
3358 	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3359 	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3360 	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3361 	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3362 	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3363 	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3364 
3365 	/* Host port registers */
3366 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3367 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3368 	return 0;
3369 }
3370 
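/* Fill in the register layout and ethtool stats tables for GBENU
 * (NetCP 1.5) subsystems; the subsystem registers themselves were
 * already mapped by get_gbe_resource_version().
 */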
3371 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3372 				struct device_node *node)
3373 {
3374 	struct resource res;
3375 	void __iomem *regs;
3376 	int i, ret;
3377 
3378 	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3379 	gbe_dev->et_stats = gbenu_et_stats;
3380 
3381 	if (IS_SS_ID_NU(gbe_dev))
3382 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3383 			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3384 	else
3385 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3386 					GBENU_ET_STATS_PORT_SIZE;
3387 
3388 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
3389 					 gbe_dev->num_et_stats * sizeof(u64),
3390 					 GFP_KERNEL);
3391 	if (!gbe_dev->hw_stats) {
3392 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3393 		return -ENOMEM;
3394 	}
3395 
3396 	gbe_dev->hw_stats_prev =
3397 		devm_kzalloc(gbe_dev->dev,
3398 			     gbe_dev->num_et_stats * sizeof(u32),
3399 			     GFP_KERNEL);
3400 	if (!gbe_dev->hw_stats_prev) {
3401 		dev_err(gbe_dev->dev,
3402 			"hw_stats_prev memory allocation failed\n");
3403 		return -ENOMEM;
3404 	}
3405 
3406 	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3407 	if (ret) {
3408 		dev_err(gbe_dev->dev,
3409 			"Can't translate of gbenu node(%s) addr at index %d\n",
3410 			node->name, GBENU_SM_REG_INDEX);
3411 		return ret;
3412 	}
3413 
3414 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3415 	if (IS_ERR(regs)) {
3416 		dev_err(gbe_dev->dev,
3417 			"Failed to map gbenu switch module register base\n");
3418 		return PTR_ERR(regs);
3419 	}
3420 	gbe_dev->switch_regs = regs;
3421 
3422 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3423 
3424 	/* Although the SGMII modules are memory-mapped as one contiguous
3425 	 * region on GBENU devices, setting sgmii_port34_regs keeps the
3426 	 * SGMII accessor code consistent across subsystem types.
3427 	 */
3428 	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3429 				     (2 * GBENU_SGMII_MODULE_SIZE);
3430 
3431 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3432 
3433 	for (i = 0; i < (gbe_dev->max_num_ports); i++)
3434 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3435 			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3436 
3437 	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3438 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3439 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3440 	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3441 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3442 
3443 	/* Subsystem registers */
3444 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3445 
3446 	/* Switch module registers */
3447 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3448 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3449 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3450 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3451 
3452 	/* Host port registers */
3453 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3454 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3455 
3456 	/* For NU only; 2U does not need tx_pri_map.
3457 	 * The NU CPPI port 0 TX packet streaming interface has (n-1)*8
3458 	 * egress threads, while 2U has only one such thread.
3459 	 */
3460 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3461 	return 0;
3462 }
3463 
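/* Probe one GBE/XGBE subsystem instance described by @node: size the
 * slave count from the compatible string, map the subsystem-specific
 * register blocks, set up the TX pipe, ALE and CPTS, and arm the
 * netcp_ethss_timer.
 */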
3464 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3465 		     struct device_node *node, void **inst_priv)
3466 {
3467 	struct device_node *interfaces, *interface;
3468 	struct device_node *secondary_ports;
3469 	struct cpsw_ale_params ale_params;
3470 	struct gbe_priv *gbe_dev;
3471 	u32 slave_num;
3472 	int i, ret = 0;
3473 
3474 	if (!node) {
3475 		dev_err(dev, "device tree info unavailable\n");
3476 		return -ENODEV;
3477 	}
3478 
3479 	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3480 	if (!gbe_dev)
3481 		return -ENOMEM;
3482 
3483 	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3484 	    of_device_is_compatible(node, "ti,netcp-gbe")) {
3485 		gbe_dev->max_num_slaves = 4;
3486 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3487 		gbe_dev->max_num_slaves = 8;
3488 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3489 		gbe_dev->max_num_slaves = 1;
3490 	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3491 		gbe_dev->max_num_slaves = 2;
3492 	} else {
3493 		dev_err(dev, "device tree node for unknown device\n");
3494 		return -EINVAL;
3495 	}
3496 	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3497 
3498 	gbe_dev->dev = dev;
3499 	gbe_dev->netcp_device = netcp_device;
3500 	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3501 
3502 	/* init the hw stats lock */
3503 	spin_lock_init(&gbe_dev->hw_stats_lock);
3504 
3505 	if (of_find_property(node, "enable-ale", NULL)) {
3506 		gbe_dev->enable_ale = true;
3507 		dev_info(dev, "ALE enabled\n");
3508 	} else {
3509 		gbe_dev->enable_ale = false;
3510 		dev_dbg(dev, "ALE bypass enabled\n");
3511 	}
3512 
3513 	ret = of_property_read_u32(node, "tx-queue",
3514 				   &gbe_dev->tx_queue_id);
3515 	if (ret < 0) {
3516 		dev_err(dev, "missing \"tx-queue\" parameter\n");
3517 		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3518 	}
3519 
3520 	ret = of_property_read_string(node, "tx-channel",
3521 				      &gbe_dev->dma_chan_name);
3522 	if (ret < 0) {
3523 		dev_err(dev, "missing \"tx-channel\" parameter\n");
3524 		return -EINVAL;
3525 	}
3526 
3527 	if (!strcmp(node->name, "gbe")) {
3528 		ret = get_gbe_resource_version(gbe_dev, node);
3529 		if (ret)
3530 			return ret;
3531 
3532 		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3533 
3534 		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3535 			ret = set_gbe_ethss14_priv(gbe_dev, node);
3536 		else if (IS_SS_ID_MU(gbe_dev))
3537 			ret = set_gbenu_ethss_priv(gbe_dev, node);
3538 		else
3539 			ret = -ENODEV;
3540 
3541 	} else if (!strcmp(node->name, "xgbe")) {
3542 		ret = set_xgbe_ethss10_priv(gbe_dev, node);
3543 		if (ret)
3544 			return ret;
3545 		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3546 					     gbe_dev->ss_regs);
3547 	} else {
3548 		dev_err(dev, "unknown GBE node(%s)\n", node->name);
3549 		ret = -ENODEV;
3550 	}
3551 
3552 	if (ret)
3553 		return ret;
3554 
3555 	interfaces = of_get_child_by_name(node, "interfaces");
3556 	if (!interfaces)
3557 		dev_err(dev, "could not find interfaces\n");
3558 
3559 	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3560 				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3561 	if (ret)
3562 		return ret;
3563 
3564 	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3565 	if (ret)
3566 		return ret;
3567 
3568 	/* Create network interfaces */
3569 	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3570 	for_each_child_of_node(interfaces, interface) {
3571 		ret = of_property_read_u32(interface, "slave-port", &slave_num);
3572 		if (ret) {
3573 			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
3574 				interface->name);
3575 			continue;
3576 		}
3577 		gbe_dev->num_slaves++;
3578 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3579 			of_node_put(interface);
3580 			break;
3581 		}
3582 	}
3583 	of_node_put(interfaces);
3584 
3585 	if (!gbe_dev->num_slaves)
3586 		dev_warn(dev, "No network interface configured\n");
3587 
3588 	/* Initialize Secondary slave ports */
3589 	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3590 	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3591 	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
3592 		init_secondary_ports(gbe_dev, secondary_ports);
3593 	of_node_put(secondary_ports);
3594 
3595 	if (!gbe_dev->num_slaves) {
3596 		dev_err(dev,
3597 			"No network interface or secondary ports configured\n");
3598 		ret = -ENODEV;
3599 		goto free_sec_ports;
3600 	}
3601 
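	/* Set up the address lookup engine (ALE) shared by all ports.
	 * NU/2U class switches need the NU-specific ALE handling in
	 * cpsw_ale, selected via nu_switch_ale.
	 */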
3602 	memset(&ale_params, 0, sizeof(ale_params));
3603 	ale_params.dev		= gbe_dev->dev;
3604 	ale_params.ale_regs	= gbe_dev->ale_reg;
3605 	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
3606 	ale_params.ale_entries	= gbe_dev->ale_entries;
3607 	ale_params.ale_ports	= gbe_dev->ale_ports;
3608 	if (IS_SS_ID_MU(gbe_dev)) {
3609 		ale_params.major_ver_mask = 0x7;
3610 		ale_params.nu_switch_ale = true;
3611 	}
3612 	gbe_dev->ale = cpsw_ale_create(&ale_params);
3613 	if (!gbe_dev->ale) {
3614 		dev_err(gbe_dev->dev, "error initializing ale engine\n");
3615 		ret = -ENODEV;
3616 		goto free_sec_ports;
3617 	} else {
3618 		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3619 	}
3620 
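	/* CPTS is optional: a failure from cpts_create() is treated as
	 * fatal only when CONFIG_TI_CPTS is enabled.
	 */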
3621 	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
3622 	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3623 		ret = PTR_ERR(gbe_dev->cpts);
3624 		goto free_sec_ports;
3625 	}
3626 
3627 	/* initialize host port */
3628 	gbe_init_host_port(gbe_dev);
3629 
3630 	spin_lock_bh(&gbe_dev->hw_stats_lock);
3631 	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3632 		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3633 			gbe_reset_mod_stats_ver14(gbe_dev, i);
3634 		else
3635 			gbe_reset_mod_stats(gbe_dev, i);
3636 	}
3637 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
3638 
3639 	init_timer(&gbe_dev->timer);
3640 	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
3641 	gbe_dev->timer.function = netcp_ethss_timer;
3642 	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
3643 	add_timer(&gbe_dev->timer);
3644 	*inst_priv = gbe_dev;
3645 	return 0;
3646 
3647 free_sec_ports:
3648 	free_secondary_ports(gbe_dev);
3649 	return ret;
3650 }
3651 
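/* Attach a network interface to this GBE instance: allocate the
 * per-interface and per-slave state, initialize the slave from the
 * interface device tree node, and hook up the shared TX pipe and
 * keystone ethtool ops.
 */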
3652 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3653 		      struct device_node *node, void **intf_priv)
3654 {
3655 	struct gbe_priv *gbe_dev = inst_priv;
3656 	struct gbe_intf *gbe_intf;
3657 	int ret;
3658 
3659 	if (!node) {
3660 		dev_err(gbe_dev->dev, "interface node not available\n");
3661 		return -ENODEV;
3662 	}
3663 
3664 	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3665 	if (!gbe_intf)
3666 		return -ENOMEM;
3667 
3668 	gbe_intf->ndev = ndev;
3669 	gbe_intf->dev = gbe_dev->dev;
3670 	gbe_intf->gbe_dev = gbe_dev;
3671 
3672 	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3673 					sizeof(*gbe_intf->slave),
3674 					GFP_KERNEL);
3675 	if (!gbe_intf->slave) {
3676 		ret = -ENOMEM;
3677 		goto fail;
3678 	}
3679 
3680 	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3681 		ret = -ENODEV;
3682 		goto fail;
3683 	}
3684 
3685 	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3686 	ndev->ethtool_ops = &keystone_ethtool_ops;
3687 	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3688 	*intf_priv = gbe_intf;
3689 	return 0;
3690 
3691 fail:
3692 	if (gbe_intf->slave)
3693 		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3694 	if (gbe_intf)
3695 		devm_kfree(gbe_dev->dev, gbe_intf);
3696 	return ret;
3697 }
3698 
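/* Undo gbe_attach() for one interface: unhook its ethtool ops, drop it
 * from the instance's interface list and free the per-interface state.
 */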
3699 static int gbe_release(void *intf_priv)
3700 {
3701 	struct gbe_intf *gbe_intf = intf_priv;
3702 
3703 	gbe_intf->ndev->ethtool_ops = NULL;
3704 	list_del(&gbe_intf->gbe_intf_list);
3705 	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3706 	devm_kfree(gbe_intf->dev, gbe_intf);
3707 	return 0;
3708 }
3709 
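/* Tear down a GBE/XGBE instance: stop the stats timer, release the
 * CPTS, ALE and TX pipe resources, free the secondary slave ports and
 * warn if any attached interfaces were not released first.
 */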
3710 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3711 {
3712 	struct gbe_priv *gbe_dev = inst_priv;
3713 
3714 	del_timer_sync(&gbe_dev->timer);
3715 	cpts_release(gbe_dev->cpts);
3716 	cpsw_ale_stop(gbe_dev->ale);
3717 	cpsw_ale_destroy(gbe_dev->ale);
3718 	netcp_txpipe_close(&gbe_dev->tx_pipe);
3719 	free_secondary_ports(gbe_dev);
3720 
3721 	if (!list_empty(&gbe_dev->gbe_intf_head))
3722 		dev_alert(gbe_dev->dev,
3723 			  "unreleased ethss interfaces present\n");
3724 
3725 	return 0;
3726 }
3727 
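/* The 1G (GBE) and 10G (XGBE) subsystems share the same set of netcp
 * module callbacks; only the module name differs so each binds to its
 * own device tree node.
 */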
3728 static struct netcp_module gbe_module = {
3729 	.name		= GBE_MODULE_NAME,
3730 	.owner		= THIS_MODULE,
3731 	.primary	= true,
3732 	.probe		= gbe_probe,
3733 	.open		= gbe_open,
3734 	.close		= gbe_close,
3735 	.remove		= gbe_remove,
3736 	.attach		= gbe_attach,
3737 	.release	= gbe_release,
3738 	.add_addr	= gbe_add_addr,
3739 	.del_addr	= gbe_del_addr,
3740 	.add_vid	= gbe_add_vid,
3741 	.del_vid	= gbe_del_vid,
3742 	.ioctl		= gbe_ioctl,
3743 };
3744 
3745 static struct netcp_module xgbe_module = {
3746 	.name		= XGBE_MODULE_NAME,
3747 	.owner		= THIS_MODULE,
3748 	.primary	= true,
3749 	.probe		= gbe_probe,
3750 	.open		= gbe_open,
3751 	.close		= gbe_close,
3752 	.remove		= gbe_remove,
3753 	.attach		= gbe_attach,
3754 	.release	= gbe_release,
3755 	.add_addr	= gbe_add_addr,
3756 	.del_addr	= gbe_del_addr,
3757 	.add_vid	= gbe_add_vid,
3758 	.del_vid	= gbe_del_vid,
3759 	.ioctl		= gbe_ioctl,
3760 };
3761 
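/* Register both module descriptors with the netcp core, which invokes
 * the probe/attach callbacks above for matching device tree nodes.
 */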
3762 static int __init keystone_gbe_init(void)
3763 {
3764 	int ret;
3765 
3766 	ret = netcp_register_module(&gbe_module);
3767 	if (ret)
3768 		return ret;
3769 
	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		netcp_unregister_module(&gbe_module);
		return ret;
	}
3773 
3774 	return 0;
3775 }
3776 module_init(keystone_gbe_init);
3777 
3778 static void __exit keystone_gbe_exit(void)
3779 {
3780 	netcp_unregister_module(&gbe_module);
3781 	netcp_unregister_module(&xgbe_module);
3782 }
3783 module_exit(keystone_gbe_exit);
3784 
3785 MODULE_LICENSE("GPL v2");
3786 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3787 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
3788