xref: /linux/drivers/net/ethernet/ti/am65-cpsw-nuss.c (revision 9fc31a9251de4acaab2d0704450d70ddc99f5ea2)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
3  *
4  * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
5  *
6  */
7 
8 #include <linux/bpf_trace.h>
9 #include <linux/clk.h>
10 #include <linux/etherdevice.h>
11 #include <linux/if_vlan.h>
12 #include <linux/interrupt.h>
13 #include <linux/irqdomain.h>
14 #include <linux/kernel.h>
15 #include <linux/kmemleak.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/net_tstamp.h>
19 #include <linux/of.h>
20 #include <linux/of_mdio.h>
21 #include <linux/of_net.h>
22 #include <linux/of_device.h>
23 #include <linux/of_platform.h>
24 #include <linux/phylink.h>
25 #include <linux/phy/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/regmap.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/mfd/syscon.h>
31 #include <linux/sys_soc.h>
32 #include <linux/dma/ti-cppi5.h>
33 #include <linux/dma/k3-udma-glue.h>
34 #include <net/page_pool/helpers.h>
35 #include <net/switchdev.h>
36 
37 #include "cpsw_ale.h"
38 #include "cpsw_sl.h"
39 #include "am65-cpsw-nuss.h"
40 #include "am65-cpsw-switchdev.h"
41 #include "k3-cppi-desc-pool.h"
42 #include "am65-cpts.h"
43 
44 #define AM65_CPSW_SS_BASE	0x0
45 #define AM65_CPSW_SGMII_BASE	0x100
46 #define AM65_CPSW_XGMII_BASE	0x2100
47 #define AM65_CPSW_CPSW_NU_BASE	0x20000
48 #define AM65_CPSW_NU_PORTS_BASE	0x1000
49 #define AM65_CPSW_NU_FRAM_BASE	0x12000
50 #define AM65_CPSW_NU_STATS_BASE	0x1a000
51 #define AM65_CPSW_NU_ALE_BASE	0x1e000
52 #define AM65_CPSW_NU_CPTS_BASE	0x1d000
53 
54 #define AM65_CPSW_NU_PORTS_OFFSET	0x1000
55 #define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
56 #define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200
57 
58 #define AM65_CPSW_MAX_PORTS	8
59 
60 #define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
61 #define AM65_CPSW_MAX_PACKET_SIZE	2024
62 
63 #define AM65_CPSW_REG_CTL		0x004
64 #define AM65_CPSW_REG_STAT_PORT_EN	0x014
65 #define AM65_CPSW_REG_PTYPE		0x018
66 
67 #define AM65_CPSW_P0_REG_CTL			0x004
68 #define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008
69 
70 #define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
71 #define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
72 #define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024
73 
74 #define AM65_CPSW_PORTN_REG_SA_L		0x308
75 #define AM65_CPSW_PORTN_REG_SA_H		0x30c
76 #define AM65_CPSW_PORTN_REG_TS_CTL              0x310
77 #define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
78 #define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
79 #define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2       0x31C
80 
81 #define AM65_CPSW_SGMII_CONTROL_REG		0x010
82 #define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG	0x018
83 #define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE	BIT(0)
84 
85 #define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
86 #define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
87 #define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
88 #define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)
89 
90 /* AM65_CPSW_P0_REG_CTL */
91 #define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)
92 #define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN	BIT(16)
93 
94 /* AM65_CPSW_PORT_REG_PRI_CTL */
95 #define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)
96 
97 /* AM65_CPSW_PN_TS_CTL register fields */
98 #define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
99 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
100 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
101 #define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
102 #define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
103 #define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
104 #define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16
105 
106 /* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
107 #define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16
108 
109 /* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
110 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
111 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
112 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
113 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
114 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
115 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
116 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
117 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
118 
119 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
120 #define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
121 
122 #define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
123 
124 #define AM65_CPSW_TS_TX_ANX_ALL_EN		\
125 	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
126 	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
127 	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
128 
129 #define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
130 /* Number of TX/RX descriptors */
131 #define AM65_CPSW_MAX_TX_DESC	500
132 #define AM65_CPSW_MAX_RX_DESC	500
133 
134 #define AM65_CPSW_NAV_PS_DATA_SIZE 16
135 #define AM65_CPSW_NAV_SW_DATA_SIZE 16
136 
137 #define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
138 			 NETIF_MSG_IFUP	| NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
139 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
140 
141 #define AM65_CPSW_DEFAULT_TX_CHNS	8
142 
143 /* CPPI streaming packet interface */
144 #define AM65_CPSW_CPPI_TX_FLOW_ID  0x3FFF
145 #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
146 
147 /* XDP */
148 #define AM65_CPSW_XDP_CONSUMED 2
149 #define AM65_CPSW_XDP_REDIRECT 1
150 #define AM65_CPSW_XDP_PASS     0
151 
152 /* Include headroom compatible with both skb and xdpf */
153 #define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
154 
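/* Program the port's MAC address into the MAC SL source address registers:
 * mac_hi carries dev_addr bytes 0-3 and mac_lo bytes 4-5, written to the
 * SA_H and SA_L registers respectively.
 */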
155 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
156 				      const u8 *dev_addr)
157 {
158 	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
159 		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
160 	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
161 
162 	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
163 	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
164 }
165 
166 static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
167 {
168 	cpsw_sl_reset(port->slave.mac_sl, 100);
169 	/* Max length register has to be restored after MAC SL reset */
170 	writel(AM65_CPSW_MAX_PACKET_SIZE,
171 	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
172 }
173 
174 static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
175 {
176 	common->nuss_ver = readl(common->ss_base);
177 	common->cpsw_ver = readl(common->cpsw_base);
178 	dev_info(common->dev,
179 		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
180 		common->nuss_ver,
181 		common->cpsw_ver,
182 		common->port_num + 1,
183 		common->pdata.quirks);
184 }
185 
186 static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
187 					    __be16 proto, u16 vid)
188 {
189 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
190 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
191 	u32 port_mask, unreg_mcast = 0;
192 	int ret;
193 
194 	if (!common->is_emac_mode)
195 		return 0;
196 
197 	if (!netif_running(ndev) || !vid)
198 		return 0;
199 
200 	ret = pm_runtime_resume_and_get(common->dev);
201 	if (ret < 0)
202 		return ret;
203 
204 	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
205 	if (!vid)
206 		unreg_mcast = port_mask;
207 	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
208 	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
209 				       unreg_mcast, port_mask, 0);
210 
211 	pm_runtime_put(common->dev);
212 	return ret;
213 }
214 
215 static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
216 					     __be16 proto, u16 vid)
217 {
218 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
219 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
220 	int ret;
221 
222 	if (!common->is_emac_mode)
223 		return 0;
224 
225 	if (!netif_running(ndev) || !vid)
226 		return 0;
227 
228 	ret = pm_runtime_resume_and_get(common->dev);
229 	if (ret < 0)
230 		return ret;
231 
232 	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
233 	ret = cpsw_ale_del_vlan(common->ale, vid,
234 				BIT(port->port_id) | ALE_PORT_HOST);
235 
236 	pm_runtime_put(common->dev);
237 	return ret;
238 }
239 
240 static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
241 					bool promisc)
242 {
243 	struct am65_cpsw_common *common = port->common;
244 
245 	if (promisc && !common->is_emac_mode) {
246 		dev_dbg(common->dev, "promisc mode requested in switch mode");
247 		return;
248 	}
249 
250 	if (promisc) {
251 		/* Enable promiscuous mode */
252 		cpsw_ale_control_set(common->ale, port->port_id,
253 				     ALE_PORT_MACONLY_CAF, 1);
254 		dev_dbg(common->dev, "promisc enabled\n");
255 	} else {
256 		/* Disable promiscuous mode */
257 		cpsw_ale_control_set(common->ale, port->port_id,
258 				     ALE_PORT_MACONLY_CAF, 0);
259 		dev_dbg(common->dev, "promisc disabled\n");
260 	}
261 }
262 
263 static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
264 {
265 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
266 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
267 	u32 port_mask;
268 	bool promisc;
269 
270 	promisc = !!(ndev->flags & IFF_PROMISC);
271 	am65_cpsw_slave_set_promisc(port, promisc);
272 
273 	if (promisc)
274 		return;
275 
276 	/* Restore allmulti on vlans if necessary */
277 	cpsw_ale_set_allmulti(common->ale,
278 			      ndev->flags & IFF_ALLMULTI, port->port_id);
279 
280 	port_mask = ALE_PORT_HOST;
281 	/* Clear all mcast from ALE */
282 	cpsw_ale_flush_multicast(common->ale, port_mask, -1);
283 
284 	if (!netdev_mc_empty(ndev)) {
285 		struct netdev_hw_addr *ha;
286 
287 		/* program multicast address list into ALE register */
288 		netdev_for_each_mc_addr(ha, ndev) {
289 			cpsw_ale_add_mcast(common->ale, ha->addr,
290 					   port_mask, 0, 0, 0);
291 		}
292 	}
293 }
294 
295 static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
296 					       unsigned int txqueue)
297 {
298 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
299 	struct am65_cpsw_tx_chn *tx_chn;
300 	struct netdev_queue *netif_txq;
301 	unsigned long trans_start;
302 
303 	netif_txq = netdev_get_tx_queue(ndev, txqueue);
304 	tx_chn = &common->tx_chns[txqueue];
305 	trans_start = READ_ONCE(netif_txq->trans_start);
306 
307 	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
308 		   txqueue,
309 		   netif_tx_queue_stopped(netif_txq),
310 		   jiffies_to_msecs(jiffies - trans_start),
311 		   netdev_queue_dql_avail(netif_txq),
312 		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));
313 
314 	if (netif_tx_queue_stopped(netif_txq)) {
315 		/* try to recover if we stopped the queue */
316 		txq_trans_update(netif_txq);
317 		netif_tx_wake_queue(netif_txq);
318 	}
319 }
320 
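/* Allocate an RX host descriptor for @page, DMA-map the buffer area past the
 * reserved headroom and push the descriptor to the RX channel's free queue so
 * the hardware can fill it with a received packet.
 */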
321 static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
322 				  struct page *page)
323 {
324 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
325 	struct cppi5_host_desc_t *desc_rx;
326 	struct device *dev = common->dev;
327 	dma_addr_t desc_dma;
328 	dma_addr_t buf_dma;
329 	void *swdata;
330 
331 	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
332 	if (!desc_rx) {
333 		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
334 		return -ENOMEM;
335 	}
336 	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
337 
338 	buf_dma = dma_map_single(rx_chn->dma_dev,
339 				 page_address(page) + AM65_CPSW_HEADROOM,
340 				 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
341 	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
342 		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
343 		dev_err(dev, "Failed to map rx buffer\n");
344 		return -EINVAL;
345 	}
346 
347 	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
348 			 AM65_CPSW_NAV_PS_DATA_SIZE);
349 	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
350 	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
351 			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
352 	swdata = cppi5_hdesc_get_swdata(desc_rx);
353 	*((void **)swdata) = page_address(page);
354 
355 	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
356 }
357 
358 void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
359 {
360 	struct am65_cpsw_host *host_p = am65_common_get_host(common);
361 	u32 val, pri_map;
362 
363 	/* P0 set Receive Priority Type */
364 	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
365 
366 	if (common->pf_p0_rx_ptype_rrobin) {
367 		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
368 		/* Enet Port FIFOs work in fixed priority mode only, so
369 		 * reset P0_Rx_Pri_Map so all packets go to Enet FIFO 0
370 		 */
371 		pri_map = 0x0;
372 	} else {
373 		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
374 		/* restore P0_Rx_Pri_Map */
375 		pri_map = 0x76543210;
376 	}
377 
378 	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
379 	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
380 }
381 
382 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
383 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
384 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
385 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
386 
387 static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
388 {
389 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
390 	struct xdp_rxq_info *rxq;
391 	int i;
392 
393 	for (i = 0; i < common->port_num; i++) {
394 		rxq = &common->ports[i].xdp_rxq;
395 
396 		if (xdp_rxq_info_is_reg(rxq))
397 			xdp_rxq_info_unreg(rxq);
398 	}
399 
400 	if (rx_chn->page_pool) {
401 		page_pool_destroy(rx_chn->page_pool);
402 		rx_chn->page_pool = NULL;
403 	}
404 }
405 
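/* Create one shared page_pool for the RX channel and register an XDP RX
 * queue info structure for each port, backed by that pool.
 */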
406 static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
407 {
408 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
409 	struct page_pool_params pp_params = {
410 		.flags = PP_FLAG_DMA_MAP,
411 		.order = 0,
412 		.pool_size = AM65_CPSW_MAX_RX_DESC,
413 		.nid = dev_to_node(common->dev),
414 		.dev = common->dev,
415 		.dma_dir = DMA_BIDIRECTIONAL,
416 		.napi = &common->napi_rx,
417 	};
418 	struct xdp_rxq_info *rxq;
419 	struct page_pool *pool;
420 	int i, ret;
421 
422 	pool = page_pool_create(&pp_params);
423 	if (IS_ERR(pool))
424 		return PTR_ERR(pool);
425 
426 	rx_chn->page_pool = pool;
427 
428 	for (i = 0; i < common->port_num; i++) {
429 		rxq = &common->ports[i].xdp_rxq;
430 
431 		ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
432 		if (ret)
433 			goto err;
434 
435 		ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
436 		if (ret)
437 			goto err;
438 	}
439 
440 	return 0;
441 
442 err:
443 	am65_cpsw_destroy_xdp_rxqs(common);
444 	return ret;
445 }
446 
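/* Convert a descriptor CPU address into its index within the descriptor pool
 * (offset from the pool base shifted by the descriptor size log2).
 */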
447 static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
448 				   void *desc,
449 				   unsigned char dsize_log2)
450 {
451 	void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);
452 
453 	return (desc - pool_addr) >> dsize_log2;
454 }
455 
456 static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
457 					struct cppi5_host_desc_t *desc,
458 					enum am65_cpsw_tx_buf_type buf_type)
459 {
460 	int desc_idx;
461 
462 	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
463 					   tx_chn->dsize_log2);
464 	k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
465 					(void *)buf_type);
466 }
467 
468 static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
469 							  dma_addr_t desc_dma)
470 {
471 	struct cppi5_host_desc_t *desc_tx;
472 	int desc_idx;
473 
474 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
475 	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
476 					   tx_chn->dsize_log2);
477 
478 	return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
479 								       desc_idx);
480 }
481 
482 static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
483 				      struct page *page,
484 				      bool allow_direct,
485 				      int desc_idx)
486 {
487 	page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
488 	rx_chn->pages[desc_idx] = NULL;
489 }
490 
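/* RX channel reset/teardown callback: unmap the buffer of a descriptor still
 * owned by the hardware, free the descriptor back to the pool and return its
 * page to the page_pool.
 */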
491 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
492 {
493 	struct am65_cpsw_rx_chn *rx_chn = data;
494 	struct cppi5_host_desc_t *desc_rx;
495 	dma_addr_t buf_dma;
496 	u32 buf_dma_len;
497 	void *page_addr;
498 	void **swdata;
499 	int desc_idx;
500 
501 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
502 	swdata = cppi5_hdesc_get_swdata(desc_rx);
503 	page_addr = *swdata;
504 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
505 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
506 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
507 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
508 
509 	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
510 					   rx_chn->dsize_log2);
511 	am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
512 }
513 
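/* Unmap all buffers attached to a (possibly chained) TX host descriptor and
 * return every descriptor in the chain to the descriptor pool.
 */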
514 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
515 				     struct cppi5_host_desc_t *desc)
516 {
517 	struct cppi5_host_desc_t *first_desc, *next_desc;
518 	dma_addr_t buf_dma, next_desc_dma;
519 	u32 buf_dma_len;
520 
521 	first_desc = desc;
522 	next_desc = first_desc;
523 
524 	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
525 	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
526 
527 	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
528 
529 	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
530 	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
531 	while (next_desc_dma) {
532 		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
533 						       next_desc_dma);
534 		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
535 		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
536 
537 		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
538 			       DMA_TO_DEVICE);
539 
540 		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
541 		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
542 
543 		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
544 	}
545 
546 	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
547 }
548 
549 static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
550 {
551 	struct am65_cpsw_tx_chn *tx_chn = data;
552 	struct cppi5_host_desc_t *desc_tx;
553 	struct sk_buff *skb;
554 	void **swdata;
555 
556 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
557 	swdata = cppi5_hdesc_get_swdata(desc_tx);
558 	skb = *(swdata);
559 	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
560 
561 	dev_kfree_skb_any(skb);
562 }
563 
564 static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
565 					   struct net_device *ndev,
566 					   unsigned int len)
567 {
568 	struct sk_buff *skb;
569 
570 	len += AM65_CPSW_HEADROOM;
571 
572 	skb = build_skb(page_addr, len);
573 	if (unlikely(!skb))
574 		return NULL;
575 
576 	skb_reserve(skb, AM65_CPSW_HEADROOM);
577 	skb->dev = ndev;
578 
579 	return skb;
580 }
581 
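/* One-time bring-up of the shared switch resources, done when the first port
 * is opened: program the CPSW and host port control registers, start the ALE,
 * create the XDP RX queues and page pool, fill the RX channel with pages and
 * enable the TX/RX DMA channels and NAPI.
 */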
582 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
583 {
584 	struct am65_cpsw_host *host_p = am65_common_get_host(common);
585 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
586 	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
587 	int port_idx, i, ret, tx;
588 	u32 val, port_mask;
589 	struct page *page;
590 
591 	if (common->usage_count)
592 		return 0;
593 
594 	/* Control register */
595 	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
596 	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
597 	       common->cpsw_base + AM65_CPSW_REG_CTL);
598 	/* Max length register */
599 	writel(AM65_CPSW_MAX_PACKET_SIZE,
600 	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
601 	/* set base flow_id */
602 	writel(common->rx_flow_id_base,
603 	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
604 	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
605 	       host_p->port_base + AM65_CPSW_P0_REG_CTL);
606 
607 	am65_cpsw_nuss_set_p0_ptype(common);
608 
609 	/* enable statistics */
610 	val = BIT(HOST_PORT_NUM);
611 	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
612 		struct am65_cpsw_port *port = &common->ports[port_idx];
613 
614 		if (!port->disabled)
615 			val |=  BIT(port->port_id);
616 	}
617 	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
618 
619 	/* disable priority elevation */
620 	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
621 
622 	cpsw_ale_start(common->ale);
623 
624 	/* limit to one RX flow only */
625 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
626 			     ALE_DEFAULT_THREAD_ID, 0);
627 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
628 			     ALE_DEFAULT_THREAD_ENABLE, 1);
629 	/* switch to VLAN aware mode */
630 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
631 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
632 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
633 
634 	/* default vlan cfg: create mask based on enabled ports */
635 	port_mask = GENMASK(common->port_num, 0) &
636 		    ~common->disabled_ports_mask;
637 
638 	cpsw_ale_add_vlan(common->ale, 0, port_mask,
639 			  port_mask, port_mask,
640 			  port_mask & ~ALE_PORT_HOST);
641 
642 	if (common->is_emac_mode)
643 		am65_cpsw_init_host_port_emac(common);
644 	else
645 		am65_cpsw_init_host_port_switch(common);
646 
647 	am65_cpsw_qos_tx_p0_rate_init(common);
648 
649 	ret = am65_cpsw_create_xdp_rxqs(common);
650 	if (ret) {
651 		dev_err(common->dev, "Failed to create XDP rx queues\n");
652 		return ret;
653 	}
654 
655 	for (i = 0; i < rx_chn->descs_num; i++) {
656 		page = page_pool_dev_alloc_pages(rx_chn->page_pool);
657 		if (!page) {
658 			ret = -ENOMEM;
659 			if (i)
660 				goto fail_rx;
661 
662 			return ret;
663 		}
664 		rx_chn->pages[i] = page;
665 
666 		ret = am65_cpsw_nuss_rx_push(common, page);
667 		if (ret < 0) {
668 			dev_err(common->dev,
669 				"cannot submit page to channel rx: %d\n",
670 				ret);
671 			am65_cpsw_put_page(rx_chn, page, false, i);
672 			if (i)
673 				goto fail_rx;
674 
675 			return ret;
676 		}
677 	}
678 
679 	ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
680 	if (ret) {
681 		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
682 		goto fail_rx;
683 	}
684 
685 	for (tx = 0; tx < common->tx_ch_num; tx++) {
686 		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
687 		if (ret) {
688 			dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
689 				tx, ret);
690 			tx--;
691 			goto fail_tx;
692 		}
693 		napi_enable(&tx_chn[tx].napi_tx);
694 	}
695 
696 	napi_enable(&common->napi_rx);
697 	if (common->rx_irq_disabled) {
698 		common->rx_irq_disabled = false;
699 		enable_irq(rx_chn->irq);
700 	}
701 
702 	dev_dbg(common->dev, "cpsw_nuss started\n");
703 	return 0;
704 
705 fail_tx:
706 	while (tx >= 0) {
707 		napi_disable(&tx_chn[tx].napi_tx);
708 		k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
709 		tx--;
710 	}
711 
712 	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
713 
714 fail_rx:
715 	k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
716 				  am65_cpsw_nuss_rx_cleanup, 0);
717 	return ret;
718 }
719 
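/* Tear down the shared switch resources when the last open port is closed:
 * disable the ALE host port, tear down and reset the TX/RX DMA channels,
 * disable NAPI and release the RX pages and XDP RX queues.
 */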
720 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
721 {
722 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
723 	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
724 	int i;
725 
726 	if (common->usage_count != 1)
727 		return 0;
728 
729 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
730 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
731 
732 	/* shutdown tx channels */
733 	atomic_set(&common->tdown_cnt, common->tx_ch_num);
734 	/* ensure new tdown_cnt value is visible */
735 	smp_mb__after_atomic();
736 	reinit_completion(&common->tdown_complete);
737 
738 	for (i = 0; i < common->tx_ch_num; i++)
739 		k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
740 
741 	i = wait_for_completion_timeout(&common->tdown_complete,
742 					msecs_to_jiffies(1000));
743 	if (!i)
744 		dev_err(common->dev, "tx timeout\n");
745 	for (i = 0; i < common->tx_ch_num; i++) {
746 		napi_disable(&tx_chn[i].napi_tx);
747 		hrtimer_cancel(&tx_chn[i].tx_hrtimer);
748 	}
749 
750 	for (i = 0; i < common->tx_ch_num; i++) {
751 		k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
752 					  am65_cpsw_nuss_tx_cleanup);
753 		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
754 	}
755 
756 	reinit_completion(&common->tdown_complete);
757 	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
758 
759 	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
760 		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
761 		if (!i)
762 			dev_err(common->dev, "rx teardown timeout\n");
763 	}
764 
765 	napi_disable(&common->napi_rx);
766 	hrtimer_cancel(&common->rx_hrtimer);
767 
768 	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
769 		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
770 					  am65_cpsw_nuss_rx_cleanup, !!i);
771 
772 	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
773 
774 	cpsw_ale_stop(common->ale);
775 
776 	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
777 	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
778 
779 	for (i = 0; i < rx_chn->descs_num; i++) {
780 		if (rx_chn->pages[i])
781 			am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
782 	}
783 	am65_cpsw_destroy_xdp_rxqs(common);
784 
785 	dev_dbg(common->dev, "cpsw_nuss stopped\n");
786 	return 0;
787 }
788 
789 static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
790 {
791 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
792 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
793 	int ret;
794 
795 	phylink_stop(port->slave.phylink);
796 
797 	netif_tx_stop_all_queues(ndev);
798 
799 	phylink_disconnect_phy(port->slave.phylink);
800 
801 	ret = am65_cpsw_nuss_common_stop(common);
802 	if (ret)
803 		return ret;
804 
805 	common->usage_count--;
806 	pm_runtime_put(common->dev);
807 	return 0;
808 }
809 
810 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
811 {
812 	struct am65_cpsw_port *port = arg;
813 
814 	if (!vdev)
815 		return 0;
816 
817 	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
818 }
819 
820 static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
821 {
822 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
823 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
824 	int ret, i;
825 	u32 reg;
826 
827 	ret = pm_runtime_resume_and_get(common->dev);
828 	if (ret < 0)
829 		return ret;
830 
831 	/* Idle MAC port */
832 	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
833 	cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
834 	cpsw_sl_ctl_reset(port->slave.mac_sl);
835 
836 	/* soft reset MAC */
837 	cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
838 	mdelay(1);
839 	reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
840 	if (reg) {
841 		dev_err(common->dev, "soft RESET didn't complete\n");
842 		ret = -ETIMEDOUT;
843 		goto runtime_put;
844 	}
845 
846 	/* Notify the stack of the actual queue counts. */
847 	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
848 	if (ret) {
849 		dev_err(common->dev, "cannot set real number of tx queues\n");
850 		goto runtime_put;
851 	}
852 
853 	ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
854 	if (ret) {
855 		dev_err(common->dev, "cannot set real number of rx queues\n");
856 		goto runtime_put;
857 	}
858 
859 	for (i = 0; i < common->tx_ch_num; i++) {
860 		struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
861 
862 		netdev_tx_reset_queue(txq);
863 		txq->tx_maxrate =  common->tx_chns[i].rate_mbps;
864 	}
865 
866 	ret = am65_cpsw_nuss_common_open(common);
867 	if (ret)
868 		goto runtime_put;
869 
870 	common->usage_count++;
871 
872 	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
873 
874 	if (common->is_emac_mode)
875 		am65_cpsw_init_port_emac_ale(port);
876 	else
877 		am65_cpsw_init_port_switch_ale(port);
878 
879 	/* mac_sl should be configured via the phylink interface */
880 	am65_cpsw_sl_ctl_reset(port);
881 
882 	ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
883 	if (ret)
884 		goto error_cleanup;
885 
886 	/* restore vlan configurations */
887 	vlan_for_each(ndev, cpsw_restore_vlans, port);
888 
889 	phylink_start(port->slave.phylink);
890 
891 	return 0;
892 
893 error_cleanup:
894 	am65_cpsw_nuss_ndo_slave_stop(ndev);
895 	return ret;
896 
897 runtime_put:
898 	pm_runtime_put(common->dev);
899 	return ret;
900 }
901 
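/* Transmit one xdp_frame on @tx_chn: DMA-map the frame data, build a
 * single-buffer host descriptor (tagged with @buf_type so the completion path
 * knows how to return the frame) and push it to the TX channel. BQL is
 * charged before the push and reverted on failure.
 */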
902 static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
903 				  struct am65_cpsw_tx_chn *tx_chn,
904 				  struct xdp_frame *xdpf,
905 				  enum am65_cpsw_tx_buf_type buf_type)
906 {
907 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
908 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
909 	struct cppi5_host_desc_t *host_desc;
910 	struct netdev_queue *netif_txq;
911 	dma_addr_t dma_desc, dma_buf;
912 	u32 pkt_len = xdpf->len;
913 	void **swdata;
914 	int ret;
915 
916 	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
917 	if (unlikely(!host_desc)) {
918 		ndev->stats.tx_dropped++;
919 		return -ENOMEM;
920 	}
921 
922 	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
923 
924 	dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
925 				 pkt_len, DMA_TO_DEVICE);
926 	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
927 		ndev->stats.tx_dropped++;
928 		ret = -ENOMEM;
929 		goto pool_free;
930 	}
931 
932 	cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
933 			 AM65_CPSW_NAV_PS_DATA_SIZE);
934 	cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
935 	cppi5_hdesc_set_pktlen(host_desc, pkt_len);
936 	cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
937 	cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);
938 
939 	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
940 	cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
941 
942 	swdata = cppi5_hdesc_get_swdata(host_desc);
943 	*(swdata) = xdpf;
944 
945 	/* Report BQL before sending the packet */
946 	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
947 	netdev_tx_sent_queue(netif_txq, pkt_len);
948 
949 	dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
950 	if (AM65_CPSW_IS_CPSW2G(common)) {
951 		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
952 					       dma_desc);
953 	} else {
954 		spin_lock_bh(&tx_chn->lock);
955 		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
956 					       dma_desc);
957 		spin_unlock_bh(&tx_chn->lock);
958 	}
959 	if (ret) {
960 		/* Inform BQL */
961 		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
962 		ndev->stats.tx_errors++;
963 		goto dma_unmap;
964 	}
965 
966 	return 0;
967 
968 dma_unmap:
969 	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
970 	dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
971 pool_free:
972 	k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
973 	return ret;
974 }
975 
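/* Run the port's XDP program on a received buffer. Returns
 * AM65_CPSW_XDP_PASS to hand the packet to the network stack,
 * AM65_CPSW_XDP_CONSUMED when the buffer was consumed here (transmitted via
 * XDP_TX or dropped, with the page recycled to the pool on drop/error) or
 * AM65_CPSW_XDP_REDIRECT when the buffer was redirected to another device.
 */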
976 static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
977 			     struct am65_cpsw_port *port,
978 			     struct xdp_buff *xdp,
979 			     int desc_idx, int cpu, int *len)
980 {
981 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
982 	struct net_device *ndev = port->ndev;
983 	int ret = AM65_CPSW_XDP_CONSUMED;
984 	struct am65_cpsw_tx_chn *tx_chn;
985 	struct netdev_queue *netif_txq;
986 	struct xdp_frame *xdpf;
987 	struct bpf_prog *prog;
988 	struct page *page;
989 	u32 act;
990 
991 	prog = READ_ONCE(port->xdp_prog);
992 	if (!prog)
993 		return AM65_CPSW_XDP_PASS;
994 
995 	act = bpf_prog_run_xdp(prog, xdp);
996 	/* XDP prog might have changed packet data and boundaries */
997 	*len = xdp->data_end - xdp->data;
998 
999 	switch (act) {
1000 	case XDP_PASS:
1001 		ret = AM65_CPSW_XDP_PASS;
1002 		goto out;
1003 	case XDP_TX:
1004 		tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
1005 		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
1006 
1007 		xdpf = xdp_convert_buff_to_frame(xdp);
1008 		if (unlikely(!xdpf))
1009 			break;
1010 
1011 		__netif_tx_lock(netif_txq, cpu);
1012 		ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
1013 					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
1014 		__netif_tx_unlock(netif_txq);
1015 		if (ret)
1016 			break;
1017 
1018 		ndev->stats.rx_bytes += *len;
1019 		ndev->stats.rx_packets++;
1020 		ret = AM65_CPSW_XDP_CONSUMED;
1021 		goto out;
1022 	case XDP_REDIRECT:
1023 		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
1024 			break;
1025 
1026 		ndev->stats.rx_bytes += *len;
1027 		ndev->stats.rx_packets++;
1028 		ret = AM65_CPSW_XDP_REDIRECT;
1029 		goto out;
1030 	default:
1031 		bpf_warn_invalid_xdp_action(ndev, prog, act);
1032 		fallthrough;
1033 	case XDP_ABORTED:
1034 		trace_xdp_exception(ndev, prog, act);
1035 		fallthrough;
1036 	case XDP_DROP:
1037 		ndev->stats.rx_dropped++;
1038 	}
1039 
1040 	page = virt_to_head_page(xdp->data);
1041 	am65_cpsw_put_page(rx_chn, page, true, desc_idx);
1042 
1043 out:
1044 	return ret;
1045 }
1046 
1047 static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
1048 {
1049 	struct skb_shared_hwtstamps *ssh;
1050 	u64 ns;
1051 
1052 	ns = ((u64)psdata[1] << 32) | psdata[0];
1053 
1054 	ssh = skb_hwtstamps(skb);
1055 	memset(ssh, 0, sizeof(*ssh));
1056 	ssh->hwtstamp = ns_to_ktime(ns);
1057 }
1058 
1059 /* RX psdata[2] word format - checksum information */
1060 #define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
1061 #define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
1062 #define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
1063 #define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
1064 #define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
1065 #define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)
1066 
1067 static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
1068 {
1069 	/* HW can verify IPv4/IPv6 TCP/UDP packet checksums.
1070 	 * The csum information is provided in the psdata[2] word:
1071 	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
1072 	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
1073 	 * bits - indicate an IPv4/IPv6 packet
1074 	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
1075 	 * AM65_CPSW_RX_PSD_CSUM_ADD holds 0xFFFF for non-fragmented packets,
1076 	 * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
1077 	 */
1078 	skb_checksum_none_assert(skb);
1079 
1080 	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
1081 		return;
1082 
1083 	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
1084 			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
1085 			  !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
1086 		/* csum for fragmented packets is unsupported */
1087 		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
1088 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1089 	}
1090 }
1091 
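/* Process one completed RX descriptor from @flow_idx: handle teardown
 * markers, run XDP if a program is attached, otherwise build an skb with
 * timestamp and checksum information and hand it to the stack via GRO, then
 * allocate a fresh page and push it back to the RX channel.
 */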
1092 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
1093 				     u32 flow_idx, int cpu)
1094 {
1095 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
1096 	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
1097 	struct am65_cpsw_ndev_priv *ndev_priv;
1098 	struct am65_cpsw_ndev_stats *stats;
1099 	struct cppi5_host_desc_t *desc_rx;
1100 	struct device *dev = common->dev;
1101 	struct page *page, *new_page;
1102 	dma_addr_t desc_dma, buf_dma;
1103 	struct am65_cpsw_port *port;
1104 	int headroom, desc_idx, ret;
1105 	struct net_device *ndev;
1106 	struct sk_buff *skb;
1107 	struct xdp_buff	xdp;
1108 	void *page_addr;
1109 	void **swdata;
1110 	u32 *psdata;
1111 
1112 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
1113 	if (ret) {
1114 		if (ret != -ENODATA)
1115 			dev_err(dev, "RX: pop chn fail %d\n", ret);
1116 		return ret;
1117 	}
1118 
1119 	if (cppi5_desc_is_tdcm(desc_dma)) {
1120 		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
1121 		if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
1122 			complete(&common->tdown_complete);
1123 		return 0;
1124 	}
1125 
1126 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
1127 	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
1128 		__func__, flow_idx, &desc_dma);
1129 
1130 	swdata = cppi5_hdesc_get_swdata(desc_rx);
1131 	page_addr = *swdata;
1132 	page = virt_to_page(page_addr);
1133 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
1134 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
1135 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
1136 	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
1137 	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
1138 	port = am65_common_get_port(common, port_id);
1139 	ndev = port->ndev;
1140 	psdata = cppi5_hdesc_get_psdata(desc_rx);
1141 	csum_info = psdata[2];
1142 	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
1143 
1144 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
1145 
1146 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
1147 
1148 	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
1149 					   rx_chn->dsize_log2);
1150 
1151 	skb = am65_cpsw_build_skb(page_addr, ndev,
1152 				  AM65_CPSW_MAX_PACKET_SIZE);
1153 	if (unlikely(!skb)) {
1154 		new_page = page;
1155 		goto requeue;
1156 	}
1157 
1158 	if (port->xdp_prog) {
1159 		xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
1160 
1161 		xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
1162 				 pkt_len, false);
1163 
1164 		ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
1165 					cpu, &pkt_len);
1166 		if (ret != AM65_CPSW_XDP_PASS)
1167 			return ret;
1168 
1169 		/* Compute additional headroom to be reserved */
1170 		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
1171 		skb_reserve(skb, headroom);
1172 	}
1173 
1174 	/* Pass skb to netstack if no XDP prog or returned XDP_PASS */
1175 	if (port->rx_ts_enabled)
1176 		am65_cpsw_nuss_rx_ts(skb, psdata);
1177 
1178 	ndev_priv = netdev_priv(ndev);
1179 	am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
1180 	skb_put(skb, pkt_len);
1181 	skb_mark_for_recycle(skb);
1182 	skb->protocol = eth_type_trans(skb, ndev);
1183 	am65_cpsw_nuss_rx_csum(skb, csum_info);
1184 	napi_gro_receive(&common->napi_rx, skb);
1185 
1186 	stats = this_cpu_ptr(ndev_priv->stats);
1187 
1188 	u64_stats_update_begin(&stats->syncp);
1189 	stats->rx_packets++;
1190 	stats->rx_bytes += pkt_len;
1191 	u64_stats_update_end(&stats->syncp);
1192 
1193 	new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
1194 	if (unlikely(!new_page))
1195 		return -ENOMEM;
1196 	rx_chn->pages[desc_idx] = new_page;
1197 
1198 	if (netif_dormant(ndev)) {
1199 		am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
1200 		ndev->stats.rx_dropped++;
1201 		return 0;
1202 	}
1203 
1204 requeue:
1205 	ret = am65_cpsw_nuss_rx_push(common, new_page);
1206 	if (WARN_ON(ret < 0)) {
1207 		am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
1208 		ndev->stats.rx_errors++;
1209 		ndev->stats.rx_dropped++;
1210 	}
1211 
1212 	return ret;
1213 }
1214 
1215 static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
1216 {
1217 	struct am65_cpsw_common *common =
1218 			container_of(timer, struct am65_cpsw_common, rx_hrtimer);
1219 
1220 	enable_irq(common->rx_chns.irq);
1221 	return HRTIMER_NORESTART;
1222 }
1223 
1224 static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
1225 {
1226 	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
1227 	int flow = AM65_CPSW_MAX_RX_FLOWS;
1228 	int cpu = smp_processor_id();
1229 	bool xdp_redirect = false;
1230 	int cur_budget, ret;
1231 	int num_rx = 0;
1232 
1233 	/* process every flow */
1234 	while (flow--) {
1235 		cur_budget = budget - num_rx;
1236 
1237 		while (cur_budget--) {
1238 			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
1239 			if (ret) {
1240 				if (ret == AM65_CPSW_XDP_REDIRECT)
1241 					xdp_redirect = true;
1242 				break;
1243 			}
1244 			num_rx++;
1245 		}
1246 
1247 		if (num_rx >= budget)
1248 			break;
1249 	}
1250 
1251 	if (xdp_redirect)
1252 		xdp_do_flush();
1253 
1254 	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
1255 
1256 	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
1257 		if (common->rx_irq_disabled) {
1258 			common->rx_irq_disabled = false;
1259 			if (unlikely(common->rx_pace_timeout)) {
1260 				hrtimer_start(&common->rx_hrtimer,
1261 					      ns_to_ktime(common->rx_pace_timeout),
1262 					      HRTIMER_MODE_REL_PINNED);
1263 			} else {
1264 				enable_irq(common->rx_chns.irq);
1265 			}
1266 		}
1267 	}
1268 
1269 	return num_rx;
1270 }
1271 
1272 static struct sk_buff *
1273 am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
1274 				   dma_addr_t desc_dma)
1275 {
1276 	struct am65_cpsw_ndev_priv *ndev_priv;
1277 	struct am65_cpsw_ndev_stats *stats;
1278 	struct cppi5_host_desc_t *desc_tx;
1279 	struct net_device *ndev;
1280 	struct sk_buff *skb;
1281 	void **swdata;
1282 
1283 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
1284 					     desc_dma);
1285 	swdata = cppi5_hdesc_get_swdata(desc_tx);
1286 	skb = *(swdata);
1287 	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
1288 
1289 	ndev = skb->dev;
1290 
1291 	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
1292 
1293 	ndev_priv = netdev_priv(ndev);
1294 	stats = this_cpu_ptr(ndev_priv->stats);
1295 	u64_stats_update_begin(&stats->syncp);
1296 	stats->tx_packets++;
1297 	stats->tx_bytes += skb->len;
1298 	u64_stats_update_end(&stats->syncp);
1299 
1300 	return skb;
1301 }
1302 
1303 static struct xdp_frame *
1304 am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
1305 				   struct am65_cpsw_tx_chn *tx_chn,
1306 				   dma_addr_t desc_dma,
1307 				   struct net_device **ndev)
1308 {
1309 	struct am65_cpsw_ndev_priv *ndev_priv;
1310 	struct am65_cpsw_ndev_stats *stats;
1311 	struct cppi5_host_desc_t *desc_tx;
1312 	struct am65_cpsw_port *port;
1313 	struct xdp_frame *xdpf;
1314 	u32 port_id = 0;
1315 	void **swdata;
1316 
1317 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
1318 	cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
1319 	swdata = cppi5_hdesc_get_swdata(desc_tx);
1320 	xdpf = *(swdata);
1321 	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
1322 
1323 	port = am65_common_get_port(common, port_id);
1324 	*ndev = port->ndev;
1325 
1326 	ndev_priv = netdev_priv(*ndev);
1327 	stats = this_cpu_ptr(ndev_priv->stats);
1328 	u64_stats_update_begin(&stats->syncp);
1329 	stats->tx_packets++;
1330 	stats->tx_bytes += xdpf->len;
1331 	u64_stats_update_end(&stats->syncp);
1332 
1333 	return xdpf;
1334 }
1335 
1336 static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
1337 				   struct netdev_queue *netif_txq)
1338 {
1339 	if (netif_tx_queue_stopped(netif_txq)) {
1340 		/* Check whether the queue was stopped due to stalled
1341 		 * TX DMA; if so, wake it up now that free TX
1342 		 * descriptors are available again.
1343 		 */
1344 		__netif_tx_lock(netif_txq, smp_processor_id());
1345 		if (netif_running(ndev) &&
1346 		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
1347 			netif_tx_wake_queue(netif_txq);
1348 
1349 		__netif_tx_unlock(netif_txq);
1350 	}
1351 }
1352 
1353 static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
1354 					   int chn, unsigned int budget, bool *tdown)
1355 {
1356 	enum am65_cpsw_tx_buf_type buf_type;
1357 	struct device *dev = common->dev;
1358 	struct am65_cpsw_tx_chn *tx_chn;
1359 	struct netdev_queue *netif_txq;
1360 	unsigned int total_bytes = 0;
1361 	struct net_device *ndev;
1362 	struct xdp_frame *xdpf;
1363 	struct sk_buff *skb;
1364 	dma_addr_t desc_dma;
1365 	int res, num_tx = 0;
1366 
1367 	tx_chn = &common->tx_chns[chn];
1368 
1369 	while (true) {
1370 		spin_lock(&tx_chn->lock);
1371 		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1372 		spin_unlock(&tx_chn->lock);
1373 		if (res == -ENODATA)
1374 			break;
1375 
1376 		if (cppi5_desc_is_tdcm(desc_dma)) {
1377 			if (atomic_dec_and_test(&common->tdown_cnt))
1378 				complete(&common->tdown_complete);
1379 			*tdown = true;
1380 			break;
1381 		}
1382 
1383 		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
1384 		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
1385 			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
1386 			ndev = skb->dev;
1387 			total_bytes = skb->len;
1388 			napi_consume_skb(skb, budget);
1389 		} else {
1390 			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
1391 								  desc_dma, &ndev);
1392 			total_bytes = xdpf->len;
1393 			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
1394 				xdp_return_frame_rx_napi(xdpf);
1395 			else
1396 				xdp_return_frame(xdpf);
1397 		}
1398 		num_tx++;
1399 
1400 		netif_txq = netdev_get_tx_queue(ndev, chn);
1401 
1402 		netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1403 
1404 		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1405 	}
1406 
1407 	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1408 
1409 	return num_tx;
1410 }
1411 
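/* TX completion path for CPSW2G (single external port): every completed
 * descriptor belongs to the same netdev, so BQL accounting and queue wake-up
 * are done once after the completion loop rather than per packet.
 */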
1412 static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
1413 					      int chn, unsigned int budget, bool *tdown)
1414 {
1415 	enum am65_cpsw_tx_buf_type buf_type;
1416 	struct device *dev = common->dev;
1417 	struct am65_cpsw_tx_chn *tx_chn;
1418 	struct netdev_queue *netif_txq;
1419 	unsigned int total_bytes = 0;
1420 	struct net_device *ndev;
1421 	struct xdp_frame *xdpf;
1422 	struct sk_buff *skb;
1423 	dma_addr_t desc_dma;
1424 	int res, num_tx = 0;
1425 
1426 	tx_chn = &common->tx_chns[chn];
1427 
1428 	while (true) {
1429 		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1430 		if (res == -ENODATA)
1431 			break;
1432 
1433 		if (cppi5_desc_is_tdcm(desc_dma)) {
1434 			if (atomic_dec_and_test(&common->tdown_cnt))
1435 				complete(&common->tdown_complete);
1436 			*tdown = true;
1437 			break;
1438 		}
1439 
1440 		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
1441 		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
1442 			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
1443 			ndev = skb->dev;
1444 			total_bytes += skb->len;
1445 			napi_consume_skb(skb, budget);
1446 		} else {
1447 			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
1448 								  desc_dma, &ndev);
1449 			total_bytes += xdpf->len;
1450 			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
1451 				xdp_return_frame_rx_napi(xdpf);
1452 			else
1453 				xdp_return_frame(xdpf);
1454 		}
1455 		num_tx++;
1456 	}
1457 
1458 	if (!num_tx)
1459 		return 0;
1460 
1461 	netif_txq = netdev_get_tx_queue(ndev, chn);
1462 
1463 	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1464 
1465 	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1466 
1467 	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1468 
1469 	return num_tx;
1470 }
1471 
1472 static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
1473 {
1474 	struct am65_cpsw_tx_chn *tx_chns =
1475 			container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);
1476 
1477 	enable_irq(tx_chns->irq);
1478 	return HRTIMER_NORESTART;
1479 }
1480 
1481 static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
1482 {
1483 	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
1484 	bool tdown = false;
1485 	int num_tx;
1486 
1487 	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
1488 		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
1489 							    budget, &tdown);
1490 	else
1491 		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
1492 							 tx_chn->id, budget, &tdown);
1493 
1494 	if (num_tx >= budget)
1495 		return budget;
1496 
1497 	if (napi_complete_done(napi_tx, num_tx)) {
1498 		if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
1499 			hrtimer_start(&tx_chn->tx_hrtimer,
1500 				      ns_to_ktime(tx_chn->tx_pace_timeout),
1501 				      HRTIMER_MODE_REL_PINNED);
1502 		} else {
1503 			enable_irq(tx_chn->irq);
1504 		}
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
1511 {
1512 	struct am65_cpsw_common *common = dev_id;
1513 
1514 	common->rx_irq_disabled = true;
1515 	disable_irq_nosync(irq);
1516 	napi_schedule(&common->napi_rx);
1517 
1518 	return IRQ_HANDLED;
1519 }
1520 
1521 static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
1522 {
1523 	struct am65_cpsw_tx_chn *tx_chn = dev_id;
1524 
1525 	disable_irq_nosync(irq);
1526 	napi_schedule(&tx_chn->napi_tx);
1527 
1528 	return IRQ_HANDLED;
1529 }
1530 
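/* ndo_start_xmit handler: map the skb's linear data and page fragments into a
 * chain of CPPI host descriptors, fill in TX timestamp and checksum offload
 * requests in the descriptor psdata when enabled, push the chain to the TX
 * DMA channel and stop the queue when free descriptors run low.
 */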
1531 static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
1532 						 struct net_device *ndev)
1533 {
1534 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1535 	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
1536 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1537 	struct device *dev = common->dev;
1538 	struct am65_cpsw_tx_chn *tx_chn;
1539 	struct netdev_queue *netif_txq;
1540 	dma_addr_t desc_dma, buf_dma;
1541 	int ret, q_idx, i;
1542 	void **swdata;
1543 	u32 *psdata;
1544 	u32 pkt_len;
1545 
1546 	/* padding enabled in hw */
1547 	pkt_len = skb_headlen(skb);
1548 
1549 	/* SKB TX timestamp */
1550 	if (port->tx_ts_enabled)
1551 		am65_cpts_prep_tx_timestamp(common->cpts, skb);
1552 
1553 	q_idx = skb_get_queue_mapping(skb);
1554 	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
1555 
1556 	tx_chn = &common->tx_chns[q_idx];
1557 	netif_txq = netdev_get_tx_queue(ndev, q_idx);
1558 
1559 	/* Map the linear buffer */
1560 	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
1561 				 DMA_TO_DEVICE);
1562 	if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1563 		dev_err(dev, "Failed to map tx skb buffer\n");
1564 		ndev->stats.tx_errors++;
1565 		goto err_free_skb;
1566 	}
1567 
1568 	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1569 	if (!first_desc) {
1570 		dev_dbg(dev, "Failed to allocate descriptor\n");
1571 		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
1572 				 DMA_TO_DEVICE);
1573 		goto busy_stop_q;
1574 	}
1575 
1576 	am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
1577 				    AM65_CPSW_TX_BUF_TYPE_SKB);
1578 
1579 	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
1580 			 AM65_CPSW_NAV_PS_DATA_SIZE);
1581 	cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
1582 	cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
1583 	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
1584 
1585 	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1586 	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
1587 	swdata = cppi5_hdesc_get_swdata(first_desc);
1588 	*(swdata) = skb;
1589 	psdata = cppi5_hdesc_get_psdata(first_desc);
1590 
1591 	/* HW csum offload if enabled */
1592 	psdata[2] = 0;
1593 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1594 		unsigned int cs_start, cs_offset;
1595 
1596 		cs_start = skb_transport_offset(skb);
1597 		cs_offset = cs_start + skb->csum_offset;
1598 		/* HW numbers bytes starting from 1 */
1599 		psdata[2] = ((cs_offset + 1) << 24) |
1600 			    ((cs_start + 1) << 16) | (skb->len - cs_start);
1601 		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
1602 	}
1603 
1604 	if (!skb_is_nonlinear(skb))
1605 		goto done_tx;
1606 
1607 	dev_dbg(dev, "fragmented SKB\n");
1608 
1609 	/* Handle the case where skb is fragmented in pages */
1610 	cur_desc = first_desc;
1611 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1612 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1613 		u32 frag_size = skb_frag_size(frag);
1614 
1615 		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1616 		if (!next_desc) {
1617 			dev_err(dev, "Failed to allocate descriptor\n");
1618 			goto busy_free_descs;
1619 		}
1620 
1621 		am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
1622 					    AM65_CPSW_TX_BUF_TYPE_SKB);
1623 
1624 		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
1625 					   DMA_TO_DEVICE);
1626 		if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1627 			dev_err(dev, "Failed to map tx skb page\n");
1628 			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
1629 			ndev->stats.tx_errors++;
1630 			goto err_free_descs;
1631 		}
1632 
1633 		cppi5_hdesc_reset_hbdesc(next_desc);
1634 		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1635 		cppi5_hdesc_attach_buf(next_desc,
1636 				       buf_dma, frag_size, buf_dma, frag_size);
1637 
1638 		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
1639 						      next_desc);
1640 		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
1641 		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
1642 
1643 		pkt_len += frag_size;
1644 		cur_desc = next_desc;
1645 	}
1646 	WARN_ON(pkt_len != skb->len);
1647 
1648 done_tx:
1649 	skb_tx_timestamp(skb);
1650 
1651 	/* report bql before sending packet */
1652 	netdev_tx_sent_queue(netif_txq, pkt_len);
1653 
1654 	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
1655 	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
1656 	if (AM65_CPSW_IS_CPSW2G(common)) {
1657 		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1658 	} else {
1659 		spin_lock_bh(&tx_chn->lock);
1660 		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1661 		spin_unlock_bh(&tx_chn->lock);
1662 	}
1663 	if (ret) {
1664 		dev_err(dev, "can't push desc %d\n", ret);
1665 		/* inform bql */
1666 		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
1667 		ndev->stats.tx_errors++;
1668 		goto err_free_descs;
1669 	}
1670 
1671 	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
1672 		netif_tx_stop_queue(netif_txq);
1673 		/* Barrier, so that the stopped queue state is visible to other CPUs */
1674 		smp_mb__after_atomic();
1675 		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
1676 
1677 		/* re-check in case a racing completion freed descriptors */
1678 		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
1679 		    MAX_SKB_FRAGS) {
1680 			netif_tx_wake_queue(netif_txq);
1681 			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
1682 		}
1683 	}
1684 
1685 	return NETDEV_TX_OK;
1686 
1687 err_free_descs:
1688 	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1689 err_free_skb:
1690 	ndev->stats.tx_dropped++;
1691 	dev_kfree_skb_any(skb);
1692 	return NETDEV_TX_OK;
1693 
1694 busy_free_descs:
1695 	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1696 busy_stop_q:
1697 	netif_tx_stop_queue(netif_txq);
1698 	return NETDEV_TX_BUSY;
1699 }
1700 
1701 static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
1702 						    void *addr)
1703 {
1704 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1705 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1706 	struct sockaddr *sockaddr = (struct sockaddr *)addr;
1707 	int ret;
1708 
1709 	ret = eth_prepare_mac_addr_change(ndev, addr);
1710 	if (ret < 0)
1711 		return ret;
1712 
1713 	ret = pm_runtime_resume_and_get(common->dev);
1714 	if (ret < 0)
1715 		return ret;
1716 
1717 	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
1718 			   HOST_PORT_NUM, 0, 0);
1719 	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
1720 			   HOST_PORT_NUM, ALE_SECURE, 0);
1721 
1722 	am65_cpsw_port_set_sl_mac(port, addr);
1723 	eth_commit_mac_addr_change(ndev, sockaddr);
1724 
1725 	pm_runtime_put(common->dev);
1726 
1727 	return 0;
1728 }
1729 
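/* SIOCSHWTSTAMP handler: TX timestamping is enabled per port for PTP event
 * messages; any requested RX filter is upgraded to HWTSTAMP_FILTER_ALL and
 * RX timestamping is switched on in the CPTS.
 */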
1730 static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
1731 				       struct ifreq *ifr)
1732 {
1733 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1734 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1735 	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
1736 	struct hwtstamp_config cfg;
1737 
1738 	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1739 		return -EOPNOTSUPP;
1740 
1741 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1742 		return -EFAULT;
1743 
1744 	/* TX HW timestamp */
1745 	switch (cfg.tx_type) {
1746 	case HWTSTAMP_TX_OFF:
1747 	case HWTSTAMP_TX_ON:
1748 		break;
1749 	default:
1750 		return -ERANGE;
1751 	}
1752 
1753 	switch (cfg.rx_filter) {
1754 	case HWTSTAMP_FILTER_NONE:
1755 		port->rx_ts_enabled = false;
1756 		break;
1757 	case HWTSTAMP_FILTER_ALL:
1758 	case HWTSTAMP_FILTER_SOME:
1759 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1760 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1761 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1762 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1763 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1764 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1765 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1766 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1767 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1768 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1769 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1770 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1771 	case HWTSTAMP_FILTER_NTP_ALL:
1772 		port->rx_ts_enabled = true;
1773 		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
1774 		break;
1775 	default:
1776 		return -ERANGE;
1777 	}
1778 
1779 	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
1780 
1781 	/* cfg TX timestamp */
1782 	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
1783 		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
1784 
1785 	ts_vlan_ltype = ETH_P_8021Q;
1786 
1787 	ts_ctrl_ltype2 = ETH_P_1588 |
1788 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
1789 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
1790 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
1791 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
1792 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
1793 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
1794 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
1795 			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
1796 
1797 	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
1798 		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
1799 
1800 	if (port->tx_ts_enabled)
1801 		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
1802 			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
1803 
1804 	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
1805 	writel(ts_vlan_ltype, port->port_base +
1806 	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
1807 	writel(ts_ctrl_ltype2, port->port_base +
1808 	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
1809 	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
1810 
1811 	/* en/dis RX timestamp */
1812 	am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
1813 
1814 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1815 }
1816 
1817 static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
1818 				       struct ifreq *ifr)
1819 {
1820 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1821 	struct hwtstamp_config cfg;
1822 
1823 	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1824 		return -EOPNOTSUPP;
1825 
1826 	cfg.flags = 0;
1827 	cfg.tx_type = port->tx_ts_enabled ?
1828 		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1829 	cfg.rx_filter = port->rx_ts_enabled ?
1830 			HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1831 
1832 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1833 }
1834 
1835 static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
1836 					  struct ifreq *req, int cmd)
1837 {
1838 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1839 
1840 	if (!netif_running(ndev))
1841 		return -EINVAL;
1842 
1843 	switch (cmd) {
1844 	case SIOCSHWTSTAMP:
1845 		return am65_cpsw_nuss_hwtstamp_set(ndev, req);
1846 	case SIOCGHWTSTAMP:
1847 		return am65_cpsw_nuss_hwtstamp_get(ndev, req);
1848 	}
1849 
1850 	return phylink_mii_ioctl(port->slave.phylink, req, cmd);
1851 }
1852 
1853 static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
1854 					 struct rtnl_link_stats64 *stats)
1855 {
1856 	struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
1857 	unsigned int start;
1858 	int cpu;
1859 
1860 	for_each_possible_cpu(cpu) {
1861 		struct am65_cpsw_ndev_stats *cpu_stats;
1862 		u64 rx_packets;
1863 		u64 rx_bytes;
1864 		u64 tx_packets;
1865 		u64 tx_bytes;
1866 
1867 		cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
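		/* Snapshot the per-CPU counters; retry if a writer bumped
		 * the seqcount while we were reading.
		 */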
1868 		do {
1869 			start = u64_stats_fetch_begin(&cpu_stats->syncp);
1870 			rx_packets = cpu_stats->rx_packets;
1871 			rx_bytes   = cpu_stats->rx_bytes;
1872 			tx_packets = cpu_stats->tx_packets;
1873 			tx_bytes   = cpu_stats->tx_bytes;
1874 		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
1875 
1876 		stats->rx_packets += rx_packets;
1877 		stats->rx_bytes   += rx_bytes;
1878 		stats->tx_packets += tx_packets;
1879 		stats->tx_bytes   += tx_bytes;
1880 	}
1881 
1882 	stats->rx_errors	= dev->stats.rx_errors;
1883 	stats->rx_dropped	= dev->stats.rx_dropped;
1884 	stats->tx_dropped	= dev->stats.tx_dropped;
1885 }
1886 
1887 static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
1888 				    struct bpf_prog *prog)
1889 {
1890 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1891 	bool running = netif_running(ndev);
1892 	struct bpf_prog *old_prog;
1893 
1894 	if (running)
1895 		am65_cpsw_nuss_ndo_slave_stop(ndev);
1896 
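	/* Swap in the new program while the interface is quiesced and
	 * drop the reference on the old one, if any.
	 */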
1897 	old_prog = xchg(&port->xdp_prog, prog);
1898 	if (old_prog)
1899 		bpf_prog_put(old_prog);
1900 
1901 	if (running)
1902 		return am65_cpsw_nuss_ndo_slave_open(ndev);
1903 
1904 	return 0;
1905 }
1906 
1907 static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1908 {
1909 	switch (bpf->command) {
1910 	case XDP_SETUP_PROG:
1911 		return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
1912 	default:
1913 		return -EINVAL;
1914 	}
1915 }
1916 
1917 static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
1918 				  struct xdp_frame **frames, u32 flags)
1919 {
1920 	struct am65_cpsw_tx_chn *tx_chn;
1921 	struct netdev_queue *netif_txq;
1922 	int cpu = smp_processor_id();
1923 	int i, nxmit = 0;
1924 
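	/* Spread ndo_xdp_xmit() transmissions across TX channels based on
	 * the current CPU, then serialise on that queue's TX lock.
	 */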
1925 	tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
1926 	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
1927 
1928 	__netif_tx_lock(netif_txq, cpu);
1929 	for (i = 0; i < n; i++) {
1930 		if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
1931 					   AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
1932 			break;
1933 		nxmit++;
1934 	}
1935 	__netif_tx_unlock(netif_txq);
1936 
1937 	return nxmit;
1938 }
1939 
1940 static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
1941 	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
1942 	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
1943 	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
1944 	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
1945 	.ndo_get_stats64        = am65_cpsw_nuss_ndo_get_stats,
1946 	.ndo_validate_addr	= eth_validate_addr,
1947 	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
1948 	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
1949 	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
1950 	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
1951 	.ndo_eth_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
1952 	.ndo_setup_tc           = am65_cpsw_qos_ndo_setup_tc,
1953 	.ndo_set_tx_maxrate	= am65_cpsw_qos_ndo_tx_p0_set_maxrate,
1954 	.ndo_bpf		= am65_cpsw_ndo_bpf,
1955 	.ndo_xdp_xmit		= am65_cpsw_ndo_xdp_xmit,
1956 };
1957 
1958 static void am65_cpsw_disable_phy(struct phy *phy)
1959 {
1960 	phy_power_off(phy);
1961 	phy_exit(phy);
1962 }
1963 
1964 static int am65_cpsw_enable_phy(struct phy *phy)
1965 {
1966 	int ret;
1967 
1968 	ret = phy_init(phy);
1969 	if (ret < 0)
1970 		return ret;
1971 
1972 	ret = phy_power_on(phy);
1973 	if (ret < 0) {
1974 		phy_exit(phy);
1975 		return ret;
1976 	}
1977 
1978 	return 0;
1979 }
1980 
1981 static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
1982 {
1983 	struct am65_cpsw_port *port;
1984 	struct phy *phy;
1985 	int i;
1986 
1987 	for (i = 0; i < common->port_num; i++) {
1988 		port = &common->ports[i];
1989 		phy = port->slave.serdes_phy;
1990 		if (phy)
1991 			am65_cpsw_disable_phy(phy);
1992 	}
1993 }
1994 
1995 static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
1996 				     struct am65_cpsw_port *port)
1997 {
1998 	const char *name = "serdes";
1999 	struct phy *phy;
2000 	int ret;
2001 
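	/* The serdes PHY is optional: a NULL return means this port has
	 * none, which is not an error.
	 */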
2002 	phy = devm_of_phy_optional_get(dev, port_np, name);
2003 	if (IS_ERR_OR_NULL(phy))
2004 		return PTR_ERR_OR_ZERO(phy);
2005 
2006 	/* Serdes PHY exists. Store it. */
2007 	port->slave.serdes_phy = phy;
2008 
2009 	ret = am65_cpsw_enable_phy(phy);
2010 	if (ret < 0)
2011 		goto err_phy;
2012 
2013 	return 0;
2014 
2015 err_phy:
2016 	devm_phy_put(dev, phy);
2017 	return ret;
2018 }
2019 
2020 static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
2021 				      const struct phylink_link_state *state)
2022 {
2023 	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2024 							  phylink_config);
2025 	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2026 	struct am65_cpsw_common *common = port->common;
2027 
2028 	if (common->pdata.extra_modes & BIT(state->interface)) {
2029 		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
2030 			writel(ADVERTISE_SGMII,
2031 			       port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
2032 			cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
2033 		} else {
2034 			cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
2035 		}
2036 
2037 		if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
2038 			cpsw_sl_ctl_set(port->slave.mac_sl,
2039 					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
2040 		} else {
2041 			cpsw_sl_ctl_clr(port->slave.mac_sl,
2042 					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
2043 		}
2044 
2045 		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
2046 		       port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
2047 	}
2048 }
2049 
2050 static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
2051 					 phy_interface_t interface)
2052 {
2053 	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2054 							  phylink_config);
2055 	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2056 	struct am65_cpsw_common *common = port->common;
2057 	struct net_device *ndev = port->ndev;
2058 	u32 mac_control;
2059 	int tmo;
2060 
2061 	/* disable forwarding */
2062 	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2063 
2064 	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
2065 
2066 	tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
2067 	dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
2068 	dev_dbg(common->dev, "down mac_sl %08x tmo %d\n",
2069 
2070 	/* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */
2071 	mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A |
2072 		      CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN;
2073 	/* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */
2074 	if (phy_interface_mode_is_rgmii(interface))
2075 		mac_control |= CPSW_SL_CTL_EXT_EN;
2076 	/* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */
2077 	cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control);
2078 
2079 	am65_cpsw_qos_link_down(ndev);
2080 	netif_tx_stop_all_queues(ndev);
2081 }
2082 
2083 static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
2084 				       unsigned int mode, phy_interface_t interface, int speed,
2085 				       int duplex, bool tx_pause, bool rx_pause)
2086 {
2087 	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2088 							  phylink_config);
2089 	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2090 	struct am65_cpsw_common *common = port->common;
2091 	u32 mac_control = CPSW_SL_CTL_GMII_EN;
2092 	struct net_device *ndev = port->ndev;
2093 
2094 	/* Bring the port out of idle state */
2095 	cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
2096 
2097 	if (speed == SPEED_1000)
2098 		mac_control |= CPSW_SL_CTL_GIG;
2099 	/* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */
2100 	if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
2101 		/* Can be used with in-band mode only */
2102 		mac_control |= CPSW_SL_CTL_EXT_EN;
2103 	if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
2104 		mac_control |= CPSW_SL_CTL_IFCTL_A;
2105 	if (duplex)
2106 		mac_control |= CPSW_SL_CTL_FULLDUPLEX;
2107 
2108 	/* rx_pause/tx_pause */
2109 	if (rx_pause)
2110 		mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
2111 
2112 	if (tx_pause)
2113 		mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
2114 
2115 	cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
2116 
2117 	/* enable forwarding */
2118 	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2119 
2120 	am65_cpsw_qos_link_up(ndev, speed);
2121 	netif_tx_wake_all_queues(ndev);
2122 }
2123 
2124 static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
2125 	.mac_config = am65_cpsw_nuss_mac_config,
2126 	.mac_link_down = am65_cpsw_nuss_mac_link_down,
2127 	.mac_link_up = am65_cpsw_nuss_mac_link_up,
2128 };
2129 
2130 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
2131 {
2132 	struct am65_cpsw_common *common = port->common;
2133 
2134 	if (!port->disabled)
2135 		return;
2136 
2137 	cpsw_ale_control_set(common->ale, port->port_id,
2138 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2139 
2140 	cpsw_sl_reset(port->slave.mac_sl, 100);
2141 	cpsw_sl_ctl_reset(port->slave.mac_sl);
2142 }
2143 
2144 static void am65_cpsw_nuss_free_tx_chns(void *data)
2145 {
2146 	struct am65_cpsw_common *common = data;
2147 	int i;
2148 
2149 	for (i = 0; i < common->tx_ch_num; i++) {
2150 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2151 
2152 		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
2153 			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
2154 
2155 		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
2156 			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
2157 
2158 		memset(tx_chn, 0, sizeof(*tx_chn));
2159 	}
2160 }
2161 
2162 void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
2163 {
2164 	struct device *dev = common->dev;
2165 	int i;
2166 
2167 	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
2168 
2169 	common->tx_ch_rate_msk = 0;
2170 	for (i = 0; i < common->tx_ch_num; i++) {
2171 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2172 
2173 		if (tx_chn->irq)
2174 			devm_free_irq(dev, tx_chn->irq, tx_chn);
2175 
2176 		netif_napi_del(&tx_chn->napi_tx);
2177 
2178 		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
2179 			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
2180 
2181 		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
2182 			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
2183 
2184 		memset(tx_chn, 0, sizeof(*tx_chn));
2185 	}
2186 }
2187 
2188 static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
2189 {
2190 	struct device *dev = common->dev;
2191 	int i, ret = 0;
2192 
2193 	for (i = 0; i < common->tx_ch_num; i++) {
2194 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2195 
2196 		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
2197 				  am65_cpsw_nuss_tx_poll);
2198 		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
2199 		tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
2200 
2201 		ret = devm_request_irq(dev, tx_chn->irq,
2202 				       am65_cpsw_nuss_tx_irq,
2203 				       IRQF_TRIGGER_HIGH,
2204 				       tx_chn->tx_chn_name, tx_chn);
2205 		if (ret) {
2206 			dev_err(dev, "failure requesting tx%u irq %u, %d\n",
2207 				tx_chn->id, tx_chn->irq, ret);
2208 			goto err;
2209 		}
2210 	}
2211 
2212 err:
2213 	return ret;
2214 }
2215 
2216 static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
2217 {
2218 	u32  max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
2219 	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
2220 	struct device *dev = common->dev;
2221 	struct k3_ring_cfg ring_cfg = {
2222 		.elm_size = K3_RINGACC_RING_ELSIZE_8,
2223 		.mode = K3_RINGACC_RING_MODE_RING,
2224 		.flags = 0
2225 	};
2226 	u32 hdesc_size, hdesc_size_out;
2227 	int i, ret = 0;
2228 
2229 	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
2230 					   AM65_CPSW_NAV_SW_DATA_SIZE);
2231 
2232 	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
2233 	tx_cfg.tx_cfg = ring_cfg;
2234 	tx_cfg.txcq_cfg = ring_cfg;
2235 	tx_cfg.tx_cfg.size = max_desc_num;
2236 	tx_cfg.txcq_cfg.size = max_desc_num;
2237 
2238 	for (i = 0; i < common->tx_ch_num; i++) {
2239 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2240 
2241 		snprintf(tx_chn->tx_chn_name,
2242 			 sizeof(tx_chn->tx_chn_name), "tx%d", i);
2243 
2244 		spin_lock_init(&tx_chn->lock);
2245 		tx_chn->common = common;
2246 		tx_chn->id = i;
2247 		tx_chn->descs_num = max_desc_num;
2248 
2249 		tx_chn->tx_chn =
2250 			k3_udma_glue_request_tx_chn(dev,
2251 						    tx_chn->tx_chn_name,
2252 						    &tx_cfg);
2253 		if (IS_ERR(tx_chn->tx_chn)) {
2254 			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
2255 					    "Failed to request tx dma channel\n");
2256 			goto err;
2257 		}
2258 		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
2259 
2260 		tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
2261 								  tx_chn->descs_num,
2262 								  hdesc_size,
2263 								  tx_chn->tx_chn_name);
2264 		if (IS_ERR(tx_chn->desc_pool)) {
2265 			ret = PTR_ERR(tx_chn->desc_pool);
2266 			dev_err(dev, "Failed to create pool %d\n", ret);
2267 			goto err;
2268 		}
2269 
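		/* Descriptor indices are computed with shifts elsewhere in
		 * the driver, so the pool descriptor size is expected to be
		 * a power of two.
		 */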
2270 		hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
2271 		tx_chn->dsize_log2 = __fls(hdesc_size_out);
2272 		WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));
2273 
2274 		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
2275 		if (tx_chn->irq < 0) {
2276 			dev_err(dev, "Failed to get tx dma irq %d\n",
2277 				tx_chn->irq);
2278 			ret = tx_chn->irq;
2279 			goto err;
2280 		}
2281 
2282 		snprintf(tx_chn->tx_chn_name,
2283 			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
2284 			 dev_name(dev), tx_chn->id);
2285 	}
2286 
2287 	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
2288 	if (ret) {
2289 		dev_err(dev, "Failed to add tx NAPI %d\n", ret);
2290 		goto err;
2291 	}
2292 
2293 err:
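	/* Register the devres cleanup even on failure so any channels set
	 * up before the error are released together with the device.
	 */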
2294 	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
2295 	if (i) {
2296 		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
2297 		return i;
2298 	}
2299 
2300 	return ret;
2301 }
2302 
2303 static void am65_cpsw_nuss_free_rx_chns(void *data)
2304 {
2305 	struct am65_cpsw_common *common = data;
2306 	struct am65_cpsw_rx_chn *rx_chn;
2307 
2308 	rx_chn = &common->rx_chns;
2309 
2310 	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
2311 		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
2312 
2313 	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
2314 		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
2315 }
2316 
2317 static void am65_cpsw_nuss_remove_rx_chns(void *data)
2318 {
2319 	struct am65_cpsw_common *common = data;
2320 	struct device *dev = common->dev;
2321 	struct am65_cpsw_rx_chn *rx_chn;
2322 
2323 	rx_chn = &common->rx_chns;
2324 	devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
2325 
2326 	if (!(rx_chn->irq < 0))
2327 		devm_free_irq(dev, rx_chn->irq, common);
2328 
2329 	netif_napi_del(&common->napi_rx);
2330 
2331 	am65_cpsw_nuss_free_rx_chns(common);
2332 
2333 	common->rx_flow_id_base = -1;
2334 }
2335 
2336 static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
2337 {
2338 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
2339 	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
2340 	u32  max_desc_num = AM65_CPSW_MAX_RX_DESC;
2341 	struct device *dev = common->dev;
2342 	u32 hdesc_size, hdesc_size_out;
2343 	u32 fdqring_id;
2344 	int i, ret = 0;
2345 
2346 	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
2347 					   AM65_CPSW_NAV_SW_DATA_SIZE);
2348 
2349 	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
2350 	rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
2351 	rx_cfg.flow_id_base = common->rx_flow_id_base;
2352 
2353 	/* init all flows */
2354 	rx_chn->dev = dev;
2355 	rx_chn->descs_num = max_desc_num;
2356 
2357 	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
2358 	if (IS_ERR(rx_chn->rx_chn)) {
2359 		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
2360 				    "Failed to request rx dma channel\n");
2361 		goto err;
2362 	}
2363 	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
2364 
2365 	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
2366 							  rx_chn->descs_num,
2367 							  hdesc_size, "rx");
2368 	if (IS_ERR(rx_chn->desc_pool)) {
2369 		ret = PTR_ERR(rx_chn->desc_pool);
2370 		dev_err(dev, "Failed to create rx pool %d\n", ret);
2371 		goto err;
2372 	}
2373 
2374 	hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
2375 	rx_chn->dsize_log2 = __fls(hdesc_size_out);
2376 	WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
2377 
2378 	rx_chn->page_pool = NULL;
2379 
2380 	rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
2381 				     sizeof(*rx_chn->pages), GFP_KERNEL);
2382 	if (!rx_chn->pages)
2383 		return -ENOMEM;
2384 
2385 	common->rx_flow_id_base =
2386 			k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
2387 	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
2388 
2389 	fdqring_id = K3_RINGACC_RING_ID_ANY;
2390 	for (i = 0; i < rx_cfg.flow_id_num; i++) {
2391 		struct k3_ring_cfg rxring_cfg = {
2392 			.elm_size = K3_RINGACC_RING_ELSIZE_8,
2393 			.mode = K3_RINGACC_RING_MODE_RING,
2394 			.flags = 0,
2395 		};
2396 		struct k3_ring_cfg fdqring_cfg = {
2397 			.elm_size = K3_RINGACC_RING_ELSIZE_8,
2398 			.flags = K3_RINGACC_RING_SHARED,
2399 		};
2400 		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
2401 			.rx_cfg = rxring_cfg,
2402 			.rxfdq_cfg = fdqring_cfg,
2403 			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
2404 			.src_tag_lo_sel =
2405 				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
2406 		};
2407 
2408 		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
2409 		rx_flow_cfg.rx_cfg.size = max_desc_num;
2410 		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
2411 		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
2412 
2413 		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
2414 						i, &rx_flow_cfg);
2415 		if (ret) {
2416 			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
2417 			goto err;
2418 		}
2419 		if (!i)
2420 			fdqring_id =
2421 				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
2422 								i);
2423 
2424 		rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
2425 
2426 		if (rx_chn->irq <= 0) {
2427 			dev_err(dev, "Failed to get rx dma irq %d\n",
2428 				rx_chn->irq);
2429 			ret = -ENXIO;
2430 			goto err;
2431 		}
2432 	}
2433 
2434 	netif_napi_add(common->dma_ndev, &common->napi_rx,
2435 		       am65_cpsw_nuss_rx_poll);
2436 	hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
2437 	common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
2438 
2439 	ret = devm_request_irq(dev, rx_chn->irq,
2440 			       am65_cpsw_nuss_rx_irq,
2441 			       IRQF_TRIGGER_HIGH, dev_name(dev), common);
2442 	if (ret) {
2443 		dev_err(dev, "failure requesting rx irq %u, %d\n",
2444 			rx_chn->irq, ret);
2445 		goto err;
2446 	}
2447 
2448 err:
2449 	i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
2450 	if (i) {
2451 		dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
2452 		return i;
2453 	}
2454 
2455 	return ret;
2456 }
2457 
2458 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
2459 {
2460 	struct am65_cpsw_host *host_p = am65_common_get_host(common);
2461 
2462 	host_p->common = common;
2463 	host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
2464 	host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
2465 
2466 	return 0;
2467 }
2468 
2469 static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
2470 					   int slave, u8 *mac_addr)
2471 {
2472 	u32 mac_lo, mac_hi, offset;
2473 	struct regmap *syscon;
2474 	int ret;
2475 
2476 	syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
2477 	if (IS_ERR(syscon)) {
2478 		if (PTR_ERR(syscon) == -ENODEV)
2479 			return 0;
2480 		return PTR_ERR(syscon);
2481 	}
2482 
2483 	ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
2484 					 &offset);
2485 	if (ret)
2486 		return ret;
2487 
2488 	regmap_read(syscon, offset, &mac_lo);
2489 	regmap_read(syscon, offset + 4, &mac_hi);
2490 
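	/* The fuse words hold the address with the two high bytes in
	 * mac_hi and the remaining four bytes in mac_lo.
	 */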
2491 	mac_addr[0] = (mac_hi >> 8) & 0xff;
2492 	mac_addr[1] = mac_hi & 0xff;
2493 	mac_addr[2] = (mac_lo >> 24) & 0xff;
2494 	mac_addr[3] = (mac_lo >> 16) & 0xff;
2495 	mac_addr[4] = (mac_lo >> 8) & 0xff;
2496 	mac_addr[5] = mac_lo & 0xff;
2497 
2498 	return 0;
2499 }
2500 
2501 static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
2502 {
2503 	struct device *dev = common->dev;
2504 	struct device_node *node;
2505 	struct am65_cpts *cpts;
2506 	void __iomem *reg_base;
2507 
2508 	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
2509 		return 0;
2510 
2511 	node = of_get_child_by_name(dev->of_node, "cpts");
2512 	if (!node) {
2513 		dev_err(dev, "%s cpts not found\n", __func__);
2514 		return -ENOENT;
2515 	}
2516 
2517 	reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
2518 	cpts = am65_cpts_create(dev, reg_base, node);
2519 	if (IS_ERR(cpts)) {
2520 		int ret = PTR_ERR(cpts);
2521 
2522 		of_node_put(node);
2523 		dev_err(dev, "cpts create err %d\n", ret);
2524 		return ret;
2525 	}
2526 	common->cpts = cpts;
2527 	/* Forbid PM runtime if CPTS is running.
2528 	 * K3 CPSWxG modules may completely lose context during ON->OFF
2529 	 * transitions depending on integration:
2530 	 * AM65x/J721E MCU CPSW2G: context preserved
2531 	 * J721E MAIN_CPSW9G: context lost
2532 	 */
2533 	pm_runtime_forbid(dev);
2534 
2535 	return 0;
2536 }
2537 
2538 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
2539 {
2540 	struct device_node *node, *port_np;
2541 	struct device *dev = common->dev;
2542 	int ret;
2543 
2544 	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
2545 	if (!node)
2546 		return -ENOENT;
2547 
2548 	for_each_child_of_node(node, port_np) {
2549 		struct am65_cpsw_port *port;
2550 		u32 port_id;
2551 
2552 		/* not a slave port node, skip it */
2553 		if (strcmp(port_np->name, "port"))
2554 			continue;
2555 
2556 		ret = of_property_read_u32(port_np, "reg", &port_id);
2557 		if (ret < 0) {
2558 			dev_err(dev, "%pOF error reading port_id %d\n",
2559 				port_np, ret);
2560 			goto of_node_put;
2561 		}
2562 
2563 		if (!port_id || port_id > common->port_num) {
2564 			dev_err(dev, "%pOF has invalid port_id %u %s\n",
2565 				port_np, port_id, port_np->name);
2566 			ret = -EINVAL;
2567 			goto of_node_put;
2568 		}
2569 
2570 		port = am65_common_get_port(common, port_id);
2571 		port->port_id = port_id;
2572 		port->common = common;
2573 		port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
2574 				  AM65_CPSW_NU_PORTS_OFFSET * (port_id);
2575 		if (common->pdata.extra_modes)
2576 			port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
2577 		port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
2578 				  (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
2579 		port->name = of_get_property(port_np, "label", NULL);
2580 		port->fetch_ram_base =
2581 				common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
2582 				(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
2583 
2584 		port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
2585 		if (IS_ERR(port->slave.mac_sl)) {
2586 			ret = PTR_ERR(port->slave.mac_sl);
2587 			goto of_node_put;
2588 		}
2589 
2590 		port->disabled = !of_device_is_available(port_np);
2591 		if (port->disabled) {
2592 			common->disabled_ports_mask |= BIT(port->port_id);
2593 			continue;
2594 		}
2595 
2596 		port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
2597 		if (IS_ERR(port->slave.ifphy)) {
2598 			ret = PTR_ERR(port->slave.ifphy);
2599 			dev_err(dev, "%pOF error retrieving port phy: %d\n",
2600 				port_np, ret);
2601 			goto of_node_put;
2602 		}
2603 
2604 		/* Initialize the Serdes PHY for the port */
2605 		ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
2606 		if (ret)
2607 			goto of_node_put;
2608 
2609 		port->slave.mac_only =
2610 				of_property_read_bool(port_np, "ti,mac-only");
2611 
2612 		/* get phy/link info */
2613 		port->slave.phy_node = port_np;
2614 		ret = of_get_phy_mode(port_np, &port->slave.phy_if);
2615 		if (ret) {
2616 			dev_err(dev, "%pOF read phy-mode err %d\n",
2617 				port_np, ret);
2618 			goto of_node_put;
2619 		}
2620 
2621 		ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
2622 		if (ret)
2623 			goto of_node_put;
2624 
2625 		ret = of_get_mac_address(port_np, port->slave.mac_addr);
2626 		if (ret) {
2627 			am65_cpsw_am654_get_efuse_macid(port_np,
2628 							port->port_id,
2629 							port->slave.mac_addr);
2630 			if (!is_valid_ether_addr(port->slave.mac_addr)) {
2631 				eth_random_addr(port->slave.mac_addr);
2632 				dev_err(dev, "Using random MAC address\n");
2633 			}
2634 		}
2635 
2636 		/* Reset all Queue priorities to 0 */
2637 		writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
2638 	}
2639 	of_node_put(node);
2640 
2641 	/* is there at least one external port? */
2642 	if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
2643 		dev_err(dev, "No external ports are available\n");
2644 		return -ENODEV;
2645 	}
2646 
2647 	return 0;
2648 
2649 of_node_put:
2650 	of_node_put(port_np);
2651 	of_node_put(node);
2652 	return ret;
2653 }
2654 
2655 static void am65_cpsw_pcpu_stats_free(void *data)
2656 {
2657 	struct am65_cpsw_ndev_stats __percpu *stats = data;
2658 
2659 	free_percpu(stats);
2660 }
2661 
2662 static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
2663 {
2664 	struct am65_cpsw_port *port;
2665 	int i;
2666 
2667 	for (i = 0; i < common->port_num; i++) {
2668 		port = &common->ports[i];
2669 		if (port->slave.phylink)
2670 			phylink_destroy(port->slave.phylink);
2671 	}
2672 }
2673 
2674 static int
2675 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
2676 {
2677 	struct am65_cpsw_ndev_priv *ndev_priv;
2678 	struct device *dev = common->dev;
2679 	struct am65_cpsw_port *port;
2680 	struct phylink *phylink;
2681 	int ret;
2682 
2683 	port = &common->ports[port_idx];
2684 
2685 	if (port->disabled)
2686 		return 0;
2687 
2688 	/* alloc netdev */
2689 	port->ndev = devm_alloc_etherdev_mqs(common->dev,
2690 					     sizeof(struct am65_cpsw_ndev_priv),
2691 					     AM65_CPSW_MAX_TX_QUEUES,
2692 					     AM65_CPSW_MAX_RX_QUEUES);
2693 	if (!port->ndev) {
2694 		dev_err(dev, "error allocating slave net_device %u\n",
2695 			port->port_id);
2696 		return -ENOMEM;
2697 	}
2698 
2699 	ndev_priv = netdev_priv(port->ndev);
2700 	ndev_priv->port = port;
2701 	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
2702 	mutex_init(&ndev_priv->mm_lock);
2703 	port->qos.link_speed = SPEED_UNKNOWN;
2704 	SET_NETDEV_DEV(port->ndev, dev);
2705 
2706 	eth_hw_addr_set(port->ndev, port->slave.mac_addr);
2707 
2708 	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
2709 	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
2710 			      (VLAN_ETH_HLEN + ETH_FCS_LEN);
2711 	port->ndev->hw_features = NETIF_F_SG |
2712 				  NETIF_F_RXCSUM |
2713 				  NETIF_F_HW_CSUM |
2714 				  NETIF_F_HW_TC;
2715 	port->ndev->features = port->ndev->hw_features |
2716 			       NETIF_F_HW_VLAN_CTAG_FILTER;
2717 	port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
2718 				   NETDEV_XDP_ACT_REDIRECT |
2719 				   NETDEV_XDP_ACT_NDO_XMIT;
2720 	port->ndev->vlan_features |= NETIF_F_SG;
2721 	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
2722 	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
2723 
2724 	/* Configuring Phylink */
2725 	port->slave.phylink_config.dev = &port->ndev->dev;
2726 	port->slave.phylink_config.type = PHYLINK_NETDEV;
2727 	port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
2728 						      MAC_1000FD | MAC_5000FD;
2729 	port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
2730 
2731 	switch (port->slave.phy_if) {
2732 	case PHY_INTERFACE_MODE_RGMII:
2733 	case PHY_INTERFACE_MODE_RGMII_ID:
2734 	case PHY_INTERFACE_MODE_RGMII_RXID:
2735 	case PHY_INTERFACE_MODE_RGMII_TXID:
2736 		phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
2737 		break;
2738 
2739 	case PHY_INTERFACE_MODE_RMII:
2740 		__set_bit(PHY_INTERFACE_MODE_RMII,
2741 			  port->slave.phylink_config.supported_interfaces);
2742 		break;
2743 
2744 	case PHY_INTERFACE_MODE_QSGMII:
2745 	case PHY_INTERFACE_MODE_SGMII:
2746 	case PHY_INTERFACE_MODE_USXGMII:
2747 		if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
2748 			__set_bit(port->slave.phy_if,
2749 				  port->slave.phylink_config.supported_interfaces);
2750 		} else {
2751 			dev_err(dev, "selected phy-mode is not supported\n");
2752 			return -EOPNOTSUPP;
2753 		}
2754 		break;
2755 
2756 	default:
2757 		dev_err(dev, "selected phy-mode is not supported\n");
2758 		return -EOPNOTSUPP;
2759 	}
2760 
2761 	phylink = phylink_create(&port->slave.phylink_config,
2762 				 of_node_to_fwnode(port->slave.phy_node),
2763 				 port->slave.phy_if,
2764 				 &am65_cpsw_phylink_mac_ops);
2765 	if (IS_ERR(phylink))
2766 		return PTR_ERR(phylink);
2767 
2768 	port->slave.phylink = phylink;
2769 
2770 	/* Disable TX checksum offload by default due to HW bug */
2771 	if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
2772 		port->ndev->features &= ~NETIF_F_HW_CSUM;
2773 
2774 	ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
2775 	if (!ndev_priv->stats)
2776 		return -ENOMEM;
2777 
2778 	ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
2779 				       ndev_priv->stats);
2780 	if (ret)
2781 		dev_err(dev, "failed to add percpu stat free action %d\n", ret);
2782 
2783 	port->xdp_prog = NULL;
2784 
2785 	if (!common->dma_ndev)
2786 		common->dma_ndev = port->ndev;
2787 
2788 	return ret;
2789 }
2790 
2791 static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
2792 {
2793 	int ret;
2794 	int i;
2795 
2796 	for (i = 0; i < common->port_num; i++) {
2797 		ret = am65_cpsw_nuss_init_port_ndev(common, i);
2798 		if (ret)
2799 			return ret;
2800 	}
2801 
2802 	return ret;
2803 }
2804 
2805 static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
2806 {
2807 	struct am65_cpsw_port *port;
2808 	int i;
2809 
2810 	for (i = 0; i < common->port_num; i++) {
2811 		port = &common->ports[i];
2812 		if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
2813 			unregister_netdev(port->ndev);
2814 	}
2815 }
2816 
2817 static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
2818 {
2819 	int set_val = 0;
2820 	int i;
2821 
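	/* Enable offload_fwd_mark only when every enabled external port is
	 * a member of the same HW bridge.
	 */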
2822 	if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
2823 		set_val = 1;
2824 
2825 	dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
2826 
2827 	for (i = 1; i <= common->port_num; i++) {
2828 		struct am65_cpsw_port *port = am65_common_get_port(common, i);
2829 		struct am65_cpsw_ndev_priv *priv;
2830 
2831 		if (!port->ndev)
2832 			continue;
2833 
2834 		priv = am65_ndev_to_priv(port->ndev);
2835 		priv->offload_fwd_mark = set_val;
2836 	}
2837 }
2838 
2839 bool am65_cpsw_port_dev_check(const struct net_device *ndev)
2840 {
2841 	if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
2842 		struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2843 
2844 		return !common->is_emac_mode;
2845 	}
2846 
2847 	return false;
2848 }
2849 
2850 static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
2851 					 struct net_device *br_ndev,
2852 					 struct netlink_ext_ack *extack)
2853 {
2854 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2855 	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2856 	int err;
2857 
2858 	if (!common->br_members) {
2859 		common->hw_bridge_dev = br_ndev;
2860 	} else {
2861 		/* Adding the port to a second bridge is
2862 		 * not supported
2863 		 */
2864 		if (common->hw_bridge_dev != br_ndev)
2865 			return -EOPNOTSUPP;
2866 	}
2867 
2868 	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
2869 					    false, extack);
2870 	if (err)
2871 		return err;
2872 
2873 	common->br_members |= BIT(priv->port->port_id);
2874 
2875 	am65_cpsw_port_offload_fwd_mark_update(common);
2876 
2877 	return NOTIFY_DONE;
2878 }
2879 
2880 static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
2881 {
2882 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2883 	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2884 
2885 	switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
2886 
2887 	common->br_members &= ~BIT(priv->port->port_id);
2888 
2889 	am65_cpsw_port_offload_fwd_mark_update(common);
2890 
2891 	if (!common->br_members)
2892 		common->hw_bridge_dev = NULL;
2893 }
2894 
2895 /* netdev notifier */
2896 static int am65_cpsw_netdevice_event(struct notifier_block *unused,
2897 				     unsigned long event, void *ptr)
2898 {
2899 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
2900 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2901 	struct netdev_notifier_changeupper_info *info;
2902 	int ret = NOTIFY_DONE;
2903 
2904 	if (!am65_cpsw_port_dev_check(ndev))
2905 		return NOTIFY_DONE;
2906 
2907 	switch (event) {
2908 	case NETDEV_CHANGEUPPER:
2909 		info = ptr;
2910 
2911 		if (netif_is_bridge_master(info->upper_dev)) {
2912 			if (info->linking)
2913 				ret = am65_cpsw_netdevice_port_link(ndev,
2914 								    info->upper_dev,
2915 								    extack);
2916 			else
2917 				am65_cpsw_netdevice_port_unlink(ndev);
2918 		}
2919 		break;
2920 	default:
2921 		return NOTIFY_DONE;
2922 	}
2923 
2924 	return notifier_from_errno(ret);
2925 }
2926 
2927 static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
2928 {
2929 	int ret = 0;
2930 
2931 	if (AM65_CPSW_IS_CPSW2G(cpsw) ||
2932 	    !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
2933 		return 0;
2934 
2935 	cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
2936 	ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2937 	if (ret) {
2938 		dev_err(cpsw->dev, "can't register netdevice notifier\n");
2939 		return ret;
2940 	}
2941 
2942 	ret = am65_cpsw_switchdev_register_notifiers(cpsw);
2943 	if (ret)
2944 		unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2945 
2946 	return ret;
2947 }
2948 
2949 static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
2950 {
2951 	if (AM65_CPSW_IS_CPSW2G(cpsw) ||
2952 	    !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
2953 		return;
2954 
2955 	am65_cpsw_switchdev_unregister_notifiers(cpsw);
2956 	unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2957 }
2958 
2959 static const struct devlink_ops am65_cpsw_devlink_ops = {};
2960 
2961 static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
2962 {
2963 	cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
2964 			   ALE_MCAST_BLOCK_LEARN_FWD);
2965 }
2966 
2967 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
2968 {
2969 	struct am65_cpsw_host *host = am65_common_get_host(common);
2970 
2971 	writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2972 
2973 	am65_cpsw_init_stp_ale_entry(common);
2974 
2975 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
2976 	dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
2977 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
2978 }
2979 
2980 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
2981 {
2982 	struct am65_cpsw_host *host = am65_common_get_host(common);
2983 
2984 	writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2985 
2986 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
2987 	dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
2988 
2989 	/* learning makes no sense in multi-mac mode */
2990 	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
2991 }
2992 
2993 static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
2994 					struct devlink_param_gset_ctx *ctx)
2995 {
2996 	struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
2997 	struct am65_cpsw_common *common = dl_priv->common;
2998 
2999 	dev_dbg(common->dev, "%s id:%u\n", __func__, id);
3000 
3001 	if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
3002 		return -EOPNOTSUPP;
3003 
3004 	ctx->val.vbool = !common->is_emac_mode;
3005 
3006 	return 0;
3007 }
3008 
3009 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
3010 {
3011 	struct am65_cpsw_slave_data *slave = &port->slave;
3012 	struct am65_cpsw_common *common = port->common;
3013 	u32 port_mask;
3014 
3015 	writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3016 
3017 	if (slave->mac_only)
3018 		/* enable mac-only mode on port */
3019 		cpsw_ale_control_set(common->ale, port->port_id,
3020 				     ALE_PORT_MACONLY, 1);
3021 
3022 	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
3023 
3024 	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
3025 
3026 	cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
3027 			   HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
3028 	cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
3029 			   port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
3030 }
3031 
3032 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
3033 {
3034 	struct am65_cpsw_slave_data *slave = &port->slave;
3035 	struct am65_cpsw_common *cpsw = port->common;
3036 	u32 port_mask;
3037 
3038 	cpsw_ale_control_set(cpsw->ale, port->port_id,
3039 			     ALE_PORT_NOLEARN, 0);
3040 
3041 	cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
3042 			   HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
3043 			   slave->port_vlan);
3044 
3045 	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
3046 
3047 	cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
3048 			   port_mask, ALE_VLAN, slave->port_vlan,
3049 			   ALE_MCAST_FWD_2);
3050 
3051 	writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3052 
3053 	cpsw_ale_control_set(cpsw->ale, port->port_id,
3054 			     ALE_PORT_MACONLY, 0);
3055 }
3056 
3057 static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
3058 					struct devlink_param_gset_ctx *ctx)
3059 {
3060 	struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
3061 	struct am65_cpsw_common *cpsw = dl_priv->common;
3062 	bool switch_en = ctx->val.vbool;
3063 	bool if_running = false;
3064 	int i;
3065 
3066 	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
3067 
3068 	if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
3069 		return -EOPNOTSUPP;
3070 
3071 	if (switch_en == !cpsw->is_emac_mode)
3072 		return 0;
3073 
3074 	if (!switch_en && cpsw->br_members) {
3075 		dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
3076 		return -EINVAL;
3077 	}
3078 
3079 	rtnl_lock();
3080 
3081 	cpsw->is_emac_mode = !switch_en;
3082 
3083 	for (i = 0; i < cpsw->port_num; i++) {
3084 		struct net_device *sl_ndev = cpsw->ports[i].ndev;
3085 
3086 		if (!sl_ndev || !netif_running(sl_ndev))
3087 			continue;
3088 
3089 		if_running = true;
3090 	}
3091 
3092 	if (!if_running) {
3093 		/* all ndevs are down */
3094 		for (i = 0; i < cpsw->port_num; i++) {
3095 			struct net_device *sl_ndev = cpsw->ports[i].ndev;
3096 			struct am65_cpsw_slave_data *slave;
3097 
3098 			if (!sl_ndev)
3099 				continue;
3100 
3101 			slave = am65_ndev_to_slave(sl_ndev);
3102 			if (switch_en)
3103 				slave->port_vlan = cpsw->default_vlan;
3104 			else
3105 				slave->port_vlan = 0;
3106 		}
3107 
3108 		goto exit;
3109 	}
3110 
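	/* Put the ALE into bypass while the table is cleared and the ports
	 * are re-programmed for the new mode; bypass is lifted at the end.
	 */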
3111 	cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
3112 	/* clean up ALE table */
3113 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
3114 	cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
3115 
3116 	if (switch_en) {
3117 		dev_info(cpsw->dev, "Enable switch mode\n");
3118 
3119 		am65_cpsw_init_host_port_switch(cpsw);
3120 
3121 		for (i = 0; i < cpsw->port_num; i++) {
3122 			struct net_device *sl_ndev = cpsw->ports[i].ndev;
3123 			struct am65_cpsw_slave_data *slave;
3124 			struct am65_cpsw_port *port;
3125 
3126 			if (!sl_ndev)
3127 				continue;
3128 
3129 			port = am65_ndev_to_port(sl_ndev);
3130 			slave = am65_ndev_to_slave(sl_ndev);
3131 			slave->port_vlan = cpsw->default_vlan;
3132 
3133 			if (netif_running(sl_ndev))
3134 				am65_cpsw_init_port_switch_ale(port);
3135 		}
3136 
3137 	} else {
3138 		dev_info(cpsw->dev, "Disable switch mode\n");
3139 
3140 		am65_cpsw_init_host_port_emac(cpsw);
3141 
3142 		for (i = 0; i < cpsw->port_num; i++) {
3143 			struct net_device *sl_ndev = cpsw->ports[i].ndev;
3144 			struct am65_cpsw_port *port;
3145 
3146 			if (!sl_ndev)
3147 				continue;
3148 
3149 			port = am65_ndev_to_port(sl_ndev);
3150 			port->slave.port_vlan = 0;
3151 			if (netif_running(sl_ndev))
3152 				am65_cpsw_init_port_emac_ale(port);
3153 		}
3154 	}
3155 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
3156 exit:
3157 	rtnl_unlock();
3158 
3159 	return 0;
3160 }
3161 
3162 static const struct devlink_param am65_cpsw_devlink_params[] = {
3163 	DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
3164 			     DEVLINK_PARAM_TYPE_BOOL,
3165 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3166 			     am65_cpsw_dl_switch_mode_get,
3167 			     am65_cpsw_dl_switch_mode_set, NULL),
3168 };
3169 
3170 static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
3171 {
3172 	struct devlink_port_attrs attrs = {};
3173 	struct am65_cpsw_devlink *dl_priv;
3174 	struct device *dev = common->dev;
3175 	struct devlink_port *dl_port;
3176 	struct am65_cpsw_port *port;
3177 	int ret = 0;
3178 	int i;
3179 
3180 	common->devlink =
3181 		devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
3182 	if (!common->devlink)
3183 		return -ENOMEM;
3184 
3185 	dl_priv = devlink_priv(common->devlink);
3186 	dl_priv->common = common;
3187 
3188 	/* Provide devlink hook to switch mode when multiple external ports
3189 	 * are present and the NUSS switchdev driver is enabled.
3190 	 */
3191 	if (!AM65_CPSW_IS_CPSW2G(common) &&
3192 	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
3193 		ret = devlink_params_register(common->devlink,
3194 					      am65_cpsw_devlink_params,
3195 					      ARRAY_SIZE(am65_cpsw_devlink_params));
3196 		if (ret) {
3197 			dev_err(dev, "devlink params reg fail ret:%d\n", ret);
3198 			goto dl_unreg;
3199 		}
3200 	}
3201 
3202 	for (i = 1; i <= common->port_num; i++) {
3203 		port = am65_common_get_port(common, i);
3204 		dl_port = &port->devlink_port;
3205 
3206 		if (port->ndev)
3207 			attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
3208 		else
3209 			attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
3210 		attrs.phys.port_number = port->port_id;
3211 		attrs.switch_id.id_len = sizeof(resource_size_t);
3212 		memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
3213 		devlink_port_attrs_set(dl_port, &attrs);
3214 
3215 		ret = devlink_port_register(common->devlink, dl_port, port->port_id);
3216 		if (ret) {
3217 			dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
3218 				port->port_id, ret);
3219 			goto dl_port_unreg;
3220 		}
3221 	}
3222 	devlink_register(common->devlink);
3223 	return ret;
3224 
3225 dl_port_unreg:
3226 	for (i = i - 1; i >= 1; i--) {
3227 		port = am65_common_get_port(common, i);
3228 		dl_port = &port->devlink_port;
3229 
3230 		devlink_port_unregister(dl_port);
3231 	}
3232 dl_unreg:
3233 	devlink_free(common->devlink);
3234 	return ret;
3235 }
3236 
3237 static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
3238 {
3239 	struct devlink_port *dl_port;
3240 	struct am65_cpsw_port *port;
3241 	int i;
3242 
3243 	devlink_unregister(common->devlink);
3244 
3245 	for (i = 1; i <= common->port_num; i++) {
3246 		port = am65_common_get_port(common, i);
3247 		dl_port = &port->devlink_port;
3248 
3249 		devlink_port_unregister(dl_port);
3250 	}
3251 
3252 	if (!AM65_CPSW_IS_CPSW2G(common) &&
3253 	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
3254 		devlink_params_unregister(common->devlink,
3255 					  am65_cpsw_devlink_params,
3256 					  ARRAY_SIZE(am65_cpsw_devlink_params));
3257 
3258 	devlink_free(common->devlink);
3259 }
3260 
3261 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
3262 {
3263 	struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
3264 	struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
3265 	struct device *dev = common->dev;
3266 	struct am65_cpsw_port *port;
3267 	int ret = 0, i;
3268 
3269 	/* init tx and rx channels */
3270 	ret = am65_cpsw_nuss_init_tx_chns(common);
3271 	if (ret)
3272 		return ret;
3273 	ret = am65_cpsw_nuss_init_rx_chns(common);
3274 	if (ret)
3275 		return ret;
3276 
3277 	/* The DMA Channels are not guaranteed to be in a clean state.
3278 	 * Reset and disable them to ensure that they are back to the
3279 	 * clean state and ready to be used.
3280 	 */
3281 	for (i = 0; i < common->tx_ch_num; i++) {
3282 		k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
3283 					  am65_cpsw_nuss_tx_cleanup);
3284 		k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
3285 	}
3286 
3287 	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
3288 		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
3289 					  am65_cpsw_nuss_rx_cleanup, !!i);
3290 
3291 	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
3292 
3293 	ret = am65_cpsw_nuss_register_devlink(common);
3294 	if (ret)
3295 		return ret;
3296 
3297 	for (i = 0; i < common->port_num; i++) {
3298 		port = &common->ports[i];
3299 
3300 		if (!port->ndev)
3301 			continue;
3302 
3303 		SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port);
3304 
3305 		ret = register_netdev(port->ndev);
3306 		if (ret) {
3307 			dev_err(dev, "error registering slave net device %i %d\n",
3308 				i, ret);
3309 			goto err_cleanup_ndev;
3310 		}
3311 	}
3312 
3313 	ret = am65_cpsw_register_notifiers(common);
3314 	if (ret)
3315 		goto err_cleanup_ndev;
3316 
3317 	/* can't auto unregister ndev using devm_add_action() due to
3318 	 * devres release sequence in DD core for DMA
3319 	 */
3320 
3321 	return 0;
3322 
3323 err_cleanup_ndev:
3324 	am65_cpsw_nuss_cleanup_ndev(common);
3325 	am65_cpsw_unregister_devlink(common);
3326 
3327 	return ret;
3328 }
3329 
3330 int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
3331 {
3332 	int ret;
3333 
3334 	common->tx_ch_num = num_tx;
3335 	ret = am65_cpsw_nuss_init_tx_chns(common);
3336 
3337 	return ret;
3338 }
3339 
3340 struct am65_cpsw_soc_pdata {
3341 	u32	quirks_dis;
3342 };
3343 
3344 static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
3345 	.quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
3346 };
3347 
3348 static const struct soc_device_attribute am65_cpsw_socinfo[] = {
3349 	{ .family = "AM65X",
3350 	  .revision = "SR2.0",
3351 	  .data = &am65x_soc_sr2_0
3352 	},
3353 	{/* sentinel */}
3354 };
3355 
3356 static const struct am65_cpsw_pdata am65x_sr1_0 = {
3357 	.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
3358 	.ale_dev_id = "am65x-cpsw2g",
3359 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3360 };
3361 
3362 static const struct am65_cpsw_pdata j721e_pdata = {
3363 	.quirks = 0,
3364 	.ale_dev_id = "am65x-cpsw2g",
3365 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3366 };
3367 
3368 static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
3369 	.quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
3370 	.ale_dev_id = "am64-cpswxg",
3371 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
3372 };
3373 
3374 static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
3375 	.quirks = 0,
3376 	.ale_dev_id = "am64-cpswxg",
3377 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
3378 	.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
3379 };
3380 
3381 static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
3382 	.quirks = 0,
3383 	.ale_dev_id = "am64-cpswxg",
3384 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3385 	.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
3386 };
3387 
3388 static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
3389 	.quirks = 0,
3390 	.ale_dev_id = "am64-cpswxg",
3391 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3392 	.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII),
3393 };
3394 
3395 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
3396 	{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
3397 	{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
3398 	{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
3399 	{ .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
3400 	{ .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
3401 	{ .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata},
3402 	{ /* sentinel */ },
3403 };
3404 MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
3405 
3406 static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
3407 {
3408 	const struct soc_device_attribute *soc;
3409 
3410 	soc = soc_device_match(am65_cpsw_socinfo);
3411 	if (soc && soc->data) {
3412 		const struct am65_cpsw_soc_pdata *socdata = soc->data;
3413 
3414 		/* disable quirks */
3415 		common->pdata.quirks &= ~socdata->quirks_dis;
3416 	}
3417 }
3418 
3419 static int am65_cpsw_nuss_probe(struct platform_device *pdev)
3420 {
3421 	struct cpsw_ale_params ale_params = { 0 };
3422 	const struct of_device_id *of_id;
3423 	struct device *dev = &pdev->dev;
3424 	struct am65_cpsw_common *common;
3425 	struct device_node *node;
3426 	struct resource *res;
3427 	struct clk *clk;
3428 	int ale_entries;
3429 	u64 id_temp;
3430 	int ret, i;
3431 
3432 	common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
3433 	if (!common)
3434 		return -ENOMEM;
3435 	common->dev = dev;
3436 
3437 	of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
3438 	if (!of_id)
3439 		return -EINVAL;
3440 	common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;
3441 
3442 	am65_cpsw_nuss_apply_socinfo(common);
3443 
3444 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
3445 	common->ss_base = devm_ioremap_resource(&pdev->dev, res);
3446 	if (IS_ERR(common->ss_base))
3447 		return PTR_ERR(common->ss_base);
3448 	common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
3449 	/* Use device's physical base address as switch id */
3450 	id_temp = cpu_to_be64(res->start);
3451 	memcpy(common->switch_id, &id_temp, sizeof(res->start));
3452 
3453 	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
3454 	if (!node)
3455 		return -ENOENT;
3456 	common->port_num = of_get_child_count(node);
3457 	of_node_put(node);
3458 	if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
3459 		return -ENOENT;
3460 
3461 	common->rx_flow_id_base = -1;
3462 	init_completion(&common->tdown_complete);
3463 	common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
3464 	common->pf_p0_rx_ptype_rrobin = false;
3465 	common->default_vlan = 1;
3466 
3467 	common->ports = devm_kcalloc(dev, common->port_num,
3468 				     sizeof(*common->ports),
3469 				     GFP_KERNEL);
3470 	if (!common->ports)
3471 		return -ENOMEM;
3472 
3473 	clk = devm_clk_get(dev, "fck");
3474 	if (IS_ERR(clk))
3475 		return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
3476 	common->bus_freq = clk_get_rate(clk);
3477 
3478 	pm_runtime_enable(dev);
3479 	ret = pm_runtime_resume_and_get(dev);
3480 	if (ret < 0) {
3481 		pm_runtime_disable(dev);
3482 		return ret;
3483 	}
3484 
3485 	node = of_get_child_by_name(dev->of_node, "mdio");
3486 	if (!node) {
3487 		dev_warn(dev, "MDIO node not found\n");
3488 	} else if (of_device_is_available(node)) {
3489 		struct platform_device *mdio_pdev;
3490 
3491 		mdio_pdev = of_platform_device_create(node, NULL, dev);
3492 		if (!mdio_pdev) {
3493 			ret = -ENODEV;
3494 			goto err_pm_clear;
3495 		}
3496 
3497 		common->mdio_dev = &mdio_pdev->dev;
3498 	}
3499 	of_node_put(node);
3500 
3501 	am65_cpsw_nuss_get_ver(common);
3502 
3503 	ret = am65_cpsw_nuss_init_host_p(common);
3504 	if (ret)
3505 		goto err_of_clear;
3506 
3507 	ret = am65_cpsw_nuss_init_slave_ports(common);
3508 	if (ret)
3509 		goto err_of_clear;
3510 
3511 	/* init common data */
3512 	ale_params.dev = dev;
3513 	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
3514 	ale_params.ale_ports = common->port_num + 1;
3515 	ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
3516 	ale_params.dev_id = common->pdata.ale_dev_id;
3517 	ale_params.bus_freq = common->bus_freq;
3518 
3519 	common->ale = cpsw_ale_create(&ale_params);
3520 	if (IS_ERR(common->ale)) {
3521 		dev_err(dev, "error initializing ale engine\n");
3522 		ret = PTR_ERR(common->ale);
3523 		goto err_of_clear;
3524 	}
3525 
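	/* Snapshot buffer for the whole ALE table (ale_entries entries of
	 * ALE_ENTRY_WORDS u32 words each); cpsw_ale_dump()/cpsw_ale_restore()
	 * use it in the system suspend/resume callbacks below.
	 */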
3526 	ale_entries = common->ale->params.ale_entries;
3527 	common->ale_context = devm_kzalloc(dev,
3528 					   ale_entries * ALE_ENTRY_WORDS * sizeof(u32),
3529 					   GFP_KERNEL);
	if (!common->ale_context) {
		ret = -ENOMEM;
		goto err_of_clear;
	}

3530 	ret = am65_cpsw_init_cpts(common);
3531 	if (ret)
3532 		goto err_of_clear;
3533 
3534 	/* init ports */
3535 	for (i = 0; i < common->port_num; i++)
3536 		am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
3537 
3538 	dev_set_drvdata(dev, common);
3539 
3540 	common->is_emac_mode = true;
3541 
3542 	ret = am65_cpsw_nuss_init_ndevs(common);
3543 	if (ret)
3544 		goto err_free_phylink;
3545 
3546 	ret = am65_cpsw_nuss_register_ndevs(common);
3547 	if (ret)
3548 		goto err_free_phylink;
3549 
3550 	pm_runtime_put(dev);
3551 	return 0;
3552 
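/* Unwind in reverse order of setup; each label falls through to the next so
 * later failures release everything the earlier steps acquired.
 */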
3553 err_free_phylink:
3554 	am65_cpsw_nuss_phylink_cleanup(common);
3555 	am65_cpts_release(common->cpts);
3556 err_of_clear:
3557 	if (common->mdio_dev)
3558 		of_platform_device_destroy(common->mdio_dev, NULL);
3559 err_pm_clear:
3560 	pm_runtime_put_sync(dev);
3561 	pm_runtime_disable(dev);
3562 	return ret;
3563 }
3564 
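/* Teardown mirrors probe in reverse. The device is runtime-resumed first so
 * that the hardware can still be accessed while the net devices, phylink,
 * CPTS and MDIO child are being released.
 */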
3565 static void am65_cpsw_nuss_remove(struct platform_device *pdev)
3566 {
3567 	struct device *dev = &pdev->dev;
3568 	struct am65_cpsw_common *common;
3569 	int ret;
3570 
3571 	common = dev_get_drvdata(dev);
3572 
3573 	ret = pm_runtime_resume_and_get(&pdev->dev);
3574 	if (ret < 0) {
3575 		/* Note, if this error path is taken, we're leaking some
3576 		 * resources.
3577 		 */
3578 		dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
3579 			ERR_PTR(ret));
3580 		return;
3581 	}
3582 
3583 	am65_cpsw_unregister_devlink(common);
3584 	am65_cpsw_unregister_notifiers(common);
3585 
3586 	/* must unregister ndevs here because the driver core's release_driver
3587 	 * routine calls dma_deconfigure(dev) before devres_release_all(dev)
3588 	 */
3589 	am65_cpsw_nuss_cleanup_ndev(common);
3590 	am65_cpsw_nuss_phylink_cleanup(common);
3591 	am65_cpts_release(common->cpts);
3592 	am65_cpsw_disable_serdes_phy(common);
3593 
3594 	if (common->mdio_dev)
3595 		of_platform_device_destroy(common->mdio_dev, NULL);
3596 
3597 	pm_runtime_put_sync(&pdev->dev);
3598 	pm_runtime_disable(&pdev->dev);
3599 }
3600 
3601 static int am65_cpsw_nuss_suspend(struct device *dev)
3602 {
3603 	struct am65_cpsw_common *common = dev_get_drvdata(dev);
3604 	struct am65_cpsw_host *host_p = am65_common_get_host(common);
3605 	struct am65_cpsw_port *port;
3606 	struct net_device *ndev;
3607 	int i, ret;
3608 
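	/* Save the state that the hardware loses across suspend: the full ALE
	 * table and the host/slave port VLAN registers. Running ports are
	 * stopped and detached so the stack stops queueing to them.
	 */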
3609 	cpsw_ale_dump(common->ale, common->ale_context);
3610 	host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3611 	for (i = 0; i < common->port_num; i++) {
3612 		port = &common->ports[i];
3613 		ndev = port->ndev;
3614 
3615 		if (!ndev)
3616 			continue;
3617 
3618 		port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3619 		netif_device_detach(ndev);
3620 		if (netif_running(ndev)) {
3621 			rtnl_lock();
3622 			ret = am65_cpsw_nuss_ndo_slave_stop(ndev);
3623 			rtnl_unlock();
3624 			if (ret < 0) {
3625 				netdev_err(ndev, "failed to stop: %d\n", ret);
3626 				return ret;
3627 			}
3628 		}
3629 	}
3630 
3631 	am65_cpts_suspend(common->cpts);
3632 
3633 	am65_cpsw_nuss_remove_rx_chns(common);
3634 	am65_cpsw_nuss_remove_tx_chns(common);
3635 
3636 	return 0;
3637 }
3638 
3639 static int am65_cpsw_nuss_resume(struct device *dev)
3640 {
3641 	struct am65_cpsw_common *common = dev_get_drvdata(dev);
3642 	struct am65_cpsw_host *host_p = am65_common_get_host(common);
3643 	struct am65_cpsw_port *port;
3644 	struct net_device *ndev;
3645 	int i, ret;
3646 
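	/* The DMA channels were released on suspend, so recreate them before
	 * any port is reopened.
	 */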
3647 	ret = am65_cpsw_nuss_init_tx_chns(common);
3648 	if (ret)
3649 		return ret;
3650 	ret = am65_cpsw_nuss_init_rx_chns(common);
3651 	if (ret)
3652 		return ret;
3653 
3654 	/* If RX IRQ was disabled before suspend, keep it disabled */
3655 	if (common->rx_irq_disabled)
3656 		disable_irq(common->rx_chns.irq);
3657 
3658 	am65_cpts_resume(common->cpts);
3659 
3660 	for (i = 0; i < common->port_num; i++) {
3661 		port = &common->ports[i];
3662 		ndev = port->ndev;
3663 
3664 		if (!ndev)
3665 			continue;
3666 
3667 		if (netif_running(ndev)) {
3668 			rtnl_lock();
3669 			ret = am65_cpsw_nuss_ndo_slave_open(ndev);
3670 			rtnl_unlock();
3671 			if (ret < 0) {
3672 				netdev_err(ndev, "failed to start: %d\n", ret);
3673 				return ret;
3674 			}
3675 		}
3676 
3677 		netif_device_attach(ndev);
3678 		writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3679 	}
3680 
3681 	writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3682 	cpsw_ale_restore(common->ale, common->ale_context);
3683 
3684 	return 0;
3685 }
3686 
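/* SYSTEM_SLEEP_PM_OPS only populates the suspend/resume callbacks when
 * system sleep support is configured, so no #ifdef is needed here.
 */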
3687 static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = {
3688 	SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume)
3689 };
3690 
3691 static struct platform_driver am65_cpsw_nuss_driver = {
3692 	.driver = {
3693 		.name	 = AM65_CPSW_DRV_NAME,
3694 		.of_match_table = am65_cpsw_nuss_of_mtable,
3695 		.pm = &am65_cpsw_nuss_dev_pm_ops,
3696 	},
3697 	.probe = am65_cpsw_nuss_probe,
3698 	.remove_new = am65_cpsw_nuss_remove,
3699 };
3700 
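/* module_platform_driver() expands to the module init/exit boilerplate that
 * registers and unregisters the platform driver.
 */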
3701 module_platform_driver(am65_cpsw_nuss_driver);
3702 
3703 MODULE_LICENSE("GPL v2");
3704 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
3705 MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");
3706