// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 */

#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/rtnetlink.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
#include <net/switchdev.h>

#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"

#define AM65_CPSW_SS_BASE		0x0
#define AM65_CPSW_SGMII_BASE		0x100
#define AM65_CPSW_XGMII_BASE		0x2100
#define AM65_CPSW_CPSW_NU_BASE		0x20000
#define AM65_CPSW_NU_PORTS_BASE		0x1000
#define AM65_CPSW_NU_FRAM_BASE		0x12000
#define AM65_CPSW_NU_STATS_BASE		0x1a000
#define AM65_CPSW_NU_ALE_BASE		0x1e000
#define AM65_CPSW_NU_CPTS_BASE		0x1d000

#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200

#define AM65_CPSW_MAX_PORTS	8

#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
#define AM65_CPSW_MAX_PACKET_SIZE	2024

#define AM65_CPSW_REG_CTL		0x004
#define AM65_CPSW_REG_STAT_PORT_EN	0x014
#define AM65_CPSW_REG_PTYPE		0x018

#define AM65_CPSW_P0_REG_CTL			0x004
#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008

#define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
#define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024

#define AM65_CPSW_PORTN_REG_CTL			0x004
#define AM65_CPSW_PORTN_REG_DSCP_MAP		0x120
#define AM65_CPSW_PORTN_REG_SA_L		0x308
#define AM65_CPSW_PORTN_REG_SA_H		0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL		0x310
#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2	0x31C

#define AM65_CPSW_SGMII_CONTROL_REG		0x010
#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG	0x018
#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE	BIT(0)

#define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
#define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)

/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)
#define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN	BIT(16)

/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)

/* AM65_CPSW_PN_REG_CTL */
#define AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN	BIT(1)
#define AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN	BIT(2)

/* AM65_CPSW_PN_TS_CTL register fields */
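/* The ANX_D/E/F suffixes in the timestamp-control bits below appear to
 * correspond to the IEEE 1588 transport annexes (Annex D: PTP over UDP/IPv4,
 * Annex E: PTP over UDP/IPv6, Annex F: PTP over IEEE 802.3/Ethernet), which
 * is consistent with the AM65_CPSW_TS_TX/RX_ANX_ALL_EN aggregates further
 * down; treat this as an interpretation, not a TRM quote.
 */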
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16

#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN		BIT(0)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN	BIT(1)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN	BIT(2)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN		BIT(3)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN		BIT(9)

/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO	BIT(23)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS	(BIT(0) | BIT(1) | BIT(2) | BIT(3))

#define AM65_CPSW_TS_SEQ_ID_OFFSET	(0x1e)

#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)

#define AM65_CPSW_TS_RX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)

#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
/* Number of TX/RX descriptors per channel/flow */
#define AM65_CPSW_MAX_TX_DESC	500
#define AM65_CPSW_MAX_RX_DESC	500

#define AM65_CPSW_NAV_PS_DATA_SIZE 16
#define AM65_CPSW_NAV_SW_DATA_SIZE 16

#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
			 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define AM65_CPSW_DEFAULT_TX_CHNS	8
#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS	1

/* CPPI streaming packet interface */
#define AM65_CPSW_CPPI_TX_FLOW_ID	0x3FFF
#define AM65_CPSW_CPPI_TX_PKT_TYPE	0x7

/* XDP */
#define AM65_CPSW_XDP_CONSUMED	BIT(1)
#define AM65_CPSW_XDP_REDIRECT	BIT(0)
#define AM65_CPSW_XDP_PASS	0

/* Include headroom compatible with both skb and xdpf */
#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))

static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				      const u8 *dev_addr)
{
	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);

	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}

#define AM65_CPSW_DSCP_MAX	GENMASK(5, 0)
#define AM65_CPSW_PRI_MAX	GENMASK(2, 0)
#define AM65_CPSW_DSCP_PRI_PER_REG	8
#define AM65_CPSW_DSCP_PRI_SIZE		4	/* in bits */
static int am65_cpsw_port_set_dscp_map(struct am65_cpsw_port *slave, u8 dscp, u8 pri)
{
	int reg_ofs;
	int bit_ofs;
	u32 val;

	if (dscp > AM65_CPSW_DSCP_MAX)
		return -EINVAL;

	if (pri > AM65_CPSW_PRI_MAX)
		return -EINVAL;

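	/* Worked example (illustrative only): each 32-bit DSCP_MAP word holds
	 * eight 4-bit priority fields, so DSCP 46 (EF) falls in word
	 * 46 / 8 = 5 (register offset 5 * 4 = 0x14) at bit offset
	 * 4 * (46 % 8) = 24; the 3-bit priority is written at bits 26:24
	 * of that word.
	 */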
	/* 32-bit register offset to this dscp */
	reg_ofs = (dscp / AM65_CPSW_DSCP_PRI_PER_REG) * 4;
	/* bit field offset to this dscp */
	bit_ofs = AM65_CPSW_DSCP_PRI_SIZE * (dscp % AM65_CPSW_DSCP_PRI_PER_REG);

	val = readl(slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
	val &= ~(AM65_CPSW_PRI_MAX << bit_ofs);	/* clear */
	val |= pri << bit_ofs;			/* set */
	writel(val, slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);

	return 0;
}

static void am65_cpsw_port_enable_dscp_map(struct am65_cpsw_port *slave)
{
	int dscp, pri;
	u32 val;

	/* Default DSCP to User Priority mapping as per:
	 * https://datatracker.ietf.org/doc/html/rfc8325#section-4.3
	 * and
	 * https://datatracker.ietf.org/doc/html/rfc8622#section-11
	 */
	for (dscp = 0; dscp <= AM65_CPSW_DSCP_MAX; dscp++) {
		switch (dscp) {
		case 56:	/* CS7 */
		case 48:	/* CS6 */
			pri = 7;
			break;
		case 46:	/* EF */
		case 44:	/* VA */
			pri = 6;
			break;
		case 40:	/* CS5 */
			pri = 5;
			break;
		case 34:	/* AF41 */
		case 36:	/* AF42 */
		case 38:	/* AF43 */
		case 32:	/* CS4 */
		case 26:	/* AF31 */
		case 28:	/* AF32 */
		case 30:	/* AF33 */
		case 24:	/* CS3 */
			pri = 4;
			break;
		case 18:	/* AF21 */
		case 20:	/* AF22 */
		case 22:	/* AF23 */
			pri = 3;
			break;
		case 16:	/* CS2 */
		case 10:	/* AF11 */
		case 12:	/* AF12 */
		case 14:	/* AF13 */
		case 0:		/* DF */
			pri = 0;
			break;
		case 8:		/* CS1 */
		case 1:		/* LE */
			pri = 1;
			break;
		default:
			pri = 0;
			break;
		}

		am65_cpsw_port_set_dscp_map(slave, dscp, pri);
	}

	/* enable port IPV4 and IPV6 DSCP for this port */
	val = readl(slave->port_base + AM65_CPSW_PORTN_REG_CTL);
	val |= AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN |
	       AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN;
	writel(val, slave->port_base + AM65_CPSW_PORTN_REG_CTL);
}

static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
	cpsw_sl_reset(port->slave.mac_sl, 100);
	/* Max length register has to be restored after MAC SL reset */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
}

static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
{
	common->nuss_ver = readl(common->ss_base);
	common->cpsw_ver = readl(common->cpsw_base);
	dev_info(common->dev,
		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
		 common->nuss_ver,
		 common->cpsw_ver,
		 common->port_num + 1,
		 common->pdata.quirks);
}

static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
					    __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask, unreg_mcast = 0;
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	if (!vid)
		unreg_mcast = port_mask;
	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
				       unreg_mcast, port_mask, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
					     __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(common->ale, vid,
				BIT(port->port_id) | ALE_PORT_HOST);

	pm_runtime_put(common->dev);
	return ret;
}

static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
					bool promisc)
{
	struct am65_cpsw_common *common = port->common;

	if (promisc && !common->is_emac_mode) {
		dev_dbg(common->dev, "promisc mode requested in switch mode");
		return;
	}

	if (promisc) {
		/* Enable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 1);
		dev_dbg(common->dev, "promisc enabled\n");
	} else {
		/* Disable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 0);
		dev_dbg(common->dev, "promisc disabled\n");
	}
}

static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	bool promisc;

	promisc = !!(ndev->flags & IFF_PROMISC);
	am65_cpsw_slave_set_promisc(port, promisc);

	if (promisc)
		return;

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(common->ale,
			      ndev->flags & IFF_ALLMULTI, port->port_id);

	port_mask = ALE_PORT_HOST;
	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(common->ale, port_mask, -1);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_ale_add_mcast(common->ale, ha->addr,
					   port_mask, 0, 0, 0);
		}
	}
}

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned long trans_start;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];
	trans_start = READ_ONCE(netif_txq->trans_start);

	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
		   txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   jiffies_to_msecs(jiffies - trans_start),
		   netdev_queue_dql_avail(netif_txq),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));

	if (netif_tx_queue_stopped(netif_txq)) {
		/* try recover if stopped by us */
		txq_trans_update(netif_txq);
		netif_tx_wake_queue(netif_txq);
	}
}

static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
				  struct page *page, u32 flow_idx)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct am65_cpsw_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev,
				 page_address(page) + AM65_CPSW_HEADROOM,
				 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		dev_err(dev, "Failed to map rx buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->page = page;
	swdata->flow_id = flow_idx;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
					desc_rx, desc_dma);
}

void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	u32 val, pri_map;

	/* P0 set Receive Priority Type */
	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);

	if (common->pf_p0_rx_ptype_rrobin) {
		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* Enet Ports fifos works in fixed priority mode only, so
		 * reset P0_Rx_Pri_Map so all packet will go in Enet fifo 0
		 */
		pri_map = 0x0;
	} else {
		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* restore P0_Rx_Pri_Map */
		pri_map = 0x76543210;
	}

	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}

static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);

static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_rx_flow *flow;
	struct xdp_rxq_info *rxq;
	int id, port;

	for (id = 0; id < common->rx_ch_num_flows; id++) {
		flow = &rx_chn->flows[id];

		for (port = 0; port < common->port_num; port++) {
			if (!common->ports[port].ndev)
				continue;

			rxq = &common->ports[port].xdp_rxq[id];

			if (xdp_rxq_info_is_reg(rxq))
				xdp_rxq_info_unreg(rxq);
		}

		if (flow->page_pool) {
			page_pool_destroy(flow->page_pool);
			flow->page_pool = NULL;
		}
	}
}

static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP,
		.order = 0,
		.pool_size = AM65_CPSW_MAX_RX_DESC,
		.nid = dev_to_node(common->dev),
		.dev = common->dev,
		.dma_dir = DMA_BIDIRECTIONAL,
		/* .napi set dynamically */
	};
	struct am65_cpsw_rx_flow *flow;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int id, port, ret;

	for (id = 0; id < common->rx_ch_num_flows; id++) {
		flow = &rx_chn->flows[id];
		pp_params.napi = &flow->napi_rx;
		pool = page_pool_create(&pp_params);
		if (IS_ERR(pool)) {
			ret = PTR_ERR(pool);
			goto err;
		}

		flow->page_pool = pool;

		/* using same page pool is allowed as no running rx handlers
		 * simultaneously for both ndevs
		 */
		for (port = 0; port < common->port_num; port++) {
			if (!common->ports[port].ndev)
				continue;

			rxq = &common->ports[port].xdp_rxq[id];

			ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
					       id, flow->napi_rx.napi_id);
			if (ret)
				goto err;

			ret = xdp_rxq_info_reg_mem_model(rxq,
							 MEM_TYPE_PAGE_POOL,
							 pool);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	am65_cpsw_destroy_xdp_rxqs(common);
	return ret;
}

static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
				   void *desc,
				   unsigned char dsize_log2)
{
	void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);

	return (desc - pool_addr) >> dsize_log2;
}

static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
					struct cppi5_host_desc_t *desc,
					enum am65_cpsw_tx_buf_type buf_type)
{
	int desc_idx;

	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
					   tx_chn->dsize_log2);
	k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
					(void *)buf_type);
}

static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
							  dma_addr_t desc_dma)
{
	struct cppi5_host_desc_t *desc_tx;
	int desc_idx;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
					   tx_chn->dsize_log2);

	return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
								       desc_idx);
}

static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
				      struct page *page,
				      bool allow_direct)
{
	page_pool_put_full_page(flow->page_pool, page, allow_direct);
}

static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct am65_cpsw_swdata *swdata;
	dma_addr_t buf_dma;
	struct page *page;
	u32 buf_dma_len;
	u32 flow_id;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	page = swdata->page;
	flow_id = swdata->flow_id;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}

static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
					   struct net_device *ndev,
					   unsigned int len)
{
	struct sk_buff *skb;

	len += AM65_CPSW_HEADROOM;

	skb = build_skb(page_addr, len);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, AM65_CPSW_HEADROOM);
	skb->dev = ndev;

	return skb;
}

static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int port_idx, i, ret, tx, flow_idx;
	struct am65_cpsw_rx_flow *flow;
	u32 val, port_mask;
	struct page *page;

	if (common->usage_count)
		return 0;

	/* Control register */
	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_REG_CTL);
	/* Max length register */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
	/* set base flow_id */
	writel(common->rx_flow_id_base,
	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
	       host_p->port_base + AM65_CPSW_P0_REG_CTL);

	am65_cpsw_nuss_set_p0_ptype(common);

	/* enable statistic */
	val = BIT(HOST_PORT_NUM);
	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
		struct am65_cpsw_port *port = &common->ports[port_idx];

		if (!port->disabled)
			val |= BIT(port->port_id);
	}
	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);

	cpsw_ale_start(common->ale);

	/* limit to one RX flow only */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ID, 0);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ENABLE, 1);
	/* switch to vlan unaware mode */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* default vlan cfg: create mask based on enabled ports */
	port_mask = GENMASK(common->port_num, 0) &
		    ~common->disabled_ports_mask;

	cpsw_ale_add_vlan(common->ale, 0, port_mask,
			  port_mask, port_mask,
			  port_mask & ~ALE_PORT_HOST);

	if (common->is_emac_mode)
		am65_cpsw_init_host_port_emac(common);
	else
		am65_cpsw_init_host_port_switch(common);

	am65_cpsw_qos_tx_p0_rate_init(common);

	ret = am65_cpsw_create_xdp_rxqs(common);
	if (ret) {
		dev_err(common->dev, "Failed to create XDP rx queues\n");
		return ret;
	}

	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
		flow = &rx_chn->flows[flow_idx];
		for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
			page = page_pool_dev_alloc_pages(flow->page_pool);
			if (!page) {
				dev_err(common->dev, "cannot allocate page in flow %d\n",
					flow_idx);
				ret = -ENOMEM;
				goto fail_rx;
			}

			ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
			if (ret < 0) {
				dev_err(common->dev,
					"cannot submit page to rx channel flow %d, error %d\n",
					flow_idx, ret);
				am65_cpsw_put_page(flow, page, false);
				goto fail_rx;
			}
		}
	}

	ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
	if (ret) {
		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
		goto fail_rx;
	}

	for (i = 0; i < common->rx_ch_num_flows; i++) {
		napi_enable(&rx_chn->flows[i].napi_rx);
		if (rx_chn->flows[i].irq_disabled) {
			rx_chn->flows[i].irq_disabled = false;
			enable_irq(rx_chn->flows[i].irq);
		}
	}

	for (tx = 0; tx < common->tx_ch_num; tx++) {
		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
		if (ret) {
			dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
				tx, ret);
			tx--;
			goto fail_tx;
		}
		napi_enable(&tx_chn[tx].napi_tx);
	}

	dev_dbg(common->dev, "cpsw_nuss started\n");
	return 0;

fail_tx:
	while (tx >= 0) {
		napi_disable(&tx_chn[tx].napi_tx);
		k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
		tx--;
	}

	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
		flow = &rx_chn->flows[flow_idx];
		if (!flow->irq_disabled) {
			disable_irq(flow->irq);
			flow->irq_disabled = true;
		}
		napi_disable(&flow->napi_rx);
	}

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

fail_rx:
	for (i = 0; i < common->rx_ch_num_flows; i++)
		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
					  am65_cpsw_nuss_rx_cleanup, !!i);

	am65_cpsw_destroy_xdp_rxqs(common);

	return ret;
}

static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int i;

	if (common->usage_count != 1)
		return 0;

	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	/* shutdown tx channels */
	atomic_set(&common->tdown_cnt, common->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	reinit_completion(&common->tdown_complete);

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);

	i = wait_for_completion_timeout(&common->tdown_complete,
					msecs_to_jiffies(1000));
	if (!i)
		dev_err(common->dev, "tx timeout\n");
	for (i = 0; i < common->tx_ch_num; i++) {
		napi_disable(&tx_chn[i].napi_tx);
		hrtimer_cancel(&tx_chn[i].tx_hrtimer);
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
	}

	reinit_completion(&common->tdown_complete);
	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);

	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
		if (!i)
			dev_err(common->dev, "rx teardown timeout\n");
	}

	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
		napi_disable(&rx_chn->flows[i].napi_rx);
		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
					  am65_cpsw_nuss_rx_cleanup, !!i);
	}

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

	cpsw_ale_stop(common->ale);

	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	am65_cpsw_destroy_xdp_rxqs(common);

	dev_dbg(common->dev, "cpsw_nuss stopped\n");
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	phylink_stop(port->slave.phylink);

	netif_tx_stop_all_queues(ndev);

	phylink_disconnect_phy(port->slave.phylink);

	ret = am65_cpsw_nuss_common_stop(common);
	if (ret)
		return ret;

	common->usage_count--;
	pm_runtime_put(common->dev);
	return 0;
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct am65_cpsw_port *port = arg;

	if (!vdev)
		return 0;

	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
}

static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret, i;
	u32 reg;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	/* Idle MAC port */
	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
	cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);

	/* soft reset MAC */
	cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
	mdelay(1);
	reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
	if (reg) {
		dev_err(common->dev, "soft RESET didn't complete\n");
		ret = -ETIMEDOUT;
		goto runtime_put;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
	if (ret) {
		dev_err(common->dev, "cannot set real number of tx queues\n");
		goto runtime_put;
	}

	ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
	if (ret) {
		dev_err(common->dev, "cannot set real number of rx queues\n");
		goto runtime_put;
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);

		netdev_tx_reset_queue(txq);
		txq->tx_maxrate = common->tx_chns[i].rate_mbps;
	}

	ret = am65_cpsw_nuss_common_open(common);
	if (ret)
		goto runtime_put;

	common->usage_count++;

	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
	am65_cpsw_port_enable_dscp_map(port);

	if (common->is_emac_mode)
		am65_cpsw_init_port_emac_ale(port);
	else
		am65_cpsw_init_port_switch_ale(port);

	/* mac_sl should be configured via phy-link interface */
	am65_cpsw_sl_ctl_reset(port);

	ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
	if (ret)
		goto error_cleanup;

	/* restore vlan configurations */
	vlan_for_each(ndev, cpsw_restore_vlans, port);

	phylink_start(port->slave.phylink);

	return 0;

error_cleanup:
	am65_cpsw_nuss_ndo_slave_stop(ndev);
	return ret;

runtime_put:
	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
				  struct am65_cpsw_tx_chn *tx_chn,
				  struct xdp_frame *xdpf,
				  enum am65_cpsw_tx_buf_type buf_type)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct cppi5_host_desc_t *host_desc;
	struct netdev_queue *netif_txq;
	dma_addr_t dma_desc, dma_buf;
	u32 pkt_len = xdpf->len;
	void **swdata;
	int ret;

	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (unlikely(!host_desc)) {
		ndev->stats.tx_dropped++;
		return AM65_CPSW_XDP_CONSUMED;	/* drop */
	}

	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);

	dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
				 pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
		ndev->stats.tx_dropped++;
		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
		goto pool_free;
	}

	cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
	cppi5_hdesc_set_pktlen(host_desc, pkt_len);
	cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
	cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);

	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
	cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);

	swdata = cppi5_hdesc_get_swdata(host_desc);
	*(swdata) = xdpf;

	/* Report BQL before sending the packet */
	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
	netdev_tx_sent_queue(netif_txq, pkt_len);

	dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
					       dma_desc);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
					       dma_desc);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		/* Inform BQL */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
		goto dma_unmap;
	}

	return 0;

dma_unmap:
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
	dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
pool_free:
	k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
	return ret;
}

static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
			     struct am65_cpsw_port *port,
			     struct xdp_buff *xdp,
			     int cpu, int *len)
{
	struct am65_cpsw_common *common = flow->common;
	struct net_device *ndev = port->ndev;
	int ret = AM65_CPSW_XDP_CONSUMED;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	struct page *page;
	u32 act;
	int err;

	prog = READ_ONCE(port->xdp_prog);
	if (!prog)
		return AM65_CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = AM65_CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);

		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		__netif_tx_lock(netif_txq, cpu);
		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
		__netif_tx_unlock(netif_txq);
		if (err)
			goto drop;

		dev_sw_netstats_tx_add(ndev, 1, *len);
		ret = AM65_CPSW_XDP_CONSUMED;
		goto out;
	case XDP_REDIRECT:
		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
			goto drop;

		dev_sw_netstats_rx_add(ndev, *len);
		ret = AM65_CPSW_XDP_REDIRECT;
		goto out;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
drop:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		ndev->stats.rx_dropped++;
	}

	page = virt_to_head_page(xdp->data);
	am65_cpsw_put_page(flow, page, true);

out:
	return ret;
}

/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)

static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
{
	/* HW can verify IPv4/IPv6 TCP/UDP packets checksum
	 * csum information provides in psdata[2] word:
	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
	 * bits - indicates IPv4/IPv6 packet
	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
	 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
	 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return;

	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
	    !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
		/* csum for fragmented packets is unsupported */
		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
				     int cpu, int *xdp_state)
{
	struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
	struct am65_cpsw_common *common = flow->common;
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct am65_cpsw_swdata *swdata;
	struct page *page, *new_page;
	dma_addr_t desc_dma, buf_dma;
	struct am65_cpsw_port *port;
	struct net_device *ndev;
	u32 flow_idx = flow->id;
	struct sk_buff *skb;
	struct xdp_buff	xdp;
	int headroom, ret;
	void *page_addr;
	u32 *psdata;

	*xdp_state = AM65_CPSW_XDP_PASS;
	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			dev_err(dev, "RX: pop chn fail %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) {
		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
		if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
			complete(&common->tdown_complete);
		return 0;
	}

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
		__func__, flow_idx, &desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	page = swdata->page;
	page_addr = page_address(page);
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
	port = am65_common_get_port(common, port_id);
	ndev = port->ndev;
	psdata = cppi5_hdesc_get_psdata(desc_rx);
	csum_info = psdata[2];
	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	skb = am65_cpsw_build_skb(page_addr, ndev,
				  AM65_CPSW_MAX_PACKET_SIZE);
	if (unlikely(!skb)) {
		new_page = page;
		goto requeue;
	}

	if (port->xdp_prog) {
		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
				 pkt_len, false);
		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
					       cpu, &pkt_len);
		if (*xdp_state != AM65_CPSW_XDP_PASS)
			goto allocate;

		/* Compute additional headroom to be reserved */
		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
		skb_reserve(skb, headroom);
	}

	ndev_priv = netdev_priv(ndev);
	am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
	skb_put(skb, pkt_len);
	if (port->rx_ts_enabled)
		am65_cpts_rx_timestamp(common->cpts, skb);
	skb_mark_for_recycle(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	am65_cpsw_nuss_rx_csum(skb, csum_info);
	napi_gro_receive(&flow->napi_rx, skb);

	dev_sw_netstats_rx_add(ndev, pkt_len);

allocate:
	new_page = page_pool_dev_alloc_pages(flow->page_pool);
	if (unlikely(!new_page)) {
		dev_err(dev, "page alloc failed\n");
		return -ENOMEM;
	}

	if (netif_dormant(ndev)) {
		am65_cpsw_put_page(flow, new_page, true);
		ndev->stats.rx_dropped++;
		return 0;
	}

requeue:
	ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
	if (WARN_ON(ret < 0)) {
		am65_cpsw_put_page(flow, new_page, true);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
{
	struct am65_cpsw_rx_flow *flow = container_of(timer,
						      struct am65_cpsw_rx_flow,
						      rx_hrtimer);

	enable_irq(flow->irq);
	return HRTIMER_NORESTART;
}

static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
	struct am65_cpsw_common *common = flow->common;
	int cpu = smp_processor_id();
	int xdp_state_or = 0;
	int cur_budget, ret;
	int xdp_state;
	int num_rx = 0;

	/* process only this flow */
	cur_budget = budget;
	while (cur_budget--) {
		ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
		xdp_state_or |= xdp_state;
		if (ret)
			break;
		num_rx++;
	}

	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
		xdp_do_flush();

	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (flow->irq_disabled) {
			flow->irq_disabled = false;
			if (unlikely(flow->rx_pace_timeout)) {
				hrtimer_start(&flow->rx_hrtimer,
					      ns_to_ktime(flow->rx_pace_timeout),
					      HRTIMER_MODE_REL_PINNED);
			} else {
				enable_irq(flow->irq);
			}
		}
	}

	return num_rx;
}

static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
				   dma_addr_t desc_dma)
{
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
					     desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);

	dev_sw_netstats_tx_add(skb->dev, 1, skb->len);

	return skb;
}

static struct xdp_frame *
am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
				   struct am65_cpsw_tx_chn *tx_chn,
				   dma_addr_t desc_dma,
				   struct net_device **ndev)
{
	struct cppi5_host_desc_t *desc_tx;
	struct am65_cpsw_port *port;
	struct xdp_frame *xdpf;
	u32 port_id = 0;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	xdpf = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	port = am65_common_get_port(common, port_id);
	dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
	*ndev = port->ndev;

	return xdpf;
}

static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
				   struct netdev_queue *netif_txq)
{
	if (netif_tx_queue_stopped(netif_txq)) {
		/* Check whether the queue is stopped due to stalled
		 * tx dma, if the queue is stopped then wake the queue
		 * as we have free desc for tx
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);

		__netif_tx_unlock(netif_txq);
	}
}

static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
					   int chn, unsigned int budget, bool *tdown)
{
	enum am65_cpsw_tx_buf_type buf_type;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		spin_lock(&tx_chn->lock);
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		spin_unlock(&tx_chn->lock);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			*tdown = true;
			break;
		}

		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
			ndev = skb->dev;
			total_bytes = skb->len;
			napi_consume_skb(skb, budget);
		} else {
			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
								  desc_dma, &ndev);
			total_bytes = xdpf->len;
			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(xdpf);
			else
				xdp_return_frame(xdpf);
		}
		num_tx++;

		netif_txq = netdev_get_tx_queue(ndev, chn);

		netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
	}

	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
					      int chn, unsigned int budget, bool *tdown)
{
	enum am65_cpsw_tx_buf_type buf_type;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			*tdown = true;
			break;
		}

		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
			ndev = skb->dev;
			total_bytes += skb->len;
			napi_consume_skb(skb, budget);
		} else {
			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
								  desc_dma, &ndev);
			total_bytes += xdpf->len;
			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(xdpf);
			else
				xdp_return_frame(xdpf);
		}
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);

	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
	struct am65_cpsw_tx_chn *tx_chns =
			container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
	bool tdown = false;
	int num_tx;

	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
							    budget, &tdown);
	else
		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
							 tx_chn->id, budget, &tdown);

	if (num_tx >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx)) {
		if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return 0;
}

static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_rx_flow *flow = dev_id;

	flow->irq_disabled = true;
	disable_irq_nosync(irq);
	napi_schedule(&flow->napi_rx);

	return IRQ_HANDLED;
}

static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
						 struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	dma_addr_t desc_dma, buf_dma;
	int ret, q_idx, i;
	void **swdata;
	u32 *psdata;
	u32 pkt_len;

	/* padding enabled in hw */
	pkt_len = skb_headlen(skb);

	/* SKB TX timestamp */
	if (port->tx_ts_enabled)
		am65_cpts_prep_tx_timestamp(common->cpts, skb);

	q_idx = skb_get_queue_mapping(skb);
	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);

	tx_chn = &common->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
		dev_err(dev, "Failed to map tx skb buffer\n");
		ndev->stats.tx_errors++;
		goto err_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		dev_dbg(dev, "Failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
				 DMA_TO_DEVICE);
		goto busy_stop_q;
	}

	am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
				    AM65_CPSW_TX_BUF_TYPE_SKB);

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
	cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);

	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*(swdata) = skb;
	psdata = cppi5_hdesc_get_psdata(first_desc);

	/* HW csum offload if enabled */
	psdata[2] = 0;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int cs_start, cs_offset;

		cs_start = skb_transport_offset(skb);
		cs_offset = cs_start + skb->csum_offset;
		/* HW numerates bytes starting from 1 */
		psdata[2] = ((cs_offset + 1) << 24) |
			    ((cs_start + 1) << 16) | (skb->len - cs_start);
		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
	}

	if (!skb_is_nonlinear(skb))
		goto done_tx;

	dev_dbg(dev, "fragmented SKB\n");

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			dev_err(dev, "Failed to allocate descriptor\n");
			goto busy_free_descs;
		}

		am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
					    AM65_CPSW_TX_BUF_TYPE_SKB);

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
			dev_err(dev, "Failed to map tx skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ndev->stats.tx_errors++;
			goto err_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON(pkt_len != skb->len);

done_tx:
	skb_tx_timestamp(skb);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		dev_err(dev, "can't push desc %d\n", ret);
		/* inform bql */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		goto err_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();
		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);

		/* re-check for smp */
		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS) {
			netif_tx_wake_queue(netif_txq);
			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
		}
	}

	return NETDEV_TX_OK;

err_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

busy_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
						    void *addr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct sockaddr *sockaddr = (struct sockaddr *)addr;
	int ret;

	ret = eth_prepare_mac_addr_change(ndev, addr);
	if (ret < 0)
		return ret;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, 0, 0);
	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
			   HOST_PORT_NUM, ALE_SECURE, 0);

	am65_cpsw_port_set_sl_mac(port, addr);
	eth_commit_mac_addr_change(ndev, sockaddr);

	pm_runtime_put(common->dev);

	return 0;
}

static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->rx_ts_enabled = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		port->rx_ts_enabled = true;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		return -EOPNOTSUPP;
	default:
		return -ERANGE;
	}

	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);

	/* cfg TX timestamp */
	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;

	ts_vlan_ltype = ETH_P_8021Q;

	ts_ctrl_ltype2 = ETH_P_1588 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;

	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;

	if (port->tx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;

	if (port->rx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;

	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
writel(ts_vlan_ltype, port->port_base + 1910 AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG); 1911 writel(ts_ctrl_ltype2, port->port_base + 1912 AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2); 1913 writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL); 1914 1915 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 1916 } 1917 1918 static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev, 1919 struct ifreq *ifr) 1920 { 1921 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1922 struct hwtstamp_config cfg; 1923 1924 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) 1925 return -EOPNOTSUPP; 1926 1927 cfg.flags = 0; 1928 cfg.tx_type = port->tx_ts_enabled ? 1929 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 1930 cfg.rx_filter = port->rx_ts_enabled ? 1931 HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE; 1932 1933 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 1934 } 1935 1936 static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, 1937 struct ifreq *req, int cmd) 1938 { 1939 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1940 1941 if (!netif_running(ndev)) 1942 return -EINVAL; 1943 1944 switch (cmd) { 1945 case SIOCSHWTSTAMP: 1946 return am65_cpsw_nuss_hwtstamp_set(ndev, req); 1947 case SIOCGHWTSTAMP: 1948 return am65_cpsw_nuss_hwtstamp_get(ndev, req); 1949 } 1950 1951 return phylink_mii_ioctl(port->slave.phylink, req, cmd); 1952 } 1953 1954 static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev, 1955 struct rtnl_link_stats64 *stats) 1956 { 1957 dev_fetch_sw_netstats(stats, dev->tstats); 1958 1959 stats->rx_errors = dev->stats.rx_errors; 1960 stats->rx_dropped = dev->stats.rx_dropped; 1961 stats->tx_dropped = dev->stats.tx_dropped; 1962 } 1963 1964 static int am65_cpsw_xdp_prog_setup(struct net_device *ndev, 1965 struct bpf_prog *prog) 1966 { 1967 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1968 bool running = netif_running(ndev); 1969 struct bpf_prog *old_prog; 1970 1971 if (running) 1972 am65_cpsw_nuss_ndo_slave_stop(ndev); 1973 1974 old_prog = xchg(&port->xdp_prog, prog); 1975 if (old_prog) 1976 bpf_prog_put(old_prog); 1977 1978 if (running) 1979 return am65_cpsw_nuss_ndo_slave_open(ndev); 1980 1981 return 0; 1982 } 1983 1984 static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) 1985 { 1986 switch (bpf->command) { 1987 case XDP_SETUP_PROG: 1988 return am65_cpsw_xdp_prog_setup(ndev, bpf->prog); 1989 default: 1990 return -EINVAL; 1991 } 1992 } 1993 1994 static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, 1995 struct xdp_frame **frames, u32 flags) 1996 { 1997 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 1998 struct am65_cpsw_tx_chn *tx_chn; 1999 struct netdev_queue *netif_txq; 2000 int cpu = smp_processor_id(); 2001 int i, nxmit = 0; 2002 2003 tx_chn = &common->tx_chns[cpu % common->tx_ch_num]; 2004 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); 2005 2006 __netif_tx_lock(netif_txq, cpu); 2007 for (i = 0; i < n; i++) { 2008 if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i], 2009 AM65_CPSW_TX_BUF_TYPE_XDP_NDO)) 2010 break; 2011 nxmit++; 2012 } 2013 __netif_tx_unlock(netif_txq); 2014 2015 return nxmit; 2016 } 2017 2018 static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { 2019 .ndo_open = am65_cpsw_nuss_ndo_slave_open, 2020 .ndo_stop = am65_cpsw_nuss_ndo_slave_stop, 2021 .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit, 2022 .ndo_set_rx_mode = am65_cpsw_nuss_ndo_slave_set_rx_mode, 2023 .ndo_get_stats64 = am65_cpsw_nuss_ndo_get_stats, 2024 .ndo_validate_addr = 
eth_validate_addr, 2025 .ndo_set_mac_address = am65_cpsw_nuss_ndo_slave_set_mac_address, 2026 .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout, 2027 .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid, 2028 .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid, 2029 .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, 2030 .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, 2031 .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate, 2032 .ndo_bpf = am65_cpsw_ndo_bpf, 2033 .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit, 2034 }; 2035 2036 static void am65_cpsw_disable_phy(struct phy *phy) 2037 { 2038 phy_power_off(phy); 2039 phy_exit(phy); 2040 } 2041 2042 static int am65_cpsw_enable_phy(struct phy *phy) 2043 { 2044 int ret; 2045 2046 ret = phy_init(phy); 2047 if (ret < 0) 2048 return ret; 2049 2050 ret = phy_power_on(phy); 2051 if (ret < 0) { 2052 phy_exit(phy); 2053 return ret; 2054 } 2055 2056 return 0; 2057 } 2058 2059 static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common) 2060 { 2061 struct am65_cpsw_port *port; 2062 struct phy *phy; 2063 int i; 2064 2065 for (i = 0; i < common->port_num; i++) { 2066 port = &common->ports[i]; 2067 phy = port->slave.serdes_phy; 2068 if (phy) 2069 am65_cpsw_disable_phy(phy); 2070 } 2071 } 2072 2073 static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np, 2074 struct am65_cpsw_port *port) 2075 { 2076 const char *name = "serdes"; 2077 struct phy *phy; 2078 int ret; 2079 2080 phy = devm_of_phy_optional_get(dev, port_np, name); 2081 if (IS_ERR_OR_NULL(phy)) 2082 return PTR_ERR_OR_ZERO(phy); 2083 2084 /* Serdes PHY exists. Store it. */ 2085 port->slave.serdes_phy = phy; 2086 2087 ret = am65_cpsw_enable_phy(phy); 2088 if (ret < 0) 2089 goto err_phy; 2090 2091 return 0; 2092 2093 err_phy: 2094 devm_phy_put(dev, phy); 2095 return ret; 2096 } 2097 2098 static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode, 2099 const struct phylink_link_state *state) 2100 { 2101 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data, 2102 phylink_config); 2103 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); 2104 struct am65_cpsw_common *common = port->common; 2105 2106 if (common->pdata.extra_modes & BIT(state->interface)) { 2107 if (state->interface == PHY_INTERFACE_MODE_SGMII) { 2108 writel(ADVERTISE_SGMII, 2109 port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG); 2110 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN); 2111 } else { 2112 cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN); 2113 } 2114 2115 if (state->interface == PHY_INTERFACE_MODE_USXGMII) { 2116 cpsw_sl_ctl_set(port->slave.mac_sl, 2117 CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN); 2118 } else { 2119 cpsw_sl_ctl_clr(port->slave.mac_sl, 2120 CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN); 2121 } 2122 2123 writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE, 2124 port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG); 2125 } 2126 } 2127 2128 static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode, 2129 phy_interface_t interface) 2130 { 2131 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data, 2132 phylink_config); 2133 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); 2134 struct am65_cpsw_common *common = port->common; 2135 struct net_device *ndev = port->ndev; 2136 u32 mac_control; 2137 int tmo; 2138 2139 /* disable forwarding */ 2140 cpsw_ale_control_set(common->ale, port->port_id, 
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 2141 2142 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE); 2143 2144 tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100); 2145 dev_dbg(common->dev, "down msc_sl %08x tmo %d\n", 2146 cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo); 2147 2148 /* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */ 2149 mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A | 2150 CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN; 2151 /* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */ 2152 if (phy_interface_mode_is_rgmii(interface)) 2153 mac_control |= CPSW_SL_CTL_EXT_EN; 2154 /* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */ 2155 cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control); 2156 2157 am65_cpsw_qos_link_down(ndev); 2158 netif_tx_stop_all_queues(ndev); 2159 } 2160 2161 static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy, 2162 unsigned int mode, phy_interface_t interface, int speed, 2163 int duplex, bool tx_pause, bool rx_pause) 2164 { 2165 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data, 2166 phylink_config); 2167 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); 2168 struct am65_cpsw_common *common = port->common; 2169 u32 mac_control = CPSW_SL_CTL_GMII_EN; 2170 struct net_device *ndev = port->ndev; 2171 2172 /* Bring the port out of idle state */ 2173 cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE); 2174 2175 if (speed == SPEED_1000) 2176 mac_control |= CPSW_SL_CTL_GIG; 2177 /* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */ 2178 if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface)) 2179 /* Can be used with in band mode only */ 2180 mac_control |= CPSW_SL_CTL_EXT_EN; 2181 if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII) 2182 mac_control |= CPSW_SL_CTL_IFCTL_A; 2183 if (duplex) 2184 mac_control |= CPSW_SL_CTL_FULLDUPLEX; 2185 2186 /* rx_pause/tx_pause */ 2187 if (rx_pause) 2188 mac_control |= CPSW_SL_CTL_TX_FLOW_EN; 2189 2190 if (tx_pause) 2191 mac_control |= CPSW_SL_CTL_RX_FLOW_EN; 2192 2193 cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); 2194 2195 /* enable forwarding */ 2196 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 2197 2198 am65_cpsw_qos_link_up(ndev, speed); 2199 netif_tx_wake_all_queues(ndev); 2200 } 2201 2202 static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = { 2203 .mac_config = am65_cpsw_nuss_mac_config, 2204 .mac_link_down = am65_cpsw_nuss_mac_link_down, 2205 .mac_link_up = am65_cpsw_nuss_mac_link_up, 2206 }; 2207 2208 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port) 2209 { 2210 struct am65_cpsw_common *common = port->common; 2211 2212 if (!port->disabled) 2213 return; 2214 2215 cpsw_ale_control_set(common->ale, port->port_id, 2216 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 2217 2218 cpsw_sl_reset(port->slave.mac_sl, 100); 2219 cpsw_sl_ctl_reset(port->slave.mac_sl); 2220 } 2221 2222 static void am65_cpsw_nuss_free_tx_chns(void *data) 2223 { 2224 struct am65_cpsw_common *common = data; 2225 int i; 2226 2227 for (i = 0; i < common->tx_ch_num; i++) { 2228 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2229 2230 if (!IS_ERR_OR_NULL(tx_chn->desc_pool)) 2231 k3_cppi_desc_pool_destroy(tx_chn->desc_pool); 2232 2233 if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) 2234 
				k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	int i;

	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);

	common->tx_ch_rate_msk = 0;
	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (tx_chn->irq)
			devm_free_irq(dev, tx_chn->irq, tx_chn);

		netif_napi_del(&tx_chn->napi_tx);
	}

	am65_cpsw_nuss_free_tx_chns(common);
}

static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	int i, ret = 0;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
				  am65_cpsw_nuss_tx_poll);
		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
		tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;

		ret = devm_request_irq(dev, tx_chn->irq,
				       am65_cpsw_nuss_tx_irq,
				       IRQF_TRIGGER_HIGH,
				       tx_chn->tx_chn_name, tx_chn);
		if (ret) {
			dev_err(dev, "failure requesting tx%u irq %u, %d\n",
				tx_chn->id, tx_chn->irq, ret);
			goto err;
		}
	}

err:
	return ret;
}

static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
{
	u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
	struct device *dev = common->dev;
	struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0
	};
	u32 hdesc_size, hdesc_size_out;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;
	tx_cfg.tx_cfg.size = max_desc_num;
	tx_cfg.txcq_cfg.size = max_desc_num;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "tx%d", i);

		spin_lock_init(&tx_chn->lock);
		tx_chn->common = common;
		tx_chn->id = i;
		tx_chn->descs_num = max_desc_num;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev,
						    tx_chn->tx_chn_name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
					    "Failed to request tx dma channel\n");
			goto err;
		}
		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);

		tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
								  tx_chn->descs_num,
								  hdesc_size,
								  tx_chn->tx_chn_name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			dev_err(dev, "Failed to create pool %d\n", ret);
			goto err;
		}

		hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
		tx_chn->dsize_log2 = __fls(hdesc_size_out);
		WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq < 0) {
			dev_err(dev, "Failed to get tx dma irq %d\n",
				tx_chn->irq);
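			/* treat the negative value as an errno and propagate it */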
			ret = tx_chn->irq;
			goto err;
		}

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
	if (ret) {
		dev_err(dev, "Failed to add tx NAPI %d\n", ret);
		goto err;
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
		return i;
	}

	return ret;
}

static void am65_cpsw_nuss_free_rx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	struct am65_cpsw_rx_chn *rx_chn;

	rx_chn = &common->rx_chns;

	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}

static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct am65_cpsw_rx_chn *rx_chn;
	struct am65_cpsw_rx_flow *flows;
	int i;

	rx_chn = &common->rx_chns;
	flows = rx_chn->flows;
	devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);

	for (i = 0; i < common->rx_ch_num_flows; i++) {
		if (!(flows[i].irq < 0))
			devm_free_irq(dev, flows[i].irq, &flows[i]);
		netif_napi_del(&flows[i].napi_rx);
	}

	am65_cpsw_nuss_free_rx_chns(common);

	common->rx_flow_id_base = -1;
}

static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
	u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
	struct device *dev = common->dev;
	struct am65_cpsw_rx_flow *flow;
	u32 hdesc_size, hdesc_size_out;
	u32 fdqring_id;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = common->rx_ch_num_flows;
	rx_cfg.flow_id_base = common->rx_flow_id_base;

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;

	for (i = 0; i < common->rx_ch_num_flows; i++) {
		flow = &rx_chn->flows[i];
		flow->page_pool = NULL;
	}

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
				    "Failed to request rx dma channel\n");
		goto err;
	}
	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);

	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size, "rx");
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		dev_err(dev, "Failed to create rx pool %d\n", ret);
		goto err;
	}

	hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
	rx_chn->dsize_log2 = __fls(hdesc_size_out);
	WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));

	common->rx_flow_id_base =
		k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size =
K3_RINGACC_RING_ELSIZE_8, 2467 .mode = K3_RINGACC_RING_MODE_RING, 2468 .flags = 0, 2469 }; 2470 struct k3_ring_cfg fdqring_cfg = { 2471 .elm_size = K3_RINGACC_RING_ELSIZE_8, 2472 .flags = K3_RINGACC_RING_SHARED, 2473 }; 2474 struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = { 2475 .rx_cfg = rxring_cfg, 2476 .rxfdq_cfg = fdqring_cfg, 2477 .ring_rxq_id = K3_RINGACC_RING_ID_ANY, 2478 .src_tag_lo_sel = 2479 K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG, 2480 }; 2481 2482 flow = &rx_chn->flows[i]; 2483 flow->id = i; 2484 flow->common = common; 2485 flow->irq = -EINVAL; 2486 2487 rx_flow_cfg.ring_rxfdq0_id = fdqring_id; 2488 rx_flow_cfg.rx_cfg.size = max_desc_num; 2489 /* share same FDQ for all flows */ 2490 rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num; 2491 rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode; 2492 2493 ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn, 2494 i, &rx_flow_cfg); 2495 if (ret) { 2496 dev_err(dev, "Failed to init rx flow%d %d\n", i, ret); 2497 goto err; 2498 } 2499 if (!i) 2500 fdqring_id = 2501 k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn, 2502 i); 2503 2504 flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); 2505 if (flow->irq <= 0) { 2506 dev_err(dev, "Failed to get rx dma irq %d\n", 2507 flow->irq); 2508 ret = flow->irq; 2509 goto err; 2510 } 2511 2512 snprintf(flow->name, 2513 sizeof(flow->name), "%s-rx%d", 2514 dev_name(dev), i); 2515 netif_napi_add(common->dma_ndev, &flow->napi_rx, 2516 am65_cpsw_nuss_rx_poll); 2517 hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC, 2518 HRTIMER_MODE_REL_PINNED); 2519 flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback; 2520 2521 ret = devm_request_irq(dev, flow->irq, 2522 am65_cpsw_nuss_rx_irq, 2523 IRQF_TRIGGER_HIGH, 2524 flow->name, flow); 2525 if (ret) { 2526 dev_err(dev, "failure requesting rx %d irq %u, %d\n", 2527 i, flow->irq, ret); 2528 flow->irq = -EINVAL; 2529 goto err; 2530 } 2531 } 2532 2533 /* setup classifier to route priorities to flows */ 2534 cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows); 2535 2536 err: 2537 i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common); 2538 if (i) { 2539 dev_err(dev, "Failed to add free_rx_chns action %d\n", i); 2540 return i; 2541 } 2542 2543 return ret; 2544 } 2545 2546 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common) 2547 { 2548 struct am65_cpsw_host *host_p = am65_common_get_host(common); 2549 2550 host_p->common = common; 2551 host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE; 2552 host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE; 2553 2554 return 0; 2555 } 2556 2557 static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node, 2558 int slave, u8 *mac_addr) 2559 { 2560 u32 mac_lo, mac_hi, offset; 2561 struct regmap *syscon; 2562 int ret; 2563 2564 syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse"); 2565 if (IS_ERR(syscon)) { 2566 if (PTR_ERR(syscon) == -ENODEV) 2567 return 0; 2568 return PTR_ERR(syscon); 2569 } 2570 2571 ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1, 2572 &offset); 2573 if (ret) 2574 return ret; 2575 2576 regmap_read(syscon, offset, &mac_lo); 2577 regmap_read(syscon, offset + 4, &mac_hi); 2578 2579 mac_addr[0] = (mac_hi >> 8) & 0xff; 2580 mac_addr[1] = mac_hi & 0xff; 2581 mac_addr[2] = (mac_lo >> 24) & 0xff; 2582 mac_addr[3] = (mac_lo >> 16) & 0xff; 2583 mac_addr[4] = (mac_lo >> 8) & 0xff; 2584 mac_addr[5] = mac_lo & 0xff; 2585 2586 return 0; 2587 } 2588 2589 static int am65_cpsw_init_cpts(struct 
am65_cpsw_common *common) 2590 { 2591 struct device *dev = common->dev; 2592 struct device_node *node; 2593 struct am65_cpts *cpts; 2594 void __iomem *reg_base; 2595 2596 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) 2597 return 0; 2598 2599 node = of_get_child_by_name(dev->of_node, "cpts"); 2600 if (!node) { 2601 dev_err(dev, "%s cpts not found\n", __func__); 2602 return -ENOENT; 2603 } 2604 2605 reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE; 2606 cpts = am65_cpts_create(dev, reg_base, node); 2607 if (IS_ERR(cpts)) { 2608 int ret = PTR_ERR(cpts); 2609 2610 of_node_put(node); 2611 dev_err(dev, "cpts create err %d\n", ret); 2612 return ret; 2613 } 2614 common->cpts = cpts; 2615 /* Forbid PM runtime if CPTS is running. 2616 * K3 CPSWxG modules may completely lose context during ON->OFF 2617 * transitions depending on integration. 2618 * AM65x/J721E MCU CPSW2G: false 2619 * J721E MAIN_CPSW9G: true 2620 */ 2621 pm_runtime_forbid(dev); 2622 2623 return 0; 2624 } 2625 2626 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) 2627 { 2628 struct device_node *node, *port_np; 2629 struct device *dev = common->dev; 2630 int ret; 2631 2632 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 2633 if (!node) 2634 return -ENOENT; 2635 2636 for_each_child_of_node(node, port_np) { 2637 struct am65_cpsw_port *port; 2638 u32 port_id; 2639 2640 /* it is not a slave port node, continue */ 2641 if (strcmp(port_np->name, "port")) 2642 continue; 2643 2644 ret = of_property_read_u32(port_np, "reg", &port_id); 2645 if (ret < 0) { 2646 dev_err(dev, "%pOF error reading port_id %d\n", 2647 port_np, ret); 2648 goto of_node_put; 2649 } 2650 2651 if (!port_id || port_id > common->port_num) { 2652 dev_err(dev, "%pOF has invalid port_id %u %s\n", 2653 port_np, port_id, port_np->name); 2654 ret = -EINVAL; 2655 goto of_node_put; 2656 } 2657 2658 port = am65_common_get_port(common, port_id); 2659 port->port_id = port_id; 2660 port->common = common; 2661 port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE + 2662 AM65_CPSW_NU_PORTS_OFFSET * (port_id); 2663 if (common->pdata.extra_modes) 2664 port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id); 2665 port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE + 2666 (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id); 2667 port->name = of_get_property(port_np, "label", NULL); 2668 port->fetch_ram_base = 2669 common->cpsw_base + AM65_CPSW_NU_FRAM_BASE + 2670 (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); 2671 2672 port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); 2673 if (IS_ERR(port->slave.mac_sl)) { 2674 ret = PTR_ERR(port->slave.mac_sl); 2675 goto of_node_put; 2676 } 2677 2678 port->disabled = !of_device_is_available(port_np); 2679 if (port->disabled) { 2680 common->disabled_ports_mask |= BIT(port->port_id); 2681 continue; 2682 } 2683 2684 port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL); 2685 if (IS_ERR(port->slave.ifphy)) { 2686 ret = PTR_ERR(port->slave.ifphy); 2687 dev_err(dev, "%pOF error retrieving port phy: %d\n", 2688 port_np, ret); 2689 goto of_node_put; 2690 } 2691 2692 /* Initialize the Serdes PHY for the port */ 2693 ret = am65_cpsw_init_serdes_phy(dev, port_np, port); 2694 if (ret) 2695 goto of_node_put; 2696 2697 port->slave.mac_only = 2698 of_property_read_bool(port_np, "ti,mac-only"); 2699 2700 /* get phy/link info */ 2701 port->slave.port_np = port_np; 2702 ret = of_get_phy_mode(port_np, &port->slave.phy_if); 2703 if (ret) { 2704 dev_err(dev, "%pOF read phy-mode err %d\n", 2705 
port_np, ret); 2706 goto of_node_put; 2707 } 2708 2709 ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if); 2710 if (ret) 2711 goto of_node_put; 2712 2713 ret = of_get_mac_address(port_np, port->slave.mac_addr); 2714 if (ret) { 2715 am65_cpsw_am654_get_efuse_macid(port_np, 2716 port->port_id, 2717 port->slave.mac_addr); 2718 if (!is_valid_ether_addr(port->slave.mac_addr)) { 2719 eth_random_addr(port->slave.mac_addr); 2720 dev_err(dev, "Use random MAC address\n"); 2721 } 2722 } 2723 2724 /* Reset all Queue priorities to 0 */ 2725 writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP); 2726 } 2727 of_node_put(node); 2728 2729 /* is there at least one ext.port */ 2730 if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) { 2731 dev_err(dev, "No Ext. port are available\n"); 2732 return -ENODEV; 2733 } 2734 2735 return 0; 2736 2737 of_node_put: 2738 of_node_put(port_np); 2739 of_node_put(node); 2740 return ret; 2741 } 2742 2743 static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common) 2744 { 2745 struct am65_cpsw_port *port; 2746 int i; 2747 2748 for (i = 0; i < common->port_num; i++) { 2749 port = &common->ports[i]; 2750 if (port->slave.phylink) 2751 phylink_destroy(port->slave.phylink); 2752 } 2753 } 2754 2755 static int 2756 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) 2757 { 2758 struct am65_cpsw_ndev_priv *ndev_priv; 2759 struct device *dev = common->dev; 2760 struct am65_cpsw_port *port; 2761 struct phylink *phylink; 2762 2763 port = &common->ports[port_idx]; 2764 2765 if (port->disabled) 2766 return 0; 2767 2768 /* alloc netdev */ 2769 port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv), 2770 AM65_CPSW_MAX_QUEUES, 2771 AM65_CPSW_MAX_QUEUES); 2772 if (!port->ndev) { 2773 dev_err(dev, "error allocating slave net_device %u\n", 2774 port->port_id); 2775 return -ENOMEM; 2776 } 2777 2778 ndev_priv = netdev_priv(port->ndev); 2779 ndev_priv->port = port; 2780 ndev_priv->msg_enable = AM65_CPSW_DEBUG; 2781 mutex_init(&ndev_priv->mm_lock); 2782 port->qos.link_speed = SPEED_UNKNOWN; 2783 SET_NETDEV_DEV(port->ndev, dev); 2784 port->ndev->dev.of_node = port->slave.port_np; 2785 2786 eth_hw_addr_set(port->ndev, port->slave.mac_addr); 2787 2788 port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE; 2789 port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE - 2790 (VLAN_ETH_HLEN + ETH_FCS_LEN); 2791 port->ndev->hw_features = NETIF_F_SG | 2792 NETIF_F_RXCSUM | 2793 NETIF_F_HW_CSUM | 2794 NETIF_F_HW_TC; 2795 port->ndev->features = port->ndev->hw_features | 2796 NETIF_F_HW_VLAN_CTAG_FILTER; 2797 port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC | 2798 NETDEV_XDP_ACT_REDIRECT | 2799 NETDEV_XDP_ACT_NDO_XMIT; 2800 port->ndev->vlan_features |= NETIF_F_SG; 2801 port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops; 2802 port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; 2803 2804 /* Configuring Phylink */ 2805 port->slave.phylink_config.dev = &port->ndev->dev; 2806 port->slave.phylink_config.type = PHYLINK_NETDEV; 2807 port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | 2808 MAC_1000FD | MAC_5000FD; 2809 port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */ 2810 2811 switch (port->slave.phy_if) { 2812 case PHY_INTERFACE_MODE_RGMII: 2813 case PHY_INTERFACE_MODE_RGMII_ID: 2814 case PHY_INTERFACE_MODE_RGMII_RXID: 2815 case PHY_INTERFACE_MODE_RGMII_TXID: 2816 phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces); 2817 break; 2818 2819 case 
PHY_INTERFACE_MODE_RMII: 2820 __set_bit(PHY_INTERFACE_MODE_RMII, 2821 port->slave.phylink_config.supported_interfaces); 2822 break; 2823 2824 case PHY_INTERFACE_MODE_QSGMII: 2825 case PHY_INTERFACE_MODE_SGMII: 2826 case PHY_INTERFACE_MODE_USXGMII: 2827 if (common->pdata.extra_modes & BIT(port->slave.phy_if)) { 2828 __set_bit(port->slave.phy_if, 2829 port->slave.phylink_config.supported_interfaces); 2830 } else { 2831 dev_err(dev, "selected phy-mode is not supported\n"); 2832 return -EOPNOTSUPP; 2833 } 2834 break; 2835 2836 default: 2837 dev_err(dev, "selected phy-mode is not supported\n"); 2838 return -EOPNOTSUPP; 2839 } 2840 2841 phylink = phylink_create(&port->slave.phylink_config, 2842 of_fwnode_handle(port->slave.port_np), 2843 port->slave.phy_if, 2844 &am65_cpsw_phylink_mac_ops); 2845 if (IS_ERR(phylink)) 2846 return PTR_ERR(phylink); 2847 2848 port->slave.phylink = phylink; 2849 2850 /* Disable TX checksum offload by default due to HW bug */ 2851 if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) 2852 port->ndev->features &= ~NETIF_F_HW_CSUM; 2853 2854 port->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 2855 port->xdp_prog = NULL; 2856 2857 if (!common->dma_ndev) 2858 common->dma_ndev = port->ndev; 2859 2860 return 0; 2861 } 2862 2863 static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common) 2864 { 2865 int ret; 2866 int i; 2867 2868 for (i = 0; i < common->port_num; i++) { 2869 ret = am65_cpsw_nuss_init_port_ndev(common, i); 2870 if (ret) 2871 return ret; 2872 } 2873 2874 return ret; 2875 } 2876 2877 static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) 2878 { 2879 struct am65_cpsw_port *port; 2880 int i; 2881 2882 for (i = 0; i < common->port_num; i++) { 2883 port = &common->ports[i]; 2884 if (!port->ndev) 2885 continue; 2886 if (port->ndev->reg_state == NETREG_REGISTERED) 2887 unregister_netdev(port->ndev); 2888 free_netdev(port->ndev); 2889 port->ndev = NULL; 2890 } 2891 } 2892 2893 static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common) 2894 { 2895 int set_val = 0; 2896 int i; 2897 2898 if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask)) 2899 set_val = 1; 2900 2901 dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val); 2902 2903 for (i = 1; i <= common->port_num; i++) { 2904 struct am65_cpsw_port *port = am65_common_get_port(common, i); 2905 struct am65_cpsw_ndev_priv *priv; 2906 2907 if (!port->ndev) 2908 continue; 2909 2910 priv = am65_ndev_to_priv(port->ndev); 2911 priv->offload_fwd_mark = set_val; 2912 } 2913 } 2914 2915 bool am65_cpsw_port_dev_check(const struct net_device *ndev) 2916 { 2917 if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) { 2918 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2919 2920 return !common->is_emac_mode; 2921 } 2922 2923 return false; 2924 } 2925 2926 static int am65_cpsw_netdevice_port_link(struct net_device *ndev, 2927 struct net_device *br_ndev, 2928 struct netlink_ext_ack *extack) 2929 { 2930 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2931 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2932 int err; 2933 2934 if (!common->br_members) { 2935 common->hw_bridge_dev = br_ndev; 2936 } else { 2937 /* This is adding the port to a second bridge, this is 2938 * unsupported 2939 */ 2940 if (common->hw_bridge_dev != br_ndev) 2941 return -EOPNOTSUPP; 2942 } 2943 2944 err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, 2945 false, extack); 2946 if (err) 2947 return err; 2948 2949 
common->br_members |= BIT(priv->port->port_id); 2950 2951 am65_cpsw_port_offload_fwd_mark_update(common); 2952 2953 return NOTIFY_DONE; 2954 } 2955 2956 static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev) 2957 { 2958 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2959 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2960 2961 switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); 2962 2963 common->br_members &= ~BIT(priv->port->port_id); 2964 2965 am65_cpsw_port_offload_fwd_mark_update(common); 2966 2967 if (!common->br_members) 2968 common->hw_bridge_dev = NULL; 2969 } 2970 2971 /* netdev notifier */ 2972 static int am65_cpsw_netdevice_event(struct notifier_block *unused, 2973 unsigned long event, void *ptr) 2974 { 2975 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); 2976 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2977 struct netdev_notifier_changeupper_info *info; 2978 int ret = NOTIFY_DONE; 2979 2980 if (!am65_cpsw_port_dev_check(ndev)) 2981 return NOTIFY_DONE; 2982 2983 switch (event) { 2984 case NETDEV_CHANGEUPPER: 2985 info = ptr; 2986 2987 if (netif_is_bridge_master(info->upper_dev)) { 2988 if (info->linking) 2989 ret = am65_cpsw_netdevice_port_link(ndev, 2990 info->upper_dev, 2991 extack); 2992 else 2993 am65_cpsw_netdevice_port_unlink(ndev); 2994 } 2995 break; 2996 default: 2997 return NOTIFY_DONE; 2998 } 2999 3000 return notifier_from_errno(ret); 3001 } 3002 3003 static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw) 3004 { 3005 int ret = 0; 3006 3007 if (AM65_CPSW_IS_CPSW2G(cpsw) || 3008 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 3009 return 0; 3010 3011 cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event; 3012 ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 3013 if (ret) { 3014 dev_err(cpsw->dev, "can't register netdevice notifier\n"); 3015 return ret; 3016 } 3017 3018 ret = am65_cpsw_switchdev_register_notifiers(cpsw); 3019 if (ret) 3020 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 3021 3022 return ret; 3023 } 3024 3025 static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw) 3026 { 3027 if (AM65_CPSW_IS_CPSW2G(cpsw) || 3028 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 3029 return; 3030 3031 am65_cpsw_switchdev_unregister_notifiers(cpsw); 3032 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 3033 } 3034 3035 static const struct devlink_ops am65_cpsw_devlink_ops = {}; 3036 3037 static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw) 3038 { 3039 cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0, 3040 ALE_MCAST_BLOCK_LEARN_FWD); 3041 } 3042 3043 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common) 3044 { 3045 struct am65_cpsw_host *host = am65_common_get_host(common); 3046 3047 writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3048 3049 am65_cpsw_init_stp_ale_entry(common); 3050 3051 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1); 3052 dev_dbg(common->dev, "Set P0_UNI_FLOOD\n"); 3053 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0); 3054 } 3055 3056 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common) 3057 { 3058 struct am65_cpsw_host *host = am65_common_get_host(common); 3059 3060 writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3061 3062 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); 3063 
dev_dbg(common->dev, "unset P0_UNI_FLOOD\n"); 3064 3065 /* learning make no sense in multi-mac mode */ 3066 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1); 3067 } 3068 3069 static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, 3070 struct devlink_param_gset_ctx *ctx) 3071 { 3072 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 3073 struct am65_cpsw_common *common = dl_priv->common; 3074 3075 dev_dbg(common->dev, "%s id:%u\n", __func__, id); 3076 3077 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 3078 return -EOPNOTSUPP; 3079 3080 ctx->val.vbool = !common->is_emac_mode; 3081 3082 return 0; 3083 } 3084 3085 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port) 3086 { 3087 struct am65_cpsw_slave_data *slave = &port->slave; 3088 struct am65_cpsw_common *common = port->common; 3089 u32 port_mask; 3090 3091 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3092 3093 if (slave->mac_only) 3094 /* enable mac-only mode on port */ 3095 cpsw_ale_control_set(common->ale, port->port_id, 3096 ALE_PORT_MACONLY, 1); 3097 3098 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1); 3099 3100 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3101 3102 cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr, 3103 HOST_PORT_NUM, ALE_SECURE, slave->port_vlan); 3104 cpsw_ale_add_mcast(common->ale, port->ndev->broadcast, 3105 port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2); 3106 } 3107 3108 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port) 3109 { 3110 struct am65_cpsw_slave_data *slave = &port->slave; 3111 struct am65_cpsw_common *cpsw = port->common; 3112 u32 port_mask; 3113 3114 cpsw_ale_control_set(cpsw->ale, port->port_id, 3115 ALE_PORT_NOLEARN, 0); 3116 3117 cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr, 3118 HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN, 3119 slave->port_vlan); 3120 3121 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3122 3123 cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast, 3124 port_mask, ALE_VLAN, slave->port_vlan, 3125 ALE_MCAST_FWD_2); 3126 3127 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3128 3129 cpsw_ale_control_set(cpsw->ale, port->port_id, 3130 ALE_PORT_MACONLY, 0); 3131 } 3132 3133 static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, 3134 struct devlink_param_gset_ctx *ctx, 3135 struct netlink_ext_ack *extack) 3136 { 3137 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 3138 struct am65_cpsw_common *cpsw = dl_priv->common; 3139 bool switch_en = ctx->val.vbool; 3140 bool if_running = false; 3141 int i; 3142 3143 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 3144 3145 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 3146 return -EOPNOTSUPP; 3147 3148 if (switch_en == !cpsw->is_emac_mode) 3149 return 0; 3150 3151 if (!switch_en && cpsw->br_members) { 3152 dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n"); 3153 return -EINVAL; 3154 } 3155 3156 rtnl_lock(); 3157 3158 cpsw->is_emac_mode = !switch_en; 3159 3160 for (i = 0; i < cpsw->port_num; i++) { 3161 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3162 3163 if (!sl_ndev || !netif_running(sl_ndev)) 3164 continue; 3165 3166 if_running = true; 3167 } 3168 3169 if (!if_running) { 3170 /* all ndevs are down */ 3171 for (i = 0; i < cpsw->port_num; i++) { 3172 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3173 struct am65_cpsw_slave_data *slave; 3174 3175 if (!sl_ndev) 3176 continue; 3177 3178 slave = 
am65_ndev_to_slave(sl_ndev);
			if (switch_en)
				slave->port_vlan = cpsw->default_vlan;
			else
				slave->port_vlan = 0;
		}

		goto exit;
	}

	cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
	/* clean up ALE table */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
	cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);

	if (switch_en) {
		dev_info(cpsw->dev, "Enable switch mode\n");

		am65_cpsw_init_host_port_switch(cpsw);

		for (i = 0; i < cpsw->port_num; i++) {
			struct net_device *sl_ndev = cpsw->ports[i].ndev;
			struct am65_cpsw_slave_data *slave;
			struct am65_cpsw_port *port;

			if (!sl_ndev)
				continue;

			port = am65_ndev_to_port(sl_ndev);
			slave = am65_ndev_to_slave(sl_ndev);
			slave->port_vlan = cpsw->default_vlan;

			if (netif_running(sl_ndev))
				am65_cpsw_init_port_switch_ale(port);
		}

	} else {
		dev_info(cpsw->dev, "Disable switch mode\n");

		am65_cpsw_init_host_port_emac(cpsw);

		for (i = 0; i < cpsw->port_num; i++) {
			struct net_device *sl_ndev = cpsw->ports[i].ndev;
			struct am65_cpsw_port *port;

			if (!sl_ndev)
				continue;

			port = am65_ndev_to_port(sl_ndev);
			port->slave.port_vlan = 0;
			if (netif_running(sl_ndev))
				am65_cpsw_init_port_emac_ale(port);
		}
	}
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
exit:
	rtnl_unlock();

	return 0;
}

static const struct devlink_param am65_cpsw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     am65_cpsw_dl_switch_mode_get,
			     am65_cpsw_dl_switch_mode_set, NULL),
};

static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
{
	struct devlink_port_attrs attrs = {};
	struct am65_cpsw_devlink *dl_priv;
	struct device *dev = common->dev;
	struct devlink_port *dl_port;
	struct am65_cpsw_port *port;
	int ret = 0;
	int i;

	common->devlink =
		devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
	if (!common->devlink)
		return -ENOMEM;

	dl_priv = devlink_priv(common->devlink);
	dl_priv->common = common;

	/* Provide devlink hook to switch mode when multiple external ports
	 * are present and NUSS switchdev driver is enabled.
3267 */ 3268 if (!AM65_CPSW_IS_CPSW2G(common) && 3269 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { 3270 ret = devlink_params_register(common->devlink, 3271 am65_cpsw_devlink_params, 3272 ARRAY_SIZE(am65_cpsw_devlink_params)); 3273 if (ret) { 3274 dev_err(dev, "devlink params reg fail ret:%d\n", ret); 3275 goto dl_unreg; 3276 } 3277 } 3278 3279 for (i = 1; i <= common->port_num; i++) { 3280 port = am65_common_get_port(common, i); 3281 dl_port = &port->devlink_port; 3282 3283 if (port->ndev) 3284 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; 3285 else 3286 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED; 3287 attrs.phys.port_number = port->port_id; 3288 attrs.switch_id.id_len = sizeof(resource_size_t); 3289 memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len); 3290 devlink_port_attrs_set(dl_port, &attrs); 3291 3292 ret = devlink_port_register(common->devlink, dl_port, port->port_id); 3293 if (ret) { 3294 dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n", 3295 port->port_id, ret); 3296 goto dl_port_unreg; 3297 } 3298 } 3299 devlink_register(common->devlink); 3300 return ret; 3301 3302 dl_port_unreg: 3303 for (i = i - 1; i >= 1; i--) { 3304 port = am65_common_get_port(common, i); 3305 dl_port = &port->devlink_port; 3306 3307 devlink_port_unregister(dl_port); 3308 } 3309 dl_unreg: 3310 devlink_free(common->devlink); 3311 return ret; 3312 } 3313 3314 static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) 3315 { 3316 struct devlink_port *dl_port; 3317 struct am65_cpsw_port *port; 3318 int i; 3319 3320 devlink_unregister(common->devlink); 3321 3322 for (i = 1; i <= common->port_num; i++) { 3323 port = am65_common_get_port(common, i); 3324 dl_port = &port->devlink_port; 3325 3326 devlink_port_unregister(dl_port); 3327 } 3328 3329 if (!AM65_CPSW_IS_CPSW2G(common) && 3330 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 3331 devlink_params_unregister(common->devlink, 3332 am65_cpsw_devlink_params, 3333 ARRAY_SIZE(am65_cpsw_devlink_params)); 3334 3335 devlink_free(common->devlink); 3336 } 3337 3338 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) 3339 { 3340 struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns; 3341 struct am65_cpsw_tx_chn *tx_chan = common->tx_chns; 3342 struct device *dev = common->dev; 3343 struct am65_cpsw_port *port; 3344 int ret = 0, i; 3345 3346 /* init tx channels */ 3347 ret = am65_cpsw_nuss_init_tx_chns(common); 3348 if (ret) 3349 return ret; 3350 ret = am65_cpsw_nuss_init_rx_chns(common); 3351 if (ret) 3352 return ret; 3353 3354 /* The DMA Channels are not guaranteed to be in a clean state. 3355 * Reset and disable them to ensure that they are back to the 3356 * clean state and ready to be used. 
3357 */ 3358 for (i = 0; i < common->tx_ch_num; i++) { 3359 k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i], 3360 am65_cpsw_nuss_tx_cleanup); 3361 k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn); 3362 } 3363 3364 for (i = 0; i < common->rx_ch_num_flows; i++) 3365 k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, 3366 rx_chan, 3367 am65_cpsw_nuss_rx_cleanup, !!i); 3368 3369 k3_udma_glue_disable_rx_chn(rx_chan->rx_chn); 3370 3371 ret = am65_cpsw_nuss_register_devlink(common); 3372 if (ret) 3373 return ret; 3374 3375 for (i = 0; i < common->port_num; i++) { 3376 port = &common->ports[i]; 3377 3378 if (!port->ndev) 3379 continue; 3380 3381 SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port); 3382 3383 ret = register_netdev(port->ndev); 3384 if (ret) { 3385 dev_err(dev, "error registering slave net device%i %d\n", 3386 i, ret); 3387 goto err_cleanup_ndev; 3388 } 3389 } 3390 3391 ret = am65_cpsw_register_notifiers(common); 3392 if (ret) 3393 goto err_cleanup_ndev; 3394 3395 /* can't auto unregister ndev using devm_add_action() due to 3396 * devres release sequence in DD core for DMA 3397 */ 3398 3399 return 0; 3400 3401 err_cleanup_ndev: 3402 am65_cpsw_nuss_cleanup_ndev(common); 3403 am65_cpsw_unregister_devlink(common); 3404 3405 return ret; 3406 } 3407 3408 int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common, 3409 int num_tx, int num_rx) 3410 { 3411 int ret; 3412 3413 am65_cpsw_nuss_remove_tx_chns(common); 3414 am65_cpsw_nuss_remove_rx_chns(common); 3415 3416 common->tx_ch_num = num_tx; 3417 common->rx_ch_num_flows = num_rx; 3418 ret = am65_cpsw_nuss_init_tx_chns(common); 3419 if (ret) 3420 return ret; 3421 3422 ret = am65_cpsw_nuss_init_rx_chns(common); 3423 3424 return ret; 3425 } 3426 3427 struct am65_cpsw_soc_pdata { 3428 u32 quirks_dis; 3429 }; 3430 3431 static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = { 3432 .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3433 }; 3434 3435 static const struct soc_device_attribute am65_cpsw_socinfo[] = { 3436 { .family = "AM65X", 3437 .revision = "SR2.0", 3438 .data = &am65x_soc_sr2_0 3439 }, 3440 {/* sentinel */} 3441 }; 3442 3443 static const struct am65_cpsw_pdata am65x_sr1_0 = { 3444 .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3445 .ale_dev_id = "am65x-cpsw2g", 3446 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3447 }; 3448 3449 static const struct am65_cpsw_pdata j721e_pdata = { 3450 .quirks = 0, 3451 .ale_dev_id = "am65x-cpsw2g", 3452 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3453 }; 3454 3455 static const struct am65_cpsw_pdata am64x_cpswxg_pdata = { 3456 .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ, 3457 .ale_dev_id = "am64-cpswxg", 3458 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3459 }; 3460 3461 static const struct am65_cpsw_pdata j7200_cpswxg_pdata = { 3462 .quirks = 0, 3463 .ale_dev_id = "am64-cpswxg", 3464 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3465 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | 3466 BIT(PHY_INTERFACE_MODE_USXGMII), 3467 }; 3468 3469 static const struct am65_cpsw_pdata j721e_cpswxg_pdata = { 3470 .quirks = 0, 3471 .ale_dev_id = "am64-cpswxg", 3472 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3473 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), 3474 }; 3475 3476 static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = { 3477 .quirks = 0, 3478 .ale_dev_id = "am64-cpswxg", 3479 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3480 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | 3481 
BIT(PHY_INTERFACE_MODE_USXGMII), 3482 }; 3483 3484 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { 3485 { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0}, 3486 { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata}, 3487 { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata}, 3488 { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata}, 3489 { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata}, 3490 { .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata}, 3491 { /* sentinel */ }, 3492 }; 3493 MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); 3494 3495 static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common) 3496 { 3497 const struct soc_device_attribute *soc; 3498 3499 soc = soc_device_match(am65_cpsw_socinfo); 3500 if (soc && soc->data) { 3501 const struct am65_cpsw_soc_pdata *socdata = soc->data; 3502 3503 /* disable quirks */ 3504 common->pdata.quirks &= ~socdata->quirks_dis; 3505 } 3506 } 3507 3508 static int am65_cpsw_nuss_probe(struct platform_device *pdev) 3509 { 3510 struct cpsw_ale_params ale_params = { 0 }; 3511 const struct of_device_id *of_id; 3512 struct device *dev = &pdev->dev; 3513 struct am65_cpsw_common *common; 3514 struct device_node *node; 3515 struct resource *res; 3516 struct clk *clk; 3517 int ale_entries; 3518 __be64 id_temp; 3519 int ret, i; 3520 3521 common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); 3522 if (!common) 3523 return -ENOMEM; 3524 common->dev = dev; 3525 3526 of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev); 3527 if (!of_id) 3528 return -EINVAL; 3529 common->pdata = *(const struct am65_cpsw_pdata *)of_id->data; 3530 3531 am65_cpsw_nuss_apply_socinfo(common); 3532 3533 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss"); 3534 common->ss_base = devm_ioremap_resource(&pdev->dev, res); 3535 if (IS_ERR(common->ss_base)) 3536 return PTR_ERR(common->ss_base); 3537 common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE; 3538 /* Use device's physical base address as switch id */ 3539 id_temp = cpu_to_be64(res->start); 3540 memcpy(common->switch_id, &id_temp, sizeof(res->start)); 3541 3542 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 3543 if (!node) 3544 return -ENOENT; 3545 common->port_num = of_get_child_count(node); 3546 of_node_put(node); 3547 if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS) 3548 return -ENOENT; 3549 3550 common->rx_flow_id_base = -1; 3551 init_completion(&common->tdown_complete); 3552 common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS; 3553 common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS; 3554 common->pf_p0_rx_ptype_rrobin = false; 3555 common->default_vlan = 1; 3556 3557 common->ports = devm_kcalloc(dev, common->port_num, 3558 sizeof(*common->ports), 3559 GFP_KERNEL); 3560 if (!common->ports) 3561 return -ENOMEM; 3562 3563 clk = devm_clk_get(dev, "fck"); 3564 if (IS_ERR(clk)) 3565 return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n"); 3566 common->bus_freq = clk_get_rate(clk); 3567 3568 pm_runtime_enable(dev); 3569 ret = pm_runtime_resume_and_get(dev); 3570 if (ret < 0) { 3571 pm_runtime_disable(dev); 3572 return ret; 3573 } 3574 3575 node = of_get_child_by_name(dev->of_node, "mdio"); 3576 if (!node) { 3577 dev_warn(dev, "MDIO node not found\n"); 3578 } else if (of_device_is_available(node)) { 3579 struct platform_device *mdio_pdev; 3580 3581 mdio_pdev = of_platform_device_create(node, NULL, dev); 3582 if (!mdio_pdev) { 3583 ret = 
-ENODEV; 3584 goto err_pm_clear; 3585 } 3586 3587 common->mdio_dev = &mdio_pdev->dev; 3588 } 3589 of_node_put(node); 3590 3591 am65_cpsw_nuss_get_ver(common); 3592 3593 ret = am65_cpsw_nuss_init_host_p(common); 3594 if (ret) 3595 goto err_of_clear; 3596 3597 ret = am65_cpsw_nuss_init_slave_ports(common); 3598 if (ret) 3599 goto err_of_clear; 3600 3601 /* init common data */ 3602 ale_params.dev = dev; 3603 ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; 3604 ale_params.ale_ports = common->port_num + 1; 3605 ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE; 3606 ale_params.dev_id = common->pdata.ale_dev_id; 3607 ale_params.bus_freq = common->bus_freq; 3608 3609 common->ale = cpsw_ale_create(&ale_params); 3610 if (IS_ERR(common->ale)) { 3611 dev_err(dev, "error initializing ale engine\n"); 3612 ret = PTR_ERR(common->ale); 3613 goto err_of_clear; 3614 } 3615 3616 ale_entries = common->ale->params.ale_entries; 3617 common->ale_context = devm_kzalloc(dev, 3618 ale_entries * ALE_ENTRY_WORDS * sizeof(u32), 3619 GFP_KERNEL); 3620 ret = am65_cpsw_init_cpts(common); 3621 if (ret) 3622 goto err_of_clear; 3623 3624 /* init ports */ 3625 for (i = 0; i < common->port_num; i++) 3626 am65_cpsw_nuss_slave_disable_unused(&common->ports[i]); 3627 3628 dev_set_drvdata(dev, common); 3629 3630 common->is_emac_mode = true; 3631 3632 ret = am65_cpsw_nuss_init_ndevs(common); 3633 if (ret) 3634 goto err_ndevs_clear; 3635 3636 ret = am65_cpsw_nuss_register_ndevs(common); 3637 if (ret) 3638 goto err_ndevs_clear; 3639 3640 pm_runtime_put(dev); 3641 return 0; 3642 3643 err_ndevs_clear: 3644 am65_cpsw_nuss_cleanup_ndev(common); 3645 am65_cpsw_nuss_phylink_cleanup(common); 3646 am65_cpts_release(common->cpts); 3647 err_of_clear: 3648 if (common->mdio_dev) 3649 of_platform_device_destroy(common->mdio_dev, NULL); 3650 err_pm_clear: 3651 pm_runtime_put_sync(dev); 3652 pm_runtime_disable(dev); 3653 return ret; 3654 } 3655 3656 static void am65_cpsw_nuss_remove(struct platform_device *pdev) 3657 { 3658 struct device *dev = &pdev->dev; 3659 struct am65_cpsw_common *common; 3660 int ret; 3661 3662 common = dev_get_drvdata(dev); 3663 3664 ret = pm_runtime_resume_and_get(&pdev->dev); 3665 if (ret < 0) { 3666 /* Note, if this error path is taken, we're leaking some 3667 * resources. 
3668 */ 3669 dev_err(&pdev->dev, "Failed to resume device (%pe)\n", 3670 ERR_PTR(ret)); 3671 return; 3672 } 3673 3674 am65_cpsw_unregister_notifiers(common); 3675 3676 /* must unregister ndevs here because DD release_driver routine calls 3677 * dma_deconfigure(dev) before devres_release_all(dev) 3678 */ 3679 am65_cpsw_nuss_cleanup_ndev(common); 3680 am65_cpsw_unregister_devlink(common); 3681 am65_cpsw_nuss_phylink_cleanup(common); 3682 am65_cpts_release(common->cpts); 3683 am65_cpsw_disable_serdes_phy(common); 3684 3685 if (common->mdio_dev) 3686 of_platform_device_destroy(common->mdio_dev, NULL); 3687 3688 pm_runtime_put_sync(&pdev->dev); 3689 pm_runtime_disable(&pdev->dev); 3690 } 3691 3692 static int am65_cpsw_nuss_suspend(struct device *dev) 3693 { 3694 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3695 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3696 struct am65_cpsw_port *port; 3697 struct net_device *ndev; 3698 int i, ret; 3699 3700 cpsw_ale_dump(common->ale, common->ale_context); 3701 host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3702 for (i = 0; i < common->port_num; i++) { 3703 port = &common->ports[i]; 3704 ndev = port->ndev; 3705 3706 if (!ndev) 3707 continue; 3708 3709 port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3710 netif_device_detach(ndev); 3711 if (netif_running(ndev)) { 3712 rtnl_lock(); 3713 ret = am65_cpsw_nuss_ndo_slave_stop(ndev); 3714 rtnl_unlock(); 3715 if (ret < 0) { 3716 netdev_err(ndev, "failed to stop: %d", ret); 3717 return ret; 3718 } 3719 } 3720 } 3721 3722 am65_cpts_suspend(common->cpts); 3723 3724 am65_cpsw_nuss_remove_rx_chns(common); 3725 am65_cpsw_nuss_remove_tx_chns(common); 3726 3727 return 0; 3728 } 3729 3730 static int am65_cpsw_nuss_resume(struct device *dev) 3731 { 3732 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3733 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3734 struct am65_cpsw_port *port; 3735 struct net_device *ndev; 3736 int i, ret; 3737 3738 ret = am65_cpsw_nuss_init_tx_chns(common); 3739 if (ret) 3740 return ret; 3741 ret = am65_cpsw_nuss_init_rx_chns(common); 3742 if (ret) 3743 return ret; 3744 3745 /* If RX IRQ was disabled before suspend, keep it disabled */ 3746 for (i = 0; i < common->rx_ch_num_flows; i++) { 3747 if (common->rx_chns.flows[i].irq_disabled) 3748 disable_irq(common->rx_chns.flows[i].irq); 3749 } 3750 3751 am65_cpts_resume(common->cpts); 3752 3753 for (i = 0; i < common->port_num; i++) { 3754 port = &common->ports[i]; 3755 ndev = port->ndev; 3756 3757 if (!ndev) 3758 continue; 3759 3760 if (netif_running(ndev)) { 3761 rtnl_lock(); 3762 ret = am65_cpsw_nuss_ndo_slave_open(ndev); 3763 rtnl_unlock(); 3764 if (ret < 0) { 3765 netdev_err(ndev, "failed to start: %d", ret); 3766 return ret; 3767 } 3768 } 3769 3770 netif_device_attach(ndev); 3771 writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3772 } 3773 3774 writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3775 cpsw_ale_restore(common->ale, common->ale_context); 3776 3777 return 0; 3778 } 3779 3780 static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = { 3781 SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume) 3782 }; 3783 3784 static struct platform_driver am65_cpsw_nuss_driver = { 3785 .driver = { 3786 .name = AM65_CPSW_DRV_NAME, 3787 .of_match_table = am65_cpsw_nuss_of_mtable, 3788 .pm = &am65_cpsw_nuss_dev_pm_ops, 3789 }, 3790 .probe = am65_cpsw_nuss_probe, 3791 .remove 
= am65_cpsw_nuss_remove, 3792 }; 3793 3794 module_platform_driver(am65_cpsw_nuss_driver); 3795 3796 MODULE_LICENSE("GPL v2"); 3797 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>"); 3798 MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver"); 3799