// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 */

#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/rtnetlink.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
#include <net/switchdev.h>

#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"

#define AM65_CPSW_SS_BASE		0x0
#define AM65_CPSW_SGMII_BASE		0x100
#define AM65_CPSW_XGMII_BASE		0x2100
#define AM65_CPSW_CPSW_NU_BASE		0x20000
#define AM65_CPSW_NU_PORTS_BASE		0x1000
#define AM65_CPSW_NU_FRAM_BASE		0x12000
#define AM65_CPSW_NU_STATS_BASE		0x1a000
#define AM65_CPSW_NU_ALE_BASE		0x1e000
#define AM65_CPSW_NU_CPTS_BASE		0x1d000

#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200

#define AM65_CPSW_MAX_PORTS	8

#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
#define AM65_CPSW_MAX_PACKET_SIZE	2024

#define AM65_CPSW_REG_CTL		0x004
#define AM65_CPSW_REG_STAT_PORT_EN	0x014
#define AM65_CPSW_REG_PTYPE		0x018

#define AM65_CPSW_P0_REG_CTL			0x004
#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008

#define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
#define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024

#define AM65_CPSW_PORTN_REG_SA_L		0x308
#define AM65_CPSW_PORTN_REG_SA_H		0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL		0x310
#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2	0x31C

#define AM65_CPSW_SGMII_CONTROL_REG		0x010
#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG	0x018
#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE	BIT(0)

#define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
#define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)

/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)
#define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN	BIT(16)

/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)

/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16

#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN		BIT(0)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN	BIT(1)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN	BIT(2)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN		BIT(3)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN		BIT(9)

/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO	BIT(23)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))

#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)

#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)

#define AM65_CPSW_TS_RX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)

#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
/* Number of TX/RX descriptors per channel/flow */
#define AM65_CPSW_MAX_TX_DESC	500
#define AM65_CPSW_MAX_RX_DESC	500

#define AM65_CPSW_NAV_PS_DATA_SIZE 16
#define AM65_CPSW_NAV_SW_DATA_SIZE 16

#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
			 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define AM65_CPSW_DEFAULT_TX_CHNS	8
#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS	1

/* CPPI streaming packet interface */
#define AM65_CPSW_CPPI_TX_FLOW_ID	0x3FFF
#define AM65_CPSW_CPPI_TX_PKT_TYPE	0x7

/* XDP */
#define AM65_CPSW_XDP_CONSUMED	BIT(1)
#define AM65_CPSW_XDP_REDIRECT	BIT(0)
#define AM65_CPSW_XDP_PASS	0

/* Include headroom compatible with both skb and xdpf */
#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))

static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				      const u8 *dev_addr)
{
	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);

	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}

static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
	cpsw_sl_reset(port->slave.mac_sl, 100);
	/* Max length register has to be restored after MAC SL reset */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
}

static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
{
	common->nuss_ver = readl(common->ss_base);
	common->cpsw_ver = readl(common->cpsw_base);
	dev_info(common->dev,
		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
		 common->nuss_ver,
		 common->cpsw_ver,
		 common->port_num + 1,
		 common->pdata.quirks);
}

static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
					    __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask, unreg_mcast = 0;
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	if (!vid)
		unreg_mcast = port_mask;
	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
				       unreg_mcast, port_mask, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
					     __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(common->ale, vid,
				BIT(port->port_id) | ALE_PORT_HOST);

	pm_runtime_put(common->dev);
	return ret;
}

static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
					bool promisc)
{
	struct am65_cpsw_common *common = port->common;

	if (promisc && !common->is_emac_mode) {
		dev_dbg(common->dev, "promisc mode requested in switch mode");
		return;
	}

	if (promisc) {
		/* Enable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 1);
		dev_dbg(common->dev, "promisc enabled\n");
	} else {
		/* Disable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 0);
		dev_dbg(common->dev, "promisc disabled\n");
	}
}

static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	bool promisc;

	promisc = !!(ndev->flags & IFF_PROMISC);
	am65_cpsw_slave_set_promisc(port, promisc);

	if (promisc)
		return;

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(common->ale,
			      ndev->flags & IFF_ALLMULTI, port->port_id);

	port_mask = ALE_PORT_HOST;
	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(common->ale, port_mask, -1);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_ale_add_mcast(common->ale, ha->addr,
					   port_mask, 0, 0, 0);
		}
	}
}

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned long trans_start;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];
	trans_start = READ_ONCE(netif_txq->trans_start);

	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
		   txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   jiffies_to_msecs(jiffies - trans_start),
		   netdev_queue_dql_avail(netif_txq),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));

	if (netif_tx_queue_stopped(netif_txq)) {
		/* try recover if stopped by us */
		txq_trans_update(netif_txq);
		netif_tx_wake_queue(netif_txq);
	}
}

static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
				  struct page *page, u32 flow_idx)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct am65_cpsw_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev,
				 page_address(page) + AM65_CPSW_HEADROOM,
				 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		dev_err(dev, "Failed to map rx buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->page = page;
	swdata->flow_id = flow_idx;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
					desc_rx, desc_dma);
}

void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	u32 val, pri_map;

	/* P0 set Receive Priority Type */
	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);

	if (common->pf_p0_rx_ptype_rrobin) {
		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* Enet Ports fifos works in fixed priority mode only, so
		 * reset P0_Rx_Pri_Map so all packet will go in Enet fifo 0
		 */
		pri_map = 0x0;
	} else {
		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* restore P0_Rx_Pri_Map */
		pri_map = 0x76543210;
	}

	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}

static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);

static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_rx_flow *flow;
	struct xdp_rxq_info *rxq;
	int id, port;

	for (id = 0; id < common->rx_ch_num_flows; id++) {
		flow = &rx_chn->flows[id];

		for (port = 0; port < common->port_num; port++) {
			if (!common->ports[port].ndev)
				continue;

			rxq = &common->ports[port].xdp_rxq[id];

			if (xdp_rxq_info_is_reg(rxq))
				xdp_rxq_info_unreg(rxq);
		}

		if (flow->page_pool) {
			page_pool_destroy(flow->page_pool);
			flow->page_pool = NULL;
		}
	}
}

static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP,
		.order = 0,
		.pool_size = AM65_CPSW_MAX_RX_DESC,
		.nid = dev_to_node(common->dev),
		.dev = common->dev,
		.dma_dir = DMA_BIDIRECTIONAL,
		/* .napi set dynamically */
	};
	struct am65_cpsw_rx_flow *flow;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int id, port, ret;

	for (id = 0; id < common->rx_ch_num_flows; id++) {
		flow = &rx_chn->flows[id];
		pp_params.napi = &flow->napi_rx;
		pool = page_pool_create(&pp_params);
		if (IS_ERR(pool)) {
			ret = PTR_ERR(pool);
			goto err;
		}

		flow->page_pool = pool;

		/* using same page pool is allowed as no running rx handlers
		 * simultaneously for both ndevs
		 */
		for (port = 0; port < common->port_num; port++) {
			if (!common->ports[port].ndev)
				continue;

			rxq = &common->ports[port].xdp_rxq[id];

			ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
					       id, flow->napi_rx.napi_id);
			if (ret)
				goto err;

			ret = xdp_rxq_info_reg_mem_model(rxq,
							 MEM_TYPE_PAGE_POOL,
							 pool);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	am65_cpsw_destroy_xdp_rxqs(common);
	return ret;
}

static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
				   void *desc,
				   unsigned char dsize_log2)
{
	void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);

	return (desc - pool_addr) >> dsize_log2;
}

static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
					struct cppi5_host_desc_t *desc,
					enum am65_cpsw_tx_buf_type buf_type)
{
	int desc_idx;

	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
					   tx_chn->dsize_log2);
	k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
					(void *)buf_type);
}

static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
							  dma_addr_t desc_dma)
{
	struct cppi5_host_desc_t *desc_tx;
	int desc_idx;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
					   tx_chn->dsize_log2);

	return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
								       desc_idx);
}

static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
				      struct page *page,
				      bool allow_direct)
{
	page_pool_put_full_page(flow->page_pool, page, allow_direct);
}

static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct am65_cpsw_swdata *swdata;
	dma_addr_t buf_dma;
	struct page *page;
	u32 buf_dma_len;
	u32 flow_id;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	page = swdata->page;
	flow_id = swdata->flow_id;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}

static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
					   struct net_device *ndev,
					   unsigned int len)
{
	struct sk_buff *skb;

	len += AM65_CPSW_HEADROOM;

	skb = build_skb(page_addr, len);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, AM65_CPSW_HEADROOM);
	skb->dev = ndev;

	return skb;
}

static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int port_idx, i, ret, tx, flow_idx;
	struct am65_cpsw_rx_flow *flow;
	u32 val, port_mask;
	struct page *page;

	if (common->usage_count)
		return 0;

	/* Control register */
	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_REG_CTL);
	/* Max length register */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
	/* set base flow_id */
	writel(common->rx_flow_id_base,
	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
	       host_p->port_base + AM65_CPSW_P0_REG_CTL);

	am65_cpsw_nuss_set_p0_ptype(common);

	/* enable statistic */
	val = BIT(HOST_PORT_NUM);
	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
		struct am65_cpsw_port *port = &common->ports[port_idx];

		if (!port->disabled)
			val |= BIT(port->port_id);
	}
	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);

	cpsw_ale_start(common->ale);

	/* limit to one RX flow only */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ID, 0);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ENABLE, 1);
	/* switch to vlan unaware mode */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* default vlan cfg: create mask based on enabled ports */
	port_mask = GENMASK(common->port_num, 0) &
		    ~common->disabled_ports_mask;

	cpsw_ale_add_vlan(common->ale, 0, port_mask,
			  port_mask, port_mask,
			  port_mask & ~ALE_PORT_HOST);

	if (common->is_emac_mode)
		am65_cpsw_init_host_port_emac(common);
	else
		am65_cpsw_init_host_port_switch(common);

	am65_cpsw_qos_tx_p0_rate_init(common);

	ret = am65_cpsw_create_xdp_rxqs(common);
	if (ret) {
		dev_err(common->dev, "Failed to create XDP rx queues\n");
		return ret;
	}

	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
		flow = &rx_chn->flows[flow_idx];
		for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
			page = page_pool_dev_alloc_pages(flow->page_pool);
			if (!page) {
				dev_err(common->dev, "cannot allocate page in flow %d\n",
					flow_idx);
				ret = -ENOMEM;
				goto fail_rx;
			}

			ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
			if (ret < 0) {
				dev_err(common->dev,
					"cannot submit page to rx channel flow %d, error %d\n",
					flow_idx, ret);
				am65_cpsw_put_page(flow, page, false);
				goto fail_rx;
			}
		}
	}

	ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
	if (ret) {
		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
		goto fail_rx;
	}

	for (i = 0; i < common->rx_ch_num_flows ; i++) {
		napi_enable(&rx_chn->flows[i].napi_rx);
		if (rx_chn->flows[i].irq_disabled) {
			rx_chn->flows[i].irq_disabled = false;
			enable_irq(rx_chn->flows[i].irq);
		}
	}

	for (tx = 0; tx < common->tx_ch_num; tx++) {
		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
		if (ret) {
			dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
				tx, ret);
			tx--;
			goto fail_tx;
		}
		napi_enable(&tx_chn[tx].napi_tx);
	}

	dev_dbg(common->dev, "cpsw_nuss started\n");
	return 0;

fail_tx:
	while (tx >= 0) {
		napi_disable(&tx_chn[tx].napi_tx);
		k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
		tx--;
	}

	for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
		flow = &rx_chn->flows[flow_idx];
		if (!flow->irq_disabled) {
			disable_irq(flow->irq);
			flow->irq_disabled = true;
		}
		napi_disable(&flow->napi_rx);
	}

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

fail_rx:
	for (i = 0; i < common->rx_ch_num_flows; i++)
		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
					  am65_cpsw_nuss_rx_cleanup, !!i);

	am65_cpsw_destroy_xdp_rxqs(common);

	return ret;
}

static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int i;

	if (common->usage_count != 1)
		return 0;

	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	/* shutdown tx channels */
	atomic_set(&common->tdown_cnt, common->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	reinit_completion(&common->tdown_complete);

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);

	i = wait_for_completion_timeout(&common->tdown_complete,
					msecs_to_jiffies(1000));
	if (!i)
		dev_err(common->dev, "tx timeout\n");
	for (i = 0; i < common->tx_ch_num; i++) {
		napi_disable(&tx_chn[i].napi_tx);
		hrtimer_cancel(&tx_chn[i].tx_hrtimer);
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
	}

	reinit_completion(&common->tdown_complete);
	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);

	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
		if (!i)
			dev_err(common->dev, "rx teardown timeout\n");
	}

	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
		napi_disable(&rx_chn->flows[i].napi_rx);
		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
					  am65_cpsw_nuss_rx_cleanup, !!i);
	}

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

	cpsw_ale_stop(common->ale);

	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	am65_cpsw_destroy_xdp_rxqs(common);

	dev_dbg(common->dev, "cpsw_nuss stopped\n");
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	phylink_stop(port->slave.phylink);

	netif_tx_stop_all_queues(ndev);

	phylink_disconnect_phy(port->slave.phylink);

	ret = am65_cpsw_nuss_common_stop(common);
	if (ret)
		return ret;

	common->usage_count--;
	pm_runtime_put(common->dev);
	return 0;
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct am65_cpsw_port *port = arg;

	if (!vdev)
		return 0;

	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
}

static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret, i;
	u32 reg;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	/* Idle MAC port */
	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
	cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);

	/* soft reset MAC */
	cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
	mdelay(1);
	reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
	if (reg) {
		dev_err(common->dev, "soft RESET didn't complete\n");
		ret = -ETIMEDOUT;
		goto runtime_put;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
	if (ret) {
		dev_err(common->dev, "cannot set real number of tx queues\n");
		goto runtime_put;
	}

	ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
	if (ret) {
		dev_err(common->dev, "cannot set real number of rx queues\n");
		goto runtime_put;
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);

		netdev_tx_reset_queue(txq);
		txq->tx_maxrate = common->tx_chns[i].rate_mbps;
	}

	ret = am65_cpsw_nuss_common_open(common);
	if (ret)
		goto runtime_put;

	common->usage_count++;

	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);

	if (common->is_emac_mode)
		am65_cpsw_init_port_emac_ale(port);
	else
		am65_cpsw_init_port_switch_ale(port);

	/* mac_sl should be configured via phy-link interface */
	am65_cpsw_sl_ctl_reset(port);

	ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
	if (ret)
		goto error_cleanup;

	/* restore vlan configurations */
	vlan_for_each(ndev, cpsw_restore_vlans, port);

	phylink_start(port->slave.phylink);

	return 0;

error_cleanup:
	am65_cpsw_nuss_ndo_slave_stop(ndev);
	return ret;

runtime_put:
	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
				  struct am65_cpsw_tx_chn *tx_chn,
				  struct xdp_frame *xdpf,
				  enum am65_cpsw_tx_buf_type buf_type)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct cppi5_host_desc_t *host_desc;
	struct netdev_queue *netif_txq;
	dma_addr_t dma_desc, dma_buf;
	u32 pkt_len = xdpf->len;
	void **swdata;
	int ret;

	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (unlikely(!host_desc)) {
		ndev->stats.tx_dropped++;
		return AM65_CPSW_XDP_CONSUMED;	/* drop */
	}

	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);

	dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
				 pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
		ndev->stats.tx_dropped++;
		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
		goto pool_free;
	}

	cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
	cppi5_hdesc_set_pktlen(host_desc, pkt_len);
	cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
	cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);

	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
	cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);

	swdata = cppi5_hdesc_get_swdata(host_desc);
	*(swdata) = xdpf;

	/* Report BQL before sending the packet */
	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
	netdev_tx_sent_queue(netif_txq, pkt_len);

	dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
					       dma_desc);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
					       dma_desc);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		/* Inform BQL */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
		goto dma_unmap;
	}

	return 0;

dma_unmap:
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
	dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
pool_free:
	k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
	return ret;
}

static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
			     struct am65_cpsw_port *port,
			     struct xdp_buff *xdp,
			     int cpu, int *len)
{
	struct am65_cpsw_common *common = flow->common;
	struct net_device *ndev = port->ndev;
	int ret = AM65_CPSW_XDP_CONSUMED;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	struct page *page;
	u32 act;
	int err;

	prog = READ_ONCE(port->xdp_prog);
	if (!prog)
		return AM65_CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = AM65_CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);

		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		__netif_tx_lock(netif_txq, cpu);
		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
		__netif_tx_unlock(netif_txq);
		if (err)
			goto drop;

		dev_sw_netstats_tx_add(ndev, 1, *len);
		ret = AM65_CPSW_XDP_CONSUMED;
		goto out;
	case XDP_REDIRECT:
		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
			goto drop;

		dev_sw_netstats_rx_add(ndev, *len);
		ret = AM65_CPSW_XDP_REDIRECT;
		goto out;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
drop:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		ndev->stats.rx_dropped++;
	}

	page = virt_to_head_page(xdp->data);
	am65_cpsw_put_page(flow, page, true);

out:
	return ret;
}

/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)

static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
{
	/* HW can verify IPv4/IPv6 TCP/UDP packets checksum
	 * csum information provides in psdata[2] word:
	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
	 * bits - indicates IPv4/IPv6 packet
	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
	 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
	 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return;

	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
	    !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
		/* csum for fragmented packets is unsupported */
		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
				     int cpu, int *xdp_state)
{
	struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
	struct am65_cpsw_common *common = flow->common;
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct am65_cpsw_swdata *swdata;
	struct page *page, *new_page;
	dma_addr_t desc_dma, buf_dma;
	struct am65_cpsw_port *port;
	struct net_device *ndev;
	u32 flow_idx = flow->id;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	int headroom, ret;
	void *page_addr;
	u32 *psdata;

	*xdp_state = AM65_CPSW_XDP_PASS;
	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			dev_err(dev, "RX: pop chn fail %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) {
		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
		if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
			complete(&common->tdown_complete);
		return 0;
	}

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
		__func__, flow_idx, &desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	page = swdata->page;
	page_addr = page_address(page);
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
	port = am65_common_get_port(common, port_id);
	ndev = port->ndev;
	psdata = cppi5_hdesc_get_psdata(desc_rx);
	csum_info = psdata[2];
	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	skb = am65_cpsw_build_skb(page_addr, ndev,
				  AM65_CPSW_MAX_PACKET_SIZE);
	if (unlikely(!skb)) {
		new_page = page;
		goto requeue;
	}

	if (port->xdp_prog) {
		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
				 pkt_len, false);
		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
					       cpu, &pkt_len);
		if (*xdp_state != AM65_CPSW_XDP_PASS)
			goto allocate;

		/* Compute additional headroom to be reserved */
		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
		skb_reserve(skb, headroom);
	}

	ndev_priv = netdev_priv(ndev);
	am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
	skb_put(skb, pkt_len);
	if (port->rx_ts_enabled)
		am65_cpts_rx_timestamp(common->cpts, skb);
	skb_mark_for_recycle(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	am65_cpsw_nuss_rx_csum(skb, csum_info);
	napi_gro_receive(&flow->napi_rx, skb);

	dev_sw_netstats_rx_add(ndev, pkt_len);

allocate:
	new_page = page_pool_dev_alloc_pages(flow->page_pool);
	if (unlikely(!new_page)) {
		dev_err(dev, "page alloc failed\n");
		return -ENOMEM;
	}

	if (netif_dormant(ndev)) {
		am65_cpsw_put_page(flow, new_page, true);
		ndev->stats.rx_dropped++;
		return 0;
	}

requeue:
	ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
	if (WARN_ON(ret < 0)) {
		am65_cpsw_put_page(flow, new_page, true);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
{
	struct am65_cpsw_rx_flow *flow = container_of(timer,
						      struct am65_cpsw_rx_flow,
						      rx_hrtimer);

	enable_irq(flow->irq);
	return HRTIMER_NORESTART;
}

static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
	struct am65_cpsw_common *common = flow->common;
	int cpu = smp_processor_id();
	int xdp_state_or = 0;
	int cur_budget, ret;
	int xdp_state;
	int num_rx = 0;

	/* process only this flow */
	cur_budget = budget;
	while (cur_budget--) {
		ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
		xdp_state_or |= xdp_state;
		if (ret)
			break;
		num_rx++;
	}

	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
		xdp_do_flush();

	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (flow->irq_disabled) {
			flow->irq_disabled = false;
			if (unlikely(flow->rx_pace_timeout)) {
				hrtimer_start(&flow->rx_hrtimer,
					      ns_to_ktime(flow->rx_pace_timeout),
					      HRTIMER_MODE_REL_PINNED);
			} else {
				enable_irq(flow->irq);
			}
		}
	}

	return num_rx;
}

static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
				   dma_addr_t desc_dma)
{
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
					     desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);

	dev_sw_netstats_tx_add(skb->dev, 1, skb->len);

	return skb;
}

static struct xdp_frame *
am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
				   struct am65_cpsw_tx_chn *tx_chn,
				   dma_addr_t desc_dma,
				   struct net_device **ndev)
{
	struct cppi5_host_desc_t *desc_tx;
	struct am65_cpsw_port *port;
	struct xdp_frame *xdpf;
	u32 port_id = 0;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	xdpf = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	port = am65_common_get_port(common, port_id);
	dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
	*ndev = port->ndev;

	return xdpf;
}

static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
				   struct netdev_queue *netif_txq)
{
	if (netif_tx_queue_stopped(netif_txq)) {
		/* Check whether the queue is stopped due to stalled
		 * tx dma, if the queue is stopped then wake the queue
		 * as we have free desc for tx
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);

		__netif_tx_unlock(netif_txq);
	}
}

static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
					   int chn, unsigned int budget, bool *tdown)
{
	enum am65_cpsw_tx_buf_type buf_type;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		spin_lock(&tx_chn->lock);
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		spin_unlock(&tx_chn->lock);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			*tdown = true;
			break;
		}

		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
			ndev = skb->dev;
			total_bytes = skb->len;
			napi_consume_skb(skb, budget);
		} else {
			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
								  desc_dma, &ndev);
			total_bytes = xdpf->len;
			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(xdpf);
			else
				xdp_return_frame(xdpf);
		}
		num_tx++;

		netif_txq = netdev_get_tx_queue(ndev, chn);

		netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
	}

	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
					      int chn, unsigned int budget, bool *tdown)
{
	enum am65_cpsw_tx_buf_type buf_type;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			*tdown = true;
			break;
		}

		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
			ndev = skb->dev;
			total_bytes += skb->len;
			napi_consume_skb(skb, budget);
		} else {
			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
								  desc_dma, &ndev);
			total_bytes += xdpf->len;
			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(xdpf);
			else
				xdp_return_frame(xdpf);
		}
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);

	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);

	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
	struct am65_cpsw_tx_chn *tx_chns =
			container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
	bool tdown = false;
	int num_tx;

	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
							    budget, &tdown);
	else
		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
							 tx_chn->id, budget, &tdown);

	if (num_tx >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx)) {
		if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return 0;
}

static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_rx_flow *flow = dev_id;

	flow->irq_disabled = true;
	disable_irq_nosync(irq);
	napi_schedule(&flow->napi_rx);

	return IRQ_HANDLED;
}

static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
						 struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	dma_addr_t desc_dma, buf_dma;
	int ret, q_idx, i;
	void **swdata;
	u32 *psdata;
	u32 pkt_len;

	/* padding enabled in hw */
	pkt_len = skb_headlen(skb);

	/* SKB TX timestamp */
	if (port->tx_ts_enabled)
		am65_cpts_prep_tx_timestamp(common->cpts, skb);

	q_idx = skb_get_queue_mapping(skb);
	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);

	tx_chn = &common->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
		dev_err(dev, "Failed to map tx skb buffer\n");
		ndev->stats.tx_errors++;
		goto err_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		dev_dbg(dev, "Failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
				 DMA_TO_DEVICE);
		goto busy_stop_q;
	}

	am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
				    AM65_CPSW_TX_BUF_TYPE_SKB);

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
	cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);

	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*(swdata) = skb;
	psdata = cppi5_hdesc_get_psdata(first_desc);

	/* HW csum offload if enabled */
	psdata[2] = 0;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int cs_start, cs_offset;

		cs_start = skb_transport_offset(skb);
		cs_offset = cs_start + skb->csum_offset;
		/* HW numerates bytes starting from 1 */
		psdata[2] = ((cs_offset + 1) << 24) |
			    ((cs_start + 1) << 16) | (skb->len - cs_start);
		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
	}

	if (!skb_is_nonlinear(skb))
		goto done_tx;

	dev_dbg(dev, "fragmented SKB\n");

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			dev_err(dev, "Failed to allocate descriptor\n");
			goto busy_free_descs;
		}

		am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
					    AM65_CPSW_TX_BUF_TYPE_SKB);

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
			dev_err(dev, "Failed to map tx skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ndev->stats.tx_errors++;
			goto err_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON(pkt_len != skb->len);

done_tx:
	skb_tx_timestamp(skb);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		dev_err(dev, "can't push desc %d\n", ret);
		/* inform bql */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		goto err_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();
		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);

		/* re-check for smp */
		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS) {
			netif_tx_wake_queue(netif_txq);
			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
		}
	}

	return NETDEV_TX_OK;

err_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

busy_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
						    void *addr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct sockaddr *sockaddr = (struct sockaddr *)addr;
	int ret;

	ret = eth_prepare_mac_addr_change(ndev, addr);
	if (ret < 0)
		return ret;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, 0, 0);
	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
			   HOST_PORT_NUM, ALE_SECURE, 0);

	am65_cpsw_port_set_sl_mac(port, addr);
	eth_commit_mac_addr_change(ndev, sockaddr);

	pm_runtime_put(common->dev);

	return 0;
}

static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->rx_ts_enabled = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		port->rx_ts_enabled = true;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		return -EOPNOTSUPP;
	default:
		return -ERANGE;
	}

	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);

	/* cfg TX timestamp */
	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;

	ts_vlan_ltype = ETH_P_8021Q;

	ts_ctrl_ltype2 = ETH_P_1588 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;

	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;

	if (port->tx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;

	if (port->rx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;

	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
	writel(ts_vlan_ltype, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
	writel(ts_ctrl_ltype2, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = port->tx_ts_enabled ?
		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = port->rx_ts_enabled ?
			HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
					  struct ifreq *req, int cmd)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_set(ndev, req);
	case SIOCGHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_get(ndev, req);
	}

	return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}

static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
					 struct rtnl_link_stats64 *stats)
{
	dev_fetch_sw_netstats(stats, dev->tstats);

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
}

static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
				    struct bpf_prog *prog)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	bool running = netif_running(ndev);
	struct bpf_prog *old_prog;

	if (running)
		am65_cpsw_nuss_ndo_slave_stop(ndev);

	old_prog = xchg(&port->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (running)
		return am65_cpsw_nuss_ndo_slave_open(ndev);

	return 0;
}

static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
	default:
		return -EINVAL;
	}
}

static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
				  struct xdp_frame **frames, u32 flags)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	int i, nxmit = 0;

	tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);

	__netif_tx_lock(netif_txq, cpu);
	for (i = 0; i < n; i++) {
		if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
					   AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
			break;
		nxmit++;
	}
	__netif_tx_unlock(netif_txq);

	return nxmit;
}

static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
	.ndo_get_stats64	= am65_cpsw_nuss_ndo_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
	.ndo_eth_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
	.ndo_setup_tc		= am65_cpsw_qos_ndo_setup_tc,
	.ndo_set_tx_maxrate	= am65_cpsw_qos_ndo_tx_p0_set_maxrate,
	.ndo_bpf		= am65_cpsw_ndo_bpf,
	.ndo_xdp_xmit		= am65_cpsw_ndo_xdp_xmit,
};

static void am65_cpsw_disable_phy(struct phy *phy)
{
	phy_power_off(phy);
	phy_exit(phy);
}

static int am65_cpsw_enable_phy(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);
	if (ret < 0)
		return ret;

	ret = phy_power_on(phy);
	if (ret < 0) {
		phy_exit(phy);
		return ret;
	}

	return 0;
}

static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	struct phy *phy;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		phy = port->slave.serdes_phy;
		if (phy)
			am65_cpsw_disable_phy(phy);
	}
}

static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
				     struct am65_cpsw_port *port)
{
	const char *name = "serdes";
	struct phy *phy;
	int ret;

	phy = devm_of_phy_optional_get(dev, port_np, name);
	if (IS_ERR_OR_NULL(phy))
		return PTR_ERR_OR_ZERO(phy);

	/* Serdes PHY exists. Store it. */
	port->slave.serdes_phy = phy;

	ret = am65_cpsw_enable_phy(phy);
	if (ret < 0)
		goto err_phy;

	return 0;

err_phy:
	devm_phy_put(dev, phy);
	return ret;
}

static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
				      const struct phylink_link_state *state)
{
	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
							  phylink_config);
	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
	struct am65_cpsw_common *common = port->common;

	if (common->pdata.extra_modes & BIT(state->interface)) {
		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
			writel(ADVERTISE_SGMII,
			       port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
			cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
		} else {
			cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
		}

		if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
			cpsw_sl_ctl_set(port->slave.mac_sl,
					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
		} else {
			cpsw_sl_ctl_clr(port->slave.mac_sl,
					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
		}

		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
		       port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
	}
}

static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
					 phy_interface_t interface)
{
	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
							  phylink_config);
	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
	struct am65_cpsw_common *common = port->common;
	struct net_device *ndev = port->ndev;
	u32 mac_control;
	int tmo;

	/* disable forwarding */
	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);

	tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
	dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
		cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);

	/* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */
	mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A |
		      CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN;
	/* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */
	if (phy_interface_mode_is_rgmii(interface))
		mac_control |= CPSW_SL_CTL_EXT_EN;
	/* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */
	cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control);

	am65_cpsw_qos_link_down(ndev);
	netif_tx_stop_all_queues(ndev);
}

static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
				       unsigned int mode, phy_interface_t interface, int speed,
				       int duplex, bool tx_pause, bool rx_pause)
{
	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
							  phylink_config);
	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
	struct am65_cpsw_common *common = port->common;
	u32 mac_control = CPSW_SL_CTL_GMII_EN;
	struct net_device *ndev = port->ndev;

	/* Bring the port out of idle state */
	cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);

	if (speed == SPEED_1000)
		mac_control |= CPSW_SL_CTL_GIG;
	/* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */
	if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
		/* Can be used with in band mode only */
		mac_control |= CPSW_SL_CTL_EXT_EN;
	if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
		mac_control |= CPSW_SL_CTL_IFCTL_A;
	if (duplex)
		mac_control |= CPSW_SL_CTL_FULLDUPLEX;

	/* rx_pause/tx_pause */
	if (rx_pause)
		mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

	if (tx_pause)
		mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

	cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);

	/* enable forwarding */
	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	am65_cpsw_qos_link_up(ndev, speed);
	netif_tx_wake_all_queues(ndev);
}

static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
	.mac_config = am65_cpsw_nuss_mac_config,
	.mac_link_down = am65_cpsw_nuss_mac_link_down,
	.mac_link_up = am65_cpsw_nuss_mac_link_up,
};

static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
{
	struct am65_cpsw_common *common = port->common;

	if (!port->disabled)
		return;

	cpsw_ale_control_set(common->ale, port->port_id,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	cpsw_sl_reset(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);
}

static void am65_cpsw_nuss_free_tx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	int i;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

static void
am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) 2141 { 2142 struct device *dev = common->dev; 2143 int i; 2144 2145 devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common); 2146 2147 common->tx_ch_rate_msk = 0; 2148 for (i = 0; i < common->tx_ch_num; i++) { 2149 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2150 2151 if (tx_chn->irq) 2152 devm_free_irq(dev, tx_chn->irq, tx_chn); 2153 2154 netif_napi_del(&tx_chn->napi_tx); 2155 } 2156 2157 am65_cpsw_nuss_free_tx_chns(common); 2158 } 2159 2160 static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) 2161 { 2162 struct device *dev = common->dev; 2163 int i, ret = 0; 2164 2165 for (i = 0; i < common->tx_ch_num; i++) { 2166 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2167 2168 netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, 2169 am65_cpsw_nuss_tx_poll); 2170 hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 2171 tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback; 2172 2173 ret = devm_request_irq(dev, tx_chn->irq, 2174 am65_cpsw_nuss_tx_irq, 2175 IRQF_TRIGGER_HIGH, 2176 tx_chn->tx_chn_name, tx_chn); 2177 if (ret) { 2178 dev_err(dev, "failure requesting tx%u irq %u, %d\n", 2179 tx_chn->id, tx_chn->irq, ret); 2180 goto err; 2181 } 2182 } 2183 2184 err: 2185 return ret; 2186 } 2187 2188 static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) 2189 { 2190 u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS); 2191 struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 }; 2192 struct device *dev = common->dev; 2193 struct k3_ring_cfg ring_cfg = { 2194 .elm_size = K3_RINGACC_RING_ELSIZE_8, 2195 .mode = K3_RINGACC_RING_MODE_RING, 2196 .flags = 0 2197 }; 2198 u32 hdesc_size, hdesc_size_out; 2199 int i, ret = 0; 2200 2201 hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE, 2202 AM65_CPSW_NAV_SW_DATA_SIZE); 2203 2204 tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE; 2205 tx_cfg.tx_cfg = ring_cfg; 2206 tx_cfg.txcq_cfg = ring_cfg; 2207 tx_cfg.tx_cfg.size = max_desc_num; 2208 tx_cfg.txcq_cfg.size = max_desc_num; 2209 2210 for (i = 0; i < common->tx_ch_num; i++) { 2211 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2212 2213 snprintf(tx_chn->tx_chn_name, 2214 sizeof(tx_chn->tx_chn_name), "tx%d", i); 2215 2216 spin_lock_init(&tx_chn->lock); 2217 tx_chn->common = common; 2218 tx_chn->id = i; 2219 tx_chn->descs_num = max_desc_num; 2220 2221 tx_chn->tx_chn = 2222 k3_udma_glue_request_tx_chn(dev, 2223 tx_chn->tx_chn_name, 2224 &tx_cfg); 2225 if (IS_ERR(tx_chn->tx_chn)) { 2226 ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn), 2227 "Failed to request tx dma channel\n"); 2228 goto err; 2229 } 2230 tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn); 2231 2232 tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev, 2233 tx_chn->descs_num, 2234 hdesc_size, 2235 tx_chn->tx_chn_name); 2236 if (IS_ERR(tx_chn->desc_pool)) { 2237 ret = PTR_ERR(tx_chn->desc_pool); 2238 dev_err(dev, "Failed to create poll %d\n", ret); 2239 goto err; 2240 } 2241 2242 hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool); 2243 tx_chn->dsize_log2 = __fls(hdesc_size_out); 2244 WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2)); 2245 2246 tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); 2247 if (tx_chn->irq < 0) { 2248 dev_err(dev, "Failed to get tx dma irq %d\n", 2249 tx_chn->irq); 2250 ret = tx_chn->irq; 2251 goto err; 2252 } 2253 2254 snprintf(tx_chn->tx_chn_name, 2255 sizeof(tx_chn->tx_chn_name), "%s-tx%d", 
2256 dev_name(dev), tx_chn->id); 2257 } 2258 2259 ret = am65_cpsw_nuss_ndev_add_tx_napi(common); 2260 if (ret) { 2261 dev_err(dev, "Failed to add tx NAPI %d\n", ret); 2262 goto err; 2263 } 2264 2265 err: 2266 i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common); 2267 if (i) { 2268 dev_err(dev, "Failed to add free_tx_chns action %d\n", i); 2269 return i; 2270 } 2271 2272 return ret; 2273 } 2274 2275 static void am65_cpsw_nuss_free_rx_chns(void *data) 2276 { 2277 struct am65_cpsw_common *common = data; 2278 struct am65_cpsw_rx_chn *rx_chn; 2279 2280 rx_chn = &common->rx_chns; 2281 2282 if (!IS_ERR_OR_NULL(rx_chn->desc_pool)) 2283 k3_cppi_desc_pool_destroy(rx_chn->desc_pool); 2284 2285 if (!IS_ERR_OR_NULL(rx_chn->rx_chn)) 2286 k3_udma_glue_release_rx_chn(rx_chn->rx_chn); 2287 } 2288 2289 static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common) 2290 { 2291 struct device *dev = common->dev; 2292 struct am65_cpsw_rx_chn *rx_chn; 2293 struct am65_cpsw_rx_flow *flows; 2294 int i; 2295 2296 rx_chn = &common->rx_chns; 2297 flows = rx_chn->flows; 2298 devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common); 2299 2300 for (i = 0; i < common->rx_ch_num_flows; i++) { 2301 if (!(flows[i].irq < 0)) 2302 devm_free_irq(dev, flows[i].irq, &flows[i]); 2303 netif_napi_del(&flows[i].napi_rx); 2304 } 2305 2306 am65_cpsw_nuss_free_rx_chns(common); 2307 2308 common->rx_flow_id_base = -1; 2309 } 2310 2311 static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) 2312 { 2313 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; 2314 struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 }; 2315 u32 max_desc_num = AM65_CPSW_MAX_RX_DESC; 2316 struct device *dev = common->dev; 2317 struct am65_cpsw_rx_flow *flow; 2318 u32 hdesc_size, hdesc_size_out; 2319 u32 fdqring_id; 2320 int i, ret = 0; 2321 2322 hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE, 2323 AM65_CPSW_NAV_SW_DATA_SIZE); 2324 2325 rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE; 2326 rx_cfg.flow_id_num = common->rx_ch_num_flows; 2327 rx_cfg.flow_id_base = common->rx_flow_id_base; 2328 2329 /* init all flows */ 2330 rx_chn->dev = dev; 2331 rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num; 2332 2333 for (i = 0; i < common->rx_ch_num_flows; i++) { 2334 flow = &rx_chn->flows[i]; 2335 flow->page_pool = NULL; 2336 } 2337 2338 rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg); 2339 if (IS_ERR(rx_chn->rx_chn)) { 2340 ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn), 2341 "Failed to request rx dma channel\n"); 2342 goto err; 2343 } 2344 rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn); 2345 2346 rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev, 2347 rx_chn->descs_num, 2348 hdesc_size, "rx"); 2349 if (IS_ERR(rx_chn->desc_pool)) { 2350 ret = PTR_ERR(rx_chn->desc_pool); 2351 dev_err(dev, "Failed to create rx poll %d\n", ret); 2352 goto err; 2353 } 2354 2355 hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool); 2356 rx_chn->dsize_log2 = __fls(hdesc_size_out); 2357 WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2)); 2358 2359 common->rx_flow_id_base = 2360 k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); 2361 dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base); 2362 2363 fdqring_id = K3_RINGACC_RING_ID_ANY; 2364 for (i = 0; i < rx_cfg.flow_id_num; i++) { 2365 struct k3_ring_cfg rxring_cfg = { 2366 .elm_size = K3_RINGACC_RING_ELSIZE_8, 2367 .mode = K3_RINGACC_RING_MODE_RING, 2368 .flags = 0, 2369 }; 2370 struct k3_ring_cfg fdqring_cfg 
= { 2371 .elm_size = K3_RINGACC_RING_ELSIZE_8, 2372 .flags = K3_RINGACC_RING_SHARED, 2373 }; 2374 struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = { 2375 .rx_cfg = rxring_cfg, 2376 .rxfdq_cfg = fdqring_cfg, 2377 .ring_rxq_id = K3_RINGACC_RING_ID_ANY, 2378 .src_tag_lo_sel = 2379 K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG, 2380 }; 2381 2382 flow = &rx_chn->flows[i]; 2383 flow->id = i; 2384 flow->common = common; 2385 flow->irq = -EINVAL; 2386 2387 rx_flow_cfg.ring_rxfdq0_id = fdqring_id; 2388 rx_flow_cfg.rx_cfg.size = max_desc_num; 2389 /* share same FDQ for all flows */ 2390 rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num; 2391 rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode; 2392 2393 ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn, 2394 i, &rx_flow_cfg); 2395 if (ret) { 2396 dev_err(dev, "Failed to init rx flow%d %d\n", i, ret); 2397 goto err; 2398 } 2399 if (!i) 2400 fdqring_id = 2401 k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn, 2402 i); 2403 2404 flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); 2405 if (flow->irq <= 0) { 2406 dev_err(dev, "Failed to get rx dma irq %d\n", 2407 flow->irq); 2408 ret = flow->irq; 2409 goto err; 2410 } 2411 2412 snprintf(flow->name, 2413 sizeof(flow->name), "%s-rx%d", 2414 dev_name(dev), i); 2415 netif_napi_add(common->dma_ndev, &flow->napi_rx, 2416 am65_cpsw_nuss_rx_poll); 2417 hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC, 2418 HRTIMER_MODE_REL_PINNED); 2419 flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback; 2420 2421 ret = devm_request_irq(dev, flow->irq, 2422 am65_cpsw_nuss_rx_irq, 2423 IRQF_TRIGGER_HIGH, 2424 flow->name, flow); 2425 if (ret) { 2426 dev_err(dev, "failure requesting rx %d irq %u, %d\n", 2427 i, flow->irq, ret); 2428 flow->irq = -EINVAL; 2429 goto err; 2430 } 2431 } 2432 2433 /* setup classifier to route priorities to flows */ 2434 cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows); 2435 2436 err: 2437 i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common); 2438 if (i) { 2439 dev_err(dev, "Failed to add free_rx_chns action %d\n", i); 2440 return i; 2441 } 2442 2443 return ret; 2444 } 2445 2446 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common) 2447 { 2448 struct am65_cpsw_host *host_p = am65_common_get_host(common); 2449 2450 host_p->common = common; 2451 host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE; 2452 host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE; 2453 2454 return 0; 2455 } 2456 2457 static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node, 2458 int slave, u8 *mac_addr) 2459 { 2460 u32 mac_lo, mac_hi, offset; 2461 struct regmap *syscon; 2462 int ret; 2463 2464 syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse"); 2465 if (IS_ERR(syscon)) { 2466 if (PTR_ERR(syscon) == -ENODEV) 2467 return 0; 2468 return PTR_ERR(syscon); 2469 } 2470 2471 ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1, 2472 &offset); 2473 if (ret) 2474 return ret; 2475 2476 regmap_read(syscon, offset, &mac_lo); 2477 regmap_read(syscon, offset + 4, &mac_hi); 2478 2479 mac_addr[0] = (mac_hi >> 8) & 0xff; 2480 mac_addr[1] = mac_hi & 0xff; 2481 mac_addr[2] = (mac_lo >> 24) & 0xff; 2482 mac_addr[3] = (mac_lo >> 16) & 0xff; 2483 mac_addr[4] = (mac_lo >> 8) & 0xff; 2484 mac_addr[5] = mac_lo & 0xff; 2485 2486 return 0; 2487 } 2488 2489 static int am65_cpsw_init_cpts(struct am65_cpsw_common *common) 2490 { 2491 struct device *dev = common->dev; 2492 struct device_node *node; 2493 struct am65_cpts *cpts; 2494 
void __iomem *reg_base; 2495 2496 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) 2497 return 0; 2498 2499 node = of_get_child_by_name(dev->of_node, "cpts"); 2500 if (!node) { 2501 dev_err(dev, "%s cpts not found\n", __func__); 2502 return -ENOENT; 2503 } 2504 2505 reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE; 2506 cpts = am65_cpts_create(dev, reg_base, node); 2507 if (IS_ERR(cpts)) { 2508 int ret = PTR_ERR(cpts); 2509 2510 of_node_put(node); 2511 dev_err(dev, "cpts create err %d\n", ret); 2512 return ret; 2513 } 2514 common->cpts = cpts; 2515 /* Forbid PM runtime if CPTS is running. 2516 * K3 CPSWxG modules may completely lose context during ON->OFF 2517 * transitions depending on integration. 2518 * AM65x/J721E MCU CPSW2G: false 2519 * J721E MAIN_CPSW9G: true 2520 */ 2521 pm_runtime_forbid(dev); 2522 2523 return 0; 2524 } 2525 2526 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) 2527 { 2528 struct device_node *node, *port_np; 2529 struct device *dev = common->dev; 2530 int ret; 2531 2532 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 2533 if (!node) 2534 return -ENOENT; 2535 2536 for_each_child_of_node(node, port_np) { 2537 struct am65_cpsw_port *port; 2538 u32 port_id; 2539 2540 /* it is not a slave port node, continue */ 2541 if (strcmp(port_np->name, "port")) 2542 continue; 2543 2544 ret = of_property_read_u32(port_np, "reg", &port_id); 2545 if (ret < 0) { 2546 dev_err(dev, "%pOF error reading port_id %d\n", 2547 port_np, ret); 2548 goto of_node_put; 2549 } 2550 2551 if (!port_id || port_id > common->port_num) { 2552 dev_err(dev, "%pOF has invalid port_id %u %s\n", 2553 port_np, port_id, port_np->name); 2554 ret = -EINVAL; 2555 goto of_node_put; 2556 } 2557 2558 port = am65_common_get_port(common, port_id); 2559 port->port_id = port_id; 2560 port->common = common; 2561 port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE + 2562 AM65_CPSW_NU_PORTS_OFFSET * (port_id); 2563 if (common->pdata.extra_modes) 2564 port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id); 2565 port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE + 2566 (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id); 2567 port->name = of_get_property(port_np, "label", NULL); 2568 port->fetch_ram_base = 2569 common->cpsw_base + AM65_CPSW_NU_FRAM_BASE + 2570 (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); 2571 2572 port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); 2573 if (IS_ERR(port->slave.mac_sl)) { 2574 ret = PTR_ERR(port->slave.mac_sl); 2575 goto of_node_put; 2576 } 2577 2578 port->disabled = !of_device_is_available(port_np); 2579 if (port->disabled) { 2580 common->disabled_ports_mask |= BIT(port->port_id); 2581 continue; 2582 } 2583 2584 port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL); 2585 if (IS_ERR(port->slave.ifphy)) { 2586 ret = PTR_ERR(port->slave.ifphy); 2587 dev_err(dev, "%pOF error retrieving port phy: %d\n", 2588 port_np, ret); 2589 goto of_node_put; 2590 } 2591 2592 /* Initialize the Serdes PHY for the port */ 2593 ret = am65_cpsw_init_serdes_phy(dev, port_np, port); 2594 if (ret) 2595 goto of_node_put; 2596 2597 port->slave.mac_only = 2598 of_property_read_bool(port_np, "ti,mac-only"); 2599 2600 /* get phy/link info */ 2601 port->slave.port_np = port_np; 2602 ret = of_get_phy_mode(port_np, &port->slave.phy_if); 2603 if (ret) { 2604 dev_err(dev, "%pOF read phy-mode err %d\n", 2605 port_np, ret); 2606 goto of_node_put; 2607 } 2608 2609 ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if); 2610 
if (ret) 2611 goto of_node_put; 2612 2613 ret = of_get_mac_address(port_np, port->slave.mac_addr); 2614 if (ret) { 2615 am65_cpsw_am654_get_efuse_macid(port_np, 2616 port->port_id, 2617 port->slave.mac_addr); 2618 if (!is_valid_ether_addr(port->slave.mac_addr)) { 2619 eth_random_addr(port->slave.mac_addr); 2620 dev_err(dev, "Use random MAC address\n"); 2621 } 2622 } 2623 2624 /* Reset all Queue priorities to 0 */ 2625 writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP); 2626 } 2627 of_node_put(node); 2628 2629 /* is there at least one ext.port */ 2630 if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) { 2631 dev_err(dev, "No Ext. port are available\n"); 2632 return -ENODEV; 2633 } 2634 2635 return 0; 2636 2637 of_node_put: 2638 of_node_put(port_np); 2639 of_node_put(node); 2640 return ret; 2641 } 2642 2643 static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common) 2644 { 2645 struct am65_cpsw_port *port; 2646 int i; 2647 2648 for (i = 0; i < common->port_num; i++) { 2649 port = &common->ports[i]; 2650 if (port->slave.phylink) 2651 phylink_destroy(port->slave.phylink); 2652 } 2653 } 2654 2655 static int 2656 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) 2657 { 2658 struct am65_cpsw_ndev_priv *ndev_priv; 2659 struct device *dev = common->dev; 2660 struct am65_cpsw_port *port; 2661 struct phylink *phylink; 2662 2663 port = &common->ports[port_idx]; 2664 2665 if (port->disabled) 2666 return 0; 2667 2668 /* alloc netdev */ 2669 port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv), 2670 AM65_CPSW_MAX_QUEUES, 2671 AM65_CPSW_MAX_QUEUES); 2672 if (!port->ndev) { 2673 dev_err(dev, "error allocating slave net_device %u\n", 2674 port->port_id); 2675 return -ENOMEM; 2676 } 2677 2678 ndev_priv = netdev_priv(port->ndev); 2679 ndev_priv->port = port; 2680 ndev_priv->msg_enable = AM65_CPSW_DEBUG; 2681 mutex_init(&ndev_priv->mm_lock); 2682 port->qos.link_speed = SPEED_UNKNOWN; 2683 SET_NETDEV_DEV(port->ndev, dev); 2684 port->ndev->dev.of_node = port->slave.port_np; 2685 2686 eth_hw_addr_set(port->ndev, port->slave.mac_addr); 2687 2688 port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE; 2689 port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE - 2690 (VLAN_ETH_HLEN + ETH_FCS_LEN); 2691 port->ndev->hw_features = NETIF_F_SG | 2692 NETIF_F_RXCSUM | 2693 NETIF_F_HW_CSUM | 2694 NETIF_F_HW_TC; 2695 port->ndev->features = port->ndev->hw_features | 2696 NETIF_F_HW_VLAN_CTAG_FILTER; 2697 port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC | 2698 NETDEV_XDP_ACT_REDIRECT | 2699 NETDEV_XDP_ACT_NDO_XMIT; 2700 port->ndev->vlan_features |= NETIF_F_SG; 2701 port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops; 2702 port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; 2703 2704 /* Configuring Phylink */ 2705 port->slave.phylink_config.dev = &port->ndev->dev; 2706 port->slave.phylink_config.type = PHYLINK_NETDEV; 2707 port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | 2708 MAC_1000FD | MAC_5000FD; 2709 port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */ 2710 2711 switch (port->slave.phy_if) { 2712 case PHY_INTERFACE_MODE_RGMII: 2713 case PHY_INTERFACE_MODE_RGMII_ID: 2714 case PHY_INTERFACE_MODE_RGMII_RXID: 2715 case PHY_INTERFACE_MODE_RGMII_TXID: 2716 phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces); 2717 break; 2718 2719 case PHY_INTERFACE_MODE_RMII: 2720 __set_bit(PHY_INTERFACE_MODE_RMII, 2721 port->slave.phylink_config.supported_interfaces); 2722 break; 2723 2724 case 
PHY_INTERFACE_MODE_QSGMII: 2725 case PHY_INTERFACE_MODE_SGMII: 2726 case PHY_INTERFACE_MODE_USXGMII: 2727 if (common->pdata.extra_modes & BIT(port->slave.phy_if)) { 2728 __set_bit(port->slave.phy_if, 2729 port->slave.phylink_config.supported_interfaces); 2730 } else { 2731 dev_err(dev, "selected phy-mode is not supported\n"); 2732 return -EOPNOTSUPP; 2733 } 2734 break; 2735 2736 default: 2737 dev_err(dev, "selected phy-mode is not supported\n"); 2738 return -EOPNOTSUPP; 2739 } 2740 2741 phylink = phylink_create(&port->slave.phylink_config, 2742 of_fwnode_handle(port->slave.port_np), 2743 port->slave.phy_if, 2744 &am65_cpsw_phylink_mac_ops); 2745 if (IS_ERR(phylink)) 2746 return PTR_ERR(phylink); 2747 2748 port->slave.phylink = phylink; 2749 2750 /* Disable TX checksum offload by default due to HW bug */ 2751 if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) 2752 port->ndev->features &= ~NETIF_F_HW_CSUM; 2753 2754 port->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 2755 port->xdp_prog = NULL; 2756 2757 if (!common->dma_ndev) 2758 common->dma_ndev = port->ndev; 2759 2760 return 0; 2761 } 2762 2763 static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common) 2764 { 2765 int ret; 2766 int i; 2767 2768 for (i = 0; i < common->port_num; i++) { 2769 ret = am65_cpsw_nuss_init_port_ndev(common, i); 2770 if (ret) 2771 return ret; 2772 } 2773 2774 return ret; 2775 } 2776 2777 static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) 2778 { 2779 struct am65_cpsw_port *port; 2780 int i; 2781 2782 for (i = 0; i < common->port_num; i++) { 2783 port = &common->ports[i]; 2784 if (!port->ndev) 2785 continue; 2786 if (port->ndev->reg_state == NETREG_REGISTERED) 2787 unregister_netdev(port->ndev); 2788 free_netdev(port->ndev); 2789 port->ndev = NULL; 2790 } 2791 } 2792 2793 static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common) 2794 { 2795 int set_val = 0; 2796 int i; 2797 2798 if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask)) 2799 set_val = 1; 2800 2801 dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val); 2802 2803 for (i = 1; i <= common->port_num; i++) { 2804 struct am65_cpsw_port *port = am65_common_get_port(common, i); 2805 struct am65_cpsw_ndev_priv *priv; 2806 2807 if (!port->ndev) 2808 continue; 2809 2810 priv = am65_ndev_to_priv(port->ndev); 2811 priv->offload_fwd_mark = set_val; 2812 } 2813 } 2814 2815 bool am65_cpsw_port_dev_check(const struct net_device *ndev) 2816 { 2817 if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) { 2818 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2819 2820 return !common->is_emac_mode; 2821 } 2822 2823 return false; 2824 } 2825 2826 static int am65_cpsw_netdevice_port_link(struct net_device *ndev, 2827 struct net_device *br_ndev, 2828 struct netlink_ext_ack *extack) 2829 { 2830 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2831 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2832 int err; 2833 2834 if (!common->br_members) { 2835 common->hw_bridge_dev = br_ndev; 2836 } else { 2837 /* This is adding the port to a second bridge, this is 2838 * unsupported 2839 */ 2840 if (common->hw_bridge_dev != br_ndev) 2841 return -EOPNOTSUPP; 2842 } 2843 2844 err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, 2845 false, extack); 2846 if (err) 2847 return err; 2848 2849 common->br_members |= BIT(priv->port->port_id); 2850 2851 am65_cpsw_port_offload_fwd_mark_update(common); 2852 2853 return NOTIFY_DONE; 2854 } 2855 
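/* Counterpart of am65_cpsw_netdevice_port_link(): withdraw the switchdev
 * bridge offload for @ndev, drop the port from the bridge member mask,
 * refresh offload_fwd_mark and forget the bridge once the last member
 * port has left.
 */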
2856 static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev) 2857 { 2858 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2859 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2860 2861 switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); 2862 2863 common->br_members &= ~BIT(priv->port->port_id); 2864 2865 am65_cpsw_port_offload_fwd_mark_update(common); 2866 2867 if (!common->br_members) 2868 common->hw_bridge_dev = NULL; 2869 } 2870 2871 /* netdev notifier */ 2872 static int am65_cpsw_netdevice_event(struct notifier_block *unused, 2873 unsigned long event, void *ptr) 2874 { 2875 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); 2876 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2877 struct netdev_notifier_changeupper_info *info; 2878 int ret = NOTIFY_DONE; 2879 2880 if (!am65_cpsw_port_dev_check(ndev)) 2881 return NOTIFY_DONE; 2882 2883 switch (event) { 2884 case NETDEV_CHANGEUPPER: 2885 info = ptr; 2886 2887 if (netif_is_bridge_master(info->upper_dev)) { 2888 if (info->linking) 2889 ret = am65_cpsw_netdevice_port_link(ndev, 2890 info->upper_dev, 2891 extack); 2892 else 2893 am65_cpsw_netdevice_port_unlink(ndev); 2894 } 2895 break; 2896 default: 2897 return NOTIFY_DONE; 2898 } 2899 2900 return notifier_from_errno(ret); 2901 } 2902 2903 static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw) 2904 { 2905 int ret = 0; 2906 2907 if (AM65_CPSW_IS_CPSW2G(cpsw) || 2908 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 2909 return 0; 2910 2911 cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event; 2912 ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 2913 if (ret) { 2914 dev_err(cpsw->dev, "can't register netdevice notifier\n"); 2915 return ret; 2916 } 2917 2918 ret = am65_cpsw_switchdev_register_notifiers(cpsw); 2919 if (ret) 2920 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 2921 2922 return ret; 2923 } 2924 2925 static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw) 2926 { 2927 if (AM65_CPSW_IS_CPSW2G(cpsw) || 2928 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 2929 return; 2930 2931 am65_cpsw_switchdev_unregister_notifiers(cpsw); 2932 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 2933 } 2934 2935 static const struct devlink_ops am65_cpsw_devlink_ops = {}; 2936 2937 static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw) 2938 { 2939 cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0, 2940 ALE_MCAST_BLOCK_LEARN_FWD); 2941 } 2942 2943 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common) 2944 { 2945 struct am65_cpsw_host *host = am65_common_get_host(common); 2946 2947 writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 2948 2949 am65_cpsw_init_stp_ale_entry(common); 2950 2951 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1); 2952 dev_dbg(common->dev, "Set P0_UNI_FLOOD\n"); 2953 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0); 2954 } 2955 2956 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common) 2957 { 2958 struct am65_cpsw_host *host = am65_common_get_host(common); 2959 2960 writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 2961 2962 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); 2963 dev_dbg(common->dev, "unset P0_UNI_FLOOD\n"); 2964 2965 /* learning make no sense in multi-mac mode */ 2966 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, 
ALE_PORT_NOLEARN, 1); 2967 } 2968 2969 static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, 2970 struct devlink_param_gset_ctx *ctx) 2971 { 2972 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 2973 struct am65_cpsw_common *common = dl_priv->common; 2974 2975 dev_dbg(common->dev, "%s id:%u\n", __func__, id); 2976 2977 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 2978 return -EOPNOTSUPP; 2979 2980 ctx->val.vbool = !common->is_emac_mode; 2981 2982 return 0; 2983 } 2984 2985 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port) 2986 { 2987 struct am65_cpsw_slave_data *slave = &port->slave; 2988 struct am65_cpsw_common *common = port->common; 2989 u32 port_mask; 2990 2991 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 2992 2993 if (slave->mac_only) 2994 /* enable mac-only mode on port */ 2995 cpsw_ale_control_set(common->ale, port->port_id, 2996 ALE_PORT_MACONLY, 1); 2997 2998 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1); 2999 3000 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3001 3002 cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr, 3003 HOST_PORT_NUM, ALE_SECURE, slave->port_vlan); 3004 cpsw_ale_add_mcast(common->ale, port->ndev->broadcast, 3005 port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2); 3006 } 3007 3008 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port) 3009 { 3010 struct am65_cpsw_slave_data *slave = &port->slave; 3011 struct am65_cpsw_common *cpsw = port->common; 3012 u32 port_mask; 3013 3014 cpsw_ale_control_set(cpsw->ale, port->port_id, 3015 ALE_PORT_NOLEARN, 0); 3016 3017 cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr, 3018 HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN, 3019 slave->port_vlan); 3020 3021 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3022 3023 cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast, 3024 port_mask, ALE_VLAN, slave->port_vlan, 3025 ALE_MCAST_FWD_2); 3026 3027 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3028 3029 cpsw_ale_control_set(cpsw->ale, port->port_id, 3030 ALE_PORT_MACONLY, 0); 3031 } 3032 3033 static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, 3034 struct devlink_param_gset_ctx *ctx, 3035 struct netlink_ext_ack *extack) 3036 { 3037 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 3038 struct am65_cpsw_common *cpsw = dl_priv->common; 3039 bool switch_en = ctx->val.vbool; 3040 bool if_running = false; 3041 int i; 3042 3043 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 3044 3045 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 3046 return -EOPNOTSUPP; 3047 3048 if (switch_en == !cpsw->is_emac_mode) 3049 return 0; 3050 3051 if (!switch_en && cpsw->br_members) { 3052 dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n"); 3053 return -EINVAL; 3054 } 3055 3056 rtnl_lock(); 3057 3058 cpsw->is_emac_mode = !switch_en; 3059 3060 for (i = 0; i < cpsw->port_num; i++) { 3061 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3062 3063 if (!sl_ndev || !netif_running(sl_ndev)) 3064 continue; 3065 3066 if_running = true; 3067 } 3068 3069 if (!if_running) { 3070 /* all ndevs are down */ 3071 for (i = 0; i < cpsw->port_num; i++) { 3072 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3073 struct am65_cpsw_slave_data *slave; 3074 3075 if (!sl_ndev) 3076 continue; 3077 3078 slave = am65_ndev_to_slave(sl_ndev); 3079 if (switch_en) 3080 slave->port_vlan = cpsw->default_vlan; 3081 else 3082 slave->port_vlan = 0; 3083 } 3084 3085 goto exit; 3086 } 3087 3088 
cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); 3089 /* clean up ALE table */ 3090 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1); 3091 cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT); 3092 3093 if (switch_en) { 3094 dev_info(cpsw->dev, "Enable switch mode\n"); 3095 3096 am65_cpsw_init_host_port_switch(cpsw); 3097 3098 for (i = 0; i < cpsw->port_num; i++) { 3099 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3100 struct am65_cpsw_slave_data *slave; 3101 struct am65_cpsw_port *port; 3102 3103 if (!sl_ndev) 3104 continue; 3105 3106 port = am65_ndev_to_port(sl_ndev); 3107 slave = am65_ndev_to_slave(sl_ndev); 3108 slave->port_vlan = cpsw->default_vlan; 3109 3110 if (netif_running(sl_ndev)) 3111 am65_cpsw_init_port_switch_ale(port); 3112 } 3113 3114 } else { 3115 dev_info(cpsw->dev, "Disable switch mode\n"); 3116 3117 am65_cpsw_init_host_port_emac(cpsw); 3118 3119 for (i = 0; i < cpsw->port_num; i++) { 3120 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3121 struct am65_cpsw_port *port; 3122 3123 if (!sl_ndev) 3124 continue; 3125 3126 port = am65_ndev_to_port(sl_ndev); 3127 port->slave.port_vlan = 0; 3128 if (netif_running(sl_ndev)) 3129 am65_cpsw_init_port_emac_ale(port); 3130 } 3131 } 3132 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0); 3133 exit: 3134 rtnl_unlock(); 3135 3136 return 0; 3137 } 3138 3139 static const struct devlink_param am65_cpsw_devlink_params[] = { 3140 DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode", 3141 DEVLINK_PARAM_TYPE_BOOL, 3142 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3143 am65_cpsw_dl_switch_mode_get, 3144 am65_cpsw_dl_switch_mode_set, NULL), 3145 }; 3146 3147 static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) 3148 { 3149 struct devlink_port_attrs attrs = {}; 3150 struct am65_cpsw_devlink *dl_priv; 3151 struct device *dev = common->dev; 3152 struct devlink_port *dl_port; 3153 struct am65_cpsw_port *port; 3154 int ret = 0; 3155 int i; 3156 3157 common->devlink = 3158 devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev); 3159 if (!common->devlink) 3160 return -ENOMEM; 3161 3162 dl_priv = devlink_priv(common->devlink); 3163 dl_priv->common = common; 3164 3165 /* Provide devlink hook to switch mode when multiple external ports 3166 * are present NUSS switchdev driver is enabled. 
3167 */ 3168 if (!AM65_CPSW_IS_CPSW2G(common) && 3169 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { 3170 ret = devlink_params_register(common->devlink, 3171 am65_cpsw_devlink_params, 3172 ARRAY_SIZE(am65_cpsw_devlink_params)); 3173 if (ret) { 3174 dev_err(dev, "devlink params reg fail ret:%d\n", ret); 3175 goto dl_unreg; 3176 } 3177 } 3178 3179 for (i = 1; i <= common->port_num; i++) { 3180 port = am65_common_get_port(common, i); 3181 dl_port = &port->devlink_port; 3182 3183 if (port->ndev) 3184 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; 3185 else 3186 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED; 3187 attrs.phys.port_number = port->port_id; 3188 attrs.switch_id.id_len = sizeof(resource_size_t); 3189 memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len); 3190 devlink_port_attrs_set(dl_port, &attrs); 3191 3192 ret = devlink_port_register(common->devlink, dl_port, port->port_id); 3193 if (ret) { 3194 dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n", 3195 port->port_id, ret); 3196 goto dl_port_unreg; 3197 } 3198 } 3199 devlink_register(common->devlink); 3200 return ret; 3201 3202 dl_port_unreg: 3203 for (i = i - 1; i >= 1; i--) { 3204 port = am65_common_get_port(common, i); 3205 dl_port = &port->devlink_port; 3206 3207 devlink_port_unregister(dl_port); 3208 } 3209 dl_unreg: 3210 devlink_free(common->devlink); 3211 return ret; 3212 } 3213 3214 static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) 3215 { 3216 struct devlink_port *dl_port; 3217 struct am65_cpsw_port *port; 3218 int i; 3219 3220 devlink_unregister(common->devlink); 3221 3222 for (i = 1; i <= common->port_num; i++) { 3223 port = am65_common_get_port(common, i); 3224 dl_port = &port->devlink_port; 3225 3226 devlink_port_unregister(dl_port); 3227 } 3228 3229 if (!AM65_CPSW_IS_CPSW2G(common) && 3230 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 3231 devlink_params_unregister(common->devlink, 3232 am65_cpsw_devlink_params, 3233 ARRAY_SIZE(am65_cpsw_devlink_params)); 3234 3235 devlink_free(common->devlink); 3236 } 3237 3238 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) 3239 { 3240 struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns; 3241 struct am65_cpsw_tx_chn *tx_chan = common->tx_chns; 3242 struct device *dev = common->dev; 3243 struct am65_cpsw_port *port; 3244 int ret = 0, i; 3245 3246 /* init tx channels */ 3247 ret = am65_cpsw_nuss_init_tx_chns(common); 3248 if (ret) 3249 return ret; 3250 ret = am65_cpsw_nuss_init_rx_chns(common); 3251 if (ret) 3252 return ret; 3253 3254 /* The DMA Channels are not guaranteed to be in a clean state. 3255 * Reset and disable them to ensure that they are back to the 3256 * clean state and ready to be used. 
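	 * The am65_cpsw_nuss_tx_cleanup()/am65_cpsw_nuss_rx_cleanup()
	 * callbacks passed below free the buffers of any descriptors that
	 * are still queued on the rings.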
3257 */ 3258 for (i = 0; i < common->tx_ch_num; i++) { 3259 k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i], 3260 am65_cpsw_nuss_tx_cleanup); 3261 k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn); 3262 } 3263 3264 for (i = 0; i < common->rx_ch_num_flows; i++) 3265 k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, 3266 rx_chan, 3267 am65_cpsw_nuss_rx_cleanup, !!i); 3268 3269 k3_udma_glue_disable_rx_chn(rx_chan->rx_chn); 3270 3271 ret = am65_cpsw_nuss_register_devlink(common); 3272 if (ret) 3273 return ret; 3274 3275 for (i = 0; i < common->port_num; i++) { 3276 port = &common->ports[i]; 3277 3278 if (!port->ndev) 3279 continue; 3280 3281 SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port); 3282 3283 ret = register_netdev(port->ndev); 3284 if (ret) { 3285 dev_err(dev, "error registering slave net device%i %d\n", 3286 i, ret); 3287 goto err_cleanup_ndev; 3288 } 3289 } 3290 3291 ret = am65_cpsw_register_notifiers(common); 3292 if (ret) 3293 goto err_cleanup_ndev; 3294 3295 /* can't auto unregister ndev using devm_add_action() due to 3296 * devres release sequence in DD core for DMA 3297 */ 3298 3299 return 0; 3300 3301 err_cleanup_ndev: 3302 am65_cpsw_nuss_cleanup_ndev(common); 3303 am65_cpsw_unregister_devlink(common); 3304 3305 return ret; 3306 } 3307 3308 int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common, 3309 int num_tx, int num_rx) 3310 { 3311 int ret; 3312 3313 am65_cpsw_nuss_remove_tx_chns(common); 3314 am65_cpsw_nuss_remove_rx_chns(common); 3315 3316 common->tx_ch_num = num_tx; 3317 common->rx_ch_num_flows = num_rx; 3318 ret = am65_cpsw_nuss_init_tx_chns(common); 3319 if (ret) 3320 return ret; 3321 3322 ret = am65_cpsw_nuss_init_rx_chns(common); 3323 3324 return ret; 3325 } 3326 3327 struct am65_cpsw_soc_pdata { 3328 u32 quirks_dis; 3329 }; 3330 3331 static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = { 3332 .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3333 }; 3334 3335 static const struct soc_device_attribute am65_cpsw_socinfo[] = { 3336 { .family = "AM65X", 3337 .revision = "SR2.0", 3338 .data = &am65x_soc_sr2_0 3339 }, 3340 {/* sentinel */} 3341 }; 3342 3343 static const struct am65_cpsw_pdata am65x_sr1_0 = { 3344 .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3345 .ale_dev_id = "am65x-cpsw2g", 3346 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3347 }; 3348 3349 static const struct am65_cpsw_pdata j721e_pdata = { 3350 .quirks = 0, 3351 .ale_dev_id = "am65x-cpsw2g", 3352 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3353 }; 3354 3355 static const struct am65_cpsw_pdata am64x_cpswxg_pdata = { 3356 .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ, 3357 .ale_dev_id = "am64-cpswxg", 3358 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3359 }; 3360 3361 static const struct am65_cpsw_pdata j7200_cpswxg_pdata = { 3362 .quirks = 0, 3363 .ale_dev_id = "am64-cpswxg", 3364 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3365 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | 3366 BIT(PHY_INTERFACE_MODE_USXGMII), 3367 }; 3368 3369 static const struct am65_cpsw_pdata j721e_cpswxg_pdata = { 3370 .quirks = 0, 3371 .ale_dev_id = "am64-cpswxg", 3372 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3373 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), 3374 }; 3375 3376 static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = { 3377 .quirks = 0, 3378 .ale_dev_id = "am64-cpswxg", 3379 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3380 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | 3381 
BIT(PHY_INTERFACE_MODE_USXGMII), 3382 }; 3383 3384 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { 3385 { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0}, 3386 { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata}, 3387 { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata}, 3388 { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata}, 3389 { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata}, 3390 { .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata}, 3391 { /* sentinel */ }, 3392 }; 3393 MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); 3394 3395 static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common) 3396 { 3397 const struct soc_device_attribute *soc; 3398 3399 soc = soc_device_match(am65_cpsw_socinfo); 3400 if (soc && soc->data) { 3401 const struct am65_cpsw_soc_pdata *socdata = soc->data; 3402 3403 /* disable quirks */ 3404 common->pdata.quirks &= ~socdata->quirks_dis; 3405 } 3406 } 3407 3408 static int am65_cpsw_nuss_probe(struct platform_device *pdev) 3409 { 3410 struct cpsw_ale_params ale_params = { 0 }; 3411 const struct of_device_id *of_id; 3412 struct device *dev = &pdev->dev; 3413 struct am65_cpsw_common *common; 3414 struct device_node *node; 3415 struct resource *res; 3416 struct clk *clk; 3417 int ale_entries; 3418 __be64 id_temp; 3419 int ret, i; 3420 3421 common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); 3422 if (!common) 3423 return -ENOMEM; 3424 common->dev = dev; 3425 3426 of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev); 3427 if (!of_id) 3428 return -EINVAL; 3429 common->pdata = *(const struct am65_cpsw_pdata *)of_id->data; 3430 3431 am65_cpsw_nuss_apply_socinfo(common); 3432 3433 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss"); 3434 common->ss_base = devm_ioremap_resource(&pdev->dev, res); 3435 if (IS_ERR(common->ss_base)) 3436 return PTR_ERR(common->ss_base); 3437 common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE; 3438 /* Use device's physical base address as switch id */ 3439 id_temp = cpu_to_be64(res->start); 3440 memcpy(common->switch_id, &id_temp, sizeof(res->start)); 3441 3442 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 3443 if (!node) 3444 return -ENOENT; 3445 common->port_num = of_get_child_count(node); 3446 of_node_put(node); 3447 if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS) 3448 return -ENOENT; 3449 3450 common->rx_flow_id_base = -1; 3451 init_completion(&common->tdown_complete); 3452 common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS; 3453 common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS; 3454 common->pf_p0_rx_ptype_rrobin = false; 3455 common->default_vlan = 1; 3456 3457 common->ports = devm_kcalloc(dev, common->port_num, 3458 sizeof(*common->ports), 3459 GFP_KERNEL); 3460 if (!common->ports) 3461 return -ENOMEM; 3462 3463 clk = devm_clk_get(dev, "fck"); 3464 if (IS_ERR(clk)) 3465 return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n"); 3466 common->bus_freq = clk_get_rate(clk); 3467 3468 pm_runtime_enable(dev); 3469 ret = pm_runtime_resume_and_get(dev); 3470 if (ret < 0) { 3471 pm_runtime_disable(dev); 3472 return ret; 3473 } 3474 3475 node = of_get_child_by_name(dev->of_node, "mdio"); 3476 if (!node) { 3477 dev_warn(dev, "MDIO node not found\n"); 3478 } else if (of_device_is_available(node)) { 3479 struct platform_device *mdio_pdev; 3480 3481 mdio_pdev = of_platform_device_create(node, NULL, dev); 3482 if (!mdio_pdev) { 3483 ret = 
-ENODEV; 3484 goto err_pm_clear; 3485 } 3486 3487 common->mdio_dev = &mdio_pdev->dev; 3488 } 3489 of_node_put(node); 3490 3491 am65_cpsw_nuss_get_ver(common); 3492 3493 ret = am65_cpsw_nuss_init_host_p(common); 3494 if (ret) 3495 goto err_of_clear; 3496 3497 ret = am65_cpsw_nuss_init_slave_ports(common); 3498 if (ret) 3499 goto err_of_clear; 3500 3501 /* init common data */ 3502 ale_params.dev = dev; 3503 ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; 3504 ale_params.ale_ports = common->port_num + 1; 3505 ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE; 3506 ale_params.dev_id = common->pdata.ale_dev_id; 3507 ale_params.bus_freq = common->bus_freq; 3508 3509 common->ale = cpsw_ale_create(&ale_params); 3510 if (IS_ERR(common->ale)) { 3511 dev_err(dev, "error initializing ale engine\n"); 3512 ret = PTR_ERR(common->ale); 3513 goto err_of_clear; 3514 } 3515 3516 ale_entries = common->ale->params.ale_entries; 3517 common->ale_context = devm_kzalloc(dev, 3518 ale_entries * ALE_ENTRY_WORDS * sizeof(u32), 3519 GFP_KERNEL); 3520 ret = am65_cpsw_init_cpts(common); 3521 if (ret) 3522 goto err_of_clear; 3523 3524 /* init ports */ 3525 for (i = 0; i < common->port_num; i++) 3526 am65_cpsw_nuss_slave_disable_unused(&common->ports[i]); 3527 3528 dev_set_drvdata(dev, common); 3529 3530 common->is_emac_mode = true; 3531 3532 ret = am65_cpsw_nuss_init_ndevs(common); 3533 if (ret) 3534 goto err_ndevs_clear; 3535 3536 ret = am65_cpsw_nuss_register_ndevs(common); 3537 if (ret) 3538 goto err_ndevs_clear; 3539 3540 pm_runtime_put(dev); 3541 return 0; 3542 3543 err_ndevs_clear: 3544 am65_cpsw_nuss_cleanup_ndev(common); 3545 am65_cpsw_nuss_phylink_cleanup(common); 3546 am65_cpts_release(common->cpts); 3547 err_of_clear: 3548 if (common->mdio_dev) 3549 of_platform_device_destroy(common->mdio_dev, NULL); 3550 err_pm_clear: 3551 pm_runtime_put_sync(dev); 3552 pm_runtime_disable(dev); 3553 return ret; 3554 } 3555 3556 static void am65_cpsw_nuss_remove(struct platform_device *pdev) 3557 { 3558 struct device *dev = &pdev->dev; 3559 struct am65_cpsw_common *common; 3560 int ret; 3561 3562 common = dev_get_drvdata(dev); 3563 3564 ret = pm_runtime_resume_and_get(&pdev->dev); 3565 if (ret < 0) { 3566 /* Note, if this error path is taken, we're leaking some 3567 * resources. 
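		 * None of the teardown below (notifiers, netdevs, devlink,
		 * CPTS, SERDES PHYs, MDIO device) runs in that case.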
3568 */ 3569 dev_err(&pdev->dev, "Failed to resume device (%pe)\n", 3570 ERR_PTR(ret)); 3571 return; 3572 } 3573 3574 am65_cpsw_unregister_notifiers(common); 3575 3576 /* must unregister ndevs here because DD release_driver routine calls 3577 * dma_deconfigure(dev) before devres_release_all(dev) 3578 */ 3579 am65_cpsw_nuss_cleanup_ndev(common); 3580 am65_cpsw_unregister_devlink(common); 3581 am65_cpsw_nuss_phylink_cleanup(common); 3582 am65_cpts_release(common->cpts); 3583 am65_cpsw_disable_serdes_phy(common); 3584 3585 if (common->mdio_dev) 3586 of_platform_device_destroy(common->mdio_dev, NULL); 3587 3588 pm_runtime_put_sync(&pdev->dev); 3589 pm_runtime_disable(&pdev->dev); 3590 } 3591 3592 static int am65_cpsw_nuss_suspend(struct device *dev) 3593 { 3594 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3595 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3596 struct am65_cpsw_port *port; 3597 struct net_device *ndev; 3598 int i, ret; 3599 3600 cpsw_ale_dump(common->ale, common->ale_context); 3601 host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3602 for (i = 0; i < common->port_num; i++) { 3603 port = &common->ports[i]; 3604 ndev = port->ndev; 3605 3606 if (!ndev) 3607 continue; 3608 3609 port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3610 netif_device_detach(ndev); 3611 if (netif_running(ndev)) { 3612 rtnl_lock(); 3613 ret = am65_cpsw_nuss_ndo_slave_stop(ndev); 3614 rtnl_unlock(); 3615 if (ret < 0) { 3616 netdev_err(ndev, "failed to stop: %d", ret); 3617 return ret; 3618 } 3619 } 3620 } 3621 3622 am65_cpts_suspend(common->cpts); 3623 3624 am65_cpsw_nuss_remove_rx_chns(common); 3625 am65_cpsw_nuss_remove_tx_chns(common); 3626 3627 return 0; 3628 } 3629 3630 static int am65_cpsw_nuss_resume(struct device *dev) 3631 { 3632 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3633 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3634 struct am65_cpsw_port *port; 3635 struct net_device *ndev; 3636 int i, ret; 3637 3638 ret = am65_cpsw_nuss_init_tx_chns(common); 3639 if (ret) 3640 return ret; 3641 ret = am65_cpsw_nuss_init_rx_chns(common); 3642 if (ret) 3643 return ret; 3644 3645 /* If RX IRQ was disabled before suspend, keep it disabled */ 3646 for (i = 0; i < common->rx_ch_num_flows; i++) { 3647 if (common->rx_chns.flows[i].irq_disabled) 3648 disable_irq(common->rx_chns.flows[i].irq); 3649 } 3650 3651 am65_cpts_resume(common->cpts); 3652 3653 for (i = 0; i < common->port_num; i++) { 3654 port = &common->ports[i]; 3655 ndev = port->ndev; 3656 3657 if (!ndev) 3658 continue; 3659 3660 if (netif_running(ndev)) { 3661 rtnl_lock(); 3662 ret = am65_cpsw_nuss_ndo_slave_open(ndev); 3663 rtnl_unlock(); 3664 if (ret < 0) { 3665 netdev_err(ndev, "failed to start: %d", ret); 3666 return ret; 3667 } 3668 } 3669 3670 netif_device_attach(ndev); 3671 writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3672 } 3673 3674 writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3675 cpsw_ale_restore(common->ale, common->ale_context); 3676 3677 return 0; 3678 } 3679 3680 static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = { 3681 SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume) 3682 }; 3683 3684 static struct platform_driver am65_cpsw_nuss_driver = { 3685 .driver = { 3686 .name = AM65_CPSW_DRV_NAME, 3687 .of_match_table = am65_cpsw_nuss_of_mtable, 3688 .pm = &am65_cpsw_nuss_dev_pm_ops, 3689 }, 3690 .probe = am65_cpsw_nuss_probe, 3691 .remove 
= am65_cpsw_nuss_remove, 3692 }; 3693 3694 module_platform_driver(am65_cpsw_nuss_driver); 3695 3696 MODULE_LICENSE("GPL v2"); 3697 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>"); 3698 MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver"); 3699
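/* Usage sketch (illustrative only; the exact devlink handle depends on the
 * platform device name): on multi-port variants built with
 * CONFIG_TI_K3_AM65_CPSW_SWITCHDEV, the "switch_mode" devlink parameter
 * registered by this driver toggles between EMAC and switch mode at
 * runtime, e.g.
 *
 *   devlink dev param set platform/<cpsw-node> name switch_mode \
 *           value true cmode runtime
 *
 * Member ports must be removed from any bridge before switch mode is
 * disabled again, as checked in am65_cpsw_dl_switch_mode_set().
 */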