// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 */

#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/rtnetlink.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
#include <net/switchdev.h>

#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"

#define AM65_CPSW_SS_BASE	0x0
#define AM65_CPSW_SGMII_BASE	0x100
#define AM65_CPSW_XGMII_BASE	0x2100
#define AM65_CPSW_CPSW_NU_BASE	0x20000
#define AM65_CPSW_NU_PORTS_BASE	0x1000
#define AM65_CPSW_NU_FRAM_BASE	0x12000
#define AM65_CPSW_NU_STATS_BASE	0x1a000
#define AM65_CPSW_NU_ALE_BASE	0x1e000
#define AM65_CPSW_NU_CPTS_BASE	0x1d000

#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200

#define AM65_CPSW_MAX_PORTS	8

#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
#define AM65_CPSW_MAX_PACKET_SIZE	2024

#define AM65_CPSW_REG_CTL		0x004
#define AM65_CPSW_REG_STAT_PORT_EN	0x014
#define AM65_CPSW_REG_PTYPE		0x018

#define AM65_CPSW_P0_REG_CTL			0x004
#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008

#define AM65_CPSW_PORT_REG_PRI_CTL	0x01c
#define AM65_CPSW_PORT_REG_RX_PRI_MAP	0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN	0x024

#define AM65_CPSW_PORTN_REG_SA_L		0x308
#define AM65_CPSW_PORTN_REG_SA_H		0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL		0x310
#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2	0x31C

#define AM65_CPSW_SGMII_CONTROL_REG		0x010
#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG	0x018
#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE	BIT(0)

#define AM65_CPSW_CTL_VLAN_AWARE	BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE		BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE	BIT(13)
#define AM65_CPSW_CTL_P0_RX_PAD		BIT(14)

/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)
#define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN	BIT(16)

/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)

/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16

#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN		BIT(0)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN	BIT(1)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN	BIT(2)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN		BIT(3)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN		BIT(9)

/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO	BIT(23)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS	(BIT(0) | BIT(1) | BIT(2) | BIT(3))

#define AM65_CPSW_TS_SEQ_ID_OFFSET	(0x1e)

#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)

#define AM65_CPSW_TS_RX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)

#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
/* Number of TX/RX descriptors */
#define AM65_CPSW_MAX_TX_DESC	500
#define AM65_CPSW_MAX_RX_DESC	500

#define AM65_CPSW_NAV_PS_DATA_SIZE	16
#define AM65_CPSW_NAV_SW_DATA_SIZE	16

#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
			 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define AM65_CPSW_DEFAULT_TX_CHNS	8

/* CPPI streaming packet interface */
#define AM65_CPSW_CPPI_TX_FLOW_ID	0x3FFF
#define AM65_CPSW_CPPI_TX_PKT_TYPE	0x7

/* XDP */
#define AM65_CPSW_XDP_CONSUMED	2
#define AM65_CPSW_XDP_REDIRECT	1
#define AM65_CPSW_XDP_PASS	0

/* Include headroom compatible with both skb and xdpf */
#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
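
/* am65_cpsw_port_set_sl_mac() below splits the station address across the
 * two MAC SL source-address registers: SA_H carries address bytes 0-3 and
 * SA_L carries bytes 4-5.
 */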

static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				      const u8 *dev_addr)
{
	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);

	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}

static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
	cpsw_sl_reset(port->slave.mac_sl, 100);
	/* Max length register has to be restored after MAC SL reset */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
}

static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
{
	common->nuss_ver = readl(common->ss_base);
	common->cpsw_ver = readl(common->cpsw_base);
	dev_info(common->dev,
		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
		 common->nuss_ver,
		 common->cpsw_ver,
		 common->port_num + 1,
		 common->pdata.quirks);
}

static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
					    __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask, unreg_mcast = 0;
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	if (!vid)
		unreg_mcast = port_mask;
	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
				       unreg_mcast, port_mask, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
					     __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(common->ale, vid,
				BIT(port->port_id) | ALE_PORT_HOST);

	pm_runtime_put(common->dev);
	return ret;
}

static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
					bool promisc)
{
	struct am65_cpsw_common *common = port->common;

	if (promisc && !common->is_emac_mode) {
		dev_dbg(common->dev, "promisc mode requested in switch mode");
		return;
	}

	if (promisc) {
		/* Enable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 1);
		dev_dbg(common->dev, "promisc enabled\n");
	} else {
		/* Disable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 0);
		dev_dbg(common->dev, "promisc disabled\n");
	}
}

static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	bool promisc;

	promisc = !!(ndev->flags & IFF_PROMISC);
	am65_cpsw_slave_set_promisc(port, promisc);

	if (promisc)
		return;

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(common->ale,
			      ndev->flags & IFF_ALLMULTI, port->port_id);

	port_mask = ALE_PORT_HOST;
	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(common->ale, port_mask, -1);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_ale_add_mcast(common->ale, ha->addr,
					   port_mask, 0, 0, 0);
		}
	}
}

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned long trans_start;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];
	trans_start = READ_ONCE(netif_txq->trans_start);

	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
		   txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   jiffies_to_msecs(jiffies - trans_start),
		   netdev_queue_dql_avail(netif_txq),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));

	if (netif_tx_queue_stopped(netif_txq)) {
		/* try to recover if stopped by us */
		txq_trans_update(netif_txq);
		netif_tx_wake_queue(netif_txq);
	}
}

static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
				  struct page *page)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void *swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev,
				 page_address(page) + AM65_CPSW_HEADROOM,
				 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		dev_err(dev, "Failed to map rx buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*((void **)swdata) = page_address(page);

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
}

void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	u32 val, pri_map;

	/* P0 set Receive Priority Type */
	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);

	if (common->pf_p0_rx_ptype_rrobin) {
		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* Enet Port FIFOs work in fixed priority mode only, so
		 * reset P0_Rx_Pri_Map so all packets go to Enet FIFO 0
		 */
		pri_map = 0x0;
	} else {
		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* restore P0_Rx_Pri_Map */
		pri_map = 0x76543210;
	}

	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}

static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);

static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct xdp_rxq_info *rxq;
	int i;

	for (i = 0; i < common->port_num; i++) {
		if (!common->ports[i].ndev)
			continue;

		rxq = &common->ports[i].xdp_rxq;

		if (xdp_rxq_info_is_reg(rxq))
			xdp_rxq_info_unreg(rxq);
	}

	if (rx_chn->page_pool) {
		page_pool_destroy(rx_chn->page_pool);
		rx_chn->page_pool = NULL;
	}
}
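
/* A single page_pool backs the whole RX channel: each port's xdp_rxq_info
 * is registered against the same pool, and am65_cpsw_destroy_xdp_rxqs()
 * above unwinds both the rxq registrations and the pool on failure.
 */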

static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP,
		.order = 0,
		.pool_size = AM65_CPSW_MAX_RX_DESC,
		.nid = dev_to_node(common->dev),
		.dev = common->dev,
		.dma_dir = DMA_BIDIRECTIONAL,
		.napi = &common->napi_rx,
	};
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int i, ret;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	rx_chn->page_pool = pool;

	for (i = 0; i < common->port_num; i++) {
		if (!common->ports[i].ndev)
			continue;

		rxq = &common->ports[i].xdp_rxq;

		ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
		if (ret)
			goto err;

		ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
		if (ret)
			goto err;
	}

	return 0;

err:
	am65_cpsw_destroy_xdp_rxqs(common);
	return ret;
}

static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
				   void *desc,
				   unsigned char dsize_log2)
{
	void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);

	return (desc - pool_addr) >> dsize_log2;
}

static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
					struct cppi5_host_desc_t *desc,
					enum am65_cpsw_tx_buf_type buf_type)
{
	int desc_idx;

	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
					   tx_chn->dsize_log2);
	k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
					(void *)buf_type);
}

static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
							  dma_addr_t desc_dma)
{
	struct cppi5_host_desc_t *desc_tx;
	int desc_idx;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
					   tx_chn->dsize_log2);

	return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
								       desc_idx);
}

static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
				      struct page *page,
				      bool allow_direct,
				      int desc_idx)
{
	page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
	rx_chn->pages[desc_idx] = NULL;
}

static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void *page_addr;
	void **swdata;
	int desc_idx;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	page_addr = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
					   rx_chn->dsize_log2);
	am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
}

static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
					   struct net_device *ndev,
					   unsigned int len)
{
	struct sk_buff *skb;

	len += AM65_CPSW_HEADROOM;

	skb = build_skb(page_addr, len);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, AM65_CPSW_HEADROOM);
	skb->dev = ndev;

	return skb;
}
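
/* The shared switch state below is reference counted: the first port being
 * opened programs the hardware in am65_cpsw_nuss_common_open() (usage_count
 * still 0), and only the last port being closed tears it down again in
 * am65_cpsw_nuss_common_stop() (usage_count == 1).
 */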

static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int port_idx, i, ret, tx;
	u32 val, port_mask;
	struct page *page;

	if (common->usage_count)
		return 0;

	/* Control register */
	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_REG_CTL);
	/* Max length register */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
	/* set base flow_id */
	writel(common->rx_flow_id_base,
	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
	       host_p->port_base + AM65_CPSW_P0_REG_CTL);

	am65_cpsw_nuss_set_p0_ptype(common);

	/* enable statistics */
	val = BIT(HOST_PORT_NUM);
	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
		struct am65_cpsw_port *port = &common->ports[port_idx];

		if (!port->disabled)
			val |= BIT(port->port_id);
	}
	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);

	cpsw_ale_start(common->ale);

	/* limit to one RX flow only */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ID, 0);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ENABLE, 1);
	/* switch to vlan unaware mode */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* default vlan cfg: create mask based on enabled ports */
	port_mask = GENMASK(common->port_num, 0) &
		    ~common->disabled_ports_mask;

	cpsw_ale_add_vlan(common->ale, 0, port_mask,
			  port_mask, port_mask,
			  port_mask & ~ALE_PORT_HOST);

	if (common->is_emac_mode)
		am65_cpsw_init_host_port_emac(common);
	else
		am65_cpsw_init_host_port_switch(common);

	am65_cpsw_qos_tx_p0_rate_init(common);

	ret = am65_cpsw_create_xdp_rxqs(common);
	if (ret) {
		dev_err(common->dev, "Failed to create XDP rx queues\n");
		return ret;
	}

	for (i = 0; i < rx_chn->descs_num; i++) {
		page = page_pool_dev_alloc_pages(rx_chn->page_pool);
		if (!page) {
			ret = -ENOMEM;
			if (i)
				goto fail_rx;

			return ret;
		}
		rx_chn->pages[i] = page;

		ret = am65_cpsw_nuss_rx_push(common, page);
		if (ret < 0) {
			dev_err(common->dev,
				"cannot submit page to channel rx: %d\n",
				ret);
			am65_cpsw_put_page(rx_chn, page, false, i);
			if (i)
				goto fail_rx;

			return ret;
		}
	}

	ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
	if (ret) {
		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
		goto fail_rx;
	}

	for (tx = 0; tx < common->tx_ch_num; tx++) {
		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
		if (ret) {
			dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
				tx, ret);
			tx--;
			goto fail_tx;
		}
		napi_enable(&tx_chn[tx].napi_tx);
	}

	napi_enable(&common->napi_rx);
	if (common->rx_irq_disabled) {
		common->rx_irq_disabled = false;
		enable_irq(rx_chn->irq);
	}

	dev_dbg(common->dev, "cpsw_nuss started\n");
	return 0;

fail_tx:
	while (tx >= 0) {
		napi_disable(&tx_chn[tx].napi_tx);
		k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
		tx--;
	}

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

fail_rx:
	k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
				  am65_cpsw_nuss_rx_cleanup, 0);
	return ret;
}

static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int i;

	if (common->usage_count != 1)
		return 0;

	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	/* shutdown tx channels */
	atomic_set(&common->tdown_cnt, common->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	reinit_completion(&common->tdown_complete);

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);

	i = wait_for_completion_timeout(&common->tdown_complete,
					msecs_to_jiffies(1000));
	if (!i)
		dev_err(common->dev, "tx timeout\n");
	for (i = 0; i < common->tx_ch_num; i++) {
		napi_disable(&tx_chn[i].napi_tx);
		hrtimer_cancel(&tx_chn[i].tx_hrtimer);
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
	}

	reinit_completion(&common->tdown_complete);
	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);

	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
		if (!i)
			dev_err(common->dev, "rx teardown timeout\n");
	}

	napi_disable(&common->napi_rx);
	hrtimer_cancel(&common->rx_hrtimer);

	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
					  am65_cpsw_nuss_rx_cleanup, !!i);

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

	cpsw_ale_stop(common->ale);

	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	for (i = 0; i < rx_chn->descs_num; i++) {
		if (rx_chn->pages[i])
			am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
	}
	am65_cpsw_destroy_xdp_rxqs(common);

	dev_dbg(common->dev, "cpsw_nuss stopped\n");
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	phylink_stop(port->slave.phylink);

	netif_tx_stop_all_queues(ndev);

	phylink_disconnect_phy(port->slave.phylink);

	ret = am65_cpsw_nuss_common_stop(common);
	if (ret)
		return ret;

	common->usage_count--;
	pm_runtime_put(common->dev);
	return 0;
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct am65_cpsw_port *port = arg;

	if (!vdev)
		return 0;

	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
}

static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret, i;
	u32 reg;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	/* Idle MAC port */
	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
	cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);

	/* soft reset MAC */
	cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
	mdelay(1);
	reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
	if (reg) {
		dev_err(common->dev, "soft RESET didn't complete\n");
		ret = -ETIMEDOUT;
		goto runtime_put;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
	if (ret) {
		dev_err(common->dev, "cannot set real number of tx queues\n");
		goto runtime_put;
	}

	ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
	if (ret) {
		dev_err(common->dev, "cannot set real number of rx queues\n");
		goto runtime_put;
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);

		netdev_tx_reset_queue(txq);
		txq->tx_maxrate = common->tx_chns[i].rate_mbps;
	}

	ret = am65_cpsw_nuss_common_open(common);
	if (ret)
		goto runtime_put;

	common->usage_count++;

	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);

	if (common->is_emac_mode)
		am65_cpsw_init_port_emac_ale(port);
	else
		am65_cpsw_init_port_switch_ale(port);

	/* mac_sl should be configured via phy-link interface */
	am65_cpsw_sl_ctl_reset(port);

	ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
	if (ret)
		goto error_cleanup;

	/* restore vlan configurations */
	vlan_for_each(ndev, cpsw_restore_vlans, port);

	phylink_start(port->slave.phylink);

	return 0;

error_cleanup:
	am65_cpsw_nuss_ndo_slave_stop(ndev);
	return ret;

runtime_put:
	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
				  struct am65_cpsw_tx_chn *tx_chn,
				  struct xdp_frame *xdpf,
				  enum am65_cpsw_tx_buf_type buf_type)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct cppi5_host_desc_t *host_desc;
	struct netdev_queue *netif_txq;
	dma_addr_t dma_desc, dma_buf;
	u32 pkt_len = xdpf->len;
	void **swdata;
	int ret;

	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (unlikely(!host_desc)) {
		ndev->stats.tx_dropped++;
		return -ENOMEM;
	}

	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);

	dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
				 pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
		ndev->stats.tx_dropped++;
		ret = -ENOMEM;
		goto pool_free;
	}

	cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
	cppi5_hdesc_set_pktlen(host_desc, pkt_len);
	cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
	cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);

	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
	cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);

	swdata = cppi5_hdesc_get_swdata(host_desc);
	*(swdata) = xdpf;

	/* Report BQL before sending the packet */
	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
	netdev_tx_sent_queue(netif_txq, pkt_len);

	dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
					       dma_desc);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
					       dma_desc);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		/* Inform BQL */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		goto dma_unmap;
	}

	return 0;

dma_unmap:
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
	dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
pool_free:
	k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
	return ret;
}
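
/* am65_cpsw_run_xdp() return values: AM65_CPSW_XDP_PASS hands the buffer on
 * to the normal skb path, AM65_CPSW_XDP_CONSUMED means the page was kept or
 * recycled by the driver (XDP_TX or drop), and AM65_CPSW_XDP_REDIRECT defers
 * the final xdp_do_flush() to the end of the RX NAPI poll.
 */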

static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
			     struct am65_cpsw_port *port,
			     struct xdp_buff *xdp,
			     int desc_idx, int cpu, int *len)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct net_device *ndev = port->ndev;
	int ret = AM65_CPSW_XDP_CONSUMED;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	struct page *page;
	u32 act;

	prog = READ_ONCE(port->xdp_prog);
	if (!prog)
		return AM65_CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = AM65_CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);

		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			break;

		__netif_tx_lock(netif_txq, cpu);
		ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
		__netif_tx_unlock(netif_txq);
		if (ret)
			break;

		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		ret = AM65_CPSW_XDP_CONSUMED;
		goto out;
	case XDP_REDIRECT:
		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
			break;

		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		ret = AM65_CPSW_XDP_REDIRECT;
		goto out;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		ndev->stats.rx_dropped++;
	}

	page = virt_to_head_page(xdp->data);
	am65_cpsw_put_page(rx_chn, page, true, desc_idx);

out:
	return ret;
}

/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)

static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
{
	/* HW can verify IPv4/IPv6 TCP/UDP packet checksums.
	 * The csum information is provided in the psdata[2] word:
	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
	 * bits - indicate IPv4/IPv6 packet
	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
	 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
	 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return;

	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
	    !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
		/* csum for fragmented packets is unsupported */
		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
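
/* Illustrative decode (hypothetical value): csum_info == 0x0014FFFF has
 * AM65_CPSW_RX_PSD_IPV4_VALID and AM65_CPSW_RX_PSD_IS_TCP set, CSUM_ADD of
 * 0xFFFF and neither CSUM_ERR nor IS_FRAGMENT, so am65_cpsw_nuss_rx_csum()
 * marks the skb CHECKSUM_UNNECESSARY.
 */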

static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
				     u32 flow_idx, int cpu)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct am65_cpsw_ndev_stats *stats;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct page *page, *new_page;
	dma_addr_t desc_dma, buf_dma;
	struct am65_cpsw_port *port;
	int headroom, desc_idx, ret;
	struct net_device *ndev;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	void *page_addr;
	void **swdata;
	u32 *psdata;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			dev_err(dev, "RX: pop chn fail %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) {
		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
		if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
			complete(&common->tdown_complete);
		return 0;
	}

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
		__func__, flow_idx, &desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	page_addr = *swdata;
	page = virt_to_page(page_addr);
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
	port = am65_common_get_port(common, port_id);
	ndev = port->ndev;
	psdata = cppi5_hdesc_get_psdata(desc_rx);
	csum_info = psdata[2];
	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
					   rx_chn->dsize_log2);

	skb = am65_cpsw_build_skb(page_addr, ndev,
				  AM65_CPSW_MAX_PACKET_SIZE);
	if (unlikely(!skb)) {
		new_page = page;
		goto requeue;
	}

	if (port->xdp_prog) {
		xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);

		xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
				 pkt_len, false);

		ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
					cpu, &pkt_len);
		if (ret != AM65_CPSW_XDP_PASS)
			return ret;

		/* Compute additional headroom to be reserved */
		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
		skb_reserve(skb, headroom);
	}

	ndev_priv = netdev_priv(ndev);
	am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
	skb_put(skb, pkt_len);
	if (port->rx_ts_enabled)
		am65_cpts_rx_timestamp(common->cpts, skb);
	skb_mark_for_recycle(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	am65_cpsw_nuss_rx_csum(skb, csum_info);
	napi_gro_receive(&common->napi_rx, skb);

	stats = this_cpu_ptr(ndev_priv->stats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += pkt_len;
	u64_stats_update_end(&stats->syncp);

	new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
	if (unlikely(!new_page))
		return -ENOMEM;
	rx_chn->pages[desc_idx] = new_page;

	if (netif_dormant(ndev)) {
		am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
		ndev->stats.rx_dropped++;
		return 0;
	}

requeue:
	ret = am65_cpsw_nuss_rx_push(common, new_page);
	if (WARN_ON(ret < 0)) {
		am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
{
	struct am65_cpsw_common *common =
			container_of(timer, struct am65_cpsw_common, rx_hrtimer);

	enable_irq(common->rx_chns.irq);
	return HRTIMER_NORESTART;
}

static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
	int flow = AM65_CPSW_MAX_RX_FLOWS;
	int cpu = smp_processor_id();
	bool xdp_redirect = false;
	int cur_budget, ret;
	int num_rx = 0;

	/* process every flow */
	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
			if (ret) {
				if (ret == AM65_CPSW_XDP_REDIRECT)
					xdp_redirect = true;
				break;
			}
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (xdp_redirect)
		xdp_do_flush();

	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (common->rx_irq_disabled) {
			common->rx_irq_disabled = false;
			if (unlikely(common->rx_pace_timeout)) {
				hrtimer_start(&common->rx_hrtimer,
					      ns_to_ktime(common->rx_pace_timeout),
					      HRTIMER_MODE_REL_PINNED);
			} else {
				enable_irq(common->rx_chns.irq);
			}
		}
	}

	return num_rx;
}

static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
				   dma_addr_t desc_dma)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct am65_cpsw_ndev_stats *stats;
	struct cppi5_host_desc_t *desc_tx;
	struct net_device *ndev;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
					     desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	ndev = skb->dev;

	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);

	ndev_priv = netdev_priv(ndev);
	stats = this_cpu_ptr(ndev_priv->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	return skb;
}

static struct xdp_frame *
am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
				   struct am65_cpsw_tx_chn *tx_chn,
				   dma_addr_t desc_dma,
				   struct net_device **ndev)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct am65_cpsw_ndev_stats *stats;
	struct cppi5_host_desc_t *desc_tx;
	struct am65_cpsw_port *port;
	struct xdp_frame *xdpf;
	u32 port_id = 0;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	xdpf = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	port = am65_common_get_port(common, port_id);
	*ndev = port->ndev;

	ndev_priv = netdev_priv(*ndev);
	stats = this_cpu_ptr(ndev_priv->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += xdpf->len;
	u64_stats_update_end(&stats->syncp);

	return xdpf;
}

static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
				   struct netdev_queue *netif_txq)
{
	if (netif_tx_queue_stopped(netif_txq)) {
		/* Check whether the queue is stopped due to stalled
		 * tx dma, if the queue is stopped then wake the queue
		 * as we have free desc for tx
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);

		__netif_tx_unlock(netif_txq);
	}
}
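
/* Two TX completion variants follow: am65_cpsw_nuss_tx_compl_packets()
 * takes the channel lock around each descriptor pop and reports BQL/queue
 * wake-up inside the loop, while the CPSW2G variant below runs lockless on
 * the single-port instance and batches that handling after the loop.
 */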

static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
					   int chn, unsigned int budget, bool *tdown)
{
	enum am65_cpsw_tx_buf_type buf_type;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		spin_lock(&tx_chn->lock);
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		spin_unlock(&tx_chn->lock);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			*tdown = true;
			break;
		}

		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
			ndev = skb->dev;
			total_bytes = skb->len;
			napi_consume_skb(skb, budget);
		} else {
			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
								  desc_dma, &ndev);
			total_bytes = xdpf->len;
			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(xdpf);
			else
				xdp_return_frame(xdpf);
		}
		num_tx++;

		netif_txq = netdev_get_tx_queue(ndev, chn);

		netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
	}

	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
					      int chn, unsigned int budget, bool *tdown)
{
	enum am65_cpsw_tx_buf_type buf_type;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			*tdown = true;
			break;
		}

		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
			ndev = skb->dev;
			total_bytes += skb->len;
			napi_consume_skb(skb, budget);
		} else {
			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
								  desc_dma, &ndev);
			total_bytes += xdpf->len;
			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(xdpf);
			else
				xdp_return_frame(xdpf);
		}
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);

	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);

	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
	struct am65_cpsw_tx_chn *tx_chns =
			container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
	bool tdown = false;
	int num_tx;

	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
							    budget, &tdown);
	else
		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
							 tx_chn->id, budget, &tdown);

	if (num_tx >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx)) {
		if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return 0;
}

static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_common *common = dev_id;

	common->rx_irq_disabled = true;
	disable_irq_nosync(irq);
	napi_schedule(&common->napi_rx);

	return IRQ_HANDLED;
}

static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
						 struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	dma_addr_t desc_dma, buf_dma;
	int ret, q_idx, i;
	void **swdata;
	u32 *psdata;
	u32 pkt_len;

	/* padding enabled in hw */
	pkt_len = skb_headlen(skb);

	/* SKB TX timestamp */
	if (port->tx_ts_enabled)
		am65_cpts_prep_tx_timestamp(common->cpts, skb);

	q_idx = skb_get_queue_mapping(skb);
	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);

	tx_chn = &common->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
		dev_err(dev, "Failed to map tx skb buffer\n");
		ndev->stats.tx_errors++;
		goto err_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		dev_dbg(dev, "Failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
				 DMA_TO_DEVICE);
		goto busy_stop_q;
	}

	am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
				    AM65_CPSW_TX_BUF_TYPE_SKB);

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
	cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);

	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*(swdata) = skb;
	psdata = cppi5_hdesc_get_psdata(first_desc);

	/* HW csum offload if enabled */
	psdata[2] = 0;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int cs_start, cs_offset;

		cs_start = skb_transport_offset(skb);
		cs_offset = cs_start + skb->csum_offset;
		/* HW numbers bytes starting from 1 */
		psdata[2] = ((cs_offset + 1) << 24) |
			    ((cs_start + 1) << 16) | (skb->len - cs_start);
		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
	}
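
	/* Worked example (hypothetical frame): a CHECKSUM_PARTIAL TCP/IPv4 skb
	 * with skb->len = 100, cs_start = 34 and skb->csum_offset = 16 is
	 * encoded above as psdata[2] = (51 << 24) | (35 << 16) | 66 =
	 * 0x33230042.
	 */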

	if (!skb_is_nonlinear(skb))
		goto done_tx;

	dev_dbg(dev, "fragmented SKB\n");

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			dev_err(dev, "Failed to allocate descriptor\n");
			goto busy_free_descs;
		}

		am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
					    AM65_CPSW_TX_BUF_TYPE_SKB);

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
			dev_err(dev, "Failed to map tx skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ndev->stats.tx_errors++;
			goto err_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON(pkt_len != skb->len);

done_tx:
	skb_tx_timestamp(skb);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		dev_err(dev, "can't push desc %d\n", ret);
		/* inform bql */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		goto err_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();
		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);

		/* re-check for smp */
		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS) {
			netif_tx_wake_queue(netif_txq);
			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
		}
	}

	return NETDEV_TX_OK;

err_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

busy_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
						    void *addr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct sockaddr *sockaddr = (struct sockaddr *)addr;
	int ret;

	ret = eth_prepare_mac_addr_change(ndev, addr);
	if (ret < 0)
		return ret;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, 0, 0);
	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
			   HOST_PORT_NUM, ALE_SECURE, 0);

	am65_cpsw_port_set_sl_mac(port, addr);
	eth_commit_mac_addr_change(ndev, sockaddr);

	pm_runtime_put(common->dev);

	return 0;
}

static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->rx_ts_enabled = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		port->rx_ts_enabled = true;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		return -EOPNOTSUPP;
	default:
		return -ERANGE;
	}

	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);

	/* cfg TX timestamp */
	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;

	ts_vlan_ltype = ETH_P_8021Q;

	ts_ctrl_ltype2 = ETH_P_1588 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;

	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;

	if (port->tx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;

	if (port->rx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;

	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
	writel(ts_vlan_ltype, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
	writel(ts_ctrl_ltype2, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = port->tx_ts_enabled ?
		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = port->rx_ts_enabled ?
			HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
					  struct ifreq *req, int cmd)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_set(ndev, req);
	case SIOCGHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_get(ndev, req);
	}

	return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}

static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
					 struct rtnl_link_stats64 *stats)
{
	struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct am65_cpsw_ndev_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
				    struct bpf_prog *prog)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	bool running = netif_running(ndev);
	struct bpf_prog *old_prog;

	if (running)
		am65_cpsw_nuss_ndo_slave_stop(ndev);

	old_prog = xchg(&port->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (running)
		return am65_cpsw_nuss_ndo_slave_open(ndev);

	return 0;
}
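
/* Swapping the XDP program on a running interface goes through a full
 * stop/open cycle above so the datapath restarts cleanly with the new
 * program.
 */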
1906 } 1907 1908 static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) 1909 { 1910 switch (bpf->command) { 1911 case XDP_SETUP_PROG: 1912 return am65_cpsw_xdp_prog_setup(ndev, bpf->prog); 1913 default: 1914 return -EINVAL; 1915 } 1916 } 1917 1918 static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, 1919 struct xdp_frame **frames, u32 flags) 1920 { 1921 struct am65_cpsw_tx_chn *tx_chn; 1922 struct netdev_queue *netif_txq; 1923 int cpu = smp_processor_id(); 1924 int i, nxmit = 0; 1925 1926 tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES]; 1927 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); 1928 1929 __netif_tx_lock(netif_txq, cpu); 1930 for (i = 0; i < n; i++) { 1931 if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i], 1932 AM65_CPSW_TX_BUF_TYPE_XDP_NDO)) 1933 break; 1934 nxmit++; 1935 } 1936 __netif_tx_unlock(netif_txq); 1937 1938 return nxmit; 1939 } 1940 1941 static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { 1942 .ndo_open = am65_cpsw_nuss_ndo_slave_open, 1943 .ndo_stop = am65_cpsw_nuss_ndo_slave_stop, 1944 .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit, 1945 .ndo_set_rx_mode = am65_cpsw_nuss_ndo_slave_set_rx_mode, 1946 .ndo_get_stats64 = am65_cpsw_nuss_ndo_get_stats, 1947 .ndo_validate_addr = eth_validate_addr, 1948 .ndo_set_mac_address = am65_cpsw_nuss_ndo_slave_set_mac_address, 1949 .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout, 1950 .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid, 1951 .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid, 1952 .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, 1953 .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, 1954 .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate, 1955 .ndo_bpf = am65_cpsw_ndo_bpf, 1956 .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit, 1957 }; 1958 1959 static void am65_cpsw_disable_phy(struct phy *phy) 1960 { 1961 phy_power_off(phy); 1962 phy_exit(phy); 1963 } 1964 1965 static int am65_cpsw_enable_phy(struct phy *phy) 1966 { 1967 int ret; 1968 1969 ret = phy_init(phy); 1970 if (ret < 0) 1971 return ret; 1972 1973 ret = phy_power_on(phy); 1974 if (ret < 0) { 1975 phy_exit(phy); 1976 return ret; 1977 } 1978 1979 return 0; 1980 } 1981 1982 static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common) 1983 { 1984 struct am65_cpsw_port *port; 1985 struct phy *phy; 1986 int i; 1987 1988 for (i = 0; i < common->port_num; i++) { 1989 port = &common->ports[i]; 1990 phy = port->slave.serdes_phy; 1991 if (phy) 1992 am65_cpsw_disable_phy(phy); 1993 } 1994 } 1995 1996 static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np, 1997 struct am65_cpsw_port *port) 1998 { 1999 const char *name = "serdes"; 2000 struct phy *phy; 2001 int ret; 2002 2003 phy = devm_of_phy_optional_get(dev, port_np, name); 2004 if (IS_ERR_OR_NULL(phy)) 2005 return PTR_ERR_OR_ZERO(phy); 2006 2007 /* Serdes PHY exists. Store it. 
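	 * The reference is kept so that am65_cpsw_disable_serdes_phy() can power
	 * it back off when the driver is removed.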
	 */
	port->slave.serdes_phy = phy;

	ret = am65_cpsw_enable_phy(phy);
	if (ret < 0)
		goto err_phy;

	return 0;

err_phy:
	devm_phy_put(dev, phy);
	return ret;
}

static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
				      const struct phylink_link_state *state)
{
	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
							  phylink_config);
	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
	struct am65_cpsw_common *common = port->common;

	if (common->pdata.extra_modes & BIT(state->interface)) {
		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
			writel(ADVERTISE_SGMII,
			       port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
			cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
		} else {
			cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
		}

		if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
			cpsw_sl_ctl_set(port->slave.mac_sl,
					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
		} else {
			cpsw_sl_ctl_clr(port->slave.mac_sl,
					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
		}

		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
		       port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
	}
}

static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
					 phy_interface_t interface)
{
	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
							  phylink_config);
	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
	struct am65_cpsw_common *common = port->common;
	struct net_device *ndev = port->ndev;
	u32 mac_control;
	int tmo;

	/* disable forwarding */
	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);

	tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
	dev_dbg(common->dev, "down mac_sl %08x tmo %d\n",
		cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);

	/* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */
	mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A |
		      CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN;
	/* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */
	if (phy_interface_mode_is_rgmii(interface))
		mac_control |= CPSW_SL_CTL_EXT_EN;
	/* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */
	cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control);

	am65_cpsw_qos_link_down(ndev);
	netif_tx_stop_all_queues(ndev);
}

static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
				       unsigned int mode, phy_interface_t interface, int speed,
				       int duplex, bool tx_pause, bool rx_pause)
{
	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
							  phylink_config);
	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
	struct am65_cpsw_common *common = port->common;
	u32 mac_control = CPSW_SL_CTL_GMII_EN;
	struct net_device *ndev = port->ndev;

	/* Bring the port out of idle state */
	cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);

	if (speed ==
SPEED_1000) 2099 mac_control |= CPSW_SL_CTL_GIG; 2100 /* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */ 2101 if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface)) 2102 /* Can be used with in band mode only */ 2103 mac_control |= CPSW_SL_CTL_EXT_EN; 2104 if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII) 2105 mac_control |= CPSW_SL_CTL_IFCTL_A; 2106 if (duplex) 2107 mac_control |= CPSW_SL_CTL_FULLDUPLEX; 2108 2109 /* rx_pause/tx_pause */ 2110 if (rx_pause) 2111 mac_control |= CPSW_SL_CTL_TX_FLOW_EN; 2112 2113 if (tx_pause) 2114 mac_control |= CPSW_SL_CTL_RX_FLOW_EN; 2115 2116 cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); 2117 2118 /* enable forwarding */ 2119 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 2120 2121 am65_cpsw_qos_link_up(ndev, speed); 2122 netif_tx_wake_all_queues(ndev); 2123 } 2124 2125 static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = { 2126 .mac_config = am65_cpsw_nuss_mac_config, 2127 .mac_link_down = am65_cpsw_nuss_mac_link_down, 2128 .mac_link_up = am65_cpsw_nuss_mac_link_up, 2129 }; 2130 2131 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port) 2132 { 2133 struct am65_cpsw_common *common = port->common; 2134 2135 if (!port->disabled) 2136 return; 2137 2138 cpsw_ale_control_set(common->ale, port->port_id, 2139 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 2140 2141 cpsw_sl_reset(port->slave.mac_sl, 100); 2142 cpsw_sl_ctl_reset(port->slave.mac_sl); 2143 } 2144 2145 static void am65_cpsw_nuss_free_tx_chns(void *data) 2146 { 2147 struct am65_cpsw_common *common = data; 2148 int i; 2149 2150 for (i = 0; i < common->tx_ch_num; i++) { 2151 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2152 2153 if (!IS_ERR_OR_NULL(tx_chn->desc_pool)) 2154 k3_cppi_desc_pool_destroy(tx_chn->desc_pool); 2155 2156 if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) 2157 k3_udma_glue_release_tx_chn(tx_chn->tx_chn); 2158 2159 memset(tx_chn, 0, sizeof(*tx_chn)); 2160 } 2161 } 2162 2163 void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) 2164 { 2165 struct device *dev = common->dev; 2166 int i; 2167 2168 devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common); 2169 2170 common->tx_ch_rate_msk = 0; 2171 for (i = 0; i < common->tx_ch_num; i++) { 2172 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2173 2174 if (tx_chn->irq) 2175 devm_free_irq(dev, tx_chn->irq, tx_chn); 2176 2177 netif_napi_del(&tx_chn->napi_tx); 2178 2179 if (!IS_ERR_OR_NULL(tx_chn->desc_pool)) 2180 k3_cppi_desc_pool_destroy(tx_chn->desc_pool); 2181 2182 if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) 2183 k3_udma_glue_release_tx_chn(tx_chn->tx_chn); 2184 2185 memset(tx_chn, 0, sizeof(*tx_chn)); 2186 } 2187 } 2188 2189 static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) 2190 { 2191 struct device *dev = common->dev; 2192 int i, ret = 0; 2193 2194 for (i = 0; i < common->tx_ch_num; i++) { 2195 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2196 2197 netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, 2198 am65_cpsw_nuss_tx_poll); 2199 hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 2200 tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback; 2201 2202 ret = devm_request_irq(dev, tx_chn->irq, 2203 am65_cpsw_nuss_tx_irq, 2204 IRQF_TRIGGER_HIGH, 2205 tx_chn->tx_chn_name, tx_chn); 2206 if (ret) { 2207 dev_err(dev, "failure requesting tx%u irq %u, %d\n", 2208 tx_chn->id, tx_chn->irq, ret); 2209 goto err; 2210 } 2211 } 2212 2213 err: 2214 
	return ret;
}

static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
{
	u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
	struct device *dev = common->dev;
	struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0
	};
	u32 hdesc_size, hdesc_size_out;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;
	tx_cfg.tx_cfg.size = max_desc_num;
	tx_cfg.txcq_cfg.size = max_desc_num;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "tx%d", i);

		spin_lock_init(&tx_chn->lock);
		tx_chn->common = common;
		tx_chn->id = i;
		tx_chn->descs_num = max_desc_num;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev,
						    tx_chn->tx_chn_name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
					    "Failed to request tx dma channel\n");
			goto err;
		}
		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);

		tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
								  tx_chn->descs_num,
								  hdesc_size,
								  tx_chn->tx_chn_name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			dev_err(dev, "Failed to create pool %d\n", ret);
			goto err;
		}

		hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
		tx_chn->dsize_log2 = __fls(hdesc_size_out);
		WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq < 0) {
			dev_err(dev, "Failed to get tx dma irq %d\n",
				tx_chn->irq);
			ret = tx_chn->irq;
			goto err;
		}

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
	if (ret) {
		dev_err(dev, "Failed to add tx NAPI %d\n", ret);
		goto err;
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
		return i;
	}

	return ret;
}

static void am65_cpsw_nuss_free_rx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	struct am65_cpsw_rx_chn *rx_chn;

	rx_chn = &common->rx_chns;

	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}

static void am65_cpsw_nuss_remove_rx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	struct device *dev = common->dev;
	struct am65_cpsw_rx_chn *rx_chn;

	rx_chn = &common->rx_chns;
	devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);

	if (rx_chn->irq >= 0)
		devm_free_irq(dev, rx_chn->irq, common);

	netif_napi_del(&common->napi_rx);

	am65_cpsw_nuss_free_rx_chns(common);

	common->rx_flow_id_base = -1;
}

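/* RX DMA layout, as configured by am65_cpsw_nuss_init_rx_chns() below:
 * a single "rx" UDMA channel carries AM65_CPSW_MAX_RX_FLOWS flows, all
 * flows share the free-descriptor queue of flow 0, and a single NAPI
 * context and IRQ service the whole channel.
 */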
static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
	u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
	struct device *dev = common->dev;
	u32 hdesc_size, hdesc_size_out;
	u32 fdqring_id;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
	rx_cfg.flow_id_base = common->rx_flow_id_base;

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
				    "Failed to request rx dma channel\n");
		goto err;
	}
	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);

	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size, "rx");
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		dev_err(dev, "Failed to create rx pool %d\n", ret);
		goto err;
	}

	hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
	rx_chn->dsize_log2 = __fls(hdesc_size_out);
	WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));

	rx_chn->page_pool = NULL;

	rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
				     sizeof(*rx_chn->pages), GFP_KERNEL);
	if (!rx_chn->pages)
		return -ENOMEM;

	common->rx_flow_id_base =
		k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
			goto err;
		}
		if (!i)
			fdqring_id =
				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								i);

		rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);

		if (rx_chn->irq <= 0) {
			dev_err(dev, "Failed to get rx dma irq %d\n",
				rx_chn->irq);
			ret = -ENXIO;
			goto err;
		}
	}

	netif_napi_add(common->dma_ndev, &common->napi_rx,
		       am65_cpsw_nuss_rx_poll);
	hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;

	ret = devm_request_irq(dev, rx_chn->irq,
			       am65_cpsw_nuss_rx_irq,
			       IRQF_TRIGGER_HIGH, dev_name(dev), common);
	if (ret) {
		dev_err(dev,
"failure requesting rx irq %u, %d\n", 2445 rx_chn->irq, ret); 2446 goto err; 2447 } 2448 2449 err: 2450 i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common); 2451 if (i) { 2452 dev_err(dev, "Failed to add free_rx_chns action %d\n", i); 2453 return i; 2454 } 2455 2456 return ret; 2457 } 2458 2459 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common) 2460 { 2461 struct am65_cpsw_host *host_p = am65_common_get_host(common); 2462 2463 host_p->common = common; 2464 host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE; 2465 host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE; 2466 2467 return 0; 2468 } 2469 2470 static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node, 2471 int slave, u8 *mac_addr) 2472 { 2473 u32 mac_lo, mac_hi, offset; 2474 struct regmap *syscon; 2475 int ret; 2476 2477 syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse"); 2478 if (IS_ERR(syscon)) { 2479 if (PTR_ERR(syscon) == -ENODEV) 2480 return 0; 2481 return PTR_ERR(syscon); 2482 } 2483 2484 ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1, 2485 &offset); 2486 if (ret) 2487 return ret; 2488 2489 regmap_read(syscon, offset, &mac_lo); 2490 regmap_read(syscon, offset + 4, &mac_hi); 2491 2492 mac_addr[0] = (mac_hi >> 8) & 0xff; 2493 mac_addr[1] = mac_hi & 0xff; 2494 mac_addr[2] = (mac_lo >> 24) & 0xff; 2495 mac_addr[3] = (mac_lo >> 16) & 0xff; 2496 mac_addr[4] = (mac_lo >> 8) & 0xff; 2497 mac_addr[5] = mac_lo & 0xff; 2498 2499 return 0; 2500 } 2501 2502 static int am65_cpsw_init_cpts(struct am65_cpsw_common *common) 2503 { 2504 struct device *dev = common->dev; 2505 struct device_node *node; 2506 struct am65_cpts *cpts; 2507 void __iomem *reg_base; 2508 2509 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) 2510 return 0; 2511 2512 node = of_get_child_by_name(dev->of_node, "cpts"); 2513 if (!node) { 2514 dev_err(dev, "%s cpts not found\n", __func__); 2515 return -ENOENT; 2516 } 2517 2518 reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE; 2519 cpts = am65_cpts_create(dev, reg_base, node); 2520 if (IS_ERR(cpts)) { 2521 int ret = PTR_ERR(cpts); 2522 2523 of_node_put(node); 2524 dev_err(dev, "cpts create err %d\n", ret); 2525 return ret; 2526 } 2527 common->cpts = cpts; 2528 /* Forbid PM runtime if CPTS is running. 2529 * K3 CPSWxG modules may completely lose context during ON->OFF 2530 * transitions depending on integration. 
2531 * AM65x/J721E MCU CPSW2G: false 2532 * J721E MAIN_CPSW9G: true 2533 */ 2534 pm_runtime_forbid(dev); 2535 2536 return 0; 2537 } 2538 2539 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) 2540 { 2541 struct device_node *node, *port_np; 2542 struct device *dev = common->dev; 2543 int ret; 2544 2545 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 2546 if (!node) 2547 return -ENOENT; 2548 2549 for_each_child_of_node(node, port_np) { 2550 struct am65_cpsw_port *port; 2551 u32 port_id; 2552 2553 /* it is not a slave port node, continue */ 2554 if (strcmp(port_np->name, "port")) 2555 continue; 2556 2557 ret = of_property_read_u32(port_np, "reg", &port_id); 2558 if (ret < 0) { 2559 dev_err(dev, "%pOF error reading port_id %d\n", 2560 port_np, ret); 2561 goto of_node_put; 2562 } 2563 2564 if (!port_id || port_id > common->port_num) { 2565 dev_err(dev, "%pOF has invalid port_id %u %s\n", 2566 port_np, port_id, port_np->name); 2567 ret = -EINVAL; 2568 goto of_node_put; 2569 } 2570 2571 port = am65_common_get_port(common, port_id); 2572 port->port_id = port_id; 2573 port->common = common; 2574 port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE + 2575 AM65_CPSW_NU_PORTS_OFFSET * (port_id); 2576 if (common->pdata.extra_modes) 2577 port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id); 2578 port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE + 2579 (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id); 2580 port->name = of_get_property(port_np, "label", NULL); 2581 port->fetch_ram_base = 2582 common->cpsw_base + AM65_CPSW_NU_FRAM_BASE + 2583 (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); 2584 2585 port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); 2586 if (IS_ERR(port->slave.mac_sl)) { 2587 ret = PTR_ERR(port->slave.mac_sl); 2588 goto of_node_put; 2589 } 2590 2591 port->disabled = !of_device_is_available(port_np); 2592 if (port->disabled) { 2593 common->disabled_ports_mask |= BIT(port->port_id); 2594 continue; 2595 } 2596 2597 port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL); 2598 if (IS_ERR(port->slave.ifphy)) { 2599 ret = PTR_ERR(port->slave.ifphy); 2600 dev_err(dev, "%pOF error retrieving port phy: %d\n", 2601 port_np, ret); 2602 goto of_node_put; 2603 } 2604 2605 /* Initialize the Serdes PHY for the port */ 2606 ret = am65_cpsw_init_serdes_phy(dev, port_np, port); 2607 if (ret) 2608 goto of_node_put; 2609 2610 port->slave.mac_only = 2611 of_property_read_bool(port_np, "ti,mac-only"); 2612 2613 /* get phy/link info */ 2614 port->slave.phy_node = port_np; 2615 ret = of_get_phy_mode(port_np, &port->slave.phy_if); 2616 if (ret) { 2617 dev_err(dev, "%pOF read phy-mode err %d\n", 2618 port_np, ret); 2619 goto of_node_put; 2620 } 2621 2622 ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if); 2623 if (ret) 2624 goto of_node_put; 2625 2626 ret = of_get_mac_address(port_np, port->slave.mac_addr); 2627 if (ret) { 2628 am65_cpsw_am654_get_efuse_macid(port_np, 2629 port->port_id, 2630 port->slave.mac_addr); 2631 if (!is_valid_ether_addr(port->slave.mac_addr)) { 2632 eth_random_addr(port->slave.mac_addr); 2633 dev_err(dev, "Use random MAC address\n"); 2634 } 2635 } 2636 2637 /* Reset all Queue priorities to 0 */ 2638 writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP); 2639 } 2640 of_node_put(node); 2641 2642 /* is there at least one ext.port */ 2643 if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) { 2644 dev_err(dev, "No Ext. 
port are available\n"); 2645 return -ENODEV; 2646 } 2647 2648 return 0; 2649 2650 of_node_put: 2651 of_node_put(port_np); 2652 of_node_put(node); 2653 return ret; 2654 } 2655 2656 static void am65_cpsw_pcpu_stats_free(void *data) 2657 { 2658 struct am65_cpsw_ndev_stats __percpu *stats = data; 2659 2660 free_percpu(stats); 2661 } 2662 2663 static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common) 2664 { 2665 struct am65_cpsw_port *port; 2666 int i; 2667 2668 for (i = 0; i < common->port_num; i++) { 2669 port = &common->ports[i]; 2670 if (port->slave.phylink) 2671 phylink_destroy(port->slave.phylink); 2672 } 2673 } 2674 2675 static int 2676 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) 2677 { 2678 struct am65_cpsw_ndev_priv *ndev_priv; 2679 struct device *dev = common->dev; 2680 struct am65_cpsw_port *port; 2681 struct phylink *phylink; 2682 int ret; 2683 2684 port = &common->ports[port_idx]; 2685 2686 if (port->disabled) 2687 return 0; 2688 2689 /* alloc netdev */ 2690 port->ndev = devm_alloc_etherdev_mqs(common->dev, 2691 sizeof(struct am65_cpsw_ndev_priv), 2692 AM65_CPSW_MAX_TX_QUEUES, 2693 AM65_CPSW_MAX_RX_QUEUES); 2694 if (!port->ndev) { 2695 dev_err(dev, "error allocating slave net_device %u\n", 2696 port->port_id); 2697 return -ENOMEM; 2698 } 2699 2700 ndev_priv = netdev_priv(port->ndev); 2701 ndev_priv->port = port; 2702 ndev_priv->msg_enable = AM65_CPSW_DEBUG; 2703 mutex_init(&ndev_priv->mm_lock); 2704 port->qos.link_speed = SPEED_UNKNOWN; 2705 SET_NETDEV_DEV(port->ndev, dev); 2706 2707 eth_hw_addr_set(port->ndev, port->slave.mac_addr); 2708 2709 port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE; 2710 port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE - 2711 (VLAN_ETH_HLEN + ETH_FCS_LEN); 2712 port->ndev->hw_features = NETIF_F_SG | 2713 NETIF_F_RXCSUM | 2714 NETIF_F_HW_CSUM | 2715 NETIF_F_HW_TC; 2716 port->ndev->features = port->ndev->hw_features | 2717 NETIF_F_HW_VLAN_CTAG_FILTER; 2718 port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC | 2719 NETDEV_XDP_ACT_REDIRECT | 2720 NETDEV_XDP_ACT_NDO_XMIT; 2721 port->ndev->vlan_features |= NETIF_F_SG; 2722 port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops; 2723 port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; 2724 2725 /* Configuring Phylink */ 2726 port->slave.phylink_config.dev = &port->ndev->dev; 2727 port->slave.phylink_config.type = PHYLINK_NETDEV; 2728 port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | 2729 MAC_1000FD | MAC_5000FD; 2730 port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */ 2731 2732 switch (port->slave.phy_if) { 2733 case PHY_INTERFACE_MODE_RGMII: 2734 case PHY_INTERFACE_MODE_RGMII_ID: 2735 case PHY_INTERFACE_MODE_RGMII_RXID: 2736 case PHY_INTERFACE_MODE_RGMII_TXID: 2737 phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces); 2738 break; 2739 2740 case PHY_INTERFACE_MODE_RMII: 2741 __set_bit(PHY_INTERFACE_MODE_RMII, 2742 port->slave.phylink_config.supported_interfaces); 2743 break; 2744 2745 case PHY_INTERFACE_MODE_QSGMII: 2746 case PHY_INTERFACE_MODE_SGMII: 2747 case PHY_INTERFACE_MODE_USXGMII: 2748 if (common->pdata.extra_modes & BIT(port->slave.phy_if)) { 2749 __set_bit(port->slave.phy_if, 2750 port->slave.phylink_config.supported_interfaces); 2751 } else { 2752 dev_err(dev, "selected phy-mode is not supported\n"); 2753 return -EOPNOTSUPP; 2754 } 2755 break; 2756 2757 default: 2758 dev_err(dev, "selected phy-mode is not supported\n"); 2759 return -EOPNOTSUPP; 2760 } 2761 2762 phylink = 
phylink_create(&port->slave.phylink_config, 2763 of_node_to_fwnode(port->slave.phy_node), 2764 port->slave.phy_if, 2765 &am65_cpsw_phylink_mac_ops); 2766 if (IS_ERR(phylink)) 2767 return PTR_ERR(phylink); 2768 2769 port->slave.phylink = phylink; 2770 2771 /* Disable TX checksum offload by default due to HW bug */ 2772 if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) 2773 port->ndev->features &= ~NETIF_F_HW_CSUM; 2774 2775 ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats); 2776 if (!ndev_priv->stats) 2777 return -ENOMEM; 2778 2779 ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free, 2780 ndev_priv->stats); 2781 if (ret) 2782 dev_err(dev, "failed to add percpu stat free action %d\n", ret); 2783 2784 port->xdp_prog = NULL; 2785 2786 if (!common->dma_ndev) 2787 common->dma_ndev = port->ndev; 2788 2789 return ret; 2790 } 2791 2792 static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common) 2793 { 2794 int ret; 2795 int i; 2796 2797 for (i = 0; i < common->port_num; i++) { 2798 ret = am65_cpsw_nuss_init_port_ndev(common, i); 2799 if (ret) 2800 return ret; 2801 } 2802 2803 return ret; 2804 } 2805 2806 static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) 2807 { 2808 struct am65_cpsw_port *port; 2809 int i; 2810 2811 for (i = 0; i < common->port_num; i++) { 2812 port = &common->ports[i]; 2813 if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED) 2814 unregister_netdev(port->ndev); 2815 } 2816 } 2817 2818 static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common) 2819 { 2820 int set_val = 0; 2821 int i; 2822 2823 if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask)) 2824 set_val = 1; 2825 2826 dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val); 2827 2828 for (i = 1; i <= common->port_num; i++) { 2829 struct am65_cpsw_port *port = am65_common_get_port(common, i); 2830 struct am65_cpsw_ndev_priv *priv; 2831 2832 if (!port->ndev) 2833 continue; 2834 2835 priv = am65_ndev_to_priv(port->ndev); 2836 priv->offload_fwd_mark = set_val; 2837 } 2838 } 2839 2840 bool am65_cpsw_port_dev_check(const struct net_device *ndev) 2841 { 2842 if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) { 2843 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2844 2845 return !common->is_emac_mode; 2846 } 2847 2848 return false; 2849 } 2850 2851 static int am65_cpsw_netdevice_port_link(struct net_device *ndev, 2852 struct net_device *br_ndev, 2853 struct netlink_ext_ack *extack) 2854 { 2855 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2856 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2857 int err; 2858 2859 if (!common->br_members) { 2860 common->hw_bridge_dev = br_ndev; 2861 } else { 2862 /* This is adding the port to a second bridge, this is 2863 * unsupported 2864 */ 2865 if (common->hw_bridge_dev != br_ndev) 2866 return -EOPNOTSUPP; 2867 } 2868 2869 err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, 2870 false, extack); 2871 if (err) 2872 return err; 2873 2874 common->br_members |= BIT(priv->port->port_id); 2875 2876 am65_cpsw_port_offload_fwd_mark_update(common); 2877 2878 return NOTIFY_DONE; 2879 } 2880 2881 static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev) 2882 { 2883 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2884 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2885 2886 switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); 2887 2888 common->br_members &= 
~BIT(priv->port->port_id); 2889 2890 am65_cpsw_port_offload_fwd_mark_update(common); 2891 2892 if (!common->br_members) 2893 common->hw_bridge_dev = NULL; 2894 } 2895 2896 /* netdev notifier */ 2897 static int am65_cpsw_netdevice_event(struct notifier_block *unused, 2898 unsigned long event, void *ptr) 2899 { 2900 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); 2901 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2902 struct netdev_notifier_changeupper_info *info; 2903 int ret = NOTIFY_DONE; 2904 2905 if (!am65_cpsw_port_dev_check(ndev)) 2906 return NOTIFY_DONE; 2907 2908 switch (event) { 2909 case NETDEV_CHANGEUPPER: 2910 info = ptr; 2911 2912 if (netif_is_bridge_master(info->upper_dev)) { 2913 if (info->linking) 2914 ret = am65_cpsw_netdevice_port_link(ndev, 2915 info->upper_dev, 2916 extack); 2917 else 2918 am65_cpsw_netdevice_port_unlink(ndev); 2919 } 2920 break; 2921 default: 2922 return NOTIFY_DONE; 2923 } 2924 2925 return notifier_from_errno(ret); 2926 } 2927 2928 static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw) 2929 { 2930 int ret = 0; 2931 2932 if (AM65_CPSW_IS_CPSW2G(cpsw) || 2933 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 2934 return 0; 2935 2936 cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event; 2937 ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 2938 if (ret) { 2939 dev_err(cpsw->dev, "can't register netdevice notifier\n"); 2940 return ret; 2941 } 2942 2943 ret = am65_cpsw_switchdev_register_notifiers(cpsw); 2944 if (ret) 2945 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 2946 2947 return ret; 2948 } 2949 2950 static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw) 2951 { 2952 if (AM65_CPSW_IS_CPSW2G(cpsw) || 2953 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 2954 return; 2955 2956 am65_cpsw_switchdev_unregister_notifiers(cpsw); 2957 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 2958 } 2959 2960 static const struct devlink_ops am65_cpsw_devlink_ops = {}; 2961 2962 static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw) 2963 { 2964 cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0, 2965 ALE_MCAST_BLOCK_LEARN_FWD); 2966 } 2967 2968 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common) 2969 { 2970 struct am65_cpsw_host *host = am65_common_get_host(common); 2971 2972 writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 2973 2974 am65_cpsw_init_stp_ale_entry(common); 2975 2976 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1); 2977 dev_dbg(common->dev, "Set P0_UNI_FLOOD\n"); 2978 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0); 2979 } 2980 2981 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common) 2982 { 2983 struct am65_cpsw_host *host = am65_common_get_host(common); 2984 2985 writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 2986 2987 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); 2988 dev_dbg(common->dev, "unset P0_UNI_FLOOD\n"); 2989 2990 /* learning make no sense in multi-mac mode */ 2991 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1); 2992 } 2993 2994 static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, 2995 struct devlink_param_gset_ctx *ctx) 2996 { 2997 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 2998 struct am65_cpsw_common *common = dl_priv->common; 2999 3000 dev_dbg(common->dev, "%s 
id:%u\n", __func__, id); 3001 3002 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 3003 return -EOPNOTSUPP; 3004 3005 ctx->val.vbool = !common->is_emac_mode; 3006 3007 return 0; 3008 } 3009 3010 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port) 3011 { 3012 struct am65_cpsw_slave_data *slave = &port->slave; 3013 struct am65_cpsw_common *common = port->common; 3014 u32 port_mask; 3015 3016 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3017 3018 if (slave->mac_only) 3019 /* enable mac-only mode on port */ 3020 cpsw_ale_control_set(common->ale, port->port_id, 3021 ALE_PORT_MACONLY, 1); 3022 3023 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1); 3024 3025 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3026 3027 cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr, 3028 HOST_PORT_NUM, ALE_SECURE, slave->port_vlan); 3029 cpsw_ale_add_mcast(common->ale, port->ndev->broadcast, 3030 port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2); 3031 } 3032 3033 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port) 3034 { 3035 struct am65_cpsw_slave_data *slave = &port->slave; 3036 struct am65_cpsw_common *cpsw = port->common; 3037 u32 port_mask; 3038 3039 cpsw_ale_control_set(cpsw->ale, port->port_id, 3040 ALE_PORT_NOLEARN, 0); 3041 3042 cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr, 3043 HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN, 3044 slave->port_vlan); 3045 3046 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3047 3048 cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast, 3049 port_mask, ALE_VLAN, slave->port_vlan, 3050 ALE_MCAST_FWD_2); 3051 3052 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3053 3054 cpsw_ale_control_set(cpsw->ale, port->port_id, 3055 ALE_PORT_MACONLY, 0); 3056 } 3057 3058 static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, 3059 struct devlink_param_gset_ctx *ctx, 3060 struct netlink_ext_ack *extack) 3061 { 3062 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 3063 struct am65_cpsw_common *cpsw = dl_priv->common; 3064 bool switch_en = ctx->val.vbool; 3065 bool if_running = false; 3066 int i; 3067 3068 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 3069 3070 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 3071 return -EOPNOTSUPP; 3072 3073 if (switch_en == !cpsw->is_emac_mode) 3074 return 0; 3075 3076 if (!switch_en && cpsw->br_members) { 3077 dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n"); 3078 return -EINVAL; 3079 } 3080 3081 rtnl_lock(); 3082 3083 cpsw->is_emac_mode = !switch_en; 3084 3085 for (i = 0; i < cpsw->port_num; i++) { 3086 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3087 3088 if (!sl_ndev || !netif_running(sl_ndev)) 3089 continue; 3090 3091 if_running = true; 3092 } 3093 3094 if (!if_running) { 3095 /* all ndevs are down */ 3096 for (i = 0; i < cpsw->port_num; i++) { 3097 struct net_device *sl_ndev = cpsw->ports[i].ndev; 3098 struct am65_cpsw_slave_data *slave; 3099 3100 if (!sl_ndev) 3101 continue; 3102 3103 slave = am65_ndev_to_slave(sl_ndev); 3104 if (switch_en) 3105 slave->port_vlan = cpsw->default_vlan; 3106 else 3107 slave->port_vlan = 0; 3108 } 3109 3110 goto exit; 3111 } 3112 3113 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); 3114 /* clean up ALE table */ 3115 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1); 3116 cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT); 3117 3118 if (switch_en) { 3119 dev_info(cpsw->dev, "Enable switch mode\n"); 3120 3121 
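		/* Switch mode: the host port gets the default VLAN plus STP and
		 * flood ALE setup, every slave inherits default_vlan, and ports
		 * that are up get their forwarding ALE entries reprogrammed.
		 */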
		am65_cpsw_init_host_port_switch(cpsw);

		for (i = 0; i < cpsw->port_num; i++) {
			struct net_device *sl_ndev = cpsw->ports[i].ndev;
			struct am65_cpsw_slave_data *slave;
			struct am65_cpsw_port *port;

			if (!sl_ndev)
				continue;

			port = am65_ndev_to_port(sl_ndev);
			slave = am65_ndev_to_slave(sl_ndev);
			slave->port_vlan = cpsw->default_vlan;

			if (netif_running(sl_ndev))
				am65_cpsw_init_port_switch_ale(port);
		}

	} else {
		dev_info(cpsw->dev, "Disable switch mode\n");

		am65_cpsw_init_host_port_emac(cpsw);

		for (i = 0; i < cpsw->port_num; i++) {
			struct net_device *sl_ndev = cpsw->ports[i].ndev;
			struct am65_cpsw_port *port;

			if (!sl_ndev)
				continue;

			port = am65_ndev_to_port(sl_ndev);
			port->slave.port_vlan = 0;
			if (netif_running(sl_ndev))
				am65_cpsw_init_port_emac_ale(port);
		}
	}
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
exit:
	rtnl_unlock();

	return 0;
}

static const struct devlink_param am65_cpsw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     am65_cpsw_dl_switch_mode_get,
			     am65_cpsw_dl_switch_mode_set, NULL),
};

static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
{
	struct devlink_port_attrs attrs = {};
	struct am65_cpsw_devlink *dl_priv;
	struct device *dev = common->dev;
	struct devlink_port *dl_port;
	struct am65_cpsw_port *port;
	int ret = 0;
	int i;

	common->devlink =
		devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
	if (!common->devlink)
		return -ENOMEM;

	dl_priv = devlink_priv(common->devlink);
	dl_priv->common = common;

	/* Provide devlink hook to switch mode when multiple external ports
	 * are present and the NUSS switchdev driver is enabled.
3192 */ 3193 if (!AM65_CPSW_IS_CPSW2G(common) && 3194 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { 3195 ret = devlink_params_register(common->devlink, 3196 am65_cpsw_devlink_params, 3197 ARRAY_SIZE(am65_cpsw_devlink_params)); 3198 if (ret) { 3199 dev_err(dev, "devlink params reg fail ret:%d\n", ret); 3200 goto dl_unreg; 3201 } 3202 } 3203 3204 for (i = 1; i <= common->port_num; i++) { 3205 port = am65_common_get_port(common, i); 3206 dl_port = &port->devlink_port; 3207 3208 if (port->ndev) 3209 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; 3210 else 3211 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED; 3212 attrs.phys.port_number = port->port_id; 3213 attrs.switch_id.id_len = sizeof(resource_size_t); 3214 memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len); 3215 devlink_port_attrs_set(dl_port, &attrs); 3216 3217 ret = devlink_port_register(common->devlink, dl_port, port->port_id); 3218 if (ret) { 3219 dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n", 3220 port->port_id, ret); 3221 goto dl_port_unreg; 3222 } 3223 } 3224 devlink_register(common->devlink); 3225 return ret; 3226 3227 dl_port_unreg: 3228 for (i = i - 1; i >= 1; i--) { 3229 port = am65_common_get_port(common, i); 3230 dl_port = &port->devlink_port; 3231 3232 devlink_port_unregister(dl_port); 3233 } 3234 dl_unreg: 3235 devlink_free(common->devlink); 3236 return ret; 3237 } 3238 3239 static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) 3240 { 3241 struct devlink_port *dl_port; 3242 struct am65_cpsw_port *port; 3243 int i; 3244 3245 devlink_unregister(common->devlink); 3246 3247 for (i = 1; i <= common->port_num; i++) { 3248 port = am65_common_get_port(common, i); 3249 dl_port = &port->devlink_port; 3250 3251 devlink_port_unregister(dl_port); 3252 } 3253 3254 if (!AM65_CPSW_IS_CPSW2G(common) && 3255 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 3256 devlink_params_unregister(common->devlink, 3257 am65_cpsw_devlink_params, 3258 ARRAY_SIZE(am65_cpsw_devlink_params)); 3259 3260 devlink_free(common->devlink); 3261 } 3262 3263 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) 3264 { 3265 struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns; 3266 struct am65_cpsw_tx_chn *tx_chan = common->tx_chns; 3267 struct device *dev = common->dev; 3268 struct am65_cpsw_port *port; 3269 int ret = 0, i; 3270 3271 /* init tx channels */ 3272 ret = am65_cpsw_nuss_init_tx_chns(common); 3273 if (ret) 3274 return ret; 3275 ret = am65_cpsw_nuss_init_rx_chns(common); 3276 if (ret) 3277 return ret; 3278 3279 /* The DMA Channels are not guaranteed to be in a clean state. 3280 * Reset and disable them to ensure that they are back to the 3281 * clean state and ready to be used. 
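	 * The am65_cpsw_nuss_tx_cleanup()/am65_cpsw_nuss_rx_cleanup() callbacks
	 * passed to the reset helpers below handle any descriptors still queued
	 * at that point.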
3282 */ 3283 for (i = 0; i < common->tx_ch_num; i++) { 3284 k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i], 3285 am65_cpsw_nuss_tx_cleanup); 3286 k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn); 3287 } 3288 3289 for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++) 3290 k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan, 3291 am65_cpsw_nuss_rx_cleanup, !!i); 3292 3293 k3_udma_glue_disable_rx_chn(rx_chan->rx_chn); 3294 3295 ret = am65_cpsw_nuss_register_devlink(common); 3296 if (ret) 3297 return ret; 3298 3299 for (i = 0; i < common->port_num; i++) { 3300 port = &common->ports[i]; 3301 3302 if (!port->ndev) 3303 continue; 3304 3305 SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port); 3306 3307 ret = register_netdev(port->ndev); 3308 if (ret) { 3309 dev_err(dev, "error registering slave net device%i %d\n", 3310 i, ret); 3311 goto err_cleanup_ndev; 3312 } 3313 } 3314 3315 ret = am65_cpsw_register_notifiers(common); 3316 if (ret) 3317 goto err_cleanup_ndev; 3318 3319 /* can't auto unregister ndev using devm_add_action() due to 3320 * devres release sequence in DD core for DMA 3321 */ 3322 3323 return 0; 3324 3325 err_cleanup_ndev: 3326 am65_cpsw_nuss_cleanup_ndev(common); 3327 am65_cpsw_unregister_devlink(common); 3328 3329 return ret; 3330 } 3331 3332 int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx) 3333 { 3334 int ret; 3335 3336 common->tx_ch_num = num_tx; 3337 ret = am65_cpsw_nuss_init_tx_chns(common); 3338 3339 return ret; 3340 } 3341 3342 struct am65_cpsw_soc_pdata { 3343 u32 quirks_dis; 3344 }; 3345 3346 static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = { 3347 .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3348 }; 3349 3350 static const struct soc_device_attribute am65_cpsw_socinfo[] = { 3351 { .family = "AM65X", 3352 .revision = "SR2.0", 3353 .data = &am65x_soc_sr2_0 3354 }, 3355 {/* sentinel */} 3356 }; 3357 3358 static const struct am65_cpsw_pdata am65x_sr1_0 = { 3359 .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3360 .ale_dev_id = "am65x-cpsw2g", 3361 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3362 }; 3363 3364 static const struct am65_cpsw_pdata j721e_pdata = { 3365 .quirks = 0, 3366 .ale_dev_id = "am65x-cpsw2g", 3367 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3368 }; 3369 3370 static const struct am65_cpsw_pdata am64x_cpswxg_pdata = { 3371 .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ, 3372 .ale_dev_id = "am64-cpswxg", 3373 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3374 }; 3375 3376 static const struct am65_cpsw_pdata j7200_cpswxg_pdata = { 3377 .quirks = 0, 3378 .ale_dev_id = "am64-cpswxg", 3379 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3380 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), 3381 }; 3382 3383 static const struct am65_cpsw_pdata j721e_cpswxg_pdata = { 3384 .quirks = 0, 3385 .ale_dev_id = "am64-cpswxg", 3386 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3387 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), 3388 }; 3389 3390 static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = { 3391 .quirks = 0, 3392 .ale_dev_id = "am64-cpswxg", 3393 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3394 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | 3395 BIT(PHY_INTERFACE_MODE_USXGMII), 3396 }; 3397 3398 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { 3399 { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0}, 3400 { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata}, 3401 { .compatible = "ti,am642-cpsw-nuss", 
.data = &am64x_cpswxg_pdata}, 3402 { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata}, 3403 { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata}, 3404 { .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata}, 3405 { /* sentinel */ }, 3406 }; 3407 MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); 3408 3409 static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common) 3410 { 3411 const struct soc_device_attribute *soc; 3412 3413 soc = soc_device_match(am65_cpsw_socinfo); 3414 if (soc && soc->data) { 3415 const struct am65_cpsw_soc_pdata *socdata = soc->data; 3416 3417 /* disable quirks */ 3418 common->pdata.quirks &= ~socdata->quirks_dis; 3419 } 3420 } 3421 3422 static int am65_cpsw_nuss_probe(struct platform_device *pdev) 3423 { 3424 struct cpsw_ale_params ale_params = { 0 }; 3425 const struct of_device_id *of_id; 3426 struct device *dev = &pdev->dev; 3427 struct am65_cpsw_common *common; 3428 struct device_node *node; 3429 struct resource *res; 3430 struct clk *clk; 3431 int ale_entries; 3432 u64 id_temp; 3433 int ret, i; 3434 3435 common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); 3436 if (!common) 3437 return -ENOMEM; 3438 common->dev = dev; 3439 3440 of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev); 3441 if (!of_id) 3442 return -EINVAL; 3443 common->pdata = *(const struct am65_cpsw_pdata *)of_id->data; 3444 3445 am65_cpsw_nuss_apply_socinfo(common); 3446 3447 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss"); 3448 common->ss_base = devm_ioremap_resource(&pdev->dev, res); 3449 if (IS_ERR(common->ss_base)) 3450 return PTR_ERR(common->ss_base); 3451 common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE; 3452 /* Use device's physical base address as switch id */ 3453 id_temp = cpu_to_be64(res->start); 3454 memcpy(common->switch_id, &id_temp, sizeof(res->start)); 3455 3456 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 3457 if (!node) 3458 return -ENOENT; 3459 common->port_num = of_get_child_count(node); 3460 of_node_put(node); 3461 if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS) 3462 return -ENOENT; 3463 3464 common->rx_flow_id_base = -1; 3465 init_completion(&common->tdown_complete); 3466 common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS; 3467 common->pf_p0_rx_ptype_rrobin = false; 3468 common->default_vlan = 1; 3469 3470 common->ports = devm_kcalloc(dev, common->port_num, 3471 sizeof(*common->ports), 3472 GFP_KERNEL); 3473 if (!common->ports) 3474 return -ENOMEM; 3475 3476 clk = devm_clk_get(dev, "fck"); 3477 if (IS_ERR(clk)) 3478 return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n"); 3479 common->bus_freq = clk_get_rate(clk); 3480 3481 pm_runtime_enable(dev); 3482 ret = pm_runtime_resume_and_get(dev); 3483 if (ret < 0) { 3484 pm_runtime_disable(dev); 3485 return ret; 3486 } 3487 3488 node = of_get_child_by_name(dev->of_node, "mdio"); 3489 if (!node) { 3490 dev_warn(dev, "MDIO node not found\n"); 3491 } else if (of_device_is_available(node)) { 3492 struct platform_device *mdio_pdev; 3493 3494 mdio_pdev = of_platform_device_create(node, NULL, dev); 3495 if (!mdio_pdev) { 3496 ret = -ENODEV; 3497 goto err_pm_clear; 3498 } 3499 3500 common->mdio_dev = &mdio_pdev->dev; 3501 } 3502 of_node_put(node); 3503 3504 am65_cpsw_nuss_get_ver(common); 3505 3506 ret = am65_cpsw_nuss_init_host_p(common); 3507 if (ret) 3508 goto err_of_clear; 3509 3510 ret = am65_cpsw_nuss_init_slave_ports(common); 3511 if (ret) 3512 goto err_of_clear; 3513 3514 /* init 
common data */ 3515 ale_params.dev = dev; 3516 ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; 3517 ale_params.ale_ports = common->port_num + 1; 3518 ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE; 3519 ale_params.dev_id = common->pdata.ale_dev_id; 3520 ale_params.bus_freq = common->bus_freq; 3521 3522 common->ale = cpsw_ale_create(&ale_params); 3523 if (IS_ERR(common->ale)) { 3524 dev_err(dev, "error initializing ale engine\n"); 3525 ret = PTR_ERR(common->ale); 3526 goto err_of_clear; 3527 } 3528 3529 ale_entries = common->ale->params.ale_entries; 3530 common->ale_context = devm_kzalloc(dev, 3531 ale_entries * ALE_ENTRY_WORDS * sizeof(u32), 3532 GFP_KERNEL); 3533 ret = am65_cpsw_init_cpts(common); 3534 if (ret) 3535 goto err_of_clear; 3536 3537 /* init ports */ 3538 for (i = 0; i < common->port_num; i++) 3539 am65_cpsw_nuss_slave_disable_unused(&common->ports[i]); 3540 3541 dev_set_drvdata(dev, common); 3542 3543 common->is_emac_mode = true; 3544 3545 ret = am65_cpsw_nuss_init_ndevs(common); 3546 if (ret) 3547 goto err_free_phylink; 3548 3549 ret = am65_cpsw_nuss_register_ndevs(common); 3550 if (ret) 3551 goto err_free_phylink; 3552 3553 pm_runtime_put(dev); 3554 return 0; 3555 3556 err_free_phylink: 3557 am65_cpsw_nuss_phylink_cleanup(common); 3558 am65_cpts_release(common->cpts); 3559 err_of_clear: 3560 if (common->mdio_dev) 3561 of_platform_device_destroy(common->mdio_dev, NULL); 3562 err_pm_clear: 3563 pm_runtime_put_sync(dev); 3564 pm_runtime_disable(dev); 3565 return ret; 3566 } 3567 3568 static void am65_cpsw_nuss_remove(struct platform_device *pdev) 3569 { 3570 struct device *dev = &pdev->dev; 3571 struct am65_cpsw_common *common; 3572 int ret; 3573 3574 common = dev_get_drvdata(dev); 3575 3576 ret = pm_runtime_resume_and_get(&pdev->dev); 3577 if (ret < 0) { 3578 /* Note, if this error path is taken, we're leaking some 3579 * resources. 
3580 */ 3581 dev_err(&pdev->dev, "Failed to resume device (%pe)\n", 3582 ERR_PTR(ret)); 3583 return; 3584 } 3585 3586 am65_cpsw_unregister_devlink(common); 3587 am65_cpsw_unregister_notifiers(common); 3588 3589 /* must unregister ndevs here because DD release_driver routine calls 3590 * dma_deconfigure(dev) before devres_release_all(dev) 3591 */ 3592 am65_cpsw_nuss_cleanup_ndev(common); 3593 am65_cpsw_nuss_phylink_cleanup(common); 3594 am65_cpts_release(common->cpts); 3595 am65_cpsw_disable_serdes_phy(common); 3596 3597 if (common->mdio_dev) 3598 of_platform_device_destroy(common->mdio_dev, NULL); 3599 3600 pm_runtime_put_sync(&pdev->dev); 3601 pm_runtime_disable(&pdev->dev); 3602 } 3603 3604 static int am65_cpsw_nuss_suspend(struct device *dev) 3605 { 3606 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3607 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3608 struct am65_cpsw_port *port; 3609 struct net_device *ndev; 3610 int i, ret; 3611 3612 cpsw_ale_dump(common->ale, common->ale_context); 3613 host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3614 for (i = 0; i < common->port_num; i++) { 3615 port = &common->ports[i]; 3616 ndev = port->ndev; 3617 3618 if (!ndev) 3619 continue; 3620 3621 port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3622 netif_device_detach(ndev); 3623 if (netif_running(ndev)) { 3624 rtnl_lock(); 3625 ret = am65_cpsw_nuss_ndo_slave_stop(ndev); 3626 rtnl_unlock(); 3627 if (ret < 0) { 3628 netdev_err(ndev, "failed to stop: %d", ret); 3629 return ret; 3630 } 3631 } 3632 } 3633 3634 am65_cpts_suspend(common->cpts); 3635 3636 am65_cpsw_nuss_remove_rx_chns(common); 3637 am65_cpsw_nuss_remove_tx_chns(common); 3638 3639 return 0; 3640 } 3641 3642 static int am65_cpsw_nuss_resume(struct device *dev) 3643 { 3644 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3645 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3646 struct am65_cpsw_port *port; 3647 struct net_device *ndev; 3648 int i, ret; 3649 3650 ret = am65_cpsw_nuss_init_tx_chns(common); 3651 if (ret) 3652 return ret; 3653 ret = am65_cpsw_nuss_init_rx_chns(common); 3654 if (ret) 3655 return ret; 3656 3657 /* If RX IRQ was disabled before suspend, keep it disabled */ 3658 if (common->rx_irq_disabled) 3659 disable_irq(common->rx_chns.irq); 3660 3661 am65_cpts_resume(common->cpts); 3662 3663 for (i = 0; i < common->port_num; i++) { 3664 port = &common->ports[i]; 3665 ndev = port->ndev; 3666 3667 if (!ndev) 3668 continue; 3669 3670 if (netif_running(ndev)) { 3671 rtnl_lock(); 3672 ret = am65_cpsw_nuss_ndo_slave_open(ndev); 3673 rtnl_unlock(); 3674 if (ret < 0) { 3675 netdev_err(ndev, "failed to start: %d", ret); 3676 return ret; 3677 } 3678 } 3679 3680 netif_device_attach(ndev); 3681 writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3682 } 3683 3684 writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3685 cpsw_ale_restore(common->ale, common->ale_context); 3686 3687 return 0; 3688 } 3689 3690 static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = { 3691 SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume) 3692 }; 3693 3694 static struct platform_driver am65_cpsw_nuss_driver = { 3695 .driver = { 3696 .name = AM65_CPSW_DRV_NAME, 3697 .of_match_table = am65_cpsw_nuss_of_mtable, 3698 .pm = &am65_cpsw_nuss_dev_pm_ops, 3699 }, 3700 .probe = am65_cpsw_nuss_probe, 3701 .remove_new = am65_cpsw_nuss_remove, 3702 }; 3703 3704 
module_platform_driver(am65_cpsw_nuss_driver); 3705 3706 MODULE_LICENSE("GPL v2"); 3707 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>"); 3708 MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver"); 3709