// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) Siemens AG, 2024
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "../k3-cppi-desc-pool.h"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_INTR | \
				 NETIF_MSG_TX_DONE | \
				 NETIF_MSG_RX_STATUS | \
				 NETIF_MSG_PKTDATA | \
				 NETIF_MSG_HW | \
				 NETIF_MSG_WOL)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)

void prueth_cleanup_rx_chns(struct prueth_emac *emac,
			    struct prueth_rx_chn *rx_chn,
			    int max_rflows)
{
	if (rx_chn->pg_pool) {
		page_pool_destroy(rx_chn->pg_pool);
		rx_chn->pg_pool = NULL;
	}

	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);

void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* Assume prueth_cleanup_tx_chns() is called at the
		 * end after all channel resources are freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);

void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);

void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
		      struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	struct prueth_swdata *swdata;
	struct page *page;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	swdata = cppi5_hdesc_get_swdata(desc);
	if (swdata->type == PRUETH_SWDATA_PAGE) {
		page = swdata->data.page;
		page_pool_recycle_direct(page->pp, swdata->data.page);
		goto free_desc;
	}

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

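		/* Chained fragment buffers were mapped with skb_frag_dma_map()
		 * in the xmit path, so release them with dma_unmap_page().
		 */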
		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

free_desc:
	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);

int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
			     int budget, bool *tdown)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			*tdown = true;
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		switch (swdata->type) {
		case PRUETH_SWDATA_SKB:
			skb = swdata->data.skb;
			dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
			total_bytes += skb->len;
			napi_consume_skb(skb, budget);
			break;
		case PRUETH_SWDATA_XDPF:
			xdpf = swdata->data.xdpf;
			dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
			total_bytes += xdpf->len;
			xdp_return_frame(xdpf);
			break;
		default:
			netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
			prueth_xmit_free(tx_chn, desc_tx);
			ndev->stats.tx_dropped++;
			continue;
		}

		prueth_xmit_free(tx_chn, desc_tx);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
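		/* Re-check queue state under the TX queue lock so we do not
		 * race with the xmit path stopping the queue.
		 */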
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	return num_tx;
}

static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
{
	struct prueth_tx_chn *tx_chns =
			container_of(timer, struct prueth_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	bool tdown = false;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
						  &tdown);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets)) {
		if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return num_tx_packets;
}

static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);

int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

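		/* The channel name ("tx<slice>-<index>") must match a
		 * dma-names entry in the ICSSG device tree node.
		 */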
		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (ret < 0) {
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}
		tx_chn->irq = ret;

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);

int prueth_init_rx_chns(struct prueth_emac *emac,
			struct prueth_rx_chn *rx_chn,
			char *name, u32 max_rflows,
			u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	int i, ret = 0, slice;
	int flow_id_base;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
		emac->rx_mgm_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
	} else {
		emac->rx_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
	}

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

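		/* Flows after the first one reuse flow 0's free descriptor
		 * queue; fdqring_id is updated below once flow 0 is set up.
		 */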
		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (ret < 0) {
			netdev_err(ndev, "Failed to get rx dma irq");
			goto fail;
		}
		rx_chn->irq[i] = ret;
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);

int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
			      struct prueth_rx_chn *rx_chn,
			      struct page *page, u32 buf_len)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;

	buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->type = PRUETH_SWDATA_PAGE;
	swdata->data.page = page;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
					desc_rx, desc_dma);
}
EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);

u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
	u32 iepcount_lo, iepcount_hi, hi_rollover_count;
	u64 ns;

	iepcount_lo = lo & GENMASK(19, 0);
	iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
	hi_rollover_count = hi >> 11;

	ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
	ns = ns * cycle_time_ns + iepcount_lo;

	return ns;
}
EXPORT_SYMBOL_GPL(icssg_ts_to_ns);

void emac_rx_timestamp(struct prueth_emac *emac,
		       struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	if (emac->is_sr1) {
		ns = (u64)psdata[1] << 32 | psdata[0];
	} else {
		u32 hi_sw = readl(emac->prueth->shram.va +
				  TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
				    IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}

/**
 * emac_xmit_xdp_frame - transmits an XDP frame
 * @emac: emac device
 * @xdpf: data to transmit
 * @page: page from page pool if already DMA mapped
 * @q_idx: queue id
 *
 * Return: XDP state
 */
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
			struct xdp_frame *xdpf,
			struct page *page,
			unsigned int q_idx)
{
	struct cppi5_host_desc_t *first_desc;
	struct net_device *ndev = emac->ndev;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	struct prueth_swdata *swdata;
	u32 *epib;
	int ret;

	if (q_idx >= PRUETH_MAX_TX_QUEUES) {
		netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
		return ICSSG_XDP_CONSUMED;	/* drop */
	}

	tx_chn = &emac->tx_chns[q_idx];

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
		goto drop_free_descs;	/* drop */
	}

	if (page) { /* already DMA mapped by page_pool */
		buf_dma = page_pool_get_dma_addr(page);
		buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
	} else { /* Map the linear buffer */
		buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "xdp tx: failed to map data buffer\n");
			goto drop_free_descs;	/* drop */
		}
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	if (page) {
		swdata->type = PRUETH_SWDATA_PAGE;
		swdata->data.page = page;
	} else {
		swdata->type = PRUETH_SWDATA_XDPF;
		swdata->data.xdpf = xdpf;
	}

	cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);

	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	return ICSSG_XDP_TX;

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);
	return ICSSG_XDP_CONSUMED;
}
EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);

/**
 * emac_run_xdp - run an XDP program
 * @emac: emac device
 * @xdp: XDP buffer containing the frame
 * @page: page with RX data if already DMA mapped
 * @len: Rx descriptor packet length
 *
 * Return: XDP state
 */
static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
			struct page *page, u32 *len)
{
	struct net_device *ndev = emac->ndev;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 pkt_len = *len;
	u32 act, result;
	int q_idx, err;

	xdp_prog = READ_ONCE(emac->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICSSG_XDP_PASS;
	case XDP_TX:
		/* Send packet to TX ring for immediate transmission */
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			ndev->stats.tx_dropped++;
			goto drop;
		}

		q_idx = smp_processor_id() % emac->tx_ch_num;
		result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
		if (result == ICSSG_XDP_CONSUMED)
			goto drop;

		dev_sw_netstats_rx_add(ndev, xdpf->len);
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
		if (err)
			goto drop;

		dev_sw_netstats_rx_add(ndev, pkt_len);
		return ICSSG_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
drop:
		trace_xdp_exception(emac->ndev, xdp_prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
		return ICSSG_XDP_CONSUMED;
	}
}

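/* Process one completed RX descriptor on @flow_id: run the attached XDP
 * program if any, otherwise build an skb and pass it up the stack, then
 * push a fresh page back to the free descriptor queue.
 */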
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma, buf_dma;
	struct page *page, *new_page;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	u32 *psdata;
	void *pa;
	int ret;

	*xdp_state = 0;
	pool = rx_chn->pg_pool;
	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
		return 0;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (swdata->type != PRUETH_SWDATA_PAGE) {
		netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		return 0;
	}

	page = swdata->data.page;
	page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with the old page to prevent a stall
	 */
	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	pa = page_address(page);
	if (emac->xdp_prog) {
		xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
		xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);

		*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
		if (*xdp_state == ICSSG_XDP_PASS)
			skb = xdp_build_skb_from_buff(&xdp);
		else
			goto requeue;
	} else {
		/* prepare skb and send to n/w stack */
		skb = napi_build_skb(pa, PAGE_SIZE);
	}

	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb_reserve(skb, PRUETH_HEADROOM);
	skb_put(skb, pkt_len);
	skb->dev = ndev;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* RX HW timestamp */
	if (emac->rx_ts_enabled)
		emac_rx_timestamp(emac, skb, psdata);

	if (emac->prueth->is_switch_mode)
		skb->offload_fwd_mark = emac->offload_fwd_mark;
	skb->protocol = eth_type_trans(skb, ndev);

	skb_mark_for_recycle(skb);
	napi_gro_receive(&emac->napi_rx, skb);
	ndev->stats.rx_bytes += pkt_len;
	ndev->stats.rx_packets++;

requeue:
	/* queue another RX DMA */
	ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
					PRUETH_MAX_PKT_SIZE);
	if (WARN_ON(ret < 0)) {
		page_pool_recycle_direct(pool, new_page);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	struct page_pool *pool;
	struct page *page;

	pool = rx_chn->pg_pool;
	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (swdata->type == PRUETH_SWDATA_PAGE) {
		page = swdata->data.page;
		page_pool_recycle_direct(pool, page);
	}

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}

static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
	int i;

	/* search and get the next free slot */
	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (!emac->tx_ts_skb[i]) {
			emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
			return i;
		}
	}

	return -EBUSY;
}

/**
 * icssg_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the networking stack to transmit a packet. The packet is
 * queued to the EMAC hardware transmit queue.
 * Doesn't wait for completion; TX completion is checked in
 * emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	u32 pkt_len, dst_tag_id;
	int i, ret = 0, q_idx;
	bool in_tx_ts = 0;
	int tx_ts_cookie;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    emac->tx_ts_enabled) {
		tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
		if (tx_ts_cookie >= 0) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* Request TX timestamp */
			epib[0] = (u32)tx_ts_cookie;
			epib[1] = 0x80000000;	/* TX TS request */
			emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
			in_tx_ts = 1;
		}
	}

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation and port num 0
	 * for undirected packets in case of HSR offload mode
	 */
	dst_tag_id = emac->port_id | (q_idx << 8);

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_DUP))
		dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;

	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	swdata->type = PRUETH_SWDATA_SKB;
	swdata->data.skb = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy_cleanup_tx_ts;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto cleanup_tx_ts;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);	/* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	if (in_tx_ts)
		atomic_inc(&emac->tx_ts_pending);

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

free_desc_stop_q_busy_cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);

static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct prueth_swdata *swdata;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);

	switch (swdata->type) {
	case PRUETH_SWDATA_SKB:
		skb = swdata->data.skb;
		dev_kfree_skb_any(skb);
		break;
	case PRUETH_SWDATA_XDPF:
		xdpf = swdata->data.xdpf;
		xdp_return_frame(xdpf);
		break;
	default:
		break;
	}

	prueth_xmit_free(tx_chn, desc_tx);
}

irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);

void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (emac->tx_ts_skb[i]) {
			dev_kfree_skb_any(emac->tx_ts_skb[i]);
			emac->tx_ts_skb[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);

int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = emac->is_sr1 ?
			PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
	int flow = emac->is_sr1 ?
			PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
	int xdp_state_or = 0;
	int num_rx = 0;
	int cur_budget;
	u32 xdp_state;
	int ret;

	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = emac_rx_packet(emac, flow, &xdp_state);
			xdp_state_or |= xdp_state;
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (xdp_state_or & ICSSG_XDP_REDIR)
		xdp_do_flush();

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (unlikely(emac->rx_pace_timeout_ns)) {
			hrtimer_start(&emac->rx_hrtimer,
				      ns_to_ktime(emac->rx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(emac->rx_chns.irq[rx_flow]);
		}
	}

	return num_rx;
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);

static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
						 struct device *dma_dev,
						 int size)
{
	struct page_pool_params pp_params = { 0 };
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = size;
	pp_params.nid = dev_to_node(emac->prueth->dev);
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = dma_dev;
	pp_params.napi = &emac->napi_rx;
	pp_params.max_len = PAGE_SIZE;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		netdev_err(emac->ndev, "cannot create rx page pool\n");

	return pool;
}

int prueth_prepare_rx_chan(struct prueth_emac *emac,
			   struct prueth_rx_chn *chn,
			   int buf_size)
{
	struct page_pool *pool;
	struct page *page;
	int i, ret;

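	/* One page is pre-allocated per RX descriptor; the page pool keeps
	 * the pages DMA mapped while it owns them (PP_FLAG_DMA_MAP).
	 */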
	pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	chn->pg_pool = pool;

	for (i = 0; i < chn->descs_num; i++) {
		/* NOTE: we're not using memory efficiently here.
		 * 1 full page (4KB?) used here instead of
		 * PRUETH_MAX_PKT_SIZE (~1.5KB?)
		 */
		page = page_pool_dev_alloc_pages(pool);
		if (!page) {
			netdev_err(emac->ndev, "couldn't allocate rx page\n");
			ret = -ENOMEM;
			goto recycle_alloc_pg;
		}

		ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
		if (ret < 0) {
			netdev_err(emac->ndev,
				   "cannot submit page for rx chan %s ret %d\n",
				   chn->name, ret);
			page_pool_recycle_direct(pool, page);
			goto recycle_alloc_pg;
		}
	}

	return 0;

recycle_alloc_pg:
	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);

	return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);

void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
			  bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}
EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);

void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
			  int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup, !!i);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);

	page_pool_destroy(chn->pg_pool);
	chn->pg_pool = NULL;
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);

void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}
EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);

static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		emac->tx_ts_enabled = 0;
		break;
	case HWTSTAMP_TX_ON:
		emac->tx_ts_enabled = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		emac->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		emac->rx_ts_enabled = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return emac_get_ts_config(ndev, ifr);
	case SIOCSHWTSTAMP:
		return emac_set_ts_config(ndev, ifr);
	default:
		break;
	}

	return phy_do_ioctl(ndev, ifr, cmd);
}
EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);

void icssg_ndo_get_stats64(struct net_device *ndev,
			   struct rtnl_link_stats64 *stats)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	emac_update_hardware_stats(emac);

	stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
	stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
	stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
	stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
	stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
	stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
	stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");

	stats->rx_errors = ndev->stats.rx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_errors = ndev->stats.tx_errors;
	stats->tx_dropped = ndev->stats.tx_dropped;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);

int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
				 size_t len)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret;

	ret = snprintf(name, len, "p%d", emac->port_id);
	if (ret >= len)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);

/* get emac_port corresponding to eth_node name */
int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_port);

/* get MAC instance corresponding to eth_node name */
int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_mac);

void prueth_netdev_exit(struct prueth *prueth,
			struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);

	pruss_release_mem_region(prueth->pruss, &emac->dram);
	destroy_workqueue(emac->cmd_wq);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}
EXPORT_SYMBOL_GPL(prueth_netdev_exit);

int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = is_sr1 ? 2 : 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	if (is_sr1)
		return 0;

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(prueth_get_cores);

void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_put_cores);

#ifdef CONFIG_PM_SLEEP
static int prueth_suspend(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			ret = ndev->netdev_ops->ndo_stop(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to stop: %d", ret);
				return ret;
			}
		}
	}

	return 0;
}

static int prueth_resume(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			ret = ndev->netdev_ops->ndo_open(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to start: %d", ret);
				return ret;
			}
			netif_device_attach(ndev);
		}
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

const struct dev_pm_ops prueth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
};
EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
MODULE_LICENSE("GPL");