// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) Siemens AG, 2024
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "../k3-cppi-desc-pool.h"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_INTR | \
				 NETIF_MSG_TX_DONE | \
				 NETIF_MSG_RX_STATUS | \
				 NETIF_MSG_PKTDATA | \
				 NETIF_MSG_HW | \
				 NETIF_MSG_WOL)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)

void prueth_cleanup_rx_chns(struct prueth_emac *emac,
			    struct prueth_rx_chn *rx_chn,
			    int max_rflows)
{
	if (rx_chn->pg_pool) {
		page_pool_destroy(rx_chn->pg_pool);
		rx_chn->pg_pool = NULL;
	}

	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);

void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* Assume prueth_cleanup_tx_chns() is called at the
		 * end after all channel resources are freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);

void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);

void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
		      struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

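	/* All chained fragment descriptors are unmapped and freed above;
	 * the first descriptor, whose linear buffer was unmapped with
	 * dma_unmap_single(), goes back to the pool last.
	 */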
	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);

int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
			     int budget, bool *tdown)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			*tdown = true;
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		switch (swdata->type) {
		case PRUETH_SWDATA_SKB:
			skb = swdata->data.skb;
			dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
			total_bytes += skb->len;
			napi_consume_skb(skb, budget);
			break;
		case PRUETH_SWDATA_XDPF:
			xdpf = swdata->data.xdpf;
			dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
			total_bytes += xdpf->len;
			xdp_return_frame(xdpf);
			break;
		default:
			prueth_xmit_free(tx_chn, desc_tx);
			ndev->stats.tx_dropped++;
			continue;
		}

		prueth_xmit_free(tx_chn, desc_tx);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
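		/* Re-check the descriptor count under the TX queue lock;
		 * the xmit path stops the queue under the same lock, so
		 * this cannot race with a concurrent stop.
		 */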
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	return num_tx;
}

static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
{
	struct prueth_tx_chn *tx_chns =
		container_of(timer, struct prueth_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	bool tdown = false;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
						  &tdown);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets)) {
		if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return num_tx_packets;
}

static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_PINNED);
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);

int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
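			/* NULL the handle so prueth_cleanup_tx_chns()
			 * doesn't release a channel that was never
			 * successfully requested
			 */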
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (ret < 0) {
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}
		tx_chn->irq = ret;

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);

int prueth_init_rx_chns(struct prueth_emac *emac,
			struct prueth_rx_chn *rx_chn,
			char *name, u32 max_rflows,
			u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	int i, ret = 0, slice;
	int flow_id_base;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
		emac->rx_mgm_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
	} else {
		emac->rx_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
	}

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
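		/* Flows after the first reuse flow 0's free-descriptor
		 * queue (the rings are created K3_RINGACC_RING_SHARED);
		 * fdqring_id is captured from flow 0 below.
		 */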
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (ret < 0) {
			netdev_err(ndev, "Failed to get rx dma irq");
			goto fail;
		}
		rx_chn->irq[i] = ret;
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);

int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
			      struct prueth_rx_chn *rx_chn,
			      struct page *page, u32 buf_len)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;

	buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->type = PRUETH_SWDATA_PAGE;
	swdata->data.page = page;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
					desc_rx, desc_dma);
}
EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);

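/**
 * icssg_ts_to_ns - reassemble a firmware IEP timestamp into nanoseconds
 * @hi_sw: software-maintained upper-count word read from firmware shared RAM
 * @hi: upper 32 bits of the hardware timestamp
 * @lo: lower 32 bits of the hardware timestamp
 * @cycle_time_ns: IEP cycle time in nanoseconds
 *
 * As consumed here, lo[19:0] is the intra-cycle IEP count,
 * {hi[11:0], lo[31:20]} plus @hi_sw the cycle count, and hi[31:11] the
 * cycle rollover count, giving:
 *
 *	ns = ((rollovers << 23) | cycles) * cycle_time_ns + iepcount_lo
 *
 * Return: timestamp in nanoseconds
 */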
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
	u32 iepcount_lo, iepcount_hi, hi_rollover_count;
	u64 ns;

	iepcount_lo = lo & GENMASK(19, 0);
	iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
	hi_rollover_count = hi >> 11;

	ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
	ns = ns * cycle_time_ns + iepcount_lo;

	return ns;
}
EXPORT_SYMBOL_GPL(icssg_ts_to_ns);

void emac_rx_timestamp(struct prueth_emac *emac,
		       struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	if (emac->is_sr1) {
		ns = (u64)psdata[1] << 32 | psdata[0];
	} else {
		u32 hi_sw = readl(emac->prueth->shram.va +
				  TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
				    IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}

/**
 * emac_xmit_xdp_frame - transmits an XDP frame
 * @emac: emac device
 * @xdpf: data to transmit
 * @page: page from page pool if already DMA mapped
 * @q_idx: queue id
 *
 * Return: XDP state
 */
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
			struct xdp_frame *xdpf,
			struct page *page,
			unsigned int q_idx)
{
	struct cppi5_host_desc_t *first_desc;
	struct net_device *ndev = emac->ndev;
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	struct prueth_swdata *swdata;
	u32 *epib;
	int ret;

	if (q_idx >= PRUETH_MAX_TX_QUEUES) {
		netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
		return ICSSG_XDP_CONSUMED; /* drop */
	}

	tx_chn = &emac->tx_chns[q_idx];

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
		return ICSSG_XDP_CONSUMED; /* drop */
	}

	if (page) { /* already DMA mapped by page_pool */
		buf_dma = page_pool_get_dma_addr(page);
		buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
	} else { /* Map the linear buffer */
		buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "xdp tx: failed to map data buffer\n");
			goto drop_free_descs; /* drop */
		}
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	swdata->type = PRUETH_SWDATA_XDPF;
	swdata->data.xdpf = xdpf;

	/* Report BQL before sending the packet */
	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
	netdev_tx_sent_queue(netif_txq, xdpf->len);

	cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);

	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
		netdev_tx_completed_queue(netif_txq, 1, xdpf->len);
		goto drop_free_descs;
	}

	return ICSSG_XDP_TX;

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);
	return ICSSG_XDP_CONSUMED;
}
EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);

/**
 * emac_run_xdp - run an XDP program
 * @emac: emac device
 * @xdp: XDP buffer containing the frame
 * @page: page with RX data if already DMA mapped
 * @len: Rx descriptor packet length
 *
 * Return: XDP state
 */
static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
			struct page *page, u32 *len)
{
	struct net_device *ndev = emac->ndev;
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 pkt_len = *len;
	u32 act, result;
	int q_idx, err;

	xdp_prog = READ_ONCE(emac->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICSSG_XDP_PASS;
	case XDP_TX:
		/* Send packet to TX ring for immediate transmission */
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			ndev->stats.tx_dropped++;
			goto drop;
		}

		q_idx = cpu % emac->tx_ch_num;
		netif_txq = netdev_get_tx_queue(ndev, q_idx);
		__netif_tx_lock(netif_txq, cpu);
		result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
		__netif_tx_unlock(netif_txq);
		if (result == ICSSG_XDP_CONSUMED) {
			ndev->stats.tx_dropped++;
			goto drop;
		}

		dev_sw_netstats_rx_add(ndev, xdpf->len);
		return result;
	case XDP_REDIRECT:
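		/* Hand the page off to the core for redirection to another
		 * device or an AF_XDP socket; the matching xdp_do_flush()
		 * runs at the end of the NAPI poll in icssg_napi_rx_poll().
		 */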
		err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
		if (err)
			goto drop;

		dev_sw_netstats_rx_add(ndev, pkt_len);
		return ICSSG_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
drop:
		trace_xdp_exception(emac->ndev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
		return ICSSG_XDP_CONSUMED;
	}
}

static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma, buf_dma;
	struct page *page, *new_page;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	int headroom, ret;
	u32 *psdata;
	void *pa;

	*xdp_state = 0;
	pool = rx_chn->pg_pool;
	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
		return 0;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (swdata->type != PRUETH_SWDATA_PAGE) {
		netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		return 0;
	}

	page = swdata->data.page;
	page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with old page to prevent a stall
	 */
	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	pa = page_address(page);
	if (emac->xdp_prog) {
		xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
		xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);

		*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
		if (*xdp_state != ICSSG_XDP_PASS)
			goto requeue;
		headroom = xdp.data - xdp.data_hard_start;
		pkt_len = xdp.data_end - xdp.data;
	} else {
		headroom = PRUETH_HEADROOM;
	}

	/* prepare skb and send to n/w stack */
	skb = napi_build_skb(pa, PAGE_SIZE);
	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb_reserve(skb, headroom);
	skb_put(skb, pkt_len);
	skb->dev = ndev;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* RX HW timestamp */
	if (emac->rx_ts_enabled)
		emac_rx_timestamp(emac, skb, psdata);

	if (emac->prueth->is_switch_mode)
		skb->offload_fwd_mark = emac->offload_fwd_mark;
	skb->protocol = eth_type_trans(skb, ndev);

	skb_mark_for_recycle(skb);
	napi_gro_receive(&emac->napi_rx, skb);
	ndev->stats.rx_bytes += pkt_len;
	ndev->stats.rx_packets++;

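/* Success and drop paths both land here: push a page back so the RX
 * flow never runs dry; the fresh page normally, or the old one when
 * its replacement could not be allocated.
 */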
requeue:
	/* queue another RX DMA */
	ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
					PRUETH_MAX_PKT_SIZE);
	if (WARN_ON(ret < 0)) {
		page_pool_recycle_direct(pool, new_page);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	struct page_pool *pool;
	struct page *page;

	pool = rx_chn->pg_pool;
	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (swdata->type == PRUETH_SWDATA_PAGE) {
		page = swdata->data.page;
		page_pool_recycle_direct(pool, page);
	}

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}

static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
	int i;

	/* search and get the next free slot */
	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (!emac->tx_ts_skb[i]) {
			emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
			return i;
		}
	}

	return -EBUSY;
}

/**
 * icssg_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the system to transmit a packet; the packet is queued on the
 * EMAC hardware transmit queue. Doesn't wait for completion; TX completion
 * is handled in emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	u32 pkt_len, dst_tag_id;
	int i, ret = 0, q_idx;
	bool in_tx_ts = false;
	int tx_ts_cookie;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    emac->tx_ts_enabled) {
		tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
		if (tx_ts_cookie >= 0) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* Request TX timestamp */
			epib[0] = (u32)tx_ts_cookie;
			epib[1] = 0x80000000;	/* TX TS request */
			emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
			in_tx_ts = true;
		}
	}
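
	/* The cookie in epib[0] lets the TX timestamp completion path
	 * (outside this file) match the firmware's timestamp event back
	 * to the skb reference held in tx_ts_skb[].
	 */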

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation and port num 0
	 * for undirected packets in case of HSR offload mode
	 */
	dst_tag_id = emac->port_id | (q_idx << 8);

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_DUP))
		dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;

	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	swdata->type = PRUETH_SWDATA_SKB;
	swdata->data.skb = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy_cleanup_tx_ts;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto cleanup_tx_ts;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);	/* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		goto drop_free_descs;
	}

	if (in_tx_ts)
		atomic_inc(&emac->tx_ts_pending);

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

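/* Out of descriptors mid fragment-chain: undo the TS reservation, free
 * the partially built chain and return NETDEV_TX_BUSY so the stack
 * retries the skb once the queue is woken.
 */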
free_desc_stop_q_busy_cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);

static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct prueth_swdata *swdata;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);

	switch (swdata->type) {
	case PRUETH_SWDATA_SKB:
		skb = swdata->data.skb;
		dev_kfree_skb_any(skb);
		break;
	case PRUETH_SWDATA_XDPF:
		xdpf = swdata->data.xdpf;
		xdp_return_frame(xdpf);
		break;
	default:
		break;
	}

	prueth_xmit_free(tx_chn, desc_tx);
}

irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);

void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (emac->tx_ts_skb[i]) {
			dev_kfree_skb_any(emac->tx_ts_skb[i]);
			emac->tx_ts_skb[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);

int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = emac->is_sr1 ?
			PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
	int flow = emac->is_sr1 ?
			PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
	int xdp_state_or = 0;
	int num_rx = 0;
	int cur_budget;
	u32 xdp_state;
	int ret;

	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = emac_rx_packet(emac, flow, &xdp_state);
			xdp_state_or |= xdp_state;
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (xdp_state_or & ICSSG_XDP_REDIR)
		xdp_do_flush();

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (unlikely(emac->rx_pace_timeout_ns)) {
			hrtimer_start(&emac->rx_hrtimer,
				      ns_to_ktime(emac->rx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(emac->rx_chns.irq[rx_flow]);
		}
	}

	return num_rx;
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);

static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
						 struct device *dma_dev,
						 int size)
{
	struct page_pool_params pp_params = { 0 };
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = size;
	pp_params.nid = dev_to_node(emac->prueth->dev);
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = dma_dev;
	pp_params.napi = &emac->napi_rx;
	pp_params.max_len = PAGE_SIZE;

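	/* PP_FLAG_DMA_MAP/_DMA_SYNC_DEV delegate page mapping and device
	 * syncs to the pool, and DMA_BIDIRECTIONAL lets XDP_TX transmit
	 * straight out of RX pages; one page backs each in-flight
	 * descriptor.
	 */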
	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		netdev_err(emac->ndev, "cannot create rx page pool\n");

	return pool;
}

int prueth_prepare_rx_chan(struct prueth_emac *emac,
			   struct prueth_rx_chn *chn,
			   int buf_size)
{
	struct page_pool *pool;
	struct page *page;
	int i, ret;

	pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	chn->pg_pool = pool;

	for (i = 0; i < chn->descs_num; i++) {
		/* NOTE: we're not using memory efficiently here.
		 * 1 full page (4KB?) used here instead of
		 * PRUETH_MAX_PKT_SIZE (~1.5KB?)
		 */
		page = page_pool_dev_alloc_pages(pool);
		if (!page) {
			netdev_err(emac->ndev, "couldn't allocate rx page\n");
			ret = -ENOMEM;
			goto recycle_alloc_pg;
		}

		ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
		if (ret < 0) {
			netdev_err(emac->ndev,
				   "cannot submit page for rx chan %s ret %d\n",
				   chn->name, ret);
			page_pool_recycle_direct(pool, page);
			goto recycle_alloc_pg;
		}
	}

	return 0;

recycle_alloc_pg:
	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);

	return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);

void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
			  bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}
EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);

void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
			  int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);

void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}
EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);

static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		emac->tx_ts_enabled = 0;
		break;
	case HWTSTAMP_TX_ON:
		emac->tx_ts_enabled = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		emac->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		emac->rx_ts_enabled = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

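/* Userspace drives this through the standard hwtstamp ioctl; a minimal
 * sketch (the fd and "eth1" are illustrative, error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "eth1");
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that any specific PTP rx_filter is coarsened to
 * HWTSTAMP_FILTER_ALL above.
 */
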
static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return emac_get_ts_config(ndev, ifr);
	case SIOCSHWTSTAMP:
		return emac_set_ts_config(ndev, ifr);
	default:
		break;
	}

	return phy_do_ioctl(ndev, ifr, cmd);
}
EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);

void icssg_ndo_get_stats64(struct net_device *ndev,
			   struct rtnl_link_stats64 *stats)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	emac_update_hardware_stats(emac);

	stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
	stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
	stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
	stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
	stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
	stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
	stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");

	stats->rx_errors = ndev->stats.rx_errors +
			   emac_get_stat_by_name(emac, "FW_RX_ERROR") +
			   emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
			   emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
			   emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
			   emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
	stats->rx_dropped = ndev->stats.rx_dropped +
			    emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
			    emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
			    emac_get_stat_by_name(emac, "FW_INF_SAV") +
			    emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
			    emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
	stats->tx_errors = ndev->stats.tx_errors;
	stats->tx_dropped = ndev->stats.tx_dropped +
			    emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
			    emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
			    emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
			    emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);

int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
				 size_t len)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret;

	ret = snprintf(name, len, "p%d", emac->port_id);
	if (ret >= len)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);
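
/* The two lookups below key off the "reg" property of the port sub-node,
 * as in this illustrative (not board-specific) fragment of an ICSSG node:
 *
 *	ethernet-ports {
 *		port@0 { reg = <0>; };	// PRUETH_PORT_MII0 / PRUETH_MAC0
 *		port@1 { reg = <1>; };	// PRUETH_PORT_MII1 / PRUETH_MAC1
 *	};
 */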

/* get emac_port corresponding to eth_node name */
int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_port);

/* get MAC instance corresponding to eth_node name */
int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_mac);

void prueth_netdev_exit(struct prueth *prueth,
			struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);

	pruss_release_mem_region(prueth->pruss, &emac->dram);
	destroy_workqueue(emac->cmd_wq);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}
EXPORT_SYMBOL_GPL(prueth_netdev_exit);

int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = is_sr1 ? 2 : 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	if (is_sr1)
		return 0;

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(prueth_get_cores);

void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_put_cores);

#ifdef CONFIG_PM_SLEEP
static int prueth_suspend(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			ret = ndev->netdev_ops->ndo_stop(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to stop: %d", ret);
				return ret;
			}
		}
	}

	return 0;
}

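/* Resume mirrors suspend: interfaces that were running are brought back
 * up through ndo_open() and then re-attached.
 */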
static int prueth_resume(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			ret = ndev->netdev_ops->ndo_open(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to start: %d", ret);
				return ret;
			}
			netif_device_attach(ndev);
		}
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

const struct dev_pm_ops prueth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
};
EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
MODULE_LICENSE("GPL");