// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in industrial automation.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the driver.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/xdp_sock_drv.h>

#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* XSK buffer shall store at least Q-in-Q frame */
#define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \
				     ETH_FRAME_LEN + ETH_FCS_LEN + \
				     VLAN_HLEN * 2, 4))

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))

#define TSNEP_COALESCE_USECS_DEFAULT 64
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
				  ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)

/* mapping type */
#define TSNEP_TX_TYPE_MAP		BIT(0)
#define TSNEP_TX_TYPE_MAP_PAGE		BIT(1)
#define TSNEP_TX_TYPE_INLINE		BIT(2)
/* buffer type */
#define TSNEP_TX_TYPE_SKB		BIT(8)
#define TSNEP_TX_TYPE_SKB_MAP		(TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP)
#define TSNEP_TX_TYPE_SKB_INLINE	(TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE)
#define TSNEP_TX_TYPE_SKB_FRAG		BIT(9)
#define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE	(TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_SKB_FRAG_INLINE	(TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE)
#define TSNEP_TX_TYPE_XDP_TX		BIT(10)
#define TSNEP_TX_TYPE_XDP_NDO		BIT(11)
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE	(TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP		(TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK		BIT(12)

#define TSNEP_XDP_TX		BIT(0)
#define TSNEP_XDP_REDIRECT	BIT(1)

static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0)
		phy_mac_interrupt(adapter->netdev->phydev);

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		if (napi_schedule_prep(&adapter->queue[0].napi)) {
			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
			/* schedule after masking to avoid races */
			__napi_schedule(&adapter->queue[0].napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
	struct tsnep_queue *queue = arg;

	/* handle TX/RX queue interrupt */
	if (napi_schedule_prep(&queue->napi)) {
		tsnep_disable_irq(queue->adapter, queue->irq_mask);
		/* schedule after masking to avoid races */
		__napi_schedule(&queue->napi);
	}

	return IRQ_HANDLED;
}

int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
{
	if (usecs > TSNEP_COALESCE_USECS_MAX)
		return -ERANGE;

	usecs /= ECM_INT_DELAY_BASE_US;
	usecs <<= ECM_INT_DELAY_SHIFT;
	usecs &= ECM_INT_DELAY_MASK;

	queue->irq_delay &= ~ECM_INT_DELAY_MASK;
	queue->irq_delay |= usecs;
	iowrite8(queue->irq_delay, queue->irq_delay_addr);

	return 0;
}

u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
{
	u32 usecs;

	usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
	usecs >>= ECM_INT_DELAY_SHIFT;
	usecs *= ECM_INT_DELAY_BASE_US;

	return usecs;
}

static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}

static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	u32 mode;

	switch (adapter->phydev->speed) {
	case SPEED_100:
		mode = ECM_LINK_MODE_100;
		break;
	case SPEED_1000:
		mode = ECM_LINK_MODE_1000;
		break;
	default:
		mode = ECM_LINK_MODE_OFF;
		break;
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	int retval;

	retval = phy_loopback(adapter->phydev, enable);

	/* PHY link state change is not signaled if loopback is enabled; it
	 * would only delay a working loopback anyway, so ensure that loopback
	 * works immediately by setting the link mode directly
	 */
	if (!retval && enable)
		tsnep_set_link_mode(adapter);

	return retval;
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_create(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
			entry->owner_user_flag = false;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}

static void tsnep_tx_init(struct tsnep_tx *tx)
{
	dma_addr_t dma;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->write = 0;
	tx->read = 0;
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
}
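
/* Overview of the ownership scheme set up here: tsnep_tx_activate() below
 * lets tx->owner_counter cycle through the values 1..3 and bumps it once per
 * ring traversal (tracked via tx->increment_owner_counter). The hardware
 * copies the owner counter and the user flag into the writeback descriptor,
 * which is how tsnep_tx_poll() tells freshly completed descriptors apart
 * from stale writebacks of an earlier traversal.
 */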

static void tsnep_tx_enable(struct tsnep_tx *tx)
{
	struct netdev_queue *nq;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);

	__netif_tx_lock_bh(nq);
	netif_tx_wake_queue(nq);
	__netif_tx_unlock_bh(nq);
}

static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
{
	struct netdev_queue *nq;
	u32 val;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);

	__netif_tx_lock_bh(nq);
	netif_tx_stop_queue(nq);
	__netif_tx_unlock_bh(nq);

	/* wait until TX is done in hardware */
	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	/* wait until TX is also done in software */
	while (READ_ONCE(tx->read) != tx->write) {
		napi_schedule(napi);
		napi_synchronize(napi);
	}
}

static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	/* xdpf and zc are union with skb */
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
	if (entry->type & TSNEP_TX_TYPE_INLINE)
		entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
			     struct device *dmadev, dma_addr_t *dma)
{
	unsigned int len;
	int mapped;

	len = skb_frag_size(frag);
	if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
		*dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dmadev, *dma))
			return -ENOMEM;
		entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE;
		mapped = 1;
	} else {
		void *fragdata = skb_frag_address_safe(frag);

		if (likely(fragdata)) {
			memcpy(&entry->desc->tx, fragdata, len);
		} else {
			struct page *page = skb_frag_page(frag);

			fragdata = kmap_local_page(page);
			memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag),
			       len);
			kunmap_local(fragdata);
		}
		entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE;
		mapped = 0;
	}

	return mapped;
}

static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	int map_len = 0;
	dma_addr_t dma;
	int i, mapped;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];

		if (!i) {
			len = skb_headlen(skb);
			if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
				dma = dma_map_single(dmadev, skb->data, len,
						     DMA_TO_DEVICE);
				if (dma_mapping_error(dmadev, dma))
					return -ENOMEM;
				entry->type = TSNEP_TX_TYPE_SKB_MAP;
				mapped = 1;
			} else {
				memcpy(&entry->desc->tx, skb->data, len);
				entry->type = TSNEP_TX_TYPE_SKB_INLINE;
				mapped = 0;
			}
		} else {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

			len = skb_frag_size(frag);
			mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma);
			if (mapped < 0)
				return mapped;
		}

		entry->len = len;
		if (likely(mapped)) {
			dma_unmap_addr_set(entry, dma, dma);
			entry->desc->tx = __cpu_to_le64(dma);
		}

		map_len += len;
	}

	return map_len;
}
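
/* Note on unmapping below: buffers small enough to fit behind the descriptor
 * (<= TSNEP_DESC_SIZE_DATA_AFTER_INLINE) were copied inline by tsnep_tx_map()
 * and tsnep_tx_map_frag() and carry no DMA mapping, so only entries of type
 * TSNEP_TX_TYPE_MAP and TSNEP_TX_TYPE_MAP_PAGE need to be unmapped.
 */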

static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) & TSNEP_RING_MASK];

		if (entry->len) {
			if (entry->type & TSNEP_TX_TYPE_MAP)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE)
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}

static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	int count = 1;
	struct tsnep_tx_entry *entry;
	int length;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, shall not happen because queue is stopped if full
		 * below
		 */
		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		return NETDEV_TX_OK;
	}
	length = retval;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
				  i == count - 1);
	tx->write = (tx->write + count) & TSNEP_RING_MASK;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
	}

	return NETDEV_TX_OK;
}

static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
			    struct skb_shared_info *shinfo, int count, u32 type)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct page *page;
	skb_frag_t *frag;
	unsigned int len;
	int map_len = 0;
	dma_addr_t dma;
	void *data;
	int i;

	frag = NULL;
	len = xdpf->len;
	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
		if (type & TSNEP_TX_TYPE_XDP_NDO) {
			data = unlikely(frag) ? skb_frag_address(frag) :
						xdpf->data;
			dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
			if (dma_mapping_error(dmadev, dma))
				return -ENOMEM;

			entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE;
		} else {
			page = unlikely(frag) ? skb_frag_page(frag) :
						virt_to_page(xdpf->data);
			dma = page_pool_get_dma_addr(page);
			if (unlikely(frag))
				dma += skb_frag_off(frag);
			else
				dma += sizeof(*xdpf) + xdpf->headroom;
			dma_sync_single_for_device(dmadev, dma, len,
						   DMA_BIDIRECTIONAL);

			entry->type = TSNEP_TX_TYPE_XDP_TX;
		}

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;

		if (i + 1 < count) {
			frag = &shinfo->frags[i];
			len = skb_frag_size(frag);
		}
	}

	return map_len;
}
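
/* TSNEP_TX_TYPE_XDP_TX frames above reference pages owned by the RX page
 * pool, which are already DMA mapped, so only a sync for device is needed;
 * TSNEP_TX_TYPE_XDP_NDO frames (ndo_xdp_xmit) reference foreign memory and
 * are mapped with dma_map_single() here and released later via
 * tsnep_tx_unmap().
 */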

/* This function requires __netif_tx_lock is held by the caller. */
static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
				      struct tsnep_tx *tx, u32 type)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
	struct tsnep_tx_entry *entry;
	int count, length, retval, i;

	count = 1;
	if (unlikely(xdp_frame_has_frags(xdpf)))
		count += shinfo->nr_frags;

	/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
	 * will be available for normal TX path and queue is stopped there if
	 * necessary
	 */
	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
		return false;

	entry = &tx->entry[tx->write];
	entry->xdpf = xdpf;

	retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		entry->xdpf = NULL;

		tx->dropped++;

		return false;
	}
	length = retval;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
				  i == count - 1);
	tx->write = (tx->write + count) & TSNEP_RING_MASK;

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	return true;
}

static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
{
	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
}

static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
				struct xdp_buff *xdp,
				struct netdev_queue *tx_nq, struct tsnep_tx *tx,
				bool zc)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	bool xmit;
	u32 type;

	if (unlikely(!xdpf))
		return false;

	/* no page pool for zero copy */
	if (zc)
		type = TSNEP_TX_TYPE_XDP_NDO;
	else
		type = TSNEP_TX_TYPE_XDP_TX;

	__netif_tx_lock(tx_nq, smp_processor_id());

	xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);

	/* Avoid transmit queue timeout since we share it with the slow path */
	if (xmit)
		txq_trans_cond_update(tx_nq);

	__netif_tx_unlock(tx_nq);

	return xmit;
}

static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
{
	struct tsnep_tx_entry *entry;
	dma_addr_t dma;

	entry = &tx->entry[tx->write];
	entry->zc = true;

	dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
	xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);

	entry->type = TSNEP_TX_TYPE_XSK;
	entry->len = xdpd->len;

	entry->desc->tx = __cpu_to_le64(dma);

	return xdpd->len;
}

static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
					 struct tsnep_tx *tx)
{
	int length;

	length = tsnep_xdp_tx_map_zc(xdpd, tx);

	tsnep_tx_activate(tx, tx->write, length, true);
	tx->write = (tx->write + 1) & TSNEP_RING_MASK;
}

static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
{
	int desc_available = tsnep_tx_desc_available(tx);
	struct xdp_desc *descs = tx->xsk_pool->tx_descs;
	int batch, i;

	/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
	 * will be available for normal TX path and queue is stopped there if
	 * necessary
	 */
	if (desc_available <= (MAX_SKB_FRAGS + 1))
		return;
	desc_available -= MAX_SKB_FRAGS + 1;

	batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
	for (i = 0; i < batch; i++)
		tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);

	if (batch) {
		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		tsnep_xdp_xmit_flush(tx);
	}
}

static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	struct tsnep_tx_entry *entry;
	struct netdev_queue *nq;
	int xsk_frames = 0;
	int budget = 128;
	int length;
	int count;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
	__netif_tx_lock(nq, smp_processor_id());

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
		    skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;
		else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
			 xdp_frame_has_frags(entry->xdpf))
			count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		if (entry->type & TSNEP_TX_TYPE_SKB)
			napi_consume_skb(entry->skb, napi_budget);
		else if (entry->type & TSNEP_TX_TYPE_XDP)
			xdp_return_frame_rx_napi(entry->xdpf);
		else
			xsk_frames++;
		/* xdpf and zc are union with skb */
		entry->skb = NULL;

		tx->read = (tx->read + count) & TSNEP_RING_MASK;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if (tx->xsk_pool) {
		if (xsk_frames)
			xsk_tx_completed(tx->xsk_pool, xsk_frames);
		if (xsk_uses_need_wakeup(tx->xsk_pool))
			xsk_set_tx_need_wakeup(tx->xsk_pool);
		tsnep_xdp_xmit_zc(tx);
	}

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_tx_queue_stopped(nq)) {
		netif_tx_wake_queue(nq);
	}

	__netif_tx_unlock(nq);

	return budget != 0;
}

static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
	struct tsnep_tx_entry *entry;
	struct netdev_queue *nq;
	bool pending = false;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
	__netif_tx_lock(nq, smp_processor_id());

	if (tx->read != tx->write) {
		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) ==
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			pending = true;
	}

	__netif_tx_unlock(nq);

	return pending;
}

static int tsnep_tx_open(struct tsnep_tx *tx)
{
	int retval;

	retval = tsnep_tx_ring_create(tx);
	if (retval)
		return retval;

	tsnep_tx_init(tx);

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	tsnep_tx_ring_cleanup(tx);
}
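
/* RX side: as on the TX side, descriptors live in coherent ring pages. The
 * data buffers come either from a page pool (normal and XDP copy mode) or
 * from an XSK buffer pool (zero-copy), selected at runtime via rx->xsk_pool.
 */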

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (!rx->xsk_pool && entry->page)
			page_pool_put_full_page(rx->page_pool, entry->page,
						false);
		if (rx->xsk_pool && entry->xdp)
			xsk_buff_free(entry->xdp);
		/* xdp is union with page */
		entry->page = NULL;
	}

	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_ring_create(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct page_pool_params pp_params = { 0 };
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.order = 0;
	pp_params.pool_size = TSNEP_RING_SIZE;
	pp_params.nid = dev_to_node(dmadev);
	pp_params.dev = dmadev;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
	pp_params.offset = TSNEP_RX_OFFSET;
	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool)) {
		retval = PTR_ERR(rx->page_pool);
		rx->page_pool = NULL;
		goto failed;
	}

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static void tsnep_rx_init(struct tsnep_rx *rx)
{
	dma_addr_t dma;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->write = 0;
	rx->read = 0;
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
}

static void tsnep_rx_enable(struct tsnep_rx *rx)
{
	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
}

static void tsnep_rx_disable(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);
}
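
/* Ring occupancy convention used for both TX and RX: read == write means the
 * ring is empty and one entry is always left unused, so a full ring can be
 * distinguished from an empty one; hence the "- 1" below and the "ring cannot
 * be filled completely" notes in the allocation helpers.
 */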

static int tsnep_rx_desc_available(struct tsnep_rx *rx)
{
	if (rx->read <= rx->write)
		return TSNEP_RING_SIZE - rx->write + rx->read - 1;
	else
		return rx->read - rx->write - 1;
}

static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
{
	struct page **page;

	/* last entry of page_buffer is always zero, because ring cannot be
	 * filled completely
	 */
	page = rx->page_buffer;
	while (*page) {
		page_pool_put_full_page(rx->page_pool, *page, false);
		*page = NULL;
		page++;
	}
}

static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
{
	int i;

	/* alloc for all ring entries except the last one, because ring cannot
	 * be filled completely
	 */
	for (i = 0; i < TSNEP_RING_SIZE - 1; i++) {
		rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
		if (!rx->page_buffer[i]) {
			tsnep_rx_free_page_buffer(rx);

			return -ENOMEM;
		}
	}

	return 0;
}

static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
			      struct page *page)
{
	entry->page = page;
	entry->len = TSNEP_MAX_RX_BUF_SIZE;
	entry->dma = page_pool_get_dma_addr(entry->page);
	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
}

static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;
	tsnep_rx_set_page(rx, entry, page);

	return 0;
}

static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct tsnep_rx_entry *read = &rx->entry[rx->read];

	tsnep_rx_set_page(rx, entry, read->page);
	read->page = NULL;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiples of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
{
	bool alloc_failed = false;
	int i, index;

	for (i = 0; i < count && !alloc_failed; i++) {
		index = (rx->write + i) & TSNEP_RING_MASK;

		if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
			rx->alloc_failed++;
			alloc_failed = true;

			/* reuse only if no other allocation was successful */
			if (i == 0 && reuse)
				tsnep_rx_reuse_buffer(rx, index);
			else
				break;
		}

		tsnep_rx_activate(rx, index);
	}

	if (i)
		rx->write = (rx->write + i) & TSNEP_RING_MASK;

	return i;
}
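
/* If no buffer at all could be allocated, tsnep_rx_alloc() above may reuse
 * the buffer at rx->read (reuse == true) so that the ring never runs
 * completely empty; the caller then drops the frame belonging to the reused
 * buffer, see tsnep_rx_poll().
 */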

static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
{
	int desc_refilled;

	desc_refilled = tsnep_rx_alloc(rx, count, reuse);
	if (desc_refilled)
		tsnep_rx_enable(rx);

	return desc_refilled;
}

static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
			     struct xdp_buff *xdp)
{
	entry->xdp = xdp;
	entry->len = TSNEP_XSK_RX_BUF_SIZE;
	entry->dma = xsk_buff_xdp_get_dma(entry->xdp);
	entry->desc->rx = __cpu_to_le64(entry->dma);
}

static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct tsnep_rx_entry *read = &rx->entry[rx->read];

	tsnep_rx_set_xdp(rx, entry, read->xdp);
	read->xdp = NULL;
}

static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
{
	u32 allocated;
	int i;

	allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
	for (i = 0; i < allocated; i++) {
		int index = (rx->write + i) & TSNEP_RING_MASK;
		struct tsnep_rx_entry *entry = &rx->entry[index];

		tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
		tsnep_rx_activate(rx, index);
	}
	if (i == 0) {
		rx->alloc_failed++;

		if (reuse) {
			tsnep_rx_reuse_buffer_zc(rx, rx->write);
			tsnep_rx_activate(rx, rx->write);
		}
	}

	if (i)
		rx->write = (rx->write + i) & TSNEP_RING_MASK;

	return i;
}

static void tsnep_rx_free_zc(struct tsnep_rx *rx)
{
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		struct tsnep_rx_entry *entry = &rx->entry[i];

		if (entry->xdp)
			xsk_buff_free(entry->xdp);
		entry->xdp = NULL;
	}
}

static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
{
	int desc_refilled;

	desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
	if (desc_refilled)
		tsnep_rx_enable(rx);

	return desc_refilled;
}

static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
			       struct xdp_buff *xdp, int *status,
			       struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
	unsigned int length;
	unsigned int sync;
	u32 act;

	length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
			goto out_failure;
		*status |= TSNEP_XDP_TX;
		return true;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		*status |= TSNEP_XDP_REDIRECT;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		/* Due to xdp_adjust_tail: DMA sync for_device covers the
		 * maximum length touched by the CPU
		 */
		sync = xdp->data_end - xdp->data_hard_start -
		       XDP_PACKET_HEADROOM;
		sync = max(sync, length);
		page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
				   sync, true);
		return true;
	}
}

static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
				  struct xdp_buff *xdp, int *status,
				  struct netdev_queue *tx_nq,
				  struct tsnep_tx *tx)
{
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* XDP_REDIRECT is the main action for zero-copy */
	if (likely(act == XDP_REDIRECT)) {
		if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		*status |= TSNEP_XDP_REDIRECT;
		return true;
	}

	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
			goto out_failure;
		*status |= TSNEP_XDP_TX;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		xsk_buff_free(xdp);
		return true;
	}
}

static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
			       struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
	if (status & TSNEP_XDP_TX) {
		__netif_tx_lock(tx_nq, smp_processor_id());
		tsnep_xdp_xmit_flush(tx);
		__netif_tx_unlock(tx_nq);
	}

	if (status & TSNEP_XDP_REDIRECT)
		xdp_do_flush();
}

static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
				       int length)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
	__skb_put(skb, length - ETH_FCS_LEN);

	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
		struct tsnep_rx_inline *rx_inline =
			(struct tsnep_rx_inline *)(page_address(page) +
						   TSNEP_RX_OFFSET);

		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->netdev_data = rx_inline;
	}

	skb_record_rx_queue(skb, rx->queue_index);
	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

	return skb;
}

static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
			  struct page *page, int length)
{
	struct sk_buff *skb;

	skb = tsnep_build_skb(rx, page, length);
	if (skb) {
		skb_mark_for_recycle(skb);

		rx->packets++;
		rx->bytes += length;
		if (skb->pkt_type == PACKET_MULTICAST)
			rx->multicast++;

		napi_gro_receive(napi, skb);
	} else {
		page_pool_recycle_direct(rx->page_pool, page);

		rx->dropped++;
	}
}

static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	enum dma_data_direction dma_dir;
	struct tsnep_rx_entry *entry;
	struct netdev_queue *tx_nq;
	struct bpf_prog *prog;
	struct xdp_buff xdp;
	struct tsnep_tx *tx;
	int desc_available;
	int xdp_status = 0;
	int done = 0;
	int length;

	desc_available = tsnep_rx_desc_available(rx);
	dma_dir = page_pool_get_dma_dir(rx->page_pool);
	prog = READ_ONCE(rx->adapter->xdp_prog);
	if (prog) {
		tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
					    rx->tx_queue_index);
		tx = &rx->adapter->tx[rx->tx_queue_index];

		xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
	}

	while (likely(done < budget) && (rx->read != rx->write)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;
		done++;

		if (desc_available >= TSNEP_RING_RX_REFILL) {
			bool reuse = desc_available >= TSNEP_RING_RX_REUSE;

			desc_available -= tsnep_rx_refill(rx, desc_available,
							  reuse);
			if (!entry->page) {
				/* buffer has been reused for refill to prevent
				 * empty RX ring, thus buffer cannot be used for
				 * RX processing
				 */
				rx->read = (rx->read + 1) & TSNEP_RING_MASK;
				desc_available++;

				rx->dropped++;

				continue;
			}
		}

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		dma_sync_single_range_for_cpu(dmadev, entry->dma,
					      TSNEP_RX_OFFSET, length, dma_dir);

		/* RX metadata with timestamps is in front of actual data,
		 * subtract metadata size to get length of actual data and
		 * consider metadata size as offset of actual data during RX
		 * processing
		 */
		length -= TSNEP_RX_INLINE_METADATA_SIZE;

		rx->read = (rx->read + 1) & TSNEP_RING_MASK;
		desc_available++;

		if (prog) {
			bool consume;

			xdp_prepare_buff(&xdp, page_address(entry->page),
					 XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
					 length - ETH_FCS_LEN, false);

			consume = tsnep_xdp_run_prog(rx, prog, &xdp,
						     &xdp_status, tx_nq, tx);
			if (consume) {
				rx->packets++;
				rx->bytes += length;

				entry->page = NULL;

				continue;
			}
		}

		tsnep_rx_page(rx, napi, entry->page, length);
		entry->page = NULL;
	}

	if (xdp_status)
		tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);

	if (desc_available)
		tsnep_rx_refill(rx, desc_available, false);

	return done;
}

static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
			    int budget)
{
	struct tsnep_rx_entry *entry;
	struct netdev_queue *tx_nq;
	struct bpf_prog *prog;
	struct tsnep_tx *tx;
	int desc_available;
	int xdp_status = 0;
	struct page *page;
	int done = 0;
	int length;

	desc_available = tsnep_rx_desc_available(rx);
	prog = READ_ONCE(rx->adapter->xdp_prog);
	if (prog) {
		tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
					    rx->tx_queue_index);
		tx = &rx->adapter->tx[rx->tx_queue_index];
	}

	while (likely(done < budget) && (rx->read != rx->write)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;
		done++;

		if (desc_available >= TSNEP_RING_RX_REFILL) {
			bool reuse = desc_available >= TSNEP_RING_RX_REUSE;

			desc_available -= tsnep_rx_refill_zc(rx, desc_available,
							     reuse);
			if (!entry->xdp) {
				/* buffer has been reused for refill to prevent
				 * empty RX ring, thus buffer cannot be used for
				 * RX processing
				 */
				rx->read = (rx->read + 1) & TSNEP_RING_MASK;
				desc_available++;

				rx->dropped++;

				continue;
			}
		}

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		prefetch(entry->xdp->data);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
		xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);

		/* RX metadata with timestamps is in front of actual data,
		 * subtract metadata size to get length of actual data and
		 * consider metadata size as offset of actual data during RX
		 * processing
		 */
		length -= TSNEP_RX_INLINE_METADATA_SIZE;

		rx->read = (rx->read + 1) & TSNEP_RING_MASK;
		desc_available++;

		if (prog) {
			bool consume;

			entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
			entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;

			consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
							&xdp_status, tx_nq, tx);
			if (consume) {
				rx->packets++;
				rx->bytes += length;

				entry->xdp = NULL;

				continue;
			}
		}

		page = page_pool_dev_alloc_pages(rx->page_pool);
		if (page) {
			memcpy(page_address(page) + TSNEP_RX_OFFSET,
			       entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE,
			       length + TSNEP_RX_INLINE_METADATA_SIZE);
			tsnep_rx_page(rx, napi, page, length);
		} else {
			rx->dropped++;
		}
		xsk_buff_free(entry->xdp);
		entry->xdp = NULL;
	}

	if (xdp_status)
		tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);

	if (desc_available)
		desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);

	if (xsk_uses_need_wakeup(rx->xsk_pool)) {
		if (desc_available)
			xsk_set_rx_need_wakeup(rx->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx->xsk_pool);

		return done;
	}

	return desc_available ? budget : done;
}

static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
	struct tsnep_rx_entry *entry;

	if (rx->read != rx->write) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) ==
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			return true;
	}

	return false;
}

static int tsnep_rx_open(struct tsnep_rx *rx)
{
	int desc_available;
	int retval;

	retval = tsnep_rx_ring_create(rx);
	if (retval)
		return retval;

	tsnep_rx_init(rx);

	desc_available = tsnep_rx_desc_available(rx);
	if (rx->xsk_pool)
		retval = tsnep_rx_alloc_zc(rx, desc_available, false);
	else
		retval = tsnep_rx_alloc(rx, desc_available, false);
	if (retval != desc_available) {
		retval = -ENOMEM;

		goto alloc_failed;
	}

	/* prealloc pages to prevent allocation failures when XSK pool is
	 * disabled at runtime
	 */
	if (rx->xsk_pool) {
		retval = tsnep_rx_alloc_page_buffer(rx);
		if (retval)
			goto alloc_failed;
	}

	return 0;

alloc_failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	if (rx->xsk_pool)
		tsnep_rx_free_page_buffer(rx);

	tsnep_rx_ring_cleanup(rx);
}

static void tsnep_rx_reopen(struct tsnep_rx *rx)
{
	struct page **page = rx->page_buffer;
	int i;

	tsnep_rx_init(rx);

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		struct tsnep_rx_entry *entry = &rx->entry[i];

		/* defined initial values for properties are required for
		 * correct owner counter checking
		 */
		entry->desc->properties = 0;
		entry->desc_wb->properties = 0;

		/* prevent allocation failures by reusing kept pages */
		if (*page) {
			tsnep_rx_set_page(rx, entry, *page);
			tsnep_rx_activate(rx, rx->write);
			rx->write++;

			*page = NULL;
			page++;
		}
	}
}

static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
{
	struct page **page = rx->page_buffer;
	u32 allocated;
	int i;

	tsnep_rx_init(rx);

	/* alloc all ring entries except the last one, because ring cannot be
	 * filled completely; as many buffers as possible is enough here, as
	 * wakeup is signaled once new buffers are available
	 */
	allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
					 TSNEP_RING_SIZE - 1);

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		struct tsnep_rx_entry *entry = &rx->entry[i];

		/* keep pages to prevent allocation failures when xsk is
		 * disabled
		 */
		if (entry->page) {
			*page = entry->page;
			entry->page = NULL;

			page++;
		}

		/* defined initial values for properties are required for
		 * correct owner counter checking
		 */
		entry->desc->properties = 0;
		entry->desc_wb->properties = 0;

		if (allocated) {
			tsnep_rx_set_xdp(rx, entry,
					 rx->xdp_batch[allocated - 1]);
			tsnep_rx_activate(rx, rx->write);
			rx->write++;

			allocated--;
		}
	}

	/* set need wakeup flag immediately if ring is not filled completely,
	 * first polling would be too late as need wakeup signaling would
	 * be delayed for an indefinite time
	 */
	if (xsk_uses_need_wakeup(rx->xsk_pool)) {
		int desc_available = tsnep_rx_desc_available(rx);

		if (desc_available)
			xsk_set_rx_need_wakeup(rx->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx->xsk_pool);
	}
}

static bool tsnep_pending(struct tsnep_queue *queue)
{
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	/* handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	if (queue->rx) {
		done = queue->rx->xsk_pool ?
			tsnep_rx_poll_zc(queue->rx, napi, budget) :
			tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done))) {
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

		/* reschedule if work is already pending, prevent rotten packets
		 * which are transmitted or received after polling but before
		 * interrupt enable
		 */
		if (tsnep_pending(queue)) {
			tsnep_disable_irq(queue->adapter, queue->irq_mask);
			napi_schedule(napi);
		}
	}

	return min(done, budget - 1);
}

static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
	const char *name = netdev_name(queue->adapter->netdev);
	irq_handler_t handler;
	void *dev;
	int retval;

	if (first) {
		sprintf(queue->name, "%s-mac", name);
		handler = tsnep_irq;
		dev = queue->adapter;
	} else {
		if (queue->tx && queue->rx)
			snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
				 name, queue->rx->queue_index);
		else if (queue->tx)
			snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
				 name, queue->tx->queue_index);
		else
			snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
				 name, queue->rx->queue_index);
		handler = tsnep_irq_txrx;
		dev = queue;
	}

	retval = request_irq(queue->irq, handler, 0, queue->name, dev);
	if (retval) {
		/* if name is empty, then interrupt won't be freed */
		memset(queue->name, 0, sizeof(queue->name));
	}

	return retval;
}

static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
	void *dev;

	if (!strlen(queue->name))
		return;

	if (first)
		dev = queue->adapter;
	else
		dev = queue;

	free_irq(queue->irq, dev);
	memset(queue->name, 0, sizeof(queue->name));
}

static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
{
	struct tsnep_rx *rx = queue->rx;

	tsnep_free_irq(queue, first);

	if (rx) {
		if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
			xdp_rxq_info_unreg(&rx->xdp_rxq);
		if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
			xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
	}

	netif_napi_del(&queue->napi);
}

static int tsnep_queue_open(struct tsnep_adapter *adapter,
			    struct tsnep_queue *queue, bool first)
{
	struct tsnep_rx *rx = queue->rx;
	struct tsnep_tx *tx = queue->tx;
	int retval;

	netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);

	if (rx) {
		/* choose TX queue for XDP_TX */
		if (tx)
			rx->tx_queue_index = tx->queue_index;
		else if (rx->queue_index < adapter->num_tx_queues)
			rx->tx_queue_index = rx->queue_index;
		else
			rx->tx_queue_index = 0;

		/* prepare both memory models to eliminate possible registration
		 * errors when memory model is switched between page pool and
		 * XSK pool during runtime
		 */
		retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
					  rx->queue_index, queue->napi.napi_id);
		if (retval)
			goto failed;
		retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
						    MEM_TYPE_PAGE_POOL,
						    rx->page_pool);
		if (retval)
			goto failed;
		retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
					  rx->queue_index, queue->napi.napi_id);
		if (retval)
			goto failed;
		retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
						    MEM_TYPE_XSK_BUFF_POOL,
						    NULL);
		if (retval)
			goto failed;
		if (rx->xsk_pool)
			xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
	}

	retval = tsnep_request_irq(queue, first);
	if (retval) {
		netif_err(adapter, drv, adapter->netdev,
			  "can't get assigned irq %d.\n", queue->irq);
		goto failed;
	}

	return 0;

failed:
	tsnep_queue_close(queue, first);

	return retval;
}

static void tsnep_queue_enable(struct tsnep_queue *queue)
{
	napi_enable(&queue->napi);
	tsnep_enable_irq(queue->adapter, queue->irq_mask);

	if (queue->tx)
		tsnep_tx_enable(queue->tx);

	if (queue->rx)
		tsnep_rx_enable(queue->rx);
}

static void tsnep_queue_disable(struct tsnep_queue *queue)
{
	if (queue->tx)
		tsnep_tx_disable(queue->tx, &queue->napi);

	napi_disable(&queue->napi);
	tsnep_disable_irq(queue->adapter, queue->irq_mask);

	/* disable RX after NAPI polling has been disabled, because RX can be
	 * enabled during NAPI polling
	 */
	if (queue->rx)
		tsnep_rx_disable(queue->rx);
}

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i, retval;

	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->queue[i].tx) {
			retval = tsnep_tx_open(adapter->queue[i].tx);
			if (retval)
				goto failed;
		}
		if (adapter->queue[i].rx) {
			retval = tsnep_rx_open(adapter->queue[i].rx);
			if (retval)
				goto failed;
		}

		retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
		if (retval)
			goto failed;
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	tsnep_enable_irq(adapter, ECM_INT_LINK);
	retval = tsnep_phy_open(adapter);
	if (retval)
		goto phy_failed;

	for (i = 0; i < adapter->num_queues; i++)
		tsnep_queue_enable(&adapter->queue[i]);

	return 0;

phy_failed:
	tsnep_disable_irq(adapter, ECM_INT_LINK);
failed:
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_queue_close(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_queue_disable(&adapter->queue[i]);

		tsnep_queue_close(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	return 0;
}
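
/* Runtime switch of a queue pair to an XSK buffer pool: page buffers and an
 * XDP buffer batch are preallocated first; if the interface is running, the
 * queue is disabled, the pool pointers are swapped and the RX ring is
 * reopened with XSK buffers before the queue is enabled again.
 * tsnep_disable_xsk() reverses this and reuses the kept page buffers, so no
 * allocation is needed on that path.
 */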
int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
{
	bool running = netif_running(queue->adapter->netdev);
	u32 frame_size;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < TSNEP_XSK_RX_BUF_SIZE)
		return -EOPNOTSUPP;

	queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
					 sizeof(*queue->rx->page_buffer),
					 GFP_KERNEL);
	if (!queue->rx->page_buffer)
		return -ENOMEM;
	queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
				       sizeof(*queue->rx->xdp_batch),
				       GFP_KERNEL);
	if (!queue->rx->xdp_batch) {
		kfree(queue->rx->page_buffer);
		queue->rx->page_buffer = NULL;

		return -ENOMEM;
	}

	xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);

	if (running)
		tsnep_queue_disable(queue);

	queue->tx->xsk_pool = pool;
	queue->rx->xsk_pool = pool;

	if (running) {
		tsnep_rx_reopen_xsk(queue->rx);
		tsnep_queue_enable(queue);
	}

	return 0;
}

void tsnep_disable_xsk(struct tsnep_queue *queue)
{
	bool running = netif_running(queue->adapter->netdev);

	if (running)
		tsnep_queue_disable(queue);

	tsnep_rx_free_zc(queue->rx);

	queue->rx->xsk_pool = NULL;
	queue->tx->xsk_pool = NULL;

	if (running) {
		tsnep_rx_reopen(queue->rx);
		tsnep_queue_enable(queue);
	}

	kfree(queue->rx->xdp_batch);
	queue->rx->xdp_batch = NULL;
	kfree(queue->rx->page_buffer);
	queue->rx->page_buffer = NULL;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

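/* Combine per-queue software counters with the hardware drop and error
 * statistics read from the per-queue RX statistic register and the global
 * ECM status register.
 */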
static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
				     netdev_features_t features)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool enable;
	int retval = 0;

	if (changed & NETIF_F_LOOPBACK) {
		enable = !!(features & NETIF_F_LOOPBACK);
		retval = tsnep_phy_loopback(adapter, enable);
	}

	return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}

static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
					    bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}

static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
{
	if (cpu >= TSNEP_MAX_QUEUES)
		cpu &= TSNEP_MAX_QUEUES - 1;

	while (cpu >= adapter->num_tx_queues)
		cpu -= adapter->num_tx_queues;

	return &adapter->tx[cpu];
}

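/* ndo_xdp_xmit: the calling CPU is mapped to a TX queue by tsnep_xdp_get_tx()
 * above, which wraps the CPU number into the range of available TX queues
 * (e.g. with two TX queues, CPU 5 ends up on queue 1). The queue is shared
 * with the normal transmit path, so the netdev TX queue lock is taken.
 */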
static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
				 struct xdp_frame **xdp, u32 flags)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);
	u32 cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct tsnep_tx *tx;
	int nxmit;
	bool xmit;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	tx = tsnep_xdp_get_tx(adapter, cpu);
	nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);

	__netif_tx_lock(nq, cpu);

	for (nxmit = 0; nxmit < n; nxmit++) {
		xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
						 TSNEP_TX_TYPE_XDP_NDO);
		if (!xmit)
			break;

		/* avoid transmit queue timeout since we share it with the slow
		 * path
		 */
		txq_trans_cond_update(nq);
	}

	if (flags & XDP_XMIT_FLUSH)
		tsnep_xdp_xmit_flush(tx);

	__netif_tx_unlock(nq);

	return nxmit;
}

static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id,
				   u32 flags)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);
	struct tsnep_queue *queue;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	queue = &adapter->queue[queue_id];

	if (!napi_if_scheduled_mark_missed(&queue->napi))
		napi_schedule(&queue->napi);

	return 0;
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_set_features = tsnep_netdev_set_features,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
	.ndo_bpf = tsnep_netdev_bpf,
	.ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
	.ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering, at least configured MAC address and
	 * broadcast are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

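/* The MDIO bus is registered only if the device tree provides an "mdio"
 * subnode; without one, tsnep_mdio_init() returns successfully with no bus and
 * the PHY has to be referenced via "phy-handle".
 */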
static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	of_node_put(np);

	return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}

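/* Queue 0 uses the "mac" interrupt (or the only interrupt if just one is
 * provided); additional TX/RX queue pairs are instantiated only for each
 * optional "txrx-<n>" interrupt found in the device tree.
 */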
static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
{
	u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
	char name[8];
	int i;
	int retval;

	/* one TX/RX queue pair for netdev is mandatory */
	if (platform_irq_count(adapter->pdev) == 1)
		retval = platform_get_irq(adapter->pdev, 0);
	else
		retval = platform_get_irq_byname(adapter->pdev, "mac");
	if (retval < 0)
		return retval;
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;
	adapter->num_queues = 1;
	adapter->queue[0].adapter = adapter;
	adapter->queue[0].irq = retval;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].tx->adapter = adapter;
	adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
	adapter->queue[0].tx->queue_index = 0;
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].rx->adapter = adapter;
	adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
	adapter->queue[0].rx->queue_index = 0;
	adapter->queue[0].irq_mask = irq_mask;
	adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
	retval = tsnep_set_irq_coalesce(&adapter->queue[0],
					TSNEP_COALESCE_USECS_DEFAULT);
	if (retval < 0)
		return retval;

	adapter->netdev->irq = adapter->queue[0].irq;

	/* add additional TX/RX queue pairs only if dedicated interrupt is
	 * available
	 */
	for (i = 1; i < queue_count; i++) {
		sprintf(name, "txrx-%d", i);
		retval = platform_get_irq_byname_optional(adapter->pdev, name);
		if (retval < 0)
			break;

		adapter->num_tx_queues++;
		adapter->num_rx_queues++;
		adapter->num_queues++;
		adapter->queue[i].adapter = adapter;
		adapter->queue[i].irq = retval;
		adapter->queue[i].tx = &adapter->tx[i];
		adapter->queue[i].tx->adapter = adapter;
		adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
		adapter->queue[i].tx->queue_index = i;
		adapter->queue[i].rx = &adapter->rx[i];
		adapter->queue[i].rx->adapter = adapter;
		adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
		adapter->queue[i].rx->queue_index = i;
		adapter->queue[i].irq_mask =
			irq_mask << (ECM_INT_TXRX_SHIFT * i);
		adapter->queue[i].irq_delay_addr =
			adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
		retval = tsnep_set_irq_coalesce(&adapter->queue[i],
						TSNEP_COALESCE_USECS_DEFAULT);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int queue_count;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);
	mutex_init(&adapter->rxnfc_lock);
	INIT_LIST_HEAD(&adapter->rxnfc_rules);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;
	adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	retval = tsnep_queue_init(adapter, queue_count);
	if (retval)
		return retval;

	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}

	retval = tsnep_mac_init(adapter);
	if (retval)
		return retval;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	retval = tsnep_rxnfc_init(adapter);
	if (retval)
		goto rxnfc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG |
			       NETDEV_XDP_ACT_XSK_ZEROCOPY;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

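	/* error handling: undo the completed initialization steps in reverse
	 * order
	 */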
register_failed:
	tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
	return retval;
}

static void tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_rxnfc_cleanup(adapter);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove_new = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");
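
/* Illustrative device tree node for this driver, a sketch only: it is based on
 * the properties read above ("compatible", interrupt names "mac"/"txrx-<n>",
 * "phy-mode", "phy-handle", the "mdio" subnode and "suppress-preamble");
 * addresses, interrupt specifiers and any further properties are assumptions
 * and the authoritative binding may differ:
 *
 *	ethernet@a0000000 {
 *		compatible = "engleder,tsnep";
 *		reg = <0xa0000000 0x10000>;
 *		interrupts = <0 89 4>, <0 90 4>;
 *		interrupt-names = "mac", "txrx-1";
 *		phy-mode = "rgmii";
 *		phy-handle = <&phy0>;
 *
 *		mdio {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *			suppress-preamble;
 *
 *			phy0: ethernet-phy@1 {
 *				reg = <1>;
 *			};
 *		};
 *	};
 */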