1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 4 /* TSN endpoint Ethernet MAC driver 5 * 6 * The TSN endpoint Ethernet MAC is a FPGA based network device for real-time 7 * communication. It is designed for endpoints within TSN (Time Sensitive 8 * Networking) networks; e.g., for PLCs in the industrial automation case. 9 * 10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used 11 * by the driver. 12 * 13 * More information can be found here: 14 * - www.embedded-experts.at/tsn 15 * - www.engleder-embedded.com 16 */ 17 18 #include "tsnep.h" 19 #include "tsnep_hw.h" 20 21 #include <linux/module.h> 22 #include <linux/of.h> 23 #include <linux/of_net.h> 24 #include <linux/of_mdio.h> 25 #include <linux/interrupt.h> 26 #include <linux/etherdevice.h> 27 #include <linux/phy.h> 28 #include <linux/iopoll.h> 29 #include <linux/bpf.h> 30 #include <linux/bpf_trace.h> 31 #include <net/page_pool/helpers.h> 32 #include <net/xdp_sock_drv.h> 33 34 #define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) 35 #define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4) 36 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \ 37 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 38 /* XSK buffer shall store at least Q-in-Q frame */ 39 #define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \ 40 ETH_FRAME_LEN + ETH_FCS_LEN + \ 41 VLAN_HLEN * 2, 4)) 42 43 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 44 #define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) 45 #else 46 #define DMA_ADDR_HIGH(dma_addr) ((u32)(0)) 47 #endif 48 #define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF)) 49 50 #define TSNEP_COALESCE_USECS_DEFAULT 64 51 #define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \ 52 ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1) 53 54 /* mapping type */ 55 #define TSNEP_TX_TYPE_MAP BIT(0) 56 #define TSNEP_TX_TYPE_MAP_PAGE BIT(1) 57 #define TSNEP_TX_TYPE_INLINE BIT(2) 58 /* buffer type */ 59 #define TSNEP_TX_TYPE_SKB BIT(8) 60 #define TSNEP_TX_TYPE_SKB_MAP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP) 61 #define TSNEP_TX_TYPE_SKB_INLINE (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE) 62 #define TSNEP_TX_TYPE_SKB_FRAG BIT(9) 63 #define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE) 64 #define TSNEP_TX_TYPE_SKB_FRAG_INLINE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE) 65 #define TSNEP_TX_TYPE_XDP_TX BIT(10) 66 #define TSNEP_TX_TYPE_XDP_NDO BIT(11) 67 #define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE) 68 #define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) 69 #define TSNEP_TX_TYPE_XSK BIT(12) 70 71 #define TSNEP_XDP_TX BIT(0) 72 #define TSNEP_XDP_REDIRECT BIT(1) 73 74 static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask) 75 { 76 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); 77 } 78 79 static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask) 80 { 81 mask |= ECM_INT_DISABLE; 82 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); 83 } 84 85 static irqreturn_t tsnep_irq(int irq, void *arg) 86 { 87 struct tsnep_adapter *adapter = arg; 88 u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE); 89 90 /* acknowledge interrupt */ 91 if (active != 0) 92 iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE); 93 94 /* handle link interrupt */ 95 if ((active & ECM_INT_LINK) != 0) 96 phy_mac_interrupt(adapter->netdev->phydev); 97 98 /* handle TX/RX queue 0 
interrupt */ 99 if ((active & adapter->queue[0].irq_mask) != 0) { 100 if (napi_schedule_prep(&adapter->queue[0].napi)) { 101 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); 102 /* schedule after masking to avoid races */ 103 __napi_schedule(&adapter->queue[0].napi); 104 } 105 } 106 107 return IRQ_HANDLED; 108 } 109 110 static irqreturn_t tsnep_irq_txrx(int irq, void *arg) 111 { 112 struct tsnep_queue *queue = arg; 113 114 /* handle TX/RX queue interrupt */ 115 if (napi_schedule_prep(&queue->napi)) { 116 tsnep_disable_irq(queue->adapter, queue->irq_mask); 117 /* schedule after masking to avoid races */ 118 __napi_schedule(&queue->napi); 119 } 120 121 return IRQ_HANDLED; 122 } 123 124 int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs) 125 { 126 if (usecs > TSNEP_COALESCE_USECS_MAX) 127 return -ERANGE; 128 129 usecs /= ECM_INT_DELAY_BASE_US; 130 usecs <<= ECM_INT_DELAY_SHIFT; 131 usecs &= ECM_INT_DELAY_MASK; 132 133 queue->irq_delay &= ~ECM_INT_DELAY_MASK; 134 queue->irq_delay |= usecs; 135 iowrite8(queue->irq_delay, queue->irq_delay_addr); 136 137 return 0; 138 } 139 140 u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue) 141 { 142 u32 usecs; 143 144 usecs = (queue->irq_delay & ECM_INT_DELAY_MASK); 145 usecs >>= ECM_INT_DELAY_SHIFT; 146 usecs *= ECM_INT_DELAY_BASE_US; 147 148 return usecs; 149 } 150 151 static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum) 152 { 153 struct tsnep_adapter *adapter = bus->priv; 154 u32 md; 155 int retval; 156 157 md = ECM_MD_READ; 158 if (!adapter->suppress_preamble) 159 md |= ECM_MD_PREAMBLE; 160 md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK; 161 md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK; 162 iowrite32(md, adapter->addr + ECM_MD_CONTROL); 163 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, 164 !(md & ECM_MD_BUSY), 16, 1000); 165 if (retval != 0) 166 return retval; 167 168 return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT; 169 } 170 171 static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum, 172 u16 val) 173 { 174 struct tsnep_adapter *adapter = bus->priv; 175 u32 md; 176 int retval; 177 178 md = ECM_MD_WRITE; 179 if (!adapter->suppress_preamble) 180 md |= ECM_MD_PREAMBLE; 181 md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK; 182 md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK; 183 md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK; 184 iowrite32(md, adapter->addr + ECM_MD_CONTROL); 185 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, 186 !(md & ECM_MD_BUSY), 16, 1000); 187 if (retval != 0) 188 return retval; 189 190 return 0; 191 } 192 193 static void tsnep_set_link_mode(struct tsnep_adapter *adapter) 194 { 195 u32 mode; 196 197 switch (adapter->phydev->speed) { 198 case SPEED_100: 199 mode = ECM_LINK_MODE_100; 200 break; 201 case SPEED_1000: 202 mode = ECM_LINK_MODE_1000; 203 break; 204 default: 205 mode = ECM_LINK_MODE_OFF; 206 break; 207 } 208 iowrite32(mode, adapter->addr + ECM_STATUS); 209 } 210 211 static void tsnep_phy_link_status_change(struct net_device *netdev) 212 { 213 struct tsnep_adapter *adapter = netdev_priv(netdev); 214 struct phy_device *phydev = netdev->phydev; 215 216 if (phydev->link) 217 tsnep_set_link_mode(adapter); 218 219 phy_print_status(netdev->phydev); 220 } 221 222 static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable) 223 { 224 int retval; 225 226 retval = phy_loopback(adapter->phydev, enable); 227 228 /* PHY link state change is not signaled if loopback 
	 * is enabled; it would delay a working loopback anyway, so ensure that
	 * loopback works immediately by setting the link mode directly.
	 */
	if (!retval && enable) {
		netif_carrier_on(adapter->netdev);
		tsnep_set_link_mode(adapter);
	}

	return retval;
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_keee ethtool_keee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_keee, 0, sizeof(ethtool_keee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_create(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
			entry->owner_user_flag = false;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}

static void tsnep_tx_init(struct tsnep_tx *tx)
{
	dma_addr_t dma;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->write = 0;
	tx->read = 0;
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
}

static void tsnep_tx_enable(struct tsnep_tx *tx)
{
	struct netdev_queue *nq;
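
	/* Wake the queue while holding the netdev_queue lock; the transmit,
	 * poll and disable paths stop and wake the queue under the same lock.
	 */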
	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);

	__netif_tx_lock_bh(nq);
	netif_tx_wake_queue(nq);
	__netif_tx_unlock_bh(nq);
}

static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
{
	struct netdev_queue *nq;
	u32 val;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);

	__netif_tx_lock_bh(nq);
	netif_tx_stop_queue(nq);
	__netif_tx_unlock_bh(nq);

	/* wait until TX is done in hardware */
	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	/* wait until TX is also done in software */
	while (READ_ONCE(tx->read) != tx->write) {
		napi_schedule(napi);
		napi_synchronize(napi);
	}
}

static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	/* xdpf and zc are union with skb */
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
411 */ 412 entry->owner_user_flag = !entry->owner_user_flag; 413 } 414 if (last) 415 entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG; 416 if (index == tx->increment_owner_counter) { 417 tx->owner_counter++; 418 if (tx->owner_counter == 4) 419 tx->owner_counter = 1; 420 tx->increment_owner_counter--; 421 if (tx->increment_owner_counter < 0) 422 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; 423 } 424 entry->properties |= 425 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & 426 TSNEP_DESC_OWNER_COUNTER_MASK; 427 if (entry->owner_user_flag) 428 entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; 429 entry->desc->more_properties = 430 __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); 431 if (entry->type & TSNEP_TX_TYPE_INLINE) 432 entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG; 433 434 /* descriptor properties shall be written last, because valid data is 435 * signaled there 436 */ 437 dma_wmb(); 438 439 entry->desc->properties = __cpu_to_le32(entry->properties); 440 } 441 442 static int tsnep_tx_desc_available(struct tsnep_tx *tx) 443 { 444 if (tx->read <= tx->write) 445 return TSNEP_RING_SIZE - tx->write + tx->read - 1; 446 else 447 return tx->read - tx->write - 1; 448 } 449 450 static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry, 451 struct device *dmadev, dma_addr_t *dma) 452 { 453 unsigned int len; 454 int mapped; 455 456 len = skb_frag_size(frag); 457 if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { 458 *dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE); 459 if (dma_mapping_error(dmadev, *dma)) 460 return -ENOMEM; 461 entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE; 462 mapped = 1; 463 } else { 464 void *fragdata = skb_frag_address_safe(frag); 465 466 if (likely(fragdata)) { 467 memcpy(&entry->desc->tx, fragdata, len); 468 } else { 469 struct page *page = skb_frag_page(frag); 470 471 fragdata = kmap_local_page(page); 472 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), 473 len); 474 kunmap_local(fragdata); 475 } 476 entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE; 477 mapped = 0; 478 } 479 480 return mapped; 481 } 482 483 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) 484 { 485 struct device *dmadev = tx->adapter->dmadev; 486 struct tsnep_tx_entry *entry; 487 unsigned int len; 488 int map_len = 0; 489 dma_addr_t dma; 490 int i, mapped; 491 492 for (i = 0; i < count; i++) { 493 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; 494 495 if (!i) { 496 len = skb_headlen(skb); 497 if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { 498 dma = dma_map_single(dmadev, skb->data, len, 499 DMA_TO_DEVICE); 500 if (dma_mapping_error(dmadev, dma)) 501 return -ENOMEM; 502 entry->type = TSNEP_TX_TYPE_SKB_MAP; 503 mapped = 1; 504 } else { 505 memcpy(&entry->desc->tx, skb->data, len); 506 entry->type = TSNEP_TX_TYPE_SKB_INLINE; 507 mapped = 0; 508 } 509 } else { 510 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 511 512 len = skb_frag_size(frag); 513 mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma); 514 if (mapped < 0) 515 return mapped; 516 } 517 518 entry->len = len; 519 if (likely(mapped)) { 520 dma_unmap_addr_set(entry, dma, dma); 521 entry->desc->tx = __cpu_to_le64(dma); 522 } 523 524 map_len += len; 525 } 526 527 return map_len; 528 } 529 530 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) 531 { 532 struct device *dmadev = tx->adapter->dmadev; 533 struct tsnep_tx_entry *entry; 534 int map_len = 0; 535 int i; 536 537 for (i = 0; i < count; i++) { 538 entry = 
&tx->entry[(index + i) & TSNEP_RING_MASK]; 539 540 if (entry->len) { 541 if (entry->type & TSNEP_TX_TYPE_MAP) 542 dma_unmap_single(dmadev, 543 dma_unmap_addr(entry, dma), 544 dma_unmap_len(entry, len), 545 DMA_TO_DEVICE); 546 else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE) 547 dma_unmap_page(dmadev, 548 dma_unmap_addr(entry, dma), 549 dma_unmap_len(entry, len), 550 DMA_TO_DEVICE); 551 map_len += entry->len; 552 entry->len = 0; 553 } 554 } 555 556 return map_len; 557 } 558 559 static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, 560 struct tsnep_tx *tx) 561 { 562 int count = 1; 563 struct tsnep_tx_entry *entry; 564 int length; 565 int i; 566 int retval; 567 568 if (skb_shinfo(skb)->nr_frags > 0) 569 count += skb_shinfo(skb)->nr_frags; 570 571 if (tsnep_tx_desc_available(tx) < count) { 572 /* ring full, shall not happen because queue is stopped if full 573 * below 574 */ 575 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); 576 577 return NETDEV_TX_BUSY; 578 } 579 580 entry = &tx->entry[tx->write]; 581 entry->skb = skb; 582 583 retval = tsnep_tx_map(skb, tx, count); 584 if (retval < 0) { 585 tsnep_tx_unmap(tx, tx->write, count); 586 dev_kfree_skb_any(entry->skb); 587 entry->skb = NULL; 588 589 tx->dropped++; 590 591 return NETDEV_TX_OK; 592 } 593 length = retval; 594 595 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 596 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 597 598 for (i = 0; i < count; i++) 599 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, 600 i == count - 1); 601 tx->write = (tx->write + count) & TSNEP_RING_MASK; 602 603 skb_tx_timestamp(skb); 604 605 /* descriptor properties shall be valid before hardware is notified */ 606 dma_wmb(); 607 608 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); 609 610 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { 611 /* ring can get full with next frame */ 612 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); 613 } 614 615 return NETDEV_TX_OK; 616 } 617 618 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, 619 struct skb_shared_info *shinfo, int count, u32 type) 620 { 621 struct device *dmadev = tx->adapter->dmadev; 622 struct tsnep_tx_entry *entry; 623 struct page *page; 624 skb_frag_t *frag; 625 unsigned int len; 626 int map_len = 0; 627 dma_addr_t dma; 628 void *data; 629 int i; 630 631 frag = NULL; 632 len = xdpf->len; 633 for (i = 0; i < count; i++) { 634 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; 635 if (type & TSNEP_TX_TYPE_XDP_NDO) { 636 data = unlikely(frag) ? skb_frag_address(frag) : 637 xdpf->data; 638 dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE); 639 if (dma_mapping_error(dmadev, dma)) 640 return -ENOMEM; 641 642 entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE; 643 } else { 644 page = unlikely(frag) ? skb_frag_page(frag) : 645 virt_to_page(xdpf->data); 646 dma = page_pool_get_dma_addr(page); 647 if (unlikely(frag)) 648 dma += skb_frag_off(frag); 649 else 650 dma += sizeof(*xdpf) + xdpf->headroom; 651 dma_sync_single_for_device(dmadev, dma, len, 652 DMA_BIDIRECTIONAL); 653 654 entry->type = TSNEP_TX_TYPE_XDP_TX; 655 } 656 657 entry->len = len; 658 dma_unmap_addr_set(entry, dma, dma); 659 660 entry->desc->tx = __cpu_to_le64(dma); 661 662 map_len += len; 663 664 if (i + 1 < count) { 665 frag = &shinfo->frags[i]; 666 len = skb_frag_size(frag); 667 } 668 } 669 670 return map_len; 671 } 672 673 /* This function requires __netif_tx_lock is held by the caller. 
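 * The lock serializes access to the TX ring state (tx->write and the
 * descriptors) with the regular transmit and TX poll paths.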
*/ 674 static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf, 675 struct tsnep_tx *tx, u32 type) 676 { 677 struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf); 678 struct tsnep_tx_entry *entry; 679 int count, length, retval, i; 680 681 count = 1; 682 if (unlikely(xdp_frame_has_frags(xdpf))) 683 count += shinfo->nr_frags; 684 685 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS 686 * will be available for normal TX path and queue is stopped there if 687 * necessary 688 */ 689 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) 690 return false; 691 692 entry = &tx->entry[tx->write]; 693 entry->xdpf = xdpf; 694 695 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); 696 if (retval < 0) { 697 tsnep_tx_unmap(tx, tx->write, count); 698 entry->xdpf = NULL; 699 700 tx->dropped++; 701 702 return false; 703 } 704 length = retval; 705 706 for (i = 0; i < count; i++) 707 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, 708 i == count - 1); 709 tx->write = (tx->write + count) & TSNEP_RING_MASK; 710 711 /* descriptor properties shall be valid before hardware is notified */ 712 dma_wmb(); 713 714 return true; 715 } 716 717 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) 718 { 719 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); 720 } 721 722 static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, 723 struct xdp_buff *xdp, 724 struct netdev_queue *tx_nq, struct tsnep_tx *tx) 725 { 726 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 727 bool xmit; 728 729 if (unlikely(!xdpf)) 730 return false; 731 732 __netif_tx_lock(tx_nq, smp_processor_id()); 733 734 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); 735 736 /* Avoid transmit queue timeout since we share it with the slow path */ 737 if (xmit) 738 txq_trans_cond_update(tx_nq); 739 740 __netif_tx_unlock(tx_nq); 741 742 return xmit; 743 } 744 745 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) 746 { 747 struct tsnep_tx_entry *entry; 748 dma_addr_t dma; 749 750 entry = &tx->entry[tx->write]; 751 entry->zc = true; 752 753 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); 754 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); 755 756 entry->type = TSNEP_TX_TYPE_XSK; 757 entry->len = xdpd->len; 758 759 entry->desc->tx = __cpu_to_le64(dma); 760 761 return xdpd->len; 762 } 763 764 static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd, 765 struct tsnep_tx *tx) 766 { 767 int length; 768 769 length = tsnep_xdp_tx_map_zc(xdpd, tx); 770 771 tsnep_tx_activate(tx, tx->write, length, true); 772 tx->write = (tx->write + 1) & TSNEP_RING_MASK; 773 } 774 775 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) 776 { 777 int desc_available = tsnep_tx_desc_available(tx); 778 struct xdp_desc *descs = tx->xsk_pool->tx_descs; 779 int batch, i; 780 781 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS 782 * will be available for normal TX path and queue is stopped there if 783 * necessary 784 */ 785 if (desc_available <= (MAX_SKB_FRAGS + 1)) 786 return; 787 desc_available -= MAX_SKB_FRAGS + 1; 788 789 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); 790 for (i = 0; i < batch; i++) 791 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); 792 793 if (batch) { 794 /* descriptor properties shall be valid before hardware is 795 * notified 796 */ 797 dma_wmb(); 798 799 tsnep_xdp_xmit_flush(tx); 800 } 801 } 802 803 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) 
804 { 805 struct tsnep_tx_entry *entry; 806 struct netdev_queue *nq; 807 int xsk_frames = 0; 808 int budget = 128; 809 int length; 810 int count; 811 812 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); 813 __netif_tx_lock(nq, smp_processor_id()); 814 815 do { 816 if (tx->read == tx->write) 817 break; 818 819 entry = &tx->entry[tx->read]; 820 if ((__le32_to_cpu(entry->desc_wb->properties) & 821 TSNEP_TX_DESC_OWNER_MASK) != 822 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) 823 break; 824 825 /* descriptor properties shall be read first, because valid data 826 * is signaled there 827 */ 828 dma_rmb(); 829 830 count = 1; 831 if ((entry->type & TSNEP_TX_TYPE_SKB) && 832 skb_shinfo(entry->skb)->nr_frags > 0) 833 count += skb_shinfo(entry->skb)->nr_frags; 834 else if ((entry->type & TSNEP_TX_TYPE_XDP) && 835 xdp_frame_has_frags(entry->xdpf)) 836 count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; 837 838 length = tsnep_tx_unmap(tx, tx->read, count); 839 840 if ((entry->type & TSNEP_TX_TYPE_SKB) && 841 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && 842 (__le32_to_cpu(entry->desc_wb->properties) & 843 TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { 844 struct skb_shared_hwtstamps hwtstamps; 845 u64 timestamp; 846 847 if (skb_shinfo(entry->skb)->tx_flags & 848 SKBTX_HW_TSTAMP_USE_CYCLES) 849 timestamp = 850 __le64_to_cpu(entry->desc_wb->counter); 851 else 852 timestamp = 853 __le64_to_cpu(entry->desc_wb->timestamp); 854 855 memset(&hwtstamps, 0, sizeof(hwtstamps)); 856 hwtstamps.hwtstamp = ns_to_ktime(timestamp); 857 858 skb_tstamp_tx(entry->skb, &hwtstamps); 859 } 860 861 if (entry->type & TSNEP_TX_TYPE_SKB) 862 napi_consume_skb(entry->skb, napi_budget); 863 else if (entry->type & TSNEP_TX_TYPE_XDP) 864 xdp_return_frame_rx_napi(entry->xdpf); 865 else 866 xsk_frames++; 867 /* xdpf and zc are union with skb */ 868 entry->skb = NULL; 869 870 tx->read = (tx->read + count) & TSNEP_RING_MASK; 871 872 tx->packets++; 873 tx->bytes += length + ETH_FCS_LEN; 874 875 budget--; 876 } while (likely(budget)); 877 878 if (tx->xsk_pool) { 879 if (xsk_frames) 880 xsk_tx_completed(tx->xsk_pool, xsk_frames); 881 if (xsk_uses_need_wakeup(tx->xsk_pool)) 882 xsk_set_tx_need_wakeup(tx->xsk_pool); 883 tsnep_xdp_xmit_zc(tx); 884 } 885 886 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && 887 netif_tx_queue_stopped(nq)) { 888 netif_tx_wake_queue(nq); 889 } 890 891 __netif_tx_unlock(nq); 892 893 return budget != 0; 894 } 895 896 static bool tsnep_tx_pending(struct tsnep_tx *tx) 897 { 898 struct tsnep_tx_entry *entry; 899 struct netdev_queue *nq; 900 bool pending = false; 901 902 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); 903 __netif_tx_lock(nq, smp_processor_id()); 904 905 if (tx->read != tx->write) { 906 entry = &tx->entry[tx->read]; 907 if ((__le32_to_cpu(entry->desc_wb->properties) & 908 TSNEP_TX_DESC_OWNER_MASK) == 909 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) 910 pending = true; 911 } 912 913 __netif_tx_unlock(nq); 914 915 return pending; 916 } 917 918 static int tsnep_tx_open(struct tsnep_tx *tx) 919 { 920 int retval; 921 922 retval = tsnep_tx_ring_create(tx); 923 if (retval) 924 return retval; 925 926 tsnep_tx_init(tx); 927 928 return 0; 929 } 930 931 static void tsnep_tx_close(struct tsnep_tx *tx) 932 { 933 tsnep_tx_ring_cleanup(tx); 934 } 935 936 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) 937 { 938 struct device *dmadev = rx->adapter->dmadev; 939 struct tsnep_rx_entry *entry; 940 int i; 941 942 for (i = 0; i < TSNEP_RING_SIZE; i++) 
{ 943 entry = &rx->entry[i]; 944 if (!rx->xsk_pool && entry->page) 945 page_pool_put_full_page(rx->page_pool, entry->page, 946 false); 947 if (rx->xsk_pool && entry->xdp) 948 xsk_buff_free(entry->xdp); 949 /* xdp is union with page */ 950 entry->page = NULL; 951 } 952 953 if (rx->page_pool) 954 page_pool_destroy(rx->page_pool); 955 956 memset(rx->entry, 0, sizeof(rx->entry)); 957 958 for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { 959 if (rx->page[i]) { 960 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], 961 rx->page_dma[i]); 962 rx->page[i] = NULL; 963 rx->page_dma[i] = 0; 964 } 965 } 966 } 967 968 static int tsnep_rx_ring_create(struct tsnep_rx *rx) 969 { 970 struct device *dmadev = rx->adapter->dmadev; 971 struct tsnep_rx_entry *entry; 972 struct page_pool_params pp_params = { 0 }; 973 struct tsnep_rx_entry *next_entry; 974 int i, j; 975 int retval; 976 977 for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { 978 rx->page[i] = 979 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], 980 GFP_KERNEL); 981 if (!rx->page[i]) { 982 retval = -ENOMEM; 983 goto failed; 984 } 985 for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) { 986 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; 987 entry->desc_wb = (struct tsnep_rx_desc_wb *) 988 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); 989 entry->desc = (struct tsnep_rx_desc *) 990 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); 991 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; 992 } 993 } 994 995 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 996 pp_params.order = 0; 997 pp_params.pool_size = TSNEP_RING_SIZE; 998 pp_params.nid = dev_to_node(dmadev); 999 pp_params.dev = dmadev; 1000 pp_params.dma_dir = DMA_BIDIRECTIONAL; 1001 pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE; 1002 pp_params.offset = TSNEP_RX_OFFSET; 1003 rx->page_pool = page_pool_create(&pp_params); 1004 if (IS_ERR(rx->page_pool)) { 1005 retval = PTR_ERR(rx->page_pool); 1006 rx->page_pool = NULL; 1007 goto failed; 1008 } 1009 1010 for (i = 0; i < TSNEP_RING_SIZE; i++) { 1011 entry = &rx->entry[i]; 1012 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; 1013 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); 1014 } 1015 1016 return 0; 1017 1018 failed: 1019 tsnep_rx_ring_cleanup(rx); 1020 return retval; 1021 } 1022 1023 static void tsnep_rx_init(struct tsnep_rx *rx) 1024 { 1025 dma_addr_t dma; 1026 1027 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; 1028 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); 1029 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); 1030 rx->write = 0; 1031 rx->read = 0; 1032 rx->owner_counter = 1; 1033 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; 1034 } 1035 1036 static void tsnep_rx_enable(struct tsnep_rx *rx) 1037 { 1038 /* descriptor properties shall be valid before hardware is notified */ 1039 dma_wmb(); 1040 1041 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); 1042 } 1043 1044 static void tsnep_rx_disable(struct tsnep_rx *rx) 1045 { 1046 u32 val; 1047 1048 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); 1049 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, 1050 ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000, 1051 1000000); 1052 } 1053 1054 static int tsnep_rx_desc_available(struct tsnep_rx *rx) 1055 { 1056 if (rx->read <= rx->write) 1057 return TSNEP_RING_SIZE - rx->write + rx->read - 1; 1058 else 1059 return rx->read - rx->write - 1; 1060 } 1061 1062 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) 1063 { 1064 struct page **page; 1065 
1066 /* last entry of page_buffer is always zero, because ring cannot be 1067 * filled completely 1068 */ 1069 page = rx->page_buffer; 1070 while (*page) { 1071 page_pool_put_full_page(rx->page_pool, *page, false); 1072 *page = NULL; 1073 page++; 1074 } 1075 } 1076 1077 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) 1078 { 1079 int i; 1080 1081 /* alloc for all ring entries except the last one, because ring cannot 1082 * be filled completely 1083 */ 1084 for (i = 0; i < TSNEP_RING_SIZE - 1; i++) { 1085 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); 1086 if (!rx->page_buffer[i]) { 1087 tsnep_rx_free_page_buffer(rx); 1088 1089 return -ENOMEM; 1090 } 1091 } 1092 1093 return 0; 1094 } 1095 1096 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, 1097 struct page *page) 1098 { 1099 entry->page = page; 1100 entry->len = TSNEP_MAX_RX_BUF_SIZE; 1101 entry->dma = page_pool_get_dma_addr(entry->page); 1102 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); 1103 } 1104 1105 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) 1106 { 1107 struct tsnep_rx_entry *entry = &rx->entry[index]; 1108 struct page *page; 1109 1110 page = page_pool_dev_alloc_pages(rx->page_pool); 1111 if (unlikely(!page)) 1112 return -ENOMEM; 1113 tsnep_rx_set_page(rx, entry, page); 1114 1115 return 0; 1116 } 1117 1118 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) 1119 { 1120 struct tsnep_rx_entry *entry = &rx->entry[index]; 1121 struct tsnep_rx_entry *read = &rx->entry[rx->read]; 1122 1123 tsnep_rx_set_page(rx, entry, read->page); 1124 read->page = NULL; 1125 } 1126 1127 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) 1128 { 1129 struct tsnep_rx_entry *entry = &rx->entry[index]; 1130 1131 /* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiple of 4 */ 1132 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; 1133 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; 1134 if (index == rx->increment_owner_counter) { 1135 rx->owner_counter++; 1136 if (rx->owner_counter == 4) 1137 rx->owner_counter = 1; 1138 rx->increment_owner_counter--; 1139 if (rx->increment_owner_counter < 0) 1140 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; 1141 } 1142 entry->properties |= 1143 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & 1144 TSNEP_DESC_OWNER_COUNTER_MASK; 1145 1146 /* descriptor properties shall be written last, because valid data is 1147 * signaled there 1148 */ 1149 dma_wmb(); 1150 1151 entry->desc->properties = __cpu_to_le32(entry->properties); 1152 } 1153 1154 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) 1155 { 1156 bool alloc_failed = false; 1157 int i, index; 1158 1159 for (i = 0; i < count && !alloc_failed; i++) { 1160 index = (rx->write + i) & TSNEP_RING_MASK; 1161 1162 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { 1163 rx->alloc_failed++; 1164 alloc_failed = true; 1165 1166 /* reuse only if no other allocation was successful */ 1167 if (i == 0 && reuse) 1168 tsnep_rx_reuse_buffer(rx, index); 1169 else 1170 break; 1171 } 1172 1173 tsnep_rx_activate(rx, index); 1174 } 1175 1176 if (i) 1177 rx->write = (rx->write + i) & TSNEP_RING_MASK; 1178 1179 return i; 1180 } 1181 1182 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) 1183 { 1184 int desc_refilled; 1185 1186 desc_refilled = tsnep_rx_alloc(rx, count, reuse); 1187 if (desc_refilled) 1188 tsnep_rx_enable(rx); 1189 1190 return desc_refilled; 1191 } 1192 1193 static void tsnep_rx_set_xdp(struct tsnep_rx 
*rx, struct tsnep_rx_entry *entry, 1194 struct xdp_buff *xdp) 1195 { 1196 entry->xdp = xdp; 1197 entry->len = TSNEP_XSK_RX_BUF_SIZE; 1198 entry->dma = xsk_buff_xdp_get_dma(entry->xdp); 1199 entry->desc->rx = __cpu_to_le64(entry->dma); 1200 } 1201 1202 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) 1203 { 1204 struct tsnep_rx_entry *entry = &rx->entry[index]; 1205 struct tsnep_rx_entry *read = &rx->entry[rx->read]; 1206 1207 tsnep_rx_set_xdp(rx, entry, read->xdp); 1208 read->xdp = NULL; 1209 } 1210 1211 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) 1212 { 1213 u32 allocated; 1214 int i; 1215 1216 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); 1217 for (i = 0; i < allocated; i++) { 1218 int index = (rx->write + i) & TSNEP_RING_MASK; 1219 struct tsnep_rx_entry *entry = &rx->entry[index]; 1220 1221 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); 1222 tsnep_rx_activate(rx, index); 1223 } 1224 if (i == 0) { 1225 rx->alloc_failed++; 1226 1227 if (reuse) { 1228 tsnep_rx_reuse_buffer_zc(rx, rx->write); 1229 tsnep_rx_activate(rx, rx->write); 1230 } 1231 } 1232 1233 if (i) 1234 rx->write = (rx->write + i) & TSNEP_RING_MASK; 1235 1236 return i; 1237 } 1238 1239 static void tsnep_rx_free_zc(struct tsnep_rx *rx) 1240 { 1241 int i; 1242 1243 for (i = 0; i < TSNEP_RING_SIZE; i++) { 1244 struct tsnep_rx_entry *entry = &rx->entry[i]; 1245 1246 if (entry->xdp) 1247 xsk_buff_free(entry->xdp); 1248 entry->xdp = NULL; 1249 } 1250 } 1251 1252 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) 1253 { 1254 int desc_refilled; 1255 1256 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); 1257 if (desc_refilled) 1258 tsnep_rx_enable(rx); 1259 1260 return desc_refilled; 1261 } 1262 1263 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, 1264 struct xdp_buff *xdp, int *status, 1265 struct netdev_queue *tx_nq, struct tsnep_tx *tx) 1266 { 1267 unsigned int length; 1268 unsigned int sync; 1269 u32 act; 1270 1271 length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; 1272 1273 act = bpf_prog_run_xdp(prog, xdp); 1274 switch (act) { 1275 case XDP_PASS: 1276 return false; 1277 case XDP_TX: 1278 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) 1279 goto out_failure; 1280 *status |= TSNEP_XDP_TX; 1281 return true; 1282 case XDP_REDIRECT: 1283 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) 1284 goto out_failure; 1285 *status |= TSNEP_XDP_REDIRECT; 1286 return true; 1287 default: 1288 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); 1289 fallthrough; 1290 case XDP_ABORTED: 1291 out_failure: 1292 trace_xdp_exception(rx->adapter->netdev, prog, act); 1293 fallthrough; 1294 case XDP_DROP: 1295 /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU 1296 * touch 1297 */ 1298 sync = xdp->data_end - xdp->data_hard_start - 1299 XDP_PACKET_HEADROOM; 1300 sync = max(sync, length); 1301 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), 1302 sync, true); 1303 return true; 1304 } 1305 } 1306 1307 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, 1308 struct xdp_buff *xdp, int *status, 1309 struct netdev_queue *tx_nq, 1310 struct tsnep_tx *tx) 1311 { 1312 u32 act; 1313 1314 act = bpf_prog_run_xdp(prog, xdp); 1315 1316 /* XDP_REDIRECT is the main action for zero-copy */ 1317 if (likely(act == XDP_REDIRECT)) { 1318 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) 1319 goto out_failure; 1320 *status |= TSNEP_XDP_REDIRECT; 1321 return 
true; 1322 } 1323 1324 switch (act) { 1325 case XDP_PASS: 1326 return false; 1327 case XDP_TX: 1328 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) 1329 goto out_failure; 1330 *status |= TSNEP_XDP_TX; 1331 return true; 1332 default: 1333 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); 1334 fallthrough; 1335 case XDP_ABORTED: 1336 out_failure: 1337 trace_xdp_exception(rx->adapter->netdev, prog, act); 1338 fallthrough; 1339 case XDP_DROP: 1340 xsk_buff_free(xdp); 1341 return true; 1342 } 1343 } 1344 1345 static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status, 1346 struct netdev_queue *tx_nq, struct tsnep_tx *tx) 1347 { 1348 if (status & TSNEP_XDP_TX) { 1349 __netif_tx_lock(tx_nq, smp_processor_id()); 1350 tsnep_xdp_xmit_flush(tx); 1351 __netif_tx_unlock(tx_nq); 1352 } 1353 1354 if (status & TSNEP_XDP_REDIRECT) 1355 xdp_do_flush(); 1356 } 1357 1358 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, 1359 int length) 1360 { 1361 struct sk_buff *skb; 1362 1363 skb = napi_build_skb(page_address(page), PAGE_SIZE); 1364 if (unlikely(!skb)) 1365 return NULL; 1366 1367 /* update pointers within the skb to store the data */ 1368 skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE); 1369 __skb_put(skb, length - ETH_FCS_LEN); 1370 1371 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { 1372 struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); 1373 struct tsnep_rx_inline *rx_inline = 1374 (struct tsnep_rx_inline *)(page_address(page) + 1375 TSNEP_RX_OFFSET); 1376 1377 skb_shinfo(skb)->tx_flags |= 1378 SKBTX_HW_TSTAMP_NETDEV; 1379 memset(hwtstamps, 0, sizeof(*hwtstamps)); 1380 hwtstamps->netdev_data = rx_inline; 1381 } 1382 1383 skb_record_rx_queue(skb, rx->queue_index); 1384 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); 1385 1386 return skb; 1387 } 1388 1389 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, 1390 struct page *page, int length) 1391 { 1392 struct sk_buff *skb; 1393 1394 skb = tsnep_build_skb(rx, page, length); 1395 if (skb) { 1396 skb_mark_for_recycle(skb); 1397 1398 rx->packets++; 1399 rx->bytes += length; 1400 if (skb->pkt_type == PACKET_MULTICAST) 1401 rx->multicast++; 1402 1403 napi_gro_receive(napi, skb); 1404 } else { 1405 page_pool_recycle_direct(rx->page_pool, page); 1406 1407 rx->dropped++; 1408 } 1409 } 1410 1411 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, 1412 int budget) 1413 { 1414 struct device *dmadev = rx->adapter->dmadev; 1415 enum dma_data_direction dma_dir; 1416 struct tsnep_rx_entry *entry; 1417 struct netdev_queue *tx_nq; 1418 struct bpf_prog *prog; 1419 struct xdp_buff xdp; 1420 struct tsnep_tx *tx; 1421 int desc_available; 1422 int xdp_status = 0; 1423 int done = 0; 1424 int length; 1425 1426 desc_available = tsnep_rx_desc_available(rx); 1427 dma_dir = page_pool_get_dma_dir(rx->page_pool); 1428 prog = READ_ONCE(rx->adapter->xdp_prog); 1429 if (prog) { 1430 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, 1431 rx->tx_queue_index); 1432 tx = &rx->adapter->tx[rx->tx_queue_index]; 1433 1434 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); 1435 } 1436 1437 while (likely(done < budget) && (rx->read != rx->write)) { 1438 entry = &rx->entry[rx->read]; 1439 if ((__le32_to_cpu(entry->desc_wb->properties) & 1440 TSNEP_DESC_OWNER_COUNTER_MASK) != 1441 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) 1442 break; 1443 done++; 1444 1445 if (desc_available >= TSNEP_RING_RX_REFILL) { 1446 bool reuse = desc_available 
>= TSNEP_RING_RX_REUSE; 1447 1448 desc_available -= tsnep_rx_refill(rx, desc_available, 1449 reuse); 1450 if (!entry->page) { 1451 /* buffer has been reused for refill to prevent 1452 * empty RX ring, thus buffer cannot be used for 1453 * RX processing 1454 */ 1455 rx->read = (rx->read + 1) & TSNEP_RING_MASK; 1456 desc_available++; 1457 1458 rx->dropped++; 1459 1460 continue; 1461 } 1462 } 1463 1464 /* descriptor properties shall be read first, because valid data 1465 * is signaled there 1466 */ 1467 dma_rmb(); 1468 1469 prefetch(page_address(entry->page) + TSNEP_RX_OFFSET); 1470 length = __le32_to_cpu(entry->desc_wb->properties) & 1471 TSNEP_DESC_LENGTH_MASK; 1472 dma_sync_single_range_for_cpu(dmadev, entry->dma, 1473 TSNEP_RX_OFFSET, length, dma_dir); 1474 1475 /* RX metadata with timestamps is in front of actual data, 1476 * subtract metadata size to get length of actual data and 1477 * consider metadata size as offset of actual data during RX 1478 * processing 1479 */ 1480 length -= TSNEP_RX_INLINE_METADATA_SIZE; 1481 1482 rx->read = (rx->read + 1) & TSNEP_RING_MASK; 1483 desc_available++; 1484 1485 if (prog) { 1486 bool consume; 1487 1488 xdp_prepare_buff(&xdp, page_address(entry->page), 1489 XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE, 1490 length - ETH_FCS_LEN, false); 1491 1492 consume = tsnep_xdp_run_prog(rx, prog, &xdp, 1493 &xdp_status, tx_nq, tx); 1494 if (consume) { 1495 rx->packets++; 1496 rx->bytes += length; 1497 1498 entry->page = NULL; 1499 1500 continue; 1501 } 1502 } 1503 1504 tsnep_rx_page(rx, napi, entry->page, length); 1505 entry->page = NULL; 1506 } 1507 1508 if (xdp_status) 1509 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); 1510 1511 if (desc_available) 1512 tsnep_rx_refill(rx, desc_available, false); 1513 1514 return done; 1515 } 1516 1517 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, 1518 int budget) 1519 { 1520 struct tsnep_rx_entry *entry; 1521 struct netdev_queue *tx_nq; 1522 struct bpf_prog *prog; 1523 struct tsnep_tx *tx; 1524 int desc_available; 1525 int xdp_status = 0; 1526 struct page *page; 1527 int done = 0; 1528 int length; 1529 1530 desc_available = tsnep_rx_desc_available(rx); 1531 prog = READ_ONCE(rx->adapter->xdp_prog); 1532 if (prog) { 1533 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, 1534 rx->tx_queue_index); 1535 tx = &rx->adapter->tx[rx->tx_queue_index]; 1536 } 1537 1538 while (likely(done < budget) && (rx->read != rx->write)) { 1539 entry = &rx->entry[rx->read]; 1540 if ((__le32_to_cpu(entry->desc_wb->properties) & 1541 TSNEP_DESC_OWNER_COUNTER_MASK) != 1542 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) 1543 break; 1544 done++; 1545 1546 if (desc_available >= TSNEP_RING_RX_REFILL) { 1547 bool reuse = desc_available >= TSNEP_RING_RX_REUSE; 1548 1549 desc_available -= tsnep_rx_refill_zc(rx, desc_available, 1550 reuse); 1551 if (!entry->xdp) { 1552 /* buffer has been reused for refill to prevent 1553 * empty RX ring, thus buffer cannot be used for 1554 * RX processing 1555 */ 1556 rx->read = (rx->read + 1) & TSNEP_RING_MASK; 1557 desc_available++; 1558 1559 rx->dropped++; 1560 1561 continue; 1562 } 1563 } 1564 1565 /* descriptor properties shall be read first, because valid data 1566 * is signaled there 1567 */ 1568 dma_rmb(); 1569 1570 prefetch(entry->xdp->data); 1571 length = __le32_to_cpu(entry->desc_wb->properties) & 1572 TSNEP_DESC_LENGTH_MASK; 1573 xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN); 1574 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); 1575 1576 /* RX metadata 
with timestamps is in front of actual data, 1577 * subtract metadata size to get length of actual data and 1578 * consider metadata size as offset of actual data during RX 1579 * processing 1580 */ 1581 length -= TSNEP_RX_INLINE_METADATA_SIZE; 1582 1583 rx->read = (rx->read + 1) & TSNEP_RING_MASK; 1584 desc_available++; 1585 1586 if (prog) { 1587 bool consume; 1588 1589 entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE; 1590 entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE; 1591 1592 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, 1593 &xdp_status, tx_nq, tx); 1594 if (consume) { 1595 rx->packets++; 1596 rx->bytes += length; 1597 1598 entry->xdp = NULL; 1599 1600 continue; 1601 } 1602 } 1603 1604 page = page_pool_dev_alloc_pages(rx->page_pool); 1605 if (page) { 1606 memcpy(page_address(page) + TSNEP_RX_OFFSET, 1607 entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE, 1608 length + TSNEP_RX_INLINE_METADATA_SIZE); 1609 tsnep_rx_page(rx, napi, page, length); 1610 } else { 1611 rx->dropped++; 1612 } 1613 xsk_buff_free(entry->xdp); 1614 entry->xdp = NULL; 1615 } 1616 1617 if (xdp_status) 1618 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); 1619 1620 if (desc_available) 1621 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); 1622 1623 if (xsk_uses_need_wakeup(rx->xsk_pool)) { 1624 if (desc_available) 1625 xsk_set_rx_need_wakeup(rx->xsk_pool); 1626 else 1627 xsk_clear_rx_need_wakeup(rx->xsk_pool); 1628 1629 return done; 1630 } 1631 1632 return desc_available ? budget : done; 1633 } 1634 1635 static bool tsnep_rx_pending(struct tsnep_rx *rx) 1636 { 1637 struct tsnep_rx_entry *entry; 1638 1639 if (rx->read != rx->write) { 1640 entry = &rx->entry[rx->read]; 1641 if ((__le32_to_cpu(entry->desc_wb->properties) & 1642 TSNEP_DESC_OWNER_COUNTER_MASK) == 1643 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) 1644 return true; 1645 } 1646 1647 return false; 1648 } 1649 1650 static int tsnep_rx_open(struct tsnep_rx *rx) 1651 { 1652 int desc_available; 1653 int retval; 1654 1655 retval = tsnep_rx_ring_create(rx); 1656 if (retval) 1657 return retval; 1658 1659 tsnep_rx_init(rx); 1660 1661 desc_available = tsnep_rx_desc_available(rx); 1662 if (rx->xsk_pool) 1663 retval = tsnep_rx_alloc_zc(rx, desc_available, false); 1664 else 1665 retval = tsnep_rx_alloc(rx, desc_available, false); 1666 if (retval != desc_available) { 1667 retval = -ENOMEM; 1668 1669 goto alloc_failed; 1670 } 1671 1672 /* prealloc pages to prevent allocation failures when XSK pool is 1673 * disabled at runtime 1674 */ 1675 if (rx->xsk_pool) { 1676 retval = tsnep_rx_alloc_page_buffer(rx); 1677 if (retval) 1678 goto alloc_failed; 1679 } 1680 1681 return 0; 1682 1683 alloc_failed: 1684 tsnep_rx_ring_cleanup(rx); 1685 return retval; 1686 } 1687 1688 static void tsnep_rx_close(struct tsnep_rx *rx) 1689 { 1690 if (rx->xsk_pool) 1691 tsnep_rx_free_page_buffer(rx); 1692 1693 tsnep_rx_ring_cleanup(rx); 1694 } 1695 1696 static void tsnep_rx_reopen(struct tsnep_rx *rx) 1697 { 1698 struct page **page = rx->page_buffer; 1699 int i; 1700 1701 tsnep_rx_init(rx); 1702 1703 for (i = 0; i < TSNEP_RING_SIZE; i++) { 1704 struct tsnep_rx_entry *entry = &rx->entry[i]; 1705 1706 /* defined initial values for properties are required for 1707 * correct owner counter checking 1708 */ 1709 entry->desc->properties = 0; 1710 entry->desc_wb->properties = 0; 1711 1712 /* prevent allocation failures by reusing kept pages */ 1713 if (*page) { 1714 tsnep_rx_set_page(rx, entry, *page); 1715 tsnep_rx_activate(rx, rx->write); 1716 rx->write++; 
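
			/* the ring entry owns the page now, so drop it from
			 * page_buffer
			 */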
			*page = NULL;
			page++;
		}
	}
}

static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
{
	struct page **page = rx->page_buffer;
	u32 allocated;
	int i;

	tsnep_rx_init(rx);

	/* allocate buffers for all ring entries except the last one, because
	 * the ring cannot be filled completely; as many buffers as possible
	 * are enough, because a wakeup is signaled if new buffers are
	 * available
	 */
	allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
					 TSNEP_RING_SIZE - 1);

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		struct tsnep_rx_entry *entry = &rx->entry[i];

		/* keep pages to prevent allocation failures when xsk is
		 * disabled
		 */
		if (entry->page) {
			*page = entry->page;
			entry->page = NULL;

			page++;
		}

		/* defined initial values for properties are required for
		 * correct owner counter checking
		 */
		entry->desc->properties = 0;
		entry->desc_wb->properties = 0;

		if (allocated) {
			tsnep_rx_set_xdp(rx, entry,
					 rx->xdp_batch[allocated - 1]);
			tsnep_rx_activate(rx, rx->write);
			rx->write++;

			allocated--;
		}
	}

	/* set the need wakeup flag immediately if the ring is not filled
	 * completely; the first polling would be too late, because the need
	 * wakeup signaling would be delayed for an indefinite time
	 */
	if (xsk_uses_need_wakeup(rx->xsk_pool)) {
		int desc_available = tsnep_rx_desc_available(rx);

		if (desc_available)
			xsk_set_rx_need_wakeup(rx->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx->xsk_pool);
	}
}

static bool tsnep_pending(struct tsnep_queue *queue)
{
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	/* handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	if (queue->rx) {
		done = queue->rx->xsk_pool ?
1809 tsnep_rx_poll_zc(queue->rx, napi, budget) : 1810 tsnep_rx_poll(queue->rx, napi, budget); 1811 if (done >= budget) 1812 complete = false; 1813 } 1814 1815 /* if all work not completed, return budget and keep polling */ 1816 if (!complete) 1817 return budget; 1818 1819 if (likely(napi_complete_done(napi, done))) { 1820 tsnep_enable_irq(queue->adapter, queue->irq_mask); 1821 1822 /* reschedule if work is already pending, prevent rotten packets 1823 * which are transmitted or received after polling but before 1824 * interrupt enable 1825 */ 1826 if (tsnep_pending(queue)) { 1827 tsnep_disable_irq(queue->adapter, queue->irq_mask); 1828 napi_schedule(napi); 1829 } 1830 } 1831 1832 return min(done, budget - 1); 1833 } 1834 1835 static int tsnep_request_irq(struct tsnep_queue *queue, bool first) 1836 { 1837 const char *name = netdev_name(queue->adapter->netdev); 1838 irq_handler_t handler; 1839 void *dev; 1840 int retval; 1841 1842 if (first) { 1843 sprintf(queue->name, "%s-mac", name); 1844 handler = tsnep_irq; 1845 dev = queue->adapter; 1846 } else { 1847 if (queue->tx && queue->rx) 1848 snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d", 1849 name, queue->rx->queue_index); 1850 else if (queue->tx) 1851 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", 1852 name, queue->tx->queue_index); 1853 else 1854 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", 1855 name, queue->rx->queue_index); 1856 handler = tsnep_irq_txrx; 1857 dev = queue; 1858 } 1859 1860 retval = request_irq(queue->irq, handler, 0, queue->name, dev); 1861 if (retval) { 1862 /* if name is empty, then interrupt won't be freed */ 1863 memset(queue->name, 0, sizeof(queue->name)); 1864 } 1865 1866 return retval; 1867 } 1868 1869 static void tsnep_free_irq(struct tsnep_queue *queue, bool first) 1870 { 1871 void *dev; 1872 1873 if (!strlen(queue->name)) 1874 return; 1875 1876 if (first) 1877 dev = queue->adapter; 1878 else 1879 dev = queue; 1880 1881 free_irq(queue->irq, dev); 1882 memset(queue->name, 0, sizeof(queue->name)); 1883 } 1884 1885 static void tsnep_queue_close(struct tsnep_queue *queue, bool first) 1886 { 1887 struct tsnep_rx *rx = queue->rx; 1888 1889 tsnep_free_irq(queue, first); 1890 1891 if (rx) { 1892 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) 1893 xdp_rxq_info_unreg(&rx->xdp_rxq); 1894 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) 1895 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); 1896 } 1897 1898 netif_napi_del(&queue->napi); 1899 } 1900 1901 static int tsnep_queue_open(struct tsnep_adapter *adapter, 1902 struct tsnep_queue *queue, bool first) 1903 { 1904 struct tsnep_rx *rx = queue->rx; 1905 struct tsnep_tx *tx = queue->tx; 1906 int retval; 1907 1908 netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll); 1909 1910 if (rx) { 1911 /* choose TX queue for XDP_TX */ 1912 if (tx) 1913 rx->tx_queue_index = tx->queue_index; 1914 else if (rx->queue_index < adapter->num_tx_queues) 1915 rx->tx_queue_index = rx->queue_index; 1916 else 1917 rx->tx_queue_index = 0; 1918 1919 /* prepare both memory models to eliminate possible registration 1920 * errors when memory model is switched between page pool and 1921 * XSK pool during runtime 1922 */ 1923 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, 1924 rx->queue_index, queue->napi.napi_id); 1925 if (retval) 1926 goto failed; 1927 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, 1928 MEM_TYPE_PAGE_POOL, 1929 rx->page_pool); 1930 if (retval) 1931 goto failed; 1932 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, 1933 rx->queue_index, queue->napi.napi_id); 1934 
if (retval) 1935 goto failed; 1936 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, 1937 MEM_TYPE_XSK_BUFF_POOL, 1938 NULL); 1939 if (retval) 1940 goto failed; 1941 if (rx->xsk_pool) 1942 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); 1943 } 1944 1945 retval = tsnep_request_irq(queue, first); 1946 if (retval) { 1947 netif_err(adapter, drv, adapter->netdev, 1948 "can't get assigned irq %d.\n", queue->irq); 1949 goto failed; 1950 } 1951 1952 return 0; 1953 1954 failed: 1955 tsnep_queue_close(queue, first); 1956 1957 return retval; 1958 } 1959 1960 static void tsnep_queue_enable(struct tsnep_queue *queue) 1961 { 1962 napi_enable(&queue->napi); 1963 tsnep_enable_irq(queue->adapter, queue->irq_mask); 1964 1965 if (queue->tx) 1966 tsnep_tx_enable(queue->tx); 1967 1968 if (queue->rx) 1969 tsnep_rx_enable(queue->rx); 1970 } 1971 1972 static void tsnep_queue_disable(struct tsnep_queue *queue) 1973 { 1974 if (queue->tx) 1975 tsnep_tx_disable(queue->tx, &queue->napi); 1976 1977 napi_disable(&queue->napi); 1978 tsnep_disable_irq(queue->adapter, queue->irq_mask); 1979 1980 /* disable RX after NAPI polling has been disabled, because RX can be 1981 * enabled during NAPI polling 1982 */ 1983 if (queue->rx) 1984 tsnep_rx_disable(queue->rx); 1985 } 1986 1987 static int tsnep_netdev_open(struct net_device *netdev) 1988 { 1989 struct tsnep_adapter *adapter = netdev_priv(netdev); 1990 int i, retval; 1991 1992 for (i = 0; i < adapter->num_queues; i++) { 1993 if (adapter->queue[i].tx) { 1994 retval = tsnep_tx_open(adapter->queue[i].tx); 1995 if (retval) 1996 goto failed; 1997 } 1998 if (adapter->queue[i].rx) { 1999 retval = tsnep_rx_open(adapter->queue[i].rx); 2000 if (retval) 2001 goto failed; 2002 } 2003 2004 retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0); 2005 if (retval) 2006 goto failed; 2007 } 2008 2009 retval = netif_set_real_num_tx_queues(adapter->netdev, 2010 adapter->num_tx_queues); 2011 if (retval) 2012 goto failed; 2013 retval = netif_set_real_num_rx_queues(adapter->netdev, 2014 adapter->num_rx_queues); 2015 if (retval) 2016 goto failed; 2017 2018 tsnep_enable_irq(adapter, ECM_INT_LINK); 2019 retval = tsnep_phy_open(adapter); 2020 if (retval) 2021 goto phy_failed; 2022 2023 for (i = 0; i < adapter->num_queues; i++) 2024 tsnep_queue_enable(&adapter->queue[i]); 2025 2026 return 0; 2027 2028 phy_failed: 2029 tsnep_disable_irq(adapter, ECM_INT_LINK); 2030 failed: 2031 for (i = 0; i < adapter->num_queues; i++) { 2032 tsnep_queue_close(&adapter->queue[i], i == 0); 2033 2034 if (adapter->queue[i].rx) 2035 tsnep_rx_close(adapter->queue[i].rx); 2036 if (adapter->queue[i].tx) 2037 tsnep_tx_close(adapter->queue[i].tx); 2038 } 2039 return retval; 2040 } 2041 2042 static int tsnep_netdev_close(struct net_device *netdev) 2043 { 2044 struct tsnep_adapter *adapter = netdev_priv(netdev); 2045 int i; 2046 2047 tsnep_disable_irq(adapter, ECM_INT_LINK); 2048 tsnep_phy_close(adapter); 2049 2050 for (i = 0; i < adapter->num_queues; i++) { 2051 tsnep_queue_disable(&adapter->queue[i]); 2052 2053 tsnep_queue_close(&adapter->queue[i], i == 0); 2054 2055 if (adapter->queue[i].rx) 2056 tsnep_rx_close(adapter->queue[i].rx); 2057 if (adapter->queue[i].tx) 2058 tsnep_tx_close(adapter->queue[i].tx); 2059 } 2060 2061 return 0; 2062 } 2063 2064 int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool) 2065 { 2066 bool running = netif_running(queue->adapter->netdev); 2067 u32 frame_size; 2068 2069 frame_size = xsk_pool_get_rx_frame_size(pool); 2070 if (frame_size < TSNEP_XSK_RX_BUF_SIZE) 
int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
{
        bool running = netif_running(queue->adapter->netdev);
        u32 frame_size;

        frame_size = xsk_pool_get_rx_frame_size(pool);
        if (frame_size < TSNEP_XSK_RX_BUF_SIZE)
                return -EOPNOTSUPP;

        queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
                                         sizeof(*queue->rx->page_buffer),
                                         GFP_KERNEL);
        if (!queue->rx->page_buffer)
                return -ENOMEM;
        queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
                                       sizeof(*queue->rx->xdp_batch),
                                       GFP_KERNEL);
        if (!queue->rx->xdp_batch) {
                kfree(queue->rx->page_buffer);
                queue->rx->page_buffer = NULL;

                return -ENOMEM;
        }

        xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);

        if (running)
                tsnep_queue_disable(queue);

        queue->tx->xsk_pool = pool;
        queue->rx->xsk_pool = pool;

        if (running) {
                tsnep_rx_reopen_xsk(queue->rx);
                tsnep_queue_enable(queue);
        }

        return 0;
}

void tsnep_disable_xsk(struct tsnep_queue *queue)
{
        bool running = netif_running(queue->adapter->netdev);

        if (running)
                tsnep_queue_disable(queue);

        tsnep_rx_free_zc(queue->rx);

        queue->rx->xsk_pool = NULL;
        queue->tx->xsk_pool = NULL;

        if (running) {
                tsnep_rx_reopen(queue->rx);
                tsnep_queue_enable(queue);
        }

        kfree(queue->rx->xdp_batch);
        queue->rx->xdp_batch = NULL;
        kfree(queue->rx->page_buffer);
        queue->rx->page_buffer = NULL;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
                                           struct net_device *netdev)
{
        struct tsnep_adapter *adapter = netdev_priv(netdev);
        u16 queue_mapping = skb_get_queue_mapping(skb);

        if (queue_mapping >= adapter->num_tx_queues)
                queue_mapping = 0;

        return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
                              int cmd)
{
        if (!netif_running(netdev))
                return -EINVAL;
        if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
                return tsnep_ptp_ioctl(netdev, ifr, cmd);
        return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
        struct tsnep_adapter *adapter = netdev_priv(netdev);

        u16 rx_filter = 0;

        /* configured MAC address and broadcasts are never filtered */
        if (netdev->flags & IFF_PROMISC) {
                rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
                rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
        } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
                rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
        }
        iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}
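/* ndo_get_stats64: combine the per-queue software counters with the drop and
 * error counters read from the per-queue RX statistics register and the
 * global ECM status register
 */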
static void tsnep_netdev_get_stats64(struct net_device *netdev,
                                     struct rtnl_link_stats64 *stats)
{
        struct tsnep_adapter *adapter = netdev_priv(netdev);
        u32 reg;
        u32 val;
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                stats->tx_packets += adapter->tx[i].packets;
                stats->tx_bytes += adapter->tx[i].bytes;
                stats->tx_dropped += adapter->tx[i].dropped;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                stats->rx_packets += adapter->rx[i].packets;
                stats->rx_bytes += adapter->rx[i].bytes;
                stats->rx_dropped += adapter->rx[i].dropped;
                stats->multicast += adapter->rx[i].multicast;

                reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
                               TSNEP_RX_STATISTIC);
                val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
                      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
                stats->rx_dropped += val;
                val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
                      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
                stats->rx_dropped += val;
                val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
                      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
                stats->rx_errors += val;
                stats->rx_fifo_errors += val;
                val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
                      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
                stats->rx_errors += val;
                stats->rx_frame_errors += val;
        }

        reg = ioread32(adapter->addr + ECM_STAT);
        val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
        stats->rx_errors += val;
        val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
        stats->rx_errors += val;
        stats->rx_crc_errors += val;
        val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
        stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
        iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
        iowrite16(*(u16 *)(addr + sizeof(u32)),
                  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

        ether_addr_copy(adapter->mac_address, addr);
        netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
                   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
        struct tsnep_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *sock_addr = addr;
        int retval;

        retval = eth_prepare_mac_addr_change(netdev, sock_addr);
        if (retval)
                return retval;
        eth_hw_addr_set(netdev, sock_addr->sa_data);
        tsnep_mac_set_address(adapter, sock_addr->sa_data);

        return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
                                     netdev_features_t features)
{
        struct tsnep_adapter *adapter = netdev_priv(netdev);
        netdev_features_t changed = netdev->features ^ features;
        bool enable;
        int retval = 0;

        if (changed & NETIF_F_LOOPBACK) {
                enable = !!(features & NETIF_F_LOOPBACK);
                retval = tsnep_phy_loopback(adapter, enable);
        }

        return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
                                       const struct skb_shared_hwtstamps *hwtstamps,
                                       bool cycles)
{
        struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
        u64 timestamp;

        if (cycles)
                timestamp = __le64_to_cpu(rx_inline->counter);
        else
                timestamp = __le64_to_cpu(rx_inline->timestamp);

        return ns_to_ktime(timestamp);
}

static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        struct tsnep_adapter *adapter = netdev_priv(dev);

        switch (bpf->command) {
        case XDP_SETUP_PROG:
                return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
        case XDP_SETUP_XSK_POOL:
                return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
                                            bpf->xsk.queue_id);
        default:
                return -EOPNOTSUPP;
        }
}

static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
{
        if (cpu >= TSNEP_MAX_QUEUES)
                cpu &= TSNEP_MAX_QUEUES - 1;

        while (cpu >= adapter->num_tx_queues)
                cpu -= adapter->num_tx_queues;

        return &adapter->tx[cpu];
}
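/* ndo_xdp_xmit: transmit XDP frames redirected from other devices. The TX
 * queue is selected based on the current CPU and is shared with the normal
 * transmit path, therefore the netdev TX queue lock is taken and the transmit
 * timeout is refreshed for every successfully queued frame.
 */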
static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
                                 struct xdp_frame **xdp, u32 flags)
{
        struct tsnep_adapter *adapter = netdev_priv(dev);
        u32 cpu = smp_processor_id();
        struct netdev_queue *nq;
        struct tsnep_tx *tx;
        int nxmit;
        bool xmit;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        tx = tsnep_xdp_get_tx(adapter, cpu);
        nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);

        __netif_tx_lock(nq, cpu);

        for (nxmit = 0; nxmit < n; nxmit++) {
                xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
                                                 TSNEP_TX_TYPE_XDP_NDO);
                if (!xmit)
                        break;

                /* avoid transmit queue timeout since we share it with the slow
                 * path
                 */
                txq_trans_cond_update(nq);
        }

        if (flags & XDP_XMIT_FLUSH)
                tsnep_xdp_xmit_flush(tx);

        __netif_tx_unlock(nq);

        return nxmit;
}

static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id,
                                   u32 flags)
{
        struct tsnep_adapter *adapter = netdev_priv(dev);
        struct tsnep_queue *queue;

        if (queue_id >= adapter->num_rx_queues ||
            queue_id >= adapter->num_tx_queues)
                return -EINVAL;

        queue = &adapter->queue[queue_id];

        if (!napi_if_scheduled_mark_missed(&queue->napi))
                napi_schedule(&queue->napi);

        return 0;
}

static const struct net_device_ops tsnep_netdev_ops = {
        .ndo_open = tsnep_netdev_open,
        .ndo_stop = tsnep_netdev_close,
        .ndo_start_xmit = tsnep_netdev_xmit_frame,
        .ndo_eth_ioctl = tsnep_netdev_ioctl,
        .ndo_set_rx_mode = tsnep_netdev_set_multicast,
        .ndo_get_stats64 = tsnep_netdev_get_stats64,
        .ndo_set_mac_address = tsnep_netdev_set_mac_address,
        .ndo_set_features = tsnep_netdev_set_features,
        .ndo_get_tstamp = tsnep_netdev_get_tstamp,
        .ndo_setup_tc = tsnep_tc_setup,
        .ndo_bpf = tsnep_netdev_bpf,
        .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
        .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
        int retval;

        /* initialize RX filtering, at least configured MAC address and
         * broadcast are not filtered
         */
        iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

        /* try to get MAC address in the following order:
         * - device tree
         * - valid MAC address already set
         * - MAC address register if valid
         * - random MAC address
         */
        retval = of_get_mac_address(adapter->pdev->dev.of_node,
                                    adapter->mac_address);
        if (retval == -EPROBE_DEFER)
                return retval;
        if (retval && !is_valid_ether_addr(adapter->mac_address)) {
                *(u32 *)adapter->mac_address =
                        ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
                *(u16 *)(adapter->mac_address + sizeof(u32)) =
                        ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
                if (!is_valid_ether_addr(adapter->mac_address))
                        eth_random_addr(adapter->mac_address);
        }

        tsnep_mac_set_address(adapter, adapter->mac_address);
        eth_hw_addr_set(adapter->netdev, adapter->mac_address);

        return 0;
}
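/* The MDIO bus is only registered if the device tree node contains an "mdio"
 * child node; the optional "suppress-preamble" property suppresses the MDIO
 * preamble. Illustrative fragment (derived from the lookups below, see the
 * device tree binding for the authoritative schema):
 *
 *	mdio {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		suppress-preamble;
 *
 *		ethernet-phy@1 {
 *			reg = <1>;
 *		};
 *	};
 */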
static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
        struct device_node *np = adapter->pdev->dev.of_node;
        int retval;

        if (np) {
                np = of_get_child_by_name(np, "mdio");
                if (!np)
                        return 0;

                adapter->suppress_preamble =
                        of_property_read_bool(np, "suppress-preamble");
        }

        adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
        if (!adapter->mdiobus) {
                retval = -ENOMEM;

                goto out;
        }

        adapter->mdiobus->priv = (void *)adapter;
        adapter->mdiobus->parent = &adapter->pdev->dev;
        adapter->mdiobus->read = tsnep_mdiobus_read;
        adapter->mdiobus->write = tsnep_mdiobus_write;
        adapter->mdiobus->name = TSNEP "-mdiobus";
        snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
                 adapter->pdev->name);

        /* do not scan broadcast address */
        adapter->mdiobus->phy_mask = 0x0000001;

        retval = of_mdiobus_register(adapter->mdiobus, np);

out:
        of_node_put(np);

        return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
        struct device_node *phy_node;
        int retval;

        retval = of_get_phy_mode(adapter->pdev->dev.of_node,
                                 &adapter->phy_mode);
        if (retval)
                adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

        phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
                                    0);
        adapter->phydev = of_phy_find_device(phy_node);
        of_node_put(phy_node);
        if (!adapter->phydev && adapter->mdiobus)
                adapter->phydev = phy_find_first(adapter->mdiobus);
        if (!adapter->phydev)
                return -EIO;

        return 0;
}
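/* Interrupts are looked up by name: "mac" for the mandatory first TX/RX queue
 * pair (or the one and only interrupt, if just a single one is specified) and
 * "txrx-<n>" for each additional queue pair. Illustrative interrupt-names
 * fragment for four queue pairs (derived from the lookups below, see the
 * device tree binding for the authoritative schema):
 *
 *	interrupt-names = "mac", "txrx-1", "txrx-2", "txrx-3";
 */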
static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
{
        u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
        char name[8];
        int i;
        int retval;

        /* one TX/RX queue pair for netdev is mandatory */
        if (platform_irq_count(adapter->pdev) == 1)
                retval = platform_get_irq(adapter->pdev, 0);
        else
                retval = platform_get_irq_byname(adapter->pdev, "mac");
        if (retval < 0)
                return retval;
        adapter->num_tx_queues = 1;
        adapter->num_rx_queues = 1;
        adapter->num_queues = 1;
        adapter->queue[0].adapter = adapter;
        adapter->queue[0].irq = retval;
        adapter->queue[0].tx = &adapter->tx[0];
        adapter->queue[0].tx->adapter = adapter;
        adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
        adapter->queue[0].tx->queue_index = 0;
        adapter->queue[0].rx = &adapter->rx[0];
        adapter->queue[0].rx->adapter = adapter;
        adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
        adapter->queue[0].rx->queue_index = 0;
        adapter->queue[0].irq_mask = irq_mask;
        adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
        retval = tsnep_set_irq_coalesce(&adapter->queue[0],
                                        TSNEP_COALESCE_USECS_DEFAULT);
        if (retval < 0)
                return retval;

        adapter->netdev->irq = adapter->queue[0].irq;

        /* add additional TX/RX queue pairs only if dedicated interrupt is
         * available
         */
        for (i = 1; i < queue_count; i++) {
                sprintf(name, "txrx-%d", i);
                retval = platform_get_irq_byname_optional(adapter->pdev, name);
                if (retval < 0)
                        break;

                adapter->num_tx_queues++;
                adapter->num_rx_queues++;
                adapter->num_queues++;
                adapter->queue[i].adapter = adapter;
                adapter->queue[i].irq = retval;
                adapter->queue[i].tx = &adapter->tx[i];
                adapter->queue[i].tx->adapter = adapter;
                adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
                adapter->queue[i].tx->queue_index = i;
                adapter->queue[i].rx = &adapter->rx[i];
                adapter->queue[i].rx->adapter = adapter;
                adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
                adapter->queue[i].rx->queue_index = i;
                adapter->queue[i].irq_mask =
                        irq_mask << (ECM_INT_TXRX_SHIFT * i);
                adapter->queue[i].irq_delay_addr =
                        adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
                retval = tsnep_set_irq_coalesce(&adapter->queue[i],
                                                TSNEP_COALESCE_USECS_DEFAULT);
                if (retval < 0)
                        return retval;
        }

        return 0;
}

static int tsnep_probe(struct platform_device *pdev)
{
        struct tsnep_adapter *adapter;
        struct net_device *netdev;
        struct resource *io;
        u32 type;
        int revision;
        int version;
        int queue_count;
        int retval;

        netdev = devm_alloc_etherdev_mqs(&pdev->dev,
                                         sizeof(struct tsnep_adapter),
                                         TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
        if (!netdev)
                return -ENODEV;
        SET_NETDEV_DEV(netdev, &pdev->dev);
        adapter = netdev_priv(netdev);
        platform_set_drvdata(pdev, adapter);
        adapter->pdev = pdev;
        adapter->dmadev = &pdev->dev;
        adapter->netdev = netdev;
        adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                              NETIF_MSG_LINK | NETIF_MSG_IFUP |
                              NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

        mutex_init(&adapter->gate_control_lock);
        mutex_init(&adapter->rxnfc_lock);
        INIT_LIST_HEAD(&adapter->rxnfc_rules);

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        adapter->addr = devm_ioremap_resource(&pdev->dev, io);
        if (IS_ERR(adapter->addr))
                return PTR_ERR(adapter->addr);
        netdev->mem_start = io->start;
        netdev->mem_end = io->end;

        type = ioread32(adapter->addr + ECM_TYPE);
        revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
        version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
        queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
        adapter->gate_control = type & ECM_GATE_CONTROL;
        adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;

        tsnep_disable_irq(adapter, ECM_INT_ALL);

        retval = tsnep_queue_init(adapter, queue_count);
        if (retval)
                return retval;

        retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
                                           DMA_BIT_MASK(64));
        if (retval) {
                dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
                return retval;
        }

        retval = tsnep_mac_init(adapter);
        if (retval)
                return retval;

        retval = tsnep_mdio_init(adapter);
        if (retval)
                goto mdio_init_failed;

        retval = tsnep_phy_init(adapter);
        if (retval)
                goto phy_init_failed;

        retval = tsnep_ptp_init(adapter);
        if (retval)
                goto ptp_init_failed;

        retval = tsnep_tc_init(adapter);
        if (retval)
                goto tc_init_failed;

        retval = tsnep_rxnfc_init(adapter);
        if (retval)
                goto rxnfc_init_failed;

        netdev->netdev_ops = &tsnep_netdev_ops;
        netdev->ethtool_ops = &tsnep_ethtool_ops;
        netdev->features = NETIF_F_SG;
        netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

        netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
                               NETDEV_XDP_ACT_NDO_XMIT |
                               NETDEV_XDP_ACT_NDO_XMIT_SG |
                               NETDEV_XDP_ACT_XSK_ZEROCOPY;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        retval = register_netdev(netdev);
        if (retval)
                goto register_failed;

        dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
                 revision);
        if (adapter->gate_control)
                dev_info(&adapter->pdev->dev, "gate control detected\n");

        return 0;
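
        /* unwind in reverse order of the initialization above */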
register_failed:
        tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
        tsnep_tc_cleanup(adapter);
tc_init_failed:
        tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
        if (adapter->mdiobus)
                mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
        return retval;
}

static void tsnep_remove(struct platform_device *pdev)
{
        struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

        unregister_netdev(adapter->netdev);

        tsnep_rxnfc_cleanup(adapter);

        tsnep_tc_cleanup(adapter);

        tsnep_ptp_cleanup(adapter);

        if (adapter->mdiobus)
                mdiobus_unregister(adapter->mdiobus);

        tsnep_disable_irq(adapter, ECM_INT_ALL);
}

static const struct of_device_id tsnep_of_match[] = {
        { .compatible = "engleder,tsnep", },
        { },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
        .driver = {
                .name = TSNEP,
                .of_match_table = tsnep_of_match,
        },
        .probe = tsnep_probe,
        .remove_new = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");