1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2005-2013 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/pci.h> 12 #include <linux/tcp.h> 13 #include <linux/ip.h> 14 #include <linux/in.h> 15 #include <linux/ipv6.h> 16 #include <linux/slab.h> 17 #include <net/ipv6.h> 18 #include <linux/if_ether.h> 19 #include <linux/highmem.h> 20 #include <linux/cache.h> 21 #include "net_driver.h" 22 #include "efx.h" 23 #include "io.h" 24 #include "nic.h" 25 #include "workarounds.h" 26 #include "ef10_regs.h" 27 28 #ifdef EFX_USE_PIO 29 30 #define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE 31 #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) 32 unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; 33 34 #endif /* EFX_USE_PIO */ 35 36 static inline unsigned int 37 efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue) 38 { 39 return tx_queue->insert_count & tx_queue->ptr_mask; 40 } 41 42 static inline struct efx_tx_buffer * 43 __efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) 44 { 45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)]; 46 } 47 48 static inline struct efx_tx_buffer * 49 efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) 50 { 51 struct efx_tx_buffer *buffer = 52 __efx_tx_queue_get_insert_buffer(tx_queue); 53 54 EFX_BUG_ON_PARANOID(buffer->len); 55 EFX_BUG_ON_PARANOID(buffer->flags); 56 EFX_BUG_ON_PARANOID(buffer->unmap_len); 57 58 return buffer; 59 } 60 61 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 62 struct efx_tx_buffer *buffer, 63 unsigned int *pkts_compl, 64 unsigned int *bytes_compl) 65 { 66 if (buffer->unmap_len) { 67 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 68 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - 69 buffer->unmap_len); 70 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) 71 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, 72 DMA_TO_DEVICE); 73 else 74 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, 75 DMA_TO_DEVICE); 76 buffer->unmap_len = 0; 77 } 78 79 if (buffer->flags & EFX_TX_BUF_SKB) { 80 (*pkts_compl)++; 81 (*bytes_compl) += buffer->skb->len; 82 dev_kfree_skb_any((struct sk_buff *) buffer->skb); 83 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, 84 "TX queue %d transmission id %x complete\n", 85 tx_queue->queue, tx_queue->read_count); 86 } else if (buffer->flags & EFX_TX_BUF_HEAP) { 87 kfree(buffer->heap_buf); 88 } 89 90 buffer->len = 0; 91 buffer->flags = 0; 92 } 93 94 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 95 struct sk_buff *skb); 96 97 static inline unsigned 98 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) 99 { 100 /* Depending on the NIC revision, we can use descriptor 101 * lengths up to 8K or 8K-1. However, since PCI Express 102 * devices must split read requests at 4K boundaries, there is 103 * little benefit from using descriptors that cross those 104 * boundaries and we keep things simple by not doing so. 105 */ 106 unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1; 107 108 /* Work around hardware bug for unaligned buffers. */ 109 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) 110 len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); 111 112 return len; 113 } 114 115 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) 116 { 117 /* Header and payload descriptor for each output segment, plus 118 * one for every input fragment boundary within a segment 119 */ 120 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; 121 122 /* Possibly one more per segment for the alignment workaround, 123 * or for option descriptors 124 */ 125 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0) 126 max_descs += EFX_TSO_MAX_SEGS; 127 128 /* Possibly more for PCIe page boundaries within input fragments */ 129 if (PAGE_SIZE > EFX_PAGE_SIZE) 130 max_descs += max_t(unsigned int, MAX_SKB_FRAGS, 131 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); 132 133 return max_descs; 134 } 135 136 /* Get partner of a TX queue, seen as part of the same net core queue */ 137 static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) 138 { 139 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) 140 return tx_queue - EFX_TXQ_TYPE_OFFLOAD; 141 else 142 return tx_queue + EFX_TXQ_TYPE_OFFLOAD; 143 } 144 145 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) 146 { 147 /* We need to consider both queues that the net core sees as one */ 148 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1); 149 struct efx_nic *efx = txq1->efx; 150 unsigned int fill_level; 151 152 fill_level = max(txq1->insert_count - txq1->old_read_count, 153 txq2->insert_count - txq2->old_read_count); 154 if (likely(fill_level < efx->txq_stop_thresh)) 155 return; 156 157 /* We used the stale old_read_count above, which gives us a 158 * pessimistic estimate of the fill level (which may even 159 * validly be >= efx->txq_entries). Now try again using 160 * read_count (more likely to be a cache miss). 161 * 162 * If we read read_count and then conditionally stop the 163 * queue, it is possible for the completion path to race with 164 * us and complete all outstanding descriptors in the middle, 165 * after which there will be no more completions to wake it. 166 * Therefore we stop the queue first, then read read_count 167 * (with a memory barrier to ensure the ordering), then 168 * restart the queue if the fill level turns out to be low 169 * enough. 170 */ 171 netif_tx_stop_queue(txq1->core_txq); 172 smp_mb(); 173 txq1->old_read_count = ACCESS_ONCE(txq1->read_count); 174 txq2->old_read_count = ACCESS_ONCE(txq2->read_count); 175 176 fill_level = max(txq1->insert_count - txq1->old_read_count, 177 txq2->insert_count - txq2->old_read_count); 178 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries); 179 if (likely(fill_level < efx->txq_stop_thresh)) { 180 smp_mb(); 181 if (likely(!efx->loopback_selftest)) 182 netif_tx_start_queue(txq1->core_txq); 183 } 184 } 185 186 #ifdef EFX_USE_PIO 187 188 struct efx_short_copy_buffer { 189 int used; 190 u8 buf[L1_CACHE_BYTES]; 191 }; 192 193 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. 194 * Advances piobuf pointer. Leaves additional data in the copy buffer. 195 */ 196 static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, 197 u8 *data, int len, 198 struct efx_short_copy_buffer *copy_buf) 199 { 200 int block_len = len & ~(sizeof(copy_buf->buf) - 1); 201 202 memcpy_toio(*piobuf, data, block_len); 203 *piobuf += block_len; 204 len -= block_len; 205 206 if (len) { 207 data += block_len; 208 BUG_ON(copy_buf->used); 209 BUG_ON(len > sizeof(copy_buf->buf)); 210 memcpy(copy_buf->buf, data, len); 211 copy_buf->used = len; 212 } 213 } 214 215 /* Copy to PIO, respecting dword alignment, popping data from copy buffer first. 216 * Advances piobuf pointer. Leaves additional data in the copy buffer. 217 */ 218 static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, 219 u8 *data, int len, 220 struct efx_short_copy_buffer *copy_buf) 221 { 222 if (copy_buf->used) { 223 /* if the copy buffer is partially full, fill it up and write */ 224 int copy_to_buf = 225 min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); 226 227 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); 228 copy_buf->used += copy_to_buf; 229 230 /* if we didn't fill it up then we're done for now */ 231 if (copy_buf->used < sizeof(copy_buf->buf)) 232 return; 233 234 memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 235 *piobuf += sizeof(copy_buf->buf); 236 data += copy_to_buf; 237 len -= copy_to_buf; 238 copy_buf->used = 0; 239 } 240 241 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); 242 } 243 244 static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, 245 struct efx_short_copy_buffer *copy_buf) 246 { 247 /* if there's anything in it, write the whole buffer, including junk */ 248 if (copy_buf->used) 249 memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 250 } 251 252 /* Traverse skb structure and copy fragments in to PIO buffer. 253 * Advances piobuf pointer. 254 */ 255 static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, 256 u8 __iomem **piobuf, 257 struct efx_short_copy_buffer *copy_buf) 258 { 259 int i; 260 261 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), 262 copy_buf); 263 264 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { 265 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 266 u8 *vaddr; 267 268 vaddr = kmap_atomic(skb_frag_page(f)); 269 270 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, 271 skb_frag_size(f), copy_buf); 272 kunmap_atomic(vaddr); 273 } 274 275 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list); 276 } 277 278 static struct efx_tx_buffer * 279 efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 280 { 281 struct efx_tx_buffer *buffer = 282 efx_tx_queue_get_insert_buffer(tx_queue); 283 u8 __iomem *piobuf = tx_queue->piobuf; 284 285 /* Copy to PIO buffer. Ensure the writes are padded to the end 286 * of a cache line, as this is required for write-combining to be 287 * effective on at least x86. 288 */ 289 290 if (skb_shinfo(skb)->nr_frags) { 291 /* The size of the copy buffer will ensure all writes 292 * are the size of a cache line. 293 */ 294 struct efx_short_copy_buffer copy_buf; 295 296 copy_buf.used = 0; 297 298 efx_skb_copy_bits_to_pio(tx_queue->efx, skb, 299 &piobuf, ©_buf); 300 efx_flush_copy_buffer(tx_queue->efx, piobuf, ©_buf); 301 } else { 302 /* Pad the write to the size of a cache line. 303 * We can do this because we know the skb_shared_info sruct is 304 * after the source, and the destination buffer is big enough. 305 */ 306 BUILD_BUG_ON(L1_CACHE_BYTES > 307 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 308 memcpy_toio(tx_queue->piobuf, skb->data, 309 ALIGN(skb->len, L1_CACHE_BYTES)); 310 } 311 312 EFX_POPULATE_QWORD_5(buffer->option, 313 ESF_DZ_TX_DESC_IS_OPT, 1, 314 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO, 315 ESF_DZ_TX_PIO_CONT, 0, 316 ESF_DZ_TX_PIO_BYTE_CNT, skb->len, 317 ESF_DZ_TX_PIO_BUF_ADDR, 318 tx_queue->piobuf_offset); 319 ++tx_queue->pio_packets; 320 ++tx_queue->insert_count; 321 return buffer; 322 } 323 #endif /* EFX_USE_PIO */ 324 325 /* 326 * Add a socket buffer to a TX queue 327 * 328 * This maps all fragments of a socket buffer for DMA and adds them to 329 * the TX queue. The queue's insert pointer will be incremented by 330 * the number of fragments in the socket buffer. 331 * 332 * If any DMA mapping fails, any mapped fragments will be unmapped, 333 * the queue's insert pointer will be restored to its original value. 334 * 335 * This function is split out from efx_hard_start_xmit to allow the 336 * loopback test to direct packets via specific TX queues. 337 * 338 * Returns NETDEV_TX_OK. 339 * You must hold netif_tx_lock() to call this function. 340 */ 341 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 342 { 343 struct efx_nic *efx = tx_queue->efx; 344 struct device *dma_dev = &efx->pci_dev->dev; 345 struct efx_tx_buffer *buffer; 346 skb_frag_t *fragment; 347 unsigned int len, unmap_len = 0; 348 dma_addr_t dma_addr, unmap_addr = 0; 349 unsigned int dma_len; 350 unsigned short dma_flags; 351 int i = 0; 352 353 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 354 355 if (skb_shinfo(skb)->gso_size) 356 return efx_enqueue_skb_tso(tx_queue, skb); 357 358 /* Get size of the initial fragment */ 359 len = skb_headlen(skb); 360 361 /* Pad if necessary */ 362 if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { 363 EFX_BUG_ON_PARANOID(skb->data_len); 364 len = 32 + 1; 365 if (skb_pad(skb, len - skb->len)) 366 return NETDEV_TX_OK; 367 } 368 369 /* Consider using PIO for short packets */ 370 #ifdef EFX_USE_PIO 371 if (skb->len <= efx_piobuf_size && tx_queue->piobuf && 372 efx_nic_tx_is_empty(tx_queue) && 373 efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) { 374 buffer = efx_enqueue_skb_pio(tx_queue, skb); 375 dma_flags = EFX_TX_BUF_OPTION; 376 goto finish_packet; 377 } 378 #endif 379 380 /* Map for DMA. Use dma_map_single rather than dma_map_page 381 * since this is more efficient on machines with sparse 382 * memory. 383 */ 384 dma_flags = EFX_TX_BUF_MAP_SINGLE; 385 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); 386 387 /* Process all fragments */ 388 while (1) { 389 if (unlikely(dma_mapping_error(dma_dev, dma_addr))) 390 goto dma_err; 391 392 /* Store fields for marking in the per-fragment final 393 * descriptor */ 394 unmap_len = len; 395 unmap_addr = dma_addr; 396 397 /* Add to TX queue, splitting across DMA boundaries */ 398 do { 399 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 400 401 dma_len = efx_max_tx_len(efx, dma_addr); 402 if (likely(dma_len >= len)) 403 dma_len = len; 404 405 /* Fill out per descriptor fields */ 406 buffer->len = dma_len; 407 buffer->dma_addr = dma_addr; 408 buffer->flags = EFX_TX_BUF_CONT; 409 len -= dma_len; 410 dma_addr += dma_len; 411 ++tx_queue->insert_count; 412 } while (len); 413 414 /* Transfer ownership of the unmapping to the final buffer */ 415 buffer->flags = EFX_TX_BUF_CONT | dma_flags; 416 buffer->unmap_len = unmap_len; 417 unmap_len = 0; 418 419 /* Get address and size of next fragment */ 420 if (i >= skb_shinfo(skb)->nr_frags) 421 break; 422 fragment = &skb_shinfo(skb)->frags[i]; 423 len = skb_frag_size(fragment); 424 i++; 425 /* Map for DMA */ 426 dma_flags = 0; 427 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, 428 DMA_TO_DEVICE); 429 } 430 431 /* Transfer ownership of the skb to the final buffer */ 432 finish_packet: 433 buffer->skb = skb; 434 buffer->flags = EFX_TX_BUF_SKB | dma_flags; 435 436 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 437 438 /* Pass off to hardware */ 439 efx_nic_push_buffers(tx_queue); 440 441 efx_tx_maybe_stop_queue(tx_queue); 442 443 return NETDEV_TX_OK; 444 445 dma_err: 446 netif_err(efx, tx_err, efx->net_dev, 447 " TX queue %d could not map skb with %d bytes %d " 448 "fragments for DMA\n", tx_queue->queue, skb->len, 449 skb_shinfo(skb)->nr_frags + 1); 450 451 /* Mark the packet as transmitted, and free the SKB ourselves */ 452 dev_kfree_skb_any(skb); 453 454 /* Work backwards until we hit the original insert pointer value */ 455 while (tx_queue->insert_count != tx_queue->write_count) { 456 unsigned int pkts_compl = 0, bytes_compl = 0; 457 --tx_queue->insert_count; 458 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 459 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 460 } 461 462 /* Free the fragment we were mid-way through pushing */ 463 if (unmap_len) { 464 if (dma_flags & EFX_TX_BUF_MAP_SINGLE) 465 dma_unmap_single(dma_dev, unmap_addr, unmap_len, 466 DMA_TO_DEVICE); 467 else 468 dma_unmap_page(dma_dev, unmap_addr, unmap_len, 469 DMA_TO_DEVICE); 470 } 471 472 return NETDEV_TX_OK; 473 } 474 475 /* Remove packets from the TX queue 476 * 477 * This removes packets from the TX queue, up to and including the 478 * specified index. 479 */ 480 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, 481 unsigned int index, 482 unsigned int *pkts_compl, 483 unsigned int *bytes_compl) 484 { 485 struct efx_nic *efx = tx_queue->efx; 486 unsigned int stop_index, read_ptr; 487 488 stop_index = (index + 1) & tx_queue->ptr_mask; 489 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; 490 491 while (read_ptr != stop_index) { 492 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 493 494 if (!(buffer->flags & EFX_TX_BUF_OPTION) && 495 unlikely(buffer->len == 0)) { 496 netif_err(efx, tx_err, efx->net_dev, 497 "TX queue %d spurious TX completion id %x\n", 498 tx_queue->queue, read_ptr); 499 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); 500 return; 501 } 502 503 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); 504 505 ++tx_queue->read_count; 506 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; 507 } 508 } 509 510 /* Initiate a packet transmission. We use one channel per CPU 511 * (sharing when we have more CPUs than channels). On Falcon, the TX 512 * completion events will be directed back to the CPU that transmitted 513 * the packet, which should be cache-efficient. 514 * 515 * Context: non-blocking. 516 * Note that returning anything other than NETDEV_TX_OK will cause the 517 * OS to free the skb. 518 */ 519 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, 520 struct net_device *net_dev) 521 { 522 struct efx_nic *efx = netdev_priv(net_dev); 523 struct efx_tx_queue *tx_queue; 524 unsigned index, type; 525 526 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); 527 528 /* PTP "event" packet */ 529 if (unlikely(efx_xmit_with_hwtstamp(skb)) && 530 unlikely(efx_ptp_is_ptp_tx(efx, skb))) { 531 return efx_ptp_tx(efx, skb); 532 } 533 534 index = skb_get_queue_mapping(skb); 535 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; 536 if (index >= efx->n_tx_channels) { 537 index -= efx->n_tx_channels; 538 type |= EFX_TXQ_TYPE_HIGHPRI; 539 } 540 tx_queue = efx_get_tx_queue(efx, index, type); 541 542 return efx_enqueue_skb(tx_queue, skb); 543 } 544 545 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) 546 { 547 struct efx_nic *efx = tx_queue->efx; 548 549 /* Must be inverse of queue lookup in efx_hard_start_xmit() */ 550 tx_queue->core_txq = 551 netdev_get_tx_queue(efx->net_dev, 552 tx_queue->queue / EFX_TXQ_TYPES + 553 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 554 efx->n_tx_channels : 0)); 555 } 556 557 int efx_setup_tc(struct net_device *net_dev, u8 num_tc) 558 { 559 struct efx_nic *efx = netdev_priv(net_dev); 560 struct efx_channel *channel; 561 struct efx_tx_queue *tx_queue; 562 unsigned tc; 563 int rc; 564 565 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) 566 return -EINVAL; 567 568 if (num_tc == net_dev->num_tc) 569 return 0; 570 571 for (tc = 0; tc < num_tc; tc++) { 572 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; 573 net_dev->tc_to_txq[tc].count = efx->n_tx_channels; 574 } 575 576 if (num_tc > net_dev->num_tc) { 577 /* Initialise high-priority queues as necessary */ 578 efx_for_each_channel(channel, efx) { 579 efx_for_each_possible_channel_tx_queue(tx_queue, 580 channel) { 581 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) 582 continue; 583 if (!tx_queue->buffer) { 584 rc = efx_probe_tx_queue(tx_queue); 585 if (rc) 586 return rc; 587 } 588 if (!tx_queue->initialised) 589 efx_init_tx_queue(tx_queue); 590 efx_init_tx_queue_core_txq(tx_queue); 591 } 592 } 593 } else { 594 /* Reduce number of classes before number of queues */ 595 net_dev->num_tc = num_tc; 596 } 597 598 rc = netif_set_real_num_tx_queues(net_dev, 599 max_t(int, num_tc, 1) * 600 efx->n_tx_channels); 601 if (rc) 602 return rc; 603 604 /* Do not destroy high-priority queues when they become 605 * unused. We would have to flush them first, and it is 606 * fairly difficult to flush a subset of TX queues. Leave 607 * it to efx_fini_channels(). 608 */ 609 610 net_dev->num_tc = num_tc; 611 return 0; 612 } 613 614 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 615 { 616 unsigned fill_level; 617 struct efx_nic *efx = tx_queue->efx; 618 struct efx_tx_queue *txq2; 619 unsigned int pkts_compl = 0, bytes_compl = 0; 620 621 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 622 623 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 624 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 625 626 if (pkts_compl > 1) 627 ++tx_queue->merge_events; 628 629 /* See if we need to restart the netif queue. This memory 630 * barrier ensures that we write read_count (inside 631 * efx_dequeue_buffers()) before reading the queue status. 632 */ 633 smp_mb(); 634 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 635 likely(efx->port_enabled) && 636 likely(netif_device_present(efx->net_dev))) { 637 txq2 = efx_tx_queue_partner(tx_queue); 638 fill_level = max(tx_queue->insert_count - tx_queue->read_count, 639 txq2->insert_count - txq2->read_count); 640 if (fill_level <= efx->txq_wake_thresh) 641 netif_tx_wake_queue(tx_queue->core_txq); 642 } 643 644 /* Check whether the hardware queue is now empty */ 645 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { 646 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); 647 if (tx_queue->read_count == tx_queue->old_write_count) { 648 smp_mb(); 649 tx_queue->empty_read_count = 650 tx_queue->read_count | EFX_EMPTY_COUNT_VALID; 651 } 652 } 653 } 654 655 /* Size of page-based TSO header buffers. Larger blocks must be 656 * allocated from the heap. 657 */ 658 #define TSOH_STD_SIZE 128 659 #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) 660 661 /* At most half the descriptors in the queue at any time will refer to 662 * a TSO header buffer, since they must always be followed by a 663 * payload descriptor referring to an skb. 664 */ 665 static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue) 666 { 667 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE); 668 } 669 670 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) 671 { 672 struct efx_nic *efx = tx_queue->efx; 673 unsigned int entries; 674 int rc; 675 676 /* Create the smallest power-of-two aligned ring */ 677 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); 678 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); 679 tx_queue->ptr_mask = entries - 1; 680 681 netif_dbg(efx, probe, efx->net_dev, 682 "creating TX queue %d size %#x mask %#x\n", 683 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); 684 685 /* Allocate software ring */ 686 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), 687 GFP_KERNEL); 688 if (!tx_queue->buffer) 689 return -ENOMEM; 690 691 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { 692 tx_queue->tsoh_page = 693 kcalloc(efx_tsoh_page_count(tx_queue), 694 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL); 695 if (!tx_queue->tsoh_page) { 696 rc = -ENOMEM; 697 goto fail1; 698 } 699 } 700 701 /* Allocate hardware ring */ 702 rc = efx_nic_probe_tx(tx_queue); 703 if (rc) 704 goto fail2; 705 706 return 0; 707 708 fail2: 709 kfree(tx_queue->tsoh_page); 710 tx_queue->tsoh_page = NULL; 711 fail1: 712 kfree(tx_queue->buffer); 713 tx_queue->buffer = NULL; 714 return rc; 715 } 716 717 void efx_init_tx_queue(struct efx_tx_queue *tx_queue) 718 { 719 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 720 "initialising TX queue %d\n", tx_queue->queue); 721 722 tx_queue->insert_count = 0; 723 tx_queue->write_count = 0; 724 tx_queue->old_write_count = 0; 725 tx_queue->read_count = 0; 726 tx_queue->old_read_count = 0; 727 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; 728 729 /* Set up TX descriptor ring */ 730 efx_nic_init_tx(tx_queue); 731 732 tx_queue->initialised = true; 733 } 734 735 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 736 { 737 struct efx_tx_buffer *buffer; 738 739 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 740 "shutting down TX queue %d\n", tx_queue->queue); 741 742 if (!tx_queue->buffer) 743 return; 744 745 /* Free any buffers left in the ring */ 746 while (tx_queue->read_count != tx_queue->write_count) { 747 unsigned int pkts_compl = 0, bytes_compl = 0; 748 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; 749 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 750 751 ++tx_queue->read_count; 752 } 753 netdev_tx_reset_queue(tx_queue->core_txq); 754 } 755 756 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 757 { 758 int i; 759 760 if (!tx_queue->buffer) 761 return; 762 763 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 764 "destroying TX queue %d\n", tx_queue->queue); 765 efx_nic_remove_tx(tx_queue); 766 767 if (tx_queue->tsoh_page) { 768 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++) 769 efx_nic_free_buffer(tx_queue->efx, 770 &tx_queue->tsoh_page[i]); 771 kfree(tx_queue->tsoh_page); 772 tx_queue->tsoh_page = NULL; 773 } 774 775 kfree(tx_queue->buffer); 776 tx_queue->buffer = NULL; 777 } 778 779 780 /* Efx TCP segmentation acceleration. 781 * 782 * Why? Because by doing it here in the driver we can go significantly 783 * faster than the GSO. 784 * 785 * Requires TX checksum offload support. 786 */ 787 788 /* Number of bytes inserted at the start of a TSO header buffer, 789 * similar to NET_IP_ALIGN. 790 */ 791 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 792 #define TSOH_OFFSET 0 793 #else 794 #define TSOH_OFFSET NET_IP_ALIGN 795 #endif 796 797 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 798 799 /** 800 * struct tso_state - TSO state for an SKB 801 * @out_len: Remaining length in current segment 802 * @seqnum: Current sequence number 803 * @ipv4_id: Current IPv4 ID, host endian 804 * @packet_space: Remaining space in current packet 805 * @dma_addr: DMA address of current position 806 * @in_len: Remaining length in current SKB fragment 807 * @unmap_len: Length of SKB fragment 808 * @unmap_addr: DMA address of SKB fragment 809 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0 810 * @protocol: Network protocol (after any VLAN header) 811 * @ip_off: Offset of IP header 812 * @tcp_off: Offset of TCP header 813 * @header_len: Number of bytes of header 814 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload 815 * @header_dma_addr: Header DMA address, when using option descriptors 816 * @header_unmap_len: Header DMA mapped length, or 0 if not using option 817 * descriptors 818 * 819 * The state used during segmentation. It is put into this data structure 820 * just to make it easy to pass into inline functions. 821 */ 822 struct tso_state { 823 /* Output position */ 824 unsigned out_len; 825 unsigned seqnum; 826 u16 ipv4_id; 827 unsigned packet_space; 828 829 /* Input position */ 830 dma_addr_t dma_addr; 831 unsigned in_len; 832 unsigned unmap_len; 833 dma_addr_t unmap_addr; 834 unsigned short dma_flags; 835 836 __be16 protocol; 837 unsigned int ip_off; 838 unsigned int tcp_off; 839 unsigned header_len; 840 unsigned int ip_base_len; 841 dma_addr_t header_dma_addr; 842 unsigned int header_unmap_len; 843 }; 844 845 846 /* 847 * Verify that our various assumptions about sk_buffs and the conditions 848 * under which TSO will be attempted hold true. Return the protocol number. 849 */ 850 static __be16 efx_tso_check_protocol(struct sk_buff *skb) 851 { 852 __be16 protocol = skb->protocol; 853 854 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != 855 protocol); 856 if (protocol == htons(ETH_P_8021Q)) { 857 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 858 protocol = veh->h_vlan_encapsulated_proto; 859 } 860 861 if (protocol == htons(ETH_P_IP)) { 862 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); 863 } else { 864 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); 865 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); 866 } 867 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) 868 + (tcp_hdr(skb)->doff << 2u)) > 869 skb_headlen(skb)); 870 871 return protocol; 872 } 873 874 static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, 875 struct efx_tx_buffer *buffer, unsigned int len) 876 { 877 u8 *result; 878 879 EFX_BUG_ON_PARANOID(buffer->len); 880 EFX_BUG_ON_PARANOID(buffer->flags); 881 EFX_BUG_ON_PARANOID(buffer->unmap_len); 882 883 if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) { 884 unsigned index = 885 (tx_queue->insert_count & tx_queue->ptr_mask) / 2; 886 struct efx_buffer *page_buf = 887 &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; 888 unsigned offset = 889 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; 890 891 if (unlikely(!page_buf->addr) && 892 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, 893 GFP_ATOMIC)) 894 return NULL; 895 896 result = (u8 *)page_buf->addr + offset; 897 buffer->dma_addr = page_buf->dma_addr + offset; 898 buffer->flags = EFX_TX_BUF_CONT; 899 } else { 900 tx_queue->tso_long_headers++; 901 902 buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC); 903 if (unlikely(!buffer->heap_buf)) 904 return NULL; 905 result = (u8 *)buffer->heap_buf + TSOH_OFFSET; 906 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; 907 } 908 909 buffer->len = len; 910 911 return result; 912 } 913 914 /** 915 * efx_tx_queue_insert - push descriptors onto the TX queue 916 * @tx_queue: Efx TX queue 917 * @dma_addr: DMA address of fragment 918 * @len: Length of fragment 919 * @final_buffer: The final buffer inserted into the queue 920 * 921 * Push descriptors onto the TX queue. 922 */ 923 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 924 dma_addr_t dma_addr, unsigned len, 925 struct efx_tx_buffer **final_buffer) 926 { 927 struct efx_tx_buffer *buffer; 928 struct efx_nic *efx = tx_queue->efx; 929 unsigned dma_len; 930 931 EFX_BUG_ON_PARANOID(len <= 0); 932 933 while (1) { 934 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 935 ++tx_queue->insert_count; 936 937 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 938 tx_queue->read_count >= 939 efx->txq_entries); 940 941 buffer->dma_addr = dma_addr; 942 943 dma_len = efx_max_tx_len(efx, dma_addr); 944 945 /* If there is enough space to send then do so */ 946 if (dma_len >= len) 947 break; 948 949 buffer->len = dma_len; 950 buffer->flags = EFX_TX_BUF_CONT; 951 dma_addr += dma_len; 952 len -= dma_len; 953 } 954 955 EFX_BUG_ON_PARANOID(!len); 956 buffer->len = len; 957 *final_buffer = buffer; 958 } 959 960 961 /* 962 * Put a TSO header into the TX queue. 963 * 964 * This is special-cased because we know that it is small enough to fit in 965 * a single fragment, and we know it doesn't cross a page boundary. It 966 * also allows us to not worry about end-of-packet etc. 967 */ 968 static int efx_tso_put_header(struct efx_tx_queue *tx_queue, 969 struct efx_tx_buffer *buffer, u8 *header) 970 { 971 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { 972 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, 973 header, buffer->len, 974 DMA_TO_DEVICE); 975 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, 976 buffer->dma_addr))) { 977 kfree(buffer->heap_buf); 978 buffer->len = 0; 979 buffer->flags = 0; 980 return -ENOMEM; 981 } 982 buffer->unmap_len = buffer->len; 983 buffer->flags |= EFX_TX_BUF_MAP_SINGLE; 984 } 985 986 ++tx_queue->insert_count; 987 return 0; 988 } 989 990 991 /* Remove buffers put into a tx_queue. None of the buffers must have 992 * an skb attached. 993 */ 994 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 995 { 996 struct efx_tx_buffer *buffer; 997 998 /* Work backwards until we hit the original insert pointer value */ 999 while (tx_queue->insert_count != tx_queue->write_count) { 1000 --tx_queue->insert_count; 1001 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 1002 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); 1003 } 1004 } 1005 1006 1007 /* Parse the SKB header and initialise state. */ 1008 static int tso_start(struct tso_state *st, struct efx_nic *efx, 1009 const struct sk_buff *skb) 1010 { 1011 bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; 1012 struct device *dma_dev = &efx->pci_dev->dev; 1013 unsigned int header_len, in_len; 1014 dma_addr_t dma_addr; 1015 1016 st->ip_off = skb_network_header(skb) - skb->data; 1017 st->tcp_off = skb_transport_header(skb) - skb->data; 1018 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); 1019 in_len = skb_headlen(skb) - header_len; 1020 st->header_len = header_len; 1021 st->in_len = in_len; 1022 if (st->protocol == htons(ETH_P_IP)) { 1023 st->ip_base_len = st->header_len - st->ip_off; 1024 st->ipv4_id = ntohs(ip_hdr(skb)->id); 1025 } else { 1026 st->ip_base_len = st->header_len - st->tcp_off; 1027 st->ipv4_id = 0; 1028 } 1029 st->seqnum = ntohl(tcp_hdr(skb)->seq); 1030 1031 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 1032 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 1033 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 1034 1035 st->out_len = skb->len - header_len; 1036 1037 if (!use_options) { 1038 st->header_unmap_len = 0; 1039 1040 if (likely(in_len == 0)) { 1041 st->dma_flags = 0; 1042 st->unmap_len = 0; 1043 return 0; 1044 } 1045 1046 dma_addr = dma_map_single(dma_dev, skb->data + header_len, 1047 in_len, DMA_TO_DEVICE); 1048 st->dma_flags = EFX_TX_BUF_MAP_SINGLE; 1049 st->dma_addr = dma_addr; 1050 st->unmap_addr = dma_addr; 1051 st->unmap_len = in_len; 1052 } else { 1053 dma_addr = dma_map_single(dma_dev, skb->data, 1054 skb_headlen(skb), DMA_TO_DEVICE); 1055 st->header_dma_addr = dma_addr; 1056 st->header_unmap_len = skb_headlen(skb); 1057 st->dma_flags = 0; 1058 st->dma_addr = dma_addr + header_len; 1059 st->unmap_len = 0; 1060 } 1061 1062 return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0; 1063 } 1064 1065 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 1066 skb_frag_t *frag) 1067 { 1068 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, 1069 skb_frag_size(frag), DMA_TO_DEVICE); 1070 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 1071 st->dma_flags = 0; 1072 st->unmap_len = skb_frag_size(frag); 1073 st->in_len = skb_frag_size(frag); 1074 st->dma_addr = st->unmap_addr; 1075 return 0; 1076 } 1077 return -ENOMEM; 1078 } 1079 1080 1081 /** 1082 * tso_fill_packet_with_fragment - form descriptors for the current fragment 1083 * @tx_queue: Efx TX queue 1084 * @skb: Socket buffer 1085 * @st: TSO state 1086 * 1087 * Form descriptors for the current fragment, until we reach the end 1088 * of fragment or end-of-packet. 1089 */ 1090 static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 1091 const struct sk_buff *skb, 1092 struct tso_state *st) 1093 { 1094 struct efx_tx_buffer *buffer; 1095 int n; 1096 1097 if (st->in_len == 0) 1098 return; 1099 if (st->packet_space == 0) 1100 return; 1101 1102 EFX_BUG_ON_PARANOID(st->in_len <= 0); 1103 EFX_BUG_ON_PARANOID(st->packet_space <= 0); 1104 1105 n = min(st->in_len, st->packet_space); 1106 1107 st->packet_space -= n; 1108 st->out_len -= n; 1109 st->in_len -= n; 1110 1111 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); 1112 1113 if (st->out_len == 0) { 1114 /* Transfer ownership of the skb */ 1115 buffer->skb = skb; 1116 buffer->flags = EFX_TX_BUF_SKB; 1117 } else if (st->packet_space != 0) { 1118 buffer->flags = EFX_TX_BUF_CONT; 1119 } 1120 1121 if (st->in_len == 0) { 1122 /* Transfer ownership of the DMA mapping */ 1123 buffer->unmap_len = st->unmap_len; 1124 buffer->flags |= st->dma_flags; 1125 st->unmap_len = 0; 1126 } 1127 1128 st->dma_addr += n; 1129 } 1130 1131 1132 /** 1133 * tso_start_new_packet - generate a new header and prepare for the new packet 1134 * @tx_queue: Efx TX queue 1135 * @skb: Socket buffer 1136 * @st: TSO state 1137 * 1138 * Generate a new header and prepare for the new packet. Return 0 on 1139 * success, or -%ENOMEM if failed to alloc header. 1140 */ 1141 static int tso_start_new_packet(struct efx_tx_queue *tx_queue, 1142 const struct sk_buff *skb, 1143 struct tso_state *st) 1144 { 1145 struct efx_tx_buffer *buffer = 1146 efx_tx_queue_get_insert_buffer(tx_queue); 1147 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; 1148 u8 tcp_flags_clear; 1149 1150 if (!is_last) { 1151 st->packet_space = skb_shinfo(skb)->gso_size; 1152 tcp_flags_clear = 0x09; /* mask out FIN and PSH */ 1153 } else { 1154 st->packet_space = st->out_len; 1155 tcp_flags_clear = 0x00; 1156 } 1157 1158 if (!st->header_unmap_len) { 1159 /* Allocate and insert a DMA-mapped header buffer. */ 1160 struct tcphdr *tsoh_th; 1161 unsigned ip_length; 1162 u8 *header; 1163 int rc; 1164 1165 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); 1166 if (!header) 1167 return -ENOMEM; 1168 1169 tsoh_th = (struct tcphdr *)(header + st->tcp_off); 1170 1171 /* Copy and update the headers. */ 1172 memcpy(header, skb->data, st->header_len); 1173 1174 tsoh_th->seq = htonl(st->seqnum); 1175 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear; 1176 1177 ip_length = st->ip_base_len + st->packet_space; 1178 1179 if (st->protocol == htons(ETH_P_IP)) { 1180 struct iphdr *tsoh_iph = 1181 (struct iphdr *)(header + st->ip_off); 1182 1183 tsoh_iph->tot_len = htons(ip_length); 1184 tsoh_iph->id = htons(st->ipv4_id); 1185 } else { 1186 struct ipv6hdr *tsoh_iph = 1187 (struct ipv6hdr *)(header + st->ip_off); 1188 1189 tsoh_iph->payload_len = htons(ip_length); 1190 } 1191 1192 rc = efx_tso_put_header(tx_queue, buffer, header); 1193 if (unlikely(rc)) 1194 return rc; 1195 } else { 1196 /* Send the original headers with a TSO option descriptor 1197 * in front 1198 */ 1199 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear; 1200 1201 buffer->flags = EFX_TX_BUF_OPTION; 1202 buffer->len = 0; 1203 buffer->unmap_len = 0; 1204 EFX_POPULATE_QWORD_5(buffer->option, 1205 ESF_DZ_TX_DESC_IS_OPT, 1, 1206 ESF_DZ_TX_OPTION_TYPE, 1207 ESE_DZ_TX_OPTION_DESC_TSO, 1208 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, 1209 ESF_DZ_TX_TSO_IP_ID, st->ipv4_id, 1210 ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum); 1211 ++tx_queue->insert_count; 1212 1213 /* We mapped the headers in tso_start(). Unmap them 1214 * when the last segment is completed. 1215 */ 1216 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 1217 buffer->dma_addr = st->header_dma_addr; 1218 buffer->len = st->header_len; 1219 if (is_last) { 1220 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE; 1221 buffer->unmap_len = st->header_unmap_len; 1222 /* Ensure we only unmap them once in case of a 1223 * later DMA mapping error and rollback 1224 */ 1225 st->header_unmap_len = 0; 1226 } else { 1227 buffer->flags = EFX_TX_BUF_CONT; 1228 buffer->unmap_len = 0; 1229 } 1230 ++tx_queue->insert_count; 1231 } 1232 1233 st->seqnum += skb_shinfo(skb)->gso_size; 1234 1235 /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 1236 ++st->ipv4_id; 1237 1238 ++tx_queue->tso_packets; 1239 1240 return 0; 1241 } 1242 1243 1244 /** 1245 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer 1246 * @tx_queue: Efx TX queue 1247 * @skb: Socket buffer 1248 * 1249 * Context: You must hold netif_tx_lock() to call this function. 1250 * 1251 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if 1252 * @skb was not enqueued. In all cases @skb is consumed. Return 1253 * %NETDEV_TX_OK. 1254 */ 1255 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 1256 struct sk_buff *skb) 1257 { 1258 struct efx_nic *efx = tx_queue->efx; 1259 int frag_i, rc; 1260 struct tso_state state; 1261 1262 /* Find the packet protocol and sanity-check it */ 1263 state.protocol = efx_tso_check_protocol(skb); 1264 1265 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 1266 1267 rc = tso_start(&state, efx, skb); 1268 if (rc) 1269 goto mem_err; 1270 1271 if (likely(state.in_len == 0)) { 1272 /* Grab the first payload fragment. */ 1273 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 1274 frag_i = 0; 1275 rc = tso_get_fragment(&state, efx, 1276 skb_shinfo(skb)->frags + frag_i); 1277 if (rc) 1278 goto mem_err; 1279 } else { 1280 /* Payload starts in the header area. */ 1281 frag_i = -1; 1282 } 1283 1284 if (tso_start_new_packet(tx_queue, skb, &state) < 0) 1285 goto mem_err; 1286 1287 while (1) { 1288 tso_fill_packet_with_fragment(tx_queue, skb, &state); 1289 1290 /* Move onto the next fragment? */ 1291 if (state.in_len == 0) { 1292 if (++frag_i >= skb_shinfo(skb)->nr_frags) 1293 /* End of payload reached. */ 1294 break; 1295 rc = tso_get_fragment(&state, efx, 1296 skb_shinfo(skb)->frags + frag_i); 1297 if (rc) 1298 goto mem_err; 1299 } 1300 1301 /* Start at new packet? */ 1302 if (state.packet_space == 0 && 1303 tso_start_new_packet(tx_queue, skb, &state) < 0) 1304 goto mem_err; 1305 } 1306 1307 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 1308 1309 /* Pass off to hardware */ 1310 efx_nic_push_buffers(tx_queue); 1311 1312 efx_tx_maybe_stop_queue(tx_queue); 1313 1314 tx_queue->tso_bursts++; 1315 return NETDEV_TX_OK; 1316 1317 mem_err: 1318 netif_err(efx, tx_err, efx->net_dev, 1319 "Out of memory for TSO headers, or DMA mapping error\n"); 1320 dev_kfree_skb_any(skb); 1321 1322 /* Free the DMA mapping we were in the process of writing out */ 1323 if (state.unmap_len) { 1324 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) 1325 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, 1326 state.unmap_len, DMA_TO_DEVICE); 1327 else 1328 dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr, 1329 state.unmap_len, DMA_TO_DEVICE); 1330 } 1331 1332 /* Free the header DMA mapping, if using option descriptors */ 1333 if (state.header_unmap_len) 1334 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, 1335 state.header_unmap_len, DMA_TO_DEVICE); 1336 1337 efx_enqueue_unwind(tx_queue); 1338 return NETDEV_TX_OK; 1339 } 1340