1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include <linux/bitfield.h> 5 #include <linux/net/intel/libie/rx.h> 6 #include <linux/prefetch.h> 7 8 #include "iavf.h" 9 #include "iavf_trace.h" 10 #include "iavf_prototype.h" 11 #include "iavf_ptp.h" 12 13 /** 14 * iavf_is_descriptor_done - tests DD bit in Rx descriptor 15 * @qw1: quad word 1 from descriptor to get Descriptor Done field from 16 * @flex: is the descriptor flex or legacy 17 * 18 * This function tests the descriptor done bit in specified descriptor. Because 19 * there are two types of descriptors (legacy and flex) the parameter rx_ring 20 * is used to distinguish. 21 * 22 * Return: true or false based on the state of DD bit in Rx descriptor. 23 */ 24 static bool iavf_is_descriptor_done(u64 qw1, bool flex) 25 { 26 if (flex) 27 return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1); 28 else 29 return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1); 30 } 31 32 static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, 33 u32 td_tag) 34 { 35 return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA | 36 ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | 37 ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) | 38 ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) | 39 ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT)); 40 } 41 42 #define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS) 43 44 /** 45 * iavf_unmap_and_free_tx_resource - Release a Tx buffer 46 * @ring: the ring that owns the buffer 47 * @tx_buffer: the buffer to free 48 **/ 49 static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring, 50 struct iavf_tx_buffer *tx_buffer) 51 { 52 if (tx_buffer->skb) { 53 if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB) 54 kfree(tx_buffer->raw_buf); 55 else 56 dev_kfree_skb_any(tx_buffer->skb); 57 if (dma_unmap_len(tx_buffer, len)) 58 dma_unmap_single(ring->dev, 59 dma_unmap_addr(tx_buffer, dma), 60 dma_unmap_len(tx_buffer, len), 61 DMA_TO_DEVICE); 62 } else if (dma_unmap_len(tx_buffer, len)) { 63 dma_unmap_page(ring->dev, 64 dma_unmap_addr(tx_buffer, dma), 65 dma_unmap_len(tx_buffer, len), 66 DMA_TO_DEVICE); 67 } 68 69 tx_buffer->next_to_watch = NULL; 70 tx_buffer->skb = NULL; 71 dma_unmap_len_set(tx_buffer, len, 0); 72 /* tx_buffer must be completely set up in the transmit path */ 73 } 74 75 /** 76 * iavf_clean_tx_ring - Free any empty Tx buffers 77 * @tx_ring: ring to be cleaned 78 **/ 79 static void iavf_clean_tx_ring(struct iavf_ring *tx_ring) 80 { 81 unsigned long bi_size; 82 u16 i; 83 84 /* ring already cleared, nothing to do */ 85 if (!tx_ring->tx_bi) 86 return; 87 88 /* Free all the Tx ring sk_buffs */ 89 for (i = 0; i < tx_ring->count; i++) 90 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); 91 92 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; 93 memset(tx_ring->tx_bi, 0, bi_size); 94 95 /* Zero out the descriptor ring */ 96 memset(tx_ring->desc, 0, tx_ring->size); 97 98 tx_ring->next_to_use = 0; 99 tx_ring->next_to_clean = 0; 100 101 if (!tx_ring->netdev) 102 return; 103 104 /* cleanup Tx queue statistics */ 105 netdev_tx_reset_queue(txring_txq(tx_ring)); 106 } 107 108 /** 109 * iavf_free_tx_resources - Free Tx resources per queue 110 * @tx_ring: Tx descriptor ring for a specific queue 111 * 112 * Free all transmit software resources 113 **/ 114 void iavf_free_tx_resources(struct iavf_ring *tx_ring) 115 { 116 iavf_clean_tx_ring(tx_ring); 117 kfree(tx_ring->tx_bi); 118 tx_ring->tx_bi = NULL; 119 120 if (tx_ring->desc) { 121 dma_free_coherent(tx_ring->dev, tx_ring->size, 122 tx_ring->desc, tx_ring->dma); 123 tx_ring->desc = NULL; 124 } 125 } 126 127 /** 128 * iavf_get_tx_pending - how many Tx descriptors not processed 129 * @ring: the ring of descriptors 130 * @in_sw: is tx_pending being checked in SW or HW 131 * 132 * Since there is no access to the ring head register 133 * in XL710, we need to use our local copies 134 **/ 135 static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) 136 { 137 u32 head, tail; 138 139 /* underlying hardware might not allow access and/or always return 140 * 0 for the head/tail registers so just use the cached values 141 */ 142 head = ring->next_to_clean; 143 tail = ring->next_to_use; 144 145 if (head != tail) 146 return (head < tail) ? 147 tail - head : (tail + ring->count - head); 148 149 return 0; 150 } 151 152 /** 153 * iavf_force_wb - Issue SW Interrupt so HW does a wb 154 * @vsi: the VSI we care about 155 * @q_vector: the vector on which to force writeback 156 **/ 157 static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector) 158 { 159 u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 160 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ 161 IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | 162 IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK 163 /* allow 00 to be written to the index */; 164 165 wr32(&vsi->back->hw, 166 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), 167 val); 168 } 169 170 /** 171 * iavf_detect_recover_hung - Function to detect and recover hung_queues 172 * @vsi: pointer to vsi struct with tx queues 173 * 174 * VSI has netdev and netdev has TX queues. This function is to check each of 175 * those TX queues if they are hung, trigger recovery by issuing SW interrupt. 176 **/ 177 void iavf_detect_recover_hung(struct iavf_vsi *vsi) 178 { 179 struct iavf_ring *tx_ring = NULL; 180 struct net_device *netdev; 181 unsigned int i; 182 int packets; 183 184 if (!vsi) 185 return; 186 187 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) 188 return; 189 190 netdev = vsi->netdev; 191 if (!netdev) 192 return; 193 194 if (!netif_carrier_ok(netdev)) 195 return; 196 197 for (i = 0; i < vsi->back->num_active_queues; i++) { 198 tx_ring = &vsi->back->tx_rings[i]; 199 if (tx_ring && tx_ring->desc) { 200 /* If packet counter has not changed the queue is 201 * likely stalled, so force an interrupt for this 202 * queue. 203 * 204 * prev_pkt_ctr would be negative if there was no 205 * pending work. 206 */ 207 packets = tx_ring->stats.packets & INT_MAX; 208 if (tx_ring->prev_pkt_ctr == packets) { 209 iavf_force_wb(vsi, tx_ring->q_vector); 210 continue; 211 } 212 213 /* Memory barrier between read of packet count and call 214 * to iavf_get_tx_pending() 215 */ 216 smp_rmb(); 217 tx_ring->prev_pkt_ctr = 218 iavf_get_tx_pending(tx_ring, true) ? packets : -1; 219 } 220 } 221 } 222 223 #define WB_STRIDE 4 224 225 /** 226 * iavf_clean_tx_irq - Reclaim resources after transmit completes 227 * @vsi: the VSI we care about 228 * @tx_ring: Tx ring to clean 229 * @napi_budget: Used to determine if we are in netpoll 230 * 231 * Returns true if there's any budget left (e.g. the clean is finished) 232 **/ 233 static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, 234 struct iavf_ring *tx_ring, int napi_budget) 235 { 236 int i = tx_ring->next_to_clean; 237 struct iavf_tx_buffer *tx_buf; 238 struct iavf_tx_desc *tx_desc; 239 unsigned int total_bytes = 0, total_packets = 0; 240 unsigned int budget = IAVF_DEFAULT_IRQ_WORK; 241 242 tx_buf = &tx_ring->tx_bi[i]; 243 tx_desc = IAVF_TX_DESC(tx_ring, i); 244 i -= tx_ring->count; 245 246 do { 247 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch; 248 249 /* if next_to_watch is not set then there is no work pending */ 250 if (!eop_desc) 251 break; 252 253 /* prevent any other reads prior to eop_desc */ 254 smp_rmb(); 255 256 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); 257 /* if the descriptor isn't done, no work yet to do */ 258 if (!(eop_desc->cmd_type_offset_bsz & 259 cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE))) 260 break; 261 262 /* clear next_to_watch to prevent false hangs */ 263 tx_buf->next_to_watch = NULL; 264 265 /* update the statistics for this packet */ 266 total_bytes += tx_buf->bytecount; 267 total_packets += tx_buf->gso_segs; 268 269 /* free the skb */ 270 napi_consume_skb(tx_buf->skb, napi_budget); 271 272 /* unmap skb header data */ 273 dma_unmap_single(tx_ring->dev, 274 dma_unmap_addr(tx_buf, dma), 275 dma_unmap_len(tx_buf, len), 276 DMA_TO_DEVICE); 277 278 /* clear tx_buffer data */ 279 tx_buf->skb = NULL; 280 dma_unmap_len_set(tx_buf, len, 0); 281 282 /* unmap remaining buffers */ 283 while (tx_desc != eop_desc) { 284 iavf_trace(clean_tx_irq_unmap, 285 tx_ring, tx_desc, tx_buf); 286 287 tx_buf++; 288 tx_desc++; 289 i++; 290 if (unlikely(!i)) { 291 i -= tx_ring->count; 292 tx_buf = tx_ring->tx_bi; 293 tx_desc = IAVF_TX_DESC(tx_ring, 0); 294 } 295 296 /* unmap any remaining paged data */ 297 if (dma_unmap_len(tx_buf, len)) { 298 dma_unmap_page(tx_ring->dev, 299 dma_unmap_addr(tx_buf, dma), 300 dma_unmap_len(tx_buf, len), 301 DMA_TO_DEVICE); 302 dma_unmap_len_set(tx_buf, len, 0); 303 } 304 } 305 306 /* move us one more past the eop_desc for start of next pkt */ 307 tx_buf++; 308 tx_desc++; 309 i++; 310 if (unlikely(!i)) { 311 i -= tx_ring->count; 312 tx_buf = tx_ring->tx_bi; 313 tx_desc = IAVF_TX_DESC(tx_ring, 0); 314 } 315 316 prefetch(tx_desc); 317 318 /* update budget accounting */ 319 budget--; 320 } while (likely(budget)); 321 322 i += tx_ring->count; 323 tx_ring->next_to_clean = i; 324 u64_stats_update_begin(&tx_ring->syncp); 325 tx_ring->stats.bytes += total_bytes; 326 tx_ring->stats.packets += total_packets; 327 u64_stats_update_end(&tx_ring->syncp); 328 tx_ring->q_vector->tx.total_bytes += total_bytes; 329 tx_ring->q_vector->tx.total_packets += total_packets; 330 331 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) { 332 /* check to see if there are < 4 descriptors 333 * waiting to be written back, then kick the hardware to force 334 * them to be written back in case we stay in NAPI. 335 * In this mode on X722 we do not enable Interrupt. 336 */ 337 unsigned int j = iavf_get_tx_pending(tx_ring, false); 338 339 if (budget && 340 ((j / WB_STRIDE) == 0) && (j > 0) && 341 !test_bit(__IAVF_VSI_DOWN, vsi->state) && 342 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) 343 tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB; 344 } 345 346 /* notify netdev of completed buffers */ 347 netdev_tx_completed_queue(txring_txq(tx_ring), 348 total_packets, total_bytes); 349 350 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) 351 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 352 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 353 /* Make sure that anybody stopping the queue after this 354 * sees the new next_to_clean. 355 */ 356 smp_mb(); 357 if (__netif_subqueue_stopped(tx_ring->netdev, 358 tx_ring->queue_index) && 359 !test_bit(__IAVF_VSI_DOWN, vsi->state)) { 360 netif_wake_subqueue(tx_ring->netdev, 361 tx_ring->queue_index); 362 ++tx_ring->tx_stats.restart_queue; 363 } 364 } 365 366 return !!budget; 367 } 368 369 /** 370 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled 371 * @vsi: the VSI we care about 372 * @q_vector: the vector on which to enable writeback 373 * 374 **/ 375 static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi, 376 struct iavf_q_vector *q_vector) 377 { 378 u16 flags = q_vector->tx.ring[0].flags; 379 u32 val; 380 381 if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR)) 382 return; 383 384 if (q_vector->arm_wb_state) 385 return; 386 387 val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | 388 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ 389 390 wr32(&vsi->back->hw, 391 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); 392 q_vector->arm_wb_state = true; 393 } 394 395 static bool iavf_container_is_rx(struct iavf_q_vector *q_vector, 396 struct iavf_ring_container *rc) 397 { 398 return &q_vector->rx == rc; 399 } 400 401 #define IAVF_AIM_MULTIPLIER_100G 2560 402 #define IAVF_AIM_MULTIPLIER_50G 1280 403 #define IAVF_AIM_MULTIPLIER_40G 1024 404 #define IAVF_AIM_MULTIPLIER_20G 512 405 #define IAVF_AIM_MULTIPLIER_10G 256 406 #define IAVF_AIM_MULTIPLIER_1G 32 407 408 static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps) 409 { 410 switch (speed_mbps) { 411 case SPEED_100000: 412 return IAVF_AIM_MULTIPLIER_100G; 413 case SPEED_50000: 414 return IAVF_AIM_MULTIPLIER_50G; 415 case SPEED_40000: 416 return IAVF_AIM_MULTIPLIER_40G; 417 case SPEED_25000: 418 case SPEED_20000: 419 return IAVF_AIM_MULTIPLIER_20G; 420 case SPEED_10000: 421 default: 422 return IAVF_AIM_MULTIPLIER_10G; 423 case SPEED_1000: 424 case SPEED_100: 425 return IAVF_AIM_MULTIPLIER_1G; 426 } 427 } 428 429 static unsigned int 430 iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl) 431 { 432 switch (speed_virtchnl) { 433 case VIRTCHNL_LINK_SPEED_40GB: 434 return IAVF_AIM_MULTIPLIER_40G; 435 case VIRTCHNL_LINK_SPEED_25GB: 436 case VIRTCHNL_LINK_SPEED_20GB: 437 return IAVF_AIM_MULTIPLIER_20G; 438 case VIRTCHNL_LINK_SPEED_10GB: 439 default: 440 return IAVF_AIM_MULTIPLIER_10G; 441 case VIRTCHNL_LINK_SPEED_1GB: 442 case VIRTCHNL_LINK_SPEED_100MB: 443 return IAVF_AIM_MULTIPLIER_1G; 444 } 445 } 446 447 static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter) 448 { 449 if (ADV_LINK_SUPPORT(adapter)) 450 return IAVF_ITR_ADAPTIVE_MIN_INC * 451 iavf_mbps_itr_multiplier(adapter->link_speed_mbps); 452 else 453 return IAVF_ITR_ADAPTIVE_MIN_INC * 454 iavf_virtchnl_itr_multiplier(adapter->link_speed); 455 } 456 457 /** 458 * iavf_update_itr - update the dynamic ITR value based on statistics 459 * @q_vector: structure containing interrupt and ring information 460 * @rc: structure containing ring performance data 461 * 462 * Stores a new ITR value based on packets and byte 463 * counts during the last interrupt. The advantage of per interrupt 464 * computation is faster updates and more accurate ITR for the current 465 * traffic pattern. Constants in this function were computed 466 * based on theoretical maximum wire speed and thresholds were set based 467 * on testing data as well as attempting to minimize response time 468 * while increasing bulk throughput. 469 **/ 470 static void iavf_update_itr(struct iavf_q_vector *q_vector, 471 struct iavf_ring_container *rc) 472 { 473 unsigned int avg_wire_size, packets, bytes, itr; 474 unsigned long next_update = jiffies; 475 476 /* If we don't have any rings just leave ourselves set for maximum 477 * possible latency so we take ourselves out of the equation. 478 */ 479 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) 480 return; 481 482 /* For Rx we want to push the delay up and default to low latency. 483 * for Tx we want to pull the delay down and default to high latency. 484 */ 485 itr = iavf_container_is_rx(q_vector, rc) ? 486 IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY : 487 IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY; 488 489 /* If we didn't update within up to 1 - 2 jiffies we can assume 490 * that either packets are coming in so slow there hasn't been 491 * any work, or that there is so much work that NAPI is dealing 492 * with interrupt moderation and we don't need to do anything. 493 */ 494 if (time_after(next_update, rc->next_update)) 495 goto clear_counts; 496 497 /* If itr_countdown is set it means we programmed an ITR within 498 * the last 4 interrupt cycles. This has a side effect of us 499 * potentially firing an early interrupt. In order to work around 500 * this we need to throw out any data received for a few 501 * interrupts following the update. 502 */ 503 if (q_vector->itr_countdown) { 504 itr = rc->target_itr; 505 goto clear_counts; 506 } 507 508 packets = rc->total_packets; 509 bytes = rc->total_bytes; 510 511 if (iavf_container_is_rx(q_vector, rc)) { 512 /* If Rx there are 1 to 4 packets and bytes are less than 513 * 9000 assume insufficient data to use bulk rate limiting 514 * approach unless Tx is already in bulk rate limiting. We 515 * are likely latency driven. 516 */ 517 if (packets && packets < 4 && bytes < 9000 && 518 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) { 519 itr = IAVF_ITR_ADAPTIVE_LATENCY; 520 goto adjust_by_size; 521 } 522 } else if (packets < 4) { 523 /* If we have Tx and Rx ITR maxed and Tx ITR is running in 524 * bulk mode and we are receiving 4 or fewer packets just 525 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 526 * that the Rx can relax. 527 */ 528 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS && 529 (q_vector->rx.target_itr & IAVF_ITR_MASK) == 530 IAVF_ITR_ADAPTIVE_MAX_USECS) 531 goto clear_counts; 532 } else if (packets > 32) { 533 /* If we have processed over 32 packets in a single interrupt 534 * for Tx assume we need to switch over to "bulk" mode. 535 */ 536 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY; 537 } 538 539 /* We have no packets to actually measure against. This means 540 * either one of the other queues on this vector is active or 541 * we are a Tx queue doing TSO with too high of an interrupt rate. 542 * 543 * Between 4 and 56 we can assume that our current interrupt delay 544 * is only slightly too low. As such we should increase it by a small 545 * fixed amount. 546 */ 547 if (packets < 56) { 548 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC; 549 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { 550 itr &= IAVF_ITR_ADAPTIVE_LATENCY; 551 itr += IAVF_ITR_ADAPTIVE_MAX_USECS; 552 } 553 goto clear_counts; 554 } 555 556 if (packets <= 256) { 557 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 558 itr &= IAVF_ITR_MASK; 559 560 /* Between 56 and 112 is our "goldilocks" zone where we are 561 * working out "just right". Just report that our current 562 * ITR is good for us. 563 */ 564 if (packets <= 112) 565 goto clear_counts; 566 567 /* If packet count is 128 or greater we are likely looking 568 * at a slight overrun of the delay we want. Try halving 569 * our delay to see if that will cut the number of packets 570 * in half per interrupt. 571 */ 572 itr /= 2; 573 itr &= IAVF_ITR_MASK; 574 if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS) 575 itr = IAVF_ITR_ADAPTIVE_MIN_USECS; 576 577 goto clear_counts; 578 } 579 580 /* The paths below assume we are dealing with a bulk ITR since 581 * number of packets is greater than 256. We are just going to have 582 * to compute a value and try to bring the count under control, 583 * though for smaller packet sizes there isn't much we can do as 584 * NAPI polling will likely be kicking in sooner rather than later. 585 */ 586 itr = IAVF_ITR_ADAPTIVE_BULK; 587 588 adjust_by_size: 589 /* If packet counts are 256 or greater we can assume we have a gross 590 * overestimation of what the rate should be. Instead of trying to fine 591 * tune it just use the formula below to try and dial in an exact value 592 * give the current packet size of the frame. 593 */ 594 avg_wire_size = bytes / packets; 595 596 /* The following is a crude approximation of: 597 * wmem_default / (size + overhead) = desired_pkts_per_int 598 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 599 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 600 * 601 * Assuming wmem_default is 212992 and overhead is 640 bytes per 602 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 603 * formula down to 604 * 605 * (170 * (size + 24)) / (size + 640) = ITR 606 * 607 * We first do some math on the packet size and then finally bitshift 608 * by 8 after rounding up. We also have to account for PCIe link speed 609 * difference as ITR scales based on this. 610 */ 611 if (avg_wire_size <= 60) { 612 /* Start at 250k ints/sec */ 613 avg_wire_size = 4096; 614 } else if (avg_wire_size <= 380) { 615 /* 250K ints/sec to 60K ints/sec */ 616 avg_wire_size *= 40; 617 avg_wire_size += 1696; 618 } else if (avg_wire_size <= 1084) { 619 /* 60K ints/sec to 36K ints/sec */ 620 avg_wire_size *= 15; 621 avg_wire_size += 11452; 622 } else if (avg_wire_size <= 1980) { 623 /* 36K ints/sec to 30K ints/sec */ 624 avg_wire_size *= 5; 625 avg_wire_size += 22420; 626 } else { 627 /* plateau at a limit of 30K ints/sec */ 628 avg_wire_size = 32256; 629 } 630 631 /* If we are in low latency mode halve our delay which doubles the 632 * rate to somewhere between 100K to 16K ints/sec 633 */ 634 if (itr & IAVF_ITR_ADAPTIVE_LATENCY) 635 avg_wire_size /= 2; 636 637 /* Resultant value is 256 times larger than it needs to be. This 638 * gives us room to adjust the value as needed to either increase 639 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. 640 * 641 * Use addition as we have already recorded the new latency flag 642 * for the ITR value. 643 */ 644 itr += DIV_ROUND_UP(avg_wire_size, 645 iavf_itr_divisor(q_vector->adapter)) * 646 IAVF_ITR_ADAPTIVE_MIN_INC; 647 648 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { 649 itr &= IAVF_ITR_ADAPTIVE_LATENCY; 650 itr += IAVF_ITR_ADAPTIVE_MAX_USECS; 651 } 652 653 clear_counts: 654 /* write back value */ 655 rc->target_itr = itr; 656 657 /* next update should occur within next jiffy */ 658 rc->next_update = next_update + 1; 659 660 rc->total_bytes = 0; 661 rc->total_packets = 0; 662 } 663 664 /** 665 * iavf_setup_tx_descriptors - Allocate the Tx descriptors 666 * @tx_ring: the tx ring to set up 667 * 668 * Return 0 on success, negative on error 669 **/ 670 int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) 671 { 672 struct device *dev = tx_ring->dev; 673 int bi_size; 674 675 if (!dev) 676 return -ENOMEM; 677 678 /* warn if we are about to overwrite the pointer */ 679 WARN_ON(tx_ring->tx_bi); 680 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; 681 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); 682 if (!tx_ring->tx_bi) 683 goto err; 684 685 /* round up to nearest 4K */ 686 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc); 687 tx_ring->size = ALIGN(tx_ring->size, 4096); 688 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 689 &tx_ring->dma, GFP_KERNEL); 690 if (!tx_ring->desc) { 691 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 692 tx_ring->size); 693 goto err; 694 } 695 696 tx_ring->next_to_use = 0; 697 tx_ring->next_to_clean = 0; 698 tx_ring->prev_pkt_ctr = -1; 699 return 0; 700 701 err: 702 kfree(tx_ring->tx_bi); 703 tx_ring->tx_bi = NULL; 704 return -ENOMEM; 705 } 706 707 /** 708 * iavf_clean_rx_ring - Free Rx buffers 709 * @rx_ring: ring to be cleaned 710 **/ 711 static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) 712 { 713 /* ring already cleared, nothing to do */ 714 if (!rx_ring->rx_fqes) 715 return; 716 717 if (rx_ring->skb) { 718 dev_kfree_skb(rx_ring->skb); 719 rx_ring->skb = NULL; 720 } 721 722 /* Free all the Rx ring buffers */ 723 for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { 724 const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i]; 725 726 page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false); 727 728 if (unlikely(++i == rx_ring->count)) 729 i = 0; 730 } 731 732 rx_ring->next_to_clean = 0; 733 rx_ring->next_to_use = 0; 734 } 735 736 /** 737 * iavf_free_rx_resources - Free Rx resources 738 * @rx_ring: ring to clean the resources from 739 * 740 * Free all receive software resources 741 **/ 742 void iavf_free_rx_resources(struct iavf_ring *rx_ring) 743 { 744 struct libeth_fq fq = { 745 .fqes = rx_ring->rx_fqes, 746 .pp = rx_ring->pp, 747 }; 748 749 iavf_clean_rx_ring(rx_ring); 750 751 if (rx_ring->desc) { 752 dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size, 753 rx_ring->desc, rx_ring->dma); 754 rx_ring->desc = NULL; 755 } 756 757 libeth_rx_fq_destroy(&fq); 758 rx_ring->rx_fqes = NULL; 759 rx_ring->pp = NULL; 760 } 761 762 /** 763 * iavf_setup_rx_descriptors - Allocate Rx descriptors 764 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 765 * 766 * Returns 0 on success, negative on failure 767 **/ 768 int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) 769 { 770 struct libeth_fq fq = { 771 .count = rx_ring->count, 772 .buf_len = LIBIE_MAX_RX_BUF_LEN, 773 .nid = NUMA_NO_NODE, 774 }; 775 int ret; 776 777 ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi); 778 if (ret) 779 return ret; 780 781 rx_ring->pp = fq.pp; 782 rx_ring->rx_fqes = fq.fqes; 783 rx_ring->truesize = fq.truesize; 784 rx_ring->rx_buf_len = fq.buf_len; 785 786 u64_stats_init(&rx_ring->syncp); 787 788 /* Round up to nearest 4K */ 789 rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc); 790 rx_ring->size = ALIGN(rx_ring->size, 4096); 791 rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size, 792 &rx_ring->dma, GFP_KERNEL); 793 794 if (!rx_ring->desc) { 795 dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 796 rx_ring->size); 797 goto err; 798 } 799 800 rx_ring->next_to_clean = 0; 801 rx_ring->next_to_use = 0; 802 803 return 0; 804 805 err: 806 libeth_rx_fq_destroy(&fq); 807 rx_ring->rx_fqes = NULL; 808 rx_ring->pp = NULL; 809 810 return -ENOMEM; 811 } 812 813 /** 814 * iavf_release_rx_desc - Store the new tail and head values 815 * @rx_ring: ring to bump 816 * @val: new head index 817 **/ 818 static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) 819 { 820 rx_ring->next_to_use = val; 821 822 /* Force memory writes to complete before letting h/w 823 * know there are new descriptors to fetch. (Only 824 * applicable for weak-ordered memory model archs, 825 * such as IA-64). 826 */ 827 wmb(); 828 writel(val, rx_ring->tail); 829 } 830 831 /** 832 * iavf_receive_skb - Send a completed packet up the stack 833 * @rx_ring: rx ring in play 834 * @skb: packet to send up 835 * @vlan_tag: vlan tag for packet 836 **/ 837 static void iavf_receive_skb(struct iavf_ring *rx_ring, 838 struct sk_buff *skb, u16 vlan_tag) 839 { 840 struct iavf_q_vector *q_vector = rx_ring->q_vector; 841 842 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 843 (vlan_tag & VLAN_VID_MASK)) 844 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 845 else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && 846 vlan_tag & VLAN_VID_MASK) 847 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); 848 849 napi_gro_receive(&q_vector->napi, skb); 850 } 851 852 /** 853 * iavf_alloc_rx_buffers - Replace used receive buffers 854 * @rx_ring: ring to place buffers on 855 * @cleaned_count: number of buffers to replace 856 * 857 * Returns false if all allocations were successful, true if any fail 858 **/ 859 bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) 860 { 861 const struct libeth_fq_fp fq = { 862 .pp = rx_ring->pp, 863 .fqes = rx_ring->rx_fqes, 864 .truesize = rx_ring->truesize, 865 .count = rx_ring->count, 866 }; 867 u16 ntu = rx_ring->next_to_use; 868 struct iavf_rx_desc *rx_desc; 869 870 /* do nothing if no valid netdev defined */ 871 if (!rx_ring->netdev || !cleaned_count) 872 return false; 873 874 rx_desc = IAVF_RX_DESC(rx_ring, ntu); 875 876 do { 877 dma_addr_t addr; 878 879 addr = libeth_rx_alloc(&fq, ntu); 880 if (addr == DMA_MAPPING_ERROR) 881 goto no_buffers; 882 883 /* Refresh the desc even if buffer_addrs didn't change 884 * because each write-back erases this info. 885 */ 886 rx_desc->qw0 = cpu_to_le64(addr); 887 888 rx_desc++; 889 ntu++; 890 if (unlikely(ntu == rx_ring->count)) { 891 rx_desc = IAVF_RX_DESC(rx_ring, 0); 892 ntu = 0; 893 } 894 895 /* clear the status bits for the next_to_use descriptor */ 896 rx_desc->qw1 = 0; 897 898 cleaned_count--; 899 } while (cleaned_count); 900 901 if (rx_ring->next_to_use != ntu) 902 iavf_release_rx_desc(rx_ring, ntu); 903 904 return false; 905 906 no_buffers: 907 if (rx_ring->next_to_use != ntu) 908 iavf_release_rx_desc(rx_ring, ntu); 909 910 rx_ring->rx_stats.alloc_page_failed++; 911 912 /* make sure to come back via polling to try again after 913 * allocation failure 914 */ 915 return true; 916 } 917 918 /** 919 * iavf_rx_csum - Indicate in skb if hw indicated a good checksum 920 * @vsi: the VSI we care about 921 * @skb: skb currently being received and modified 922 * @decoded_pt: decoded ptype information 923 * @csum_bits: decoded Rx descriptor information 924 **/ 925 static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb, 926 struct libeth_rx_pt decoded_pt, 927 struct libeth_rx_csum csum_bits) 928 { 929 bool ipv4, ipv6; 930 931 skb->ip_summed = CHECKSUM_NONE; 932 933 /* did the hardware decode the packet and checksum? */ 934 if (unlikely(!csum_bits.l3l4p)) 935 return; 936 937 ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4; 938 ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6; 939 940 if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) 941 goto checksum_fail; 942 943 /* likely incorrect csum if alternate IP extension headers found */ 944 if (unlikely(ipv6 && csum_bits.ipv6exadd)) 945 return; 946 947 /* there was some L4 error, count error and punt packet to the stack */ 948 if (unlikely(csum_bits.l4e)) 949 goto checksum_fail; 950 951 /* handle packets that were not able to be checksummed due 952 * to arrival speed, in this case the stack can compute 953 * the csum. 954 */ 955 if (unlikely(csum_bits.pprs)) 956 return; 957 958 skb->ip_summed = CHECKSUM_UNNECESSARY; 959 return; 960 961 checksum_fail: 962 vsi->back->hw_csum_rx_error++; 963 } 964 965 /** 966 * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum 967 * @vsi: the VSI we care about 968 * @qw1: quad word 1 969 * @decoded_pt: decoded packet type 970 * 971 * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte 972 * descriptor writeback format. 973 * 974 * Return: decoded checksum bits. 975 **/ 976 static struct libeth_rx_csum 977 iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1, 978 const struct libeth_rx_pt decoded_pt) 979 { 980 struct libeth_rx_csum csum_bits = {}; 981 982 if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt)) 983 return csum_bits; 984 985 csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1); 986 csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1); 987 csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1); 988 csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1); 989 csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1); 990 csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1); 991 992 return csum_bits; 993 } 994 995 /** 996 * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum 997 * @vsi: the VSI we care about 998 * @qw1: quad word 1 999 * @decoded_pt: decoded packet type 1000 * 1001 * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible 1002 * descriptor writeback format. 1003 * 1004 * Return: decoded checksum bits. 1005 **/ 1006 static struct libeth_rx_csum 1007 iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1, 1008 const struct libeth_rx_pt decoded_pt) 1009 { 1010 struct libeth_rx_csum csum_bits = {}; 1011 1012 if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt)) 1013 return csum_bits; 1014 1015 csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1); 1016 csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1); 1017 csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1); 1018 csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1); 1019 csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1); 1020 csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1); 1021 csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1); 1022 1023 return csum_bits; 1024 } 1025 1026 /** 1027 * iavf_legacy_rx_hash - set the hash value in the skb 1028 * @ring: descriptor ring 1029 * @qw0: quad word 0 1030 * @qw1: quad word 1 1031 * @skb: skb currently being received and modified 1032 * @decoded_pt: decoded packet type 1033 * 1034 * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte 1035 * descriptor writeback format. 1036 **/ 1037 static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0, 1038 __le64 qw1, struct sk_buff *skb, 1039 const struct libeth_rx_pt decoded_pt) 1040 { 1041 const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M); 1042 u32 hash; 1043 1044 if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt)) 1045 return; 1046 1047 if ((qw1 & rss_mask) == rss_mask) { 1048 hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M); 1049 libeth_rx_pt_set_hash(skb, hash, decoded_pt); 1050 } 1051 } 1052 1053 /** 1054 * iavf_flex_rx_hash - set the hash value in the skb 1055 * @ring: descriptor ring 1056 * @qw1: quad word 1 1057 * @skb: skb currently being received and modified 1058 * @decoded_pt: decoded packet type 1059 * 1060 * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible 1061 * descriptor writeback format. 1062 **/ 1063 static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1, 1064 struct sk_buff *skb, 1065 const struct libeth_rx_pt decoded_pt) 1066 { 1067 bool rss_valid; 1068 u32 hash; 1069 1070 if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt)) 1071 return; 1072 1073 rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M); 1074 if (rss_valid) { 1075 hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M); 1076 libeth_rx_pt_set_hash(skb, hash, decoded_pt); 1077 } 1078 } 1079 1080 /** 1081 * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor 1082 * @rx_ring: descriptor ring 1083 * @qw2: quad word 2 of descriptor 1084 * @qw3: quad word 3 of descriptor 1085 * @skb: skb currently being received 1086 * 1087 * Read the Rx timestamp value from the descriptor and pass it to the stack. 1088 * 1089 * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible 1090 * descriptor writeback format. 1091 */ 1092 static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2, 1093 __le64 qw3, struct sk_buff *skb) 1094 { 1095 u32 tstamp; 1096 u64 ns; 1097 1098 /* Skip processing if timestamps aren't enabled */ 1099 if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP)) 1100 return; 1101 1102 /* Check if this Rx descriptor has a valid timestamp */ 1103 if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID)) 1104 return; 1105 1106 /* the ts_low field only contains the valid bit and sub-nanosecond 1107 * precision, so we don't need to extract it. 1108 */ 1109 tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M); 1110 1111 ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time, 1112 tstamp); 1113 1114 *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) { 1115 .hwtstamp = ns_to_ktime(ns), 1116 }; 1117 } 1118 1119 /** 1120 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor 1121 * @rx_ring: rx descriptor ring packet is being transacted on 1122 * @rx_desc: pointer to the EOP Rx descriptor 1123 * @skb: pointer to current skb being populated 1124 * @ptype: the packet type decoded by hardware 1125 * @flex: is the descriptor flex or legacy 1126 * 1127 * This function checks the ring, descriptor, and packet information in 1128 * order to populate the hash, checksum, VLAN, protocol, and 1129 * other fields within the skb. 1130 **/ 1131 static void iavf_process_skb_fields(const struct iavf_ring *rx_ring, 1132 const struct iavf_rx_desc *rx_desc, 1133 struct sk_buff *skb, u32 ptype, 1134 bool flex) 1135 { 1136 struct libeth_rx_csum csum_bits; 1137 struct libeth_rx_pt decoded_pt; 1138 __le64 qw0 = rx_desc->qw0; 1139 __le64 qw1 = rx_desc->qw1; 1140 __le64 qw2 = rx_desc->qw2; 1141 __le64 qw3 = rx_desc->qw3; 1142 1143 decoded_pt = libie_rx_pt_parse(ptype); 1144 1145 if (flex) { 1146 iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt); 1147 iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb); 1148 csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1), 1149 decoded_pt); 1150 } else { 1151 iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt); 1152 csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1), 1153 decoded_pt); 1154 } 1155 iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits); 1156 1157 skb_record_rx_queue(skb, rx_ring->queue_index); 1158 1159 /* modifies the skb - consumes the enet header */ 1160 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1161 } 1162 1163 /** 1164 * iavf_cleanup_headers - Correct empty headers 1165 * @rx_ring: rx descriptor ring packet is being transacted on 1166 * @skb: pointer to current skb being fixed 1167 * 1168 * Also address the case where we are pulling data in on pages only 1169 * and as such no data is present in the skb header. 1170 * 1171 * In addition if skb is not at least 60 bytes we need to pad it so that 1172 * it is large enough to qualify as a valid Ethernet frame. 1173 * 1174 * Returns true if an error was encountered and skb was freed. 1175 **/ 1176 static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) 1177 { 1178 /* if eth_skb_pad returns an error the skb was freed */ 1179 if (eth_skb_pad(skb)) 1180 return true; 1181 1182 return false; 1183 } 1184 1185 /** 1186 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff 1187 * @skb: sk_buff to place the data into 1188 * @rx_buffer: buffer containing page to add 1189 * @size: packet length from rx_desc 1190 * 1191 * This function will add the data contained in rx_buffer->page to the skb. 1192 * It will just attach the page as a frag to the skb. 1193 * 1194 * The function will then update the page offset. 1195 **/ 1196 static void iavf_add_rx_frag(struct sk_buff *skb, 1197 const struct libeth_fqe *rx_buffer, 1198 unsigned int size) 1199 { 1200 u32 hr = rx_buffer->page->pp->p.offset; 1201 1202 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1203 rx_buffer->offset + hr, size, rx_buffer->truesize); 1204 } 1205 1206 /** 1207 * iavf_build_skb - Build skb around an existing buffer 1208 * @rx_buffer: Rx buffer to pull data from 1209 * @size: size of buffer to add to skb 1210 * 1211 * This function builds an skb around an existing Rx buffer, taking care 1212 * to set up the skb correctly and avoid any memcpy overhead. 1213 */ 1214 static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer, 1215 unsigned int size) 1216 { 1217 u32 hr = rx_buffer->page->pp->p.offset; 1218 struct sk_buff *skb; 1219 void *va; 1220 1221 /* prefetch first cache line of first page */ 1222 va = page_address(rx_buffer->page) + rx_buffer->offset; 1223 net_prefetch(va + hr); 1224 1225 /* build an skb around the page buffer */ 1226 skb = napi_build_skb(va, rx_buffer->truesize); 1227 if (unlikely(!skb)) 1228 return NULL; 1229 1230 skb_mark_for_recycle(skb); 1231 1232 /* update pointers within the skb to store the data */ 1233 skb_reserve(skb, hr); 1234 __skb_put(skb, size); 1235 1236 return skb; 1237 } 1238 1239 /** 1240 * iavf_is_non_eop - process handling of non-EOP buffers 1241 * @rx_ring: Rx ring being processed 1242 * @fields: Rx descriptor extracted fields 1243 * 1244 * This function updates next to clean. If the buffer is an EOP buffer 1245 * this function exits returning false, otherwise it will place the 1246 * sk_buff in the next buffer to be chained and return true indicating 1247 * that this is in fact a non-EOP buffer. 1248 **/ 1249 static bool iavf_is_non_eop(struct iavf_ring *rx_ring, 1250 struct libeth_rqe_info fields) 1251 { 1252 u32 ntc = rx_ring->next_to_clean + 1; 1253 1254 /* fetch, update, and store next to clean */ 1255 ntc = (ntc < rx_ring->count) ? ntc : 0; 1256 rx_ring->next_to_clean = ntc; 1257 1258 prefetch(IAVF_RX_DESC(rx_ring, ntc)); 1259 1260 /* if we are the last buffer then there is nothing else to do */ 1261 if (likely(fields.eop)) 1262 return false; 1263 1264 rx_ring->rx_stats.non_eop_descs++; 1265 1266 return true; 1267 } 1268 1269 /** 1270 * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor 1271 * @rx_ring: rx descriptor ring 1272 * @rx_desc: the descriptor to process 1273 * 1274 * Decode the Rx descriptor and extract relevant information including the 1275 * size, VLAN tag, Rx packet type, end of packet field and RXE field value. 1276 * 1277 * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte 1278 * descriptor writeback format. 1279 * 1280 * Return: fields extracted from the Rx descriptor. 1281 */ 1282 static struct libeth_rqe_info 1283 iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring, 1284 const struct iavf_rx_desc *rx_desc) 1285 { 1286 u64 qw0 = le64_to_cpu(rx_desc->qw0); 1287 u64 qw1 = le64_to_cpu(rx_desc->qw1); 1288 u64 qw2 = le64_to_cpu(rx_desc->qw2); 1289 struct libeth_rqe_info fields; 1290 bool l2tag1p, l2tag2p; 1291 1292 fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1); 1293 fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1); 1294 1295 if (!fields.eop) 1296 return fields; 1297 1298 fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1); 1299 fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1); 1300 fields.vlan = 0; 1301 1302 if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { 1303 l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1); 1304 if (l2tag1p) 1305 fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0); 1306 } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { 1307 l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2); 1308 if (l2tag2p) 1309 fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2); 1310 } 1311 1312 return fields; 1313 } 1314 1315 /** 1316 * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor 1317 * @rx_ring: rx descriptor ring 1318 * @rx_desc: the descriptor to process 1319 * 1320 * Decode the Rx descriptor and extract relevant information including the 1321 * size, VLAN tag, Rx packet type, end of packet field and RXE field value. 1322 * 1323 * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible 1324 * descriptor writeback format. 1325 * 1326 * Return: fields extracted from the Rx descriptor. 1327 */ 1328 static struct libeth_rqe_info 1329 iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring, 1330 const struct iavf_rx_desc *rx_desc) 1331 { 1332 struct libeth_rqe_info fields = {}; 1333 u64 qw0 = le64_to_cpu(rx_desc->qw0); 1334 u64 qw1 = le64_to_cpu(rx_desc->qw1); 1335 u64 qw2 = le64_to_cpu(rx_desc->qw2); 1336 bool l2tag1p, l2tag2p; 1337 1338 fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1); 1339 fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0); 1340 1341 if (!fields.eop) 1342 return fields; 1343 1344 fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1); 1345 fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0); 1346 fields.vlan = 0; 1347 1348 if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { 1349 l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1); 1350 if (l2tag1p) 1351 fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1); 1352 } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { 1353 l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2); 1354 if (l2tag2p) 1355 fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2); 1356 } 1357 1358 return fields; 1359 } 1360 1361 static struct libeth_rqe_info 1362 iavf_extract_rx_fields(const struct iavf_ring *rx_ring, 1363 const struct iavf_rx_desc *rx_desc, 1364 bool flex) 1365 { 1366 if (flex) 1367 return iavf_extract_flex_rx_fields(rx_ring, rx_desc); 1368 else 1369 return iavf_extract_legacy_rx_fields(rx_ring, rx_desc); 1370 } 1371 1372 /** 1373 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1374 * @rx_ring: rx descriptor ring to transact packets on 1375 * @budget: Total limit on number of packets to process 1376 * 1377 * This function provides a "bounce buffer" approach to Rx interrupt 1378 * processing. The advantage to this is that on systems that have 1379 * expensive overhead for IOMMU access this provides a means of avoiding 1380 * it by maintaining the mapping of the page to the system. 1381 * 1382 * Returns amount of work completed 1383 **/ 1384 static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) 1385 { 1386 bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC; 1387 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1388 struct sk_buff *skb = rx_ring->skb; 1389 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); 1390 bool failure = false; 1391 1392 while (likely(total_rx_packets < (unsigned int)budget)) { 1393 struct libeth_rqe_info fields; 1394 struct libeth_fqe *rx_buffer; 1395 struct iavf_rx_desc *rx_desc; 1396 u64 qw1; 1397 1398 /* return some buffers to hardware, one at a time is too slow */ 1399 if (cleaned_count >= IAVF_RX_BUFFER_WRITE) { 1400 failure = failure || 1401 iavf_alloc_rx_buffers(rx_ring, cleaned_count); 1402 cleaned_count = 0; 1403 } 1404 1405 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); 1406 1407 /* This memory barrier is needed to keep us from reading 1408 * any other fields out of the rx_desc until we have 1409 * verified the descriptor has been written back. 1410 */ 1411 dma_rmb(); 1412 1413 qw1 = le64_to_cpu(rx_desc->qw1); 1414 /* If DD field (descriptor done) is unset then other fields are 1415 * not valid 1416 */ 1417 if (!iavf_is_descriptor_done(qw1, flex)) 1418 break; 1419 1420 fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex); 1421 1422 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); 1423 1424 rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean]; 1425 if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len)) 1426 goto skip_data; 1427 1428 /* retrieve a buffer from the ring */ 1429 if (skb) 1430 iavf_add_rx_frag(skb, rx_buffer, fields.len); 1431 else 1432 skb = iavf_build_skb(rx_buffer, fields.len); 1433 1434 /* exit if we failed to retrieve a buffer */ 1435 if (!skb) { 1436 rx_ring->rx_stats.alloc_buff_failed++; 1437 break; 1438 } 1439 1440 skip_data: 1441 cleaned_count++; 1442 1443 if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb)) 1444 continue; 1445 1446 /* RXE field in descriptor is an indication of the MAC errors 1447 * (like CRC, alignment, oversize etc). If it is set then iavf 1448 * should finish. 1449 */ 1450 if (unlikely(fields.rxe)) { 1451 dev_kfree_skb_any(skb); 1452 skb = NULL; 1453 continue; 1454 } 1455 1456 if (iavf_cleanup_headers(rx_ring, skb)) { 1457 skb = NULL; 1458 continue; 1459 } 1460 1461 /* probably a little skewed due to removing CRC */ 1462 total_rx_bytes += skb->len; 1463 1464 /* populate checksum, VLAN, and protocol */ 1465 iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex); 1466 1467 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); 1468 iavf_receive_skb(rx_ring, skb, fields.vlan); 1469 skb = NULL; 1470 1471 /* update budget accounting */ 1472 total_rx_packets++; 1473 } 1474 1475 rx_ring->skb = skb; 1476 1477 u64_stats_update_begin(&rx_ring->syncp); 1478 rx_ring->stats.packets += total_rx_packets; 1479 rx_ring->stats.bytes += total_rx_bytes; 1480 u64_stats_update_end(&rx_ring->syncp); 1481 rx_ring->q_vector->rx.total_packets += total_rx_packets; 1482 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 1483 1484 /* guarantee a trip back through this routine if there was a failure */ 1485 return failure ? budget : (int)total_rx_packets; 1486 } 1487 1488 static inline u32 iavf_buildreg_itr(const int type, u16 itr) 1489 { 1490 u32 val; 1491 1492 /* We don't bother with setting the CLEARPBA bit as the data sheet 1493 * points out doing so is "meaningless since it was already 1494 * auto-cleared". The auto-clearing happens when the interrupt is 1495 * asserted. 1496 * 1497 * Hardware errata 28 for also indicates that writing to a 1498 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear 1499 * an event in the PBA anyway so we need to rely on the automask 1500 * to hold pending events for us until the interrupt is re-enabled 1501 * 1502 * The itr value is reported in microseconds, and the register 1503 * value is recorded in 2 microsecond units. For this reason we 1504 * only need to shift by the interval shift - 1 instead of the 1505 * full value. 1506 */ 1507 itr &= IAVF_ITR_MASK; 1508 1509 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 1510 (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | 1511 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); 1512 1513 return val; 1514 } 1515 1516 /* a small macro to shorten up some long lines */ 1517 #define INTREG IAVF_VFINT_DYN_CTLN1 1518 1519 /* The act of updating the ITR will cause it to immediately trigger. In order 1520 * to prevent this from throwing off adaptive update statistics we defer the 1521 * update so that it can only happen so often. So after either Tx or Rx are 1522 * updated we make the adaptive scheme wait until either the ITR completely 1523 * expires via the next_update expiration or we have been through at least 1524 * 3 interrupts. 1525 */ 1526 #define ITR_COUNTDOWN_START 3 1527 1528 /** 1529 * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt 1530 * @vsi: the VSI we care about 1531 * @q_vector: q_vector for which itr is being updated and interrupt enabled 1532 * 1533 **/ 1534 static void iavf_update_enable_itr(struct iavf_vsi *vsi, 1535 struct iavf_q_vector *q_vector) 1536 { 1537 struct iavf_hw *hw = &vsi->back->hw; 1538 u32 intval; 1539 1540 /* These will do nothing if dynamic updates are not enabled */ 1541 iavf_update_itr(q_vector, &q_vector->tx); 1542 iavf_update_itr(q_vector, &q_vector->rx); 1543 1544 /* This block of logic allows us to get away with only updating 1545 * one ITR value with each interrupt. The idea is to perform a 1546 * pseudo-lazy update with the following criteria. 1547 * 1548 * 1. Rx is given higher priority than Tx if both are in same state 1549 * 2. If we must reduce an ITR that is given highest priority. 1550 * 3. We then give priority to increasing ITR based on amount. 1551 */ 1552 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { 1553 /* Rx ITR needs to be reduced, this is highest priority */ 1554 intval = iavf_buildreg_itr(IAVF_RX_ITR, 1555 q_vector->rx.target_itr); 1556 q_vector->rx.current_itr = q_vector->rx.target_itr; 1557 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1558 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || 1559 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < 1560 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { 1561 /* Tx ITR needs to be reduced, this is second priority 1562 * Tx ITR needs to be increased more than Rx, fourth priority 1563 */ 1564 intval = iavf_buildreg_itr(IAVF_TX_ITR, 1565 q_vector->tx.target_itr); 1566 q_vector->tx.current_itr = q_vector->tx.target_itr; 1567 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1568 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { 1569 /* Rx ITR needs to be increased, third priority */ 1570 intval = iavf_buildreg_itr(IAVF_RX_ITR, 1571 q_vector->rx.target_itr); 1572 q_vector->rx.current_itr = q_vector->rx.target_itr; 1573 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1574 } else { 1575 /* No ITR update, lowest priority */ 1576 intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0); 1577 if (q_vector->itr_countdown) 1578 q_vector->itr_countdown--; 1579 } 1580 1581 if (!test_bit(__IAVF_VSI_DOWN, vsi->state)) 1582 wr32(hw, INTREG(q_vector->reg_idx), intval); 1583 } 1584 1585 /** 1586 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine 1587 * @napi: napi struct with our devices info in it 1588 * @budget: amount of work driver is allowed to do this pass, in packets 1589 * 1590 * This function will clean all queues associated with a q_vector. 1591 * 1592 * Returns the amount of work done 1593 **/ 1594 int iavf_napi_poll(struct napi_struct *napi, int budget) 1595 { 1596 struct iavf_q_vector *q_vector = 1597 container_of(napi, struct iavf_q_vector, napi); 1598 struct iavf_vsi *vsi = q_vector->vsi; 1599 struct iavf_ring *ring; 1600 bool clean_complete = true; 1601 bool arm_wb = false; 1602 int budget_per_ring; 1603 int work_done = 0; 1604 1605 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) { 1606 napi_complete(napi); 1607 return 0; 1608 } 1609 1610 /* Since the actual Tx work is minimal, we can give the Tx a larger 1611 * budget and be more aggressive about cleaning up the Tx descriptors. 1612 */ 1613 iavf_for_each_ring(ring, q_vector->tx) { 1614 if (!iavf_clean_tx_irq(vsi, ring, budget)) { 1615 clean_complete = false; 1616 continue; 1617 } 1618 arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB); 1619 ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB; 1620 } 1621 1622 /* Handle case where we are called by netpoll with a budget of 0 */ 1623 if (budget <= 0) 1624 goto tx_only; 1625 1626 /* We attempt to distribute budget to each Rx queue fairly, but don't 1627 * allow the budget to go below 1 because that would exit polling early. 1628 */ 1629 budget_per_ring = max(budget/q_vector->num_ringpairs, 1); 1630 1631 iavf_for_each_ring(ring, q_vector->rx) { 1632 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring); 1633 1634 work_done += cleaned; 1635 /* if we clean as many as budgeted, we must not be done */ 1636 if (cleaned >= budget_per_ring) 1637 clean_complete = false; 1638 } 1639 1640 /* If work not completed, return budget and polling will return */ 1641 if (!clean_complete) { 1642 int cpu_id = smp_processor_id(); 1643 1644 /* It is possible that the interrupt affinity has changed but, 1645 * if the cpu is pegged at 100%, polling will never exit while 1646 * traffic continues and the interrupt will be stuck on this 1647 * cpu. We check to make sure affinity is correct before we 1648 * continue to poll, otherwise we must stop polling so the 1649 * interrupt can move to the correct cpu. 1650 */ 1651 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { 1652 /* Tell napi that we are done polling */ 1653 napi_complete_done(napi, work_done); 1654 1655 /* Force an interrupt */ 1656 iavf_force_wb(vsi, q_vector); 1657 1658 /* Return budget-1 so that polling stops */ 1659 return budget - 1; 1660 } 1661 tx_only: 1662 if (arm_wb) { 1663 q_vector->tx.ring[0].tx_stats.tx_force_wb++; 1664 iavf_enable_wb_on_itr(vsi, q_vector); 1665 } 1666 return budget; 1667 } 1668 1669 if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR) 1670 q_vector->arm_wb_state = false; 1671 1672 /* Exit the polling mode, but don't re-enable interrupts if stack might 1673 * poll us due to busy-polling 1674 */ 1675 if (likely(napi_complete_done(napi, work_done))) 1676 iavf_update_enable_itr(vsi, q_vector); 1677 1678 return min_t(int, work_done, budget - 1); 1679 } 1680 1681 /** 1682 * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW 1683 * @skb: send buffer 1684 * @tx_ring: ring to send buffer on 1685 * @flags: the tx flags to be set 1686 * 1687 * Checks the skb and set up correspondingly several generic transmit flags 1688 * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 1689 * 1690 * Returns error code indicate the frame should be dropped upon error and the 1691 * otherwise returns 0 to indicate the flags has been set properly. 1692 **/ 1693 static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb, 1694 struct iavf_ring *tx_ring, u32 *flags) 1695 { 1696 u32 tx_flags = 0; 1697 1698 1699 /* stack will only request hardware VLAN insertion offload for protocols 1700 * that the driver supports and has enabled 1701 */ 1702 if (!skb_vlan_tag_present(skb)) 1703 return; 1704 1705 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; 1706 if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) { 1707 tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN; 1708 } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { 1709 tx_flags |= IAVF_TX_FLAGS_HW_VLAN; 1710 } else { 1711 dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n"); 1712 return; 1713 } 1714 1715 *flags = tx_flags; 1716 } 1717 1718 /** 1719 * iavf_tso - set up the tso context descriptor 1720 * @first: pointer to first Tx buffer for xmit 1721 * @hdr_len: ptr to the size of the packet header 1722 * @cd_type_cmd_tso_mss: Quad Word 1 1723 * 1724 * Returns 0 if no TSO can happen, 1 if tso is going, or error 1725 **/ 1726 static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, 1727 u64 *cd_type_cmd_tso_mss) 1728 { 1729 struct sk_buff *skb = first->skb; 1730 u64 cd_cmd, cd_tso_len, cd_mss; 1731 union { 1732 struct iphdr *v4; 1733 struct ipv6hdr *v6; 1734 unsigned char *hdr; 1735 } ip; 1736 union { 1737 struct tcphdr *tcp; 1738 struct udphdr *udp; 1739 unsigned char *hdr; 1740 } l4; 1741 u32 paylen, l4_offset; 1742 u16 gso_segs, gso_size; 1743 int err; 1744 1745 if (skb->ip_summed != CHECKSUM_PARTIAL) 1746 return 0; 1747 1748 if (!skb_is_gso(skb)) 1749 return 0; 1750 1751 err = skb_cow_head(skb, 0); 1752 if (err < 0) 1753 return err; 1754 1755 ip.hdr = skb_network_header(skb); 1756 l4.hdr = skb_transport_header(skb); 1757 1758 /* initialize outer IP header fields */ 1759 if (ip.v4->version == 4) { 1760 ip.v4->tot_len = 0; 1761 ip.v4->check = 0; 1762 } else { 1763 ip.v6->payload_len = 0; 1764 } 1765 1766 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1767 SKB_GSO_GRE_CSUM | 1768 SKB_GSO_IPXIP4 | 1769 SKB_GSO_IPXIP6 | 1770 SKB_GSO_UDP_TUNNEL | 1771 SKB_GSO_UDP_TUNNEL_CSUM)) { 1772 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 1773 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 1774 l4.udp->len = 0; 1775 1776 /* determine offset of outer transport header */ 1777 l4_offset = l4.hdr - skb->data; 1778 1779 /* remove payload length from outer checksum */ 1780 paylen = skb->len - l4_offset; 1781 csum_replace_by_diff(&l4.udp->check, 1782 (__force __wsum)htonl(paylen)); 1783 } 1784 1785 /* reset pointers to inner headers */ 1786 ip.hdr = skb_inner_network_header(skb); 1787 l4.hdr = skb_inner_transport_header(skb); 1788 1789 /* initialize inner IP header fields */ 1790 if (ip.v4->version == 4) { 1791 ip.v4->tot_len = 0; 1792 ip.v4->check = 0; 1793 } else { 1794 ip.v6->payload_len = 0; 1795 } 1796 } 1797 1798 /* determine offset of inner transport header */ 1799 l4_offset = l4.hdr - skb->data; 1800 /* remove payload length from inner checksum */ 1801 paylen = skb->len - l4_offset; 1802 1803 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 1804 csum_replace_by_diff(&l4.udp->check, 1805 (__force __wsum)htonl(paylen)); 1806 /* compute length of UDP segmentation header */ 1807 *hdr_len = (u8)sizeof(l4.udp) + l4_offset; 1808 } else { 1809 csum_replace_by_diff(&l4.tcp->check, 1810 (__force __wsum)htonl(paylen)); 1811 /* compute length of TCP segmentation header */ 1812 *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset); 1813 } 1814 1815 /* pull values out of skb_shinfo */ 1816 gso_size = skb_shinfo(skb)->gso_size; 1817 gso_segs = skb_shinfo(skb)->gso_segs; 1818 1819 /* update GSO size and bytecount with header size */ 1820 first->gso_segs = gso_segs; 1821 first->bytecount += (first->gso_segs - 1) * *hdr_len; 1822 1823 /* find the field values */ 1824 cd_cmd = IAVF_TX_CTX_DESC_TSO; 1825 cd_tso_len = skb->len - *hdr_len; 1826 cd_mss = gso_size; 1827 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | 1828 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | 1829 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT); 1830 return 1; 1831 } 1832 1833 /** 1834 * iavf_tx_enable_csum - Enable Tx checksum offloads 1835 * @skb: send buffer 1836 * @tx_flags: pointer to Tx flags currently set 1837 * @td_cmd: Tx descriptor command bits to set 1838 * @td_offset: Tx descriptor header offsets to set 1839 * @tx_ring: Tx descriptor ring 1840 * @cd_tunneling: ptr to context desc bits 1841 **/ 1842 static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, 1843 u32 *td_cmd, u32 *td_offset, 1844 struct iavf_ring *tx_ring, 1845 u32 *cd_tunneling) 1846 { 1847 union { 1848 struct iphdr *v4; 1849 struct ipv6hdr *v6; 1850 unsigned char *hdr; 1851 } ip; 1852 union { 1853 struct tcphdr *tcp; 1854 struct udphdr *udp; 1855 unsigned char *hdr; 1856 } l4; 1857 unsigned char *exthdr; 1858 u32 offset, cmd = 0; 1859 __be16 frag_off; 1860 u8 l4_proto = 0; 1861 1862 if (skb->ip_summed != CHECKSUM_PARTIAL) 1863 return 0; 1864 1865 ip.hdr = skb_network_header(skb); 1866 l4.hdr = skb_transport_header(skb); 1867 1868 /* compute outer L2 header size */ 1869 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; 1870 1871 if (skb->encapsulation) { 1872 u32 tunnel = 0; 1873 /* define outer network header type */ 1874 if (*tx_flags & IAVF_TX_FLAGS_IPV4) { 1875 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? 1876 IAVF_TX_CTX_EXT_IP_IPV4 : 1877 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1878 1879 l4_proto = ip.v4->protocol; 1880 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { 1881 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6; 1882 1883 exthdr = ip.hdr + sizeof(*ip.v6); 1884 l4_proto = ip.v6->nexthdr; 1885 if (l4.hdr != exthdr) 1886 ipv6_skip_exthdr(skb, exthdr - skb->data, 1887 &l4_proto, &frag_off); 1888 } 1889 1890 /* define outer transport */ 1891 switch (l4_proto) { 1892 case IPPROTO_UDP: 1893 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING; 1894 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; 1895 break; 1896 case IPPROTO_GRE: 1897 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING; 1898 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; 1899 break; 1900 case IPPROTO_IPIP: 1901 case IPPROTO_IPV6: 1902 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; 1903 l4.hdr = skb_inner_network_header(skb); 1904 break; 1905 default: 1906 if (*tx_flags & IAVF_TX_FLAGS_TSO) 1907 return -1; 1908 1909 skb_checksum_help(skb); 1910 return 0; 1911 } 1912 1913 /* compute outer L3 header size */ 1914 tunnel |= ((l4.hdr - ip.hdr) / 4) << 1915 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT; 1916 1917 /* switch IP header pointer from outer to inner header */ 1918 ip.hdr = skb_inner_network_header(skb); 1919 1920 /* compute tunnel header size */ 1921 tunnel |= ((ip.hdr - l4.hdr) / 2) << 1922 IAVF_TXD_CTX_QW0_NATLEN_SHIFT; 1923 1924 /* indicate if we need to offload outer UDP header */ 1925 if ((*tx_flags & IAVF_TX_FLAGS_TSO) && 1926 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 1927 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1928 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; 1929 1930 /* record tunnel offload values */ 1931 *cd_tunneling |= tunnel; 1932 1933 /* switch L4 header pointer from outer to inner */ 1934 l4.hdr = skb_inner_transport_header(skb); 1935 l4_proto = 0; 1936 1937 /* reset type as we transition from outer to inner headers */ 1938 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6); 1939 if (ip.v4->version == 4) 1940 *tx_flags |= IAVF_TX_FLAGS_IPV4; 1941 if (ip.v6->version == 6) 1942 *tx_flags |= IAVF_TX_FLAGS_IPV6; 1943 } 1944 1945 /* Enable IP checksum offloads */ 1946 if (*tx_flags & IAVF_TX_FLAGS_IPV4) { 1947 l4_proto = ip.v4->protocol; 1948 /* the stack computes the IP header already, the only time we 1949 * need the hardware to recompute it is in the case of TSO. 1950 */ 1951 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? 1952 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM : 1953 IAVF_TX_DESC_CMD_IIPT_IPV4; 1954 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { 1955 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; 1956 1957 exthdr = ip.hdr + sizeof(*ip.v6); 1958 l4_proto = ip.v6->nexthdr; 1959 if (l4.hdr != exthdr) 1960 ipv6_skip_exthdr(skb, exthdr - skb->data, 1961 &l4_proto, &frag_off); 1962 } 1963 1964 /* compute inner L3 header size */ 1965 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; 1966 1967 /* Enable L4 checksum offloads */ 1968 switch (l4_proto) { 1969 case IPPROTO_TCP: 1970 /* enable checksum offloads */ 1971 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; 1972 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 1973 break; 1974 case IPPROTO_SCTP: 1975 /* enable SCTP checksum offload */ 1976 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; 1977 offset |= (sizeof(struct sctphdr) >> 2) << 1978 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 1979 break; 1980 case IPPROTO_UDP: 1981 /* enable UDP checksum offload */ 1982 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; 1983 offset |= (sizeof(struct udphdr) >> 2) << 1984 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 1985 break; 1986 default: 1987 if (*tx_flags & IAVF_TX_FLAGS_TSO) 1988 return -1; 1989 skb_checksum_help(skb); 1990 return 0; 1991 } 1992 1993 *td_cmd |= cmd; 1994 *td_offset |= offset; 1995 1996 return 1; 1997 } 1998 1999 /** 2000 * iavf_create_tx_ctx - Build the Tx context descriptor 2001 * @tx_ring: ring to create the descriptor on 2002 * @cd_type_cmd_tso_mss: Quad Word 1 2003 * @cd_tunneling: Quad Word 0 - bits 0-31 2004 * @cd_l2tag2: Quad Word 0 - bits 32-63 2005 **/ 2006 static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, 2007 const u64 cd_type_cmd_tso_mss, 2008 const u32 cd_tunneling, const u32 cd_l2tag2) 2009 { 2010 struct iavf_tx_context_desc *context_desc; 2011 int i = tx_ring->next_to_use; 2012 2013 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) && 2014 !cd_tunneling && !cd_l2tag2) 2015 return; 2016 2017 /* grab the next descriptor */ 2018 context_desc = IAVF_TX_CTXTDESC(tx_ring, i); 2019 2020 i++; 2021 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2022 2023 /* cpu_to_le32 and assign to struct fields */ 2024 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); 2025 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); 2026 context_desc->rsvd = cpu_to_le16(0); 2027 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 2028 } 2029 2030 /** 2031 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet 2032 * @skb: send buffer 2033 * 2034 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 2035 * and so we need to figure out the cases where we need to linearize the skb. 2036 * 2037 * For TSO we need to count the TSO header and segment payload separately. 2038 * As such we need to check cases where we have 7 fragments or more as we 2039 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 2040 * the segment payload in the first descriptor, and another 7 for the 2041 * fragments. 2042 **/ 2043 bool __iavf_chk_linearize(struct sk_buff *skb) 2044 { 2045 const skb_frag_t *frag, *stale; 2046 int nr_frags, sum; 2047 2048 /* no need to check if number of frags is less than 7 */ 2049 nr_frags = skb_shinfo(skb)->nr_frags; 2050 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1)) 2051 return false; 2052 2053 /* We need to walk through the list and validate that each group 2054 * of 6 fragments totals at least gso_size. 2055 */ 2056 nr_frags -= IAVF_MAX_BUFFER_TXD - 2; 2057 frag = &skb_shinfo(skb)->frags[0]; 2058 2059 /* Initialize size to the negative value of gso_size minus 1. We 2060 * use this as the worst case scenerio in which the frag ahead 2061 * of us only provides one byte which is why we are limited to 6 2062 * descriptors for a single transmit as the header and previous 2063 * fragment are already consuming 2 descriptors. 2064 */ 2065 sum = 1 - skb_shinfo(skb)->gso_size; 2066 2067 /* Add size of frags 0 through 4 to create our initial sum */ 2068 sum += skb_frag_size(frag++); 2069 sum += skb_frag_size(frag++); 2070 sum += skb_frag_size(frag++); 2071 sum += skb_frag_size(frag++); 2072 sum += skb_frag_size(frag++); 2073 2074 /* Walk through fragments adding latest fragment, testing it, and 2075 * then removing stale fragments from the sum. 2076 */ 2077 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 2078 int stale_size = skb_frag_size(stale); 2079 2080 sum += skb_frag_size(frag++); 2081 2082 /* The stale fragment may present us with a smaller 2083 * descriptor than the actual fragment size. To account 2084 * for that we need to remove all the data on the front and 2085 * figure out what the remainder would be in the last 2086 * descriptor associated with the fragment. 2087 */ 2088 if (stale_size > IAVF_MAX_DATA_PER_TXD) { 2089 int align_pad = -(skb_frag_off(stale)) & 2090 (IAVF_MAX_READ_REQ_SIZE - 1); 2091 2092 sum -= align_pad; 2093 stale_size -= align_pad; 2094 2095 do { 2096 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED; 2097 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED; 2098 } while (stale_size > IAVF_MAX_DATA_PER_TXD); 2099 } 2100 2101 /* if sum is negative we failed to make sufficient progress */ 2102 if (sum < 0) 2103 return true; 2104 2105 if (!nr_frags--) 2106 break; 2107 2108 sum -= stale_size; 2109 } 2110 2111 return false; 2112 } 2113 2114 /** 2115 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions 2116 * @tx_ring: the ring to be checked 2117 * @size: the size buffer we want to assure is available 2118 * 2119 * Returns -EBUSY if a stop is needed, else 0 2120 **/ 2121 int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) 2122 { 2123 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2124 /* Memory barrier before checking head and tail */ 2125 smp_mb(); 2126 2127 /* Check again in a case another CPU has just made room available. */ 2128 if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) 2129 return -EBUSY; 2130 2131 /* A reprieve! - use start_queue because it doesn't call schedule */ 2132 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2133 ++tx_ring->tx_stats.restart_queue; 2134 return 0; 2135 } 2136 2137 /** 2138 * iavf_tx_map - Build the Tx descriptor 2139 * @tx_ring: ring to send buffer on 2140 * @skb: send buffer 2141 * @first: first buffer info buffer to use 2142 * @tx_flags: collected send information 2143 * @hdr_len: size of the packet header 2144 * @td_cmd: the command field in the descriptor 2145 * @td_offset: offset for checksum or crc 2146 **/ 2147 static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, 2148 struct iavf_tx_buffer *first, u32 tx_flags, 2149 const u8 hdr_len, u32 td_cmd, u32 td_offset) 2150 { 2151 unsigned int data_len = skb->data_len; 2152 unsigned int size = skb_headlen(skb); 2153 skb_frag_t *frag; 2154 struct iavf_tx_buffer *tx_bi; 2155 struct iavf_tx_desc *tx_desc; 2156 u16 i = tx_ring->next_to_use; 2157 u32 td_tag = 0; 2158 dma_addr_t dma; 2159 2160 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) { 2161 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; 2162 td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags); 2163 } 2164 2165 first->tx_flags = tx_flags; 2166 2167 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 2168 2169 tx_desc = IAVF_TX_DESC(tx_ring, i); 2170 tx_bi = first; 2171 2172 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 2173 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED; 2174 2175 if (dma_mapping_error(tx_ring->dev, dma)) 2176 goto dma_error; 2177 2178 /* record length, and DMA address */ 2179 dma_unmap_len_set(tx_bi, len, size); 2180 dma_unmap_addr_set(tx_bi, dma, dma); 2181 2182 /* align size to end of page */ 2183 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1); 2184 tx_desc->buffer_addr = cpu_to_le64(dma); 2185 2186 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) { 2187 tx_desc->cmd_type_offset_bsz = 2188 build_ctob(td_cmd, td_offset, 2189 max_data, td_tag); 2190 2191 tx_desc++; 2192 i++; 2193 2194 if (i == tx_ring->count) { 2195 tx_desc = IAVF_TX_DESC(tx_ring, 0); 2196 i = 0; 2197 } 2198 2199 dma += max_data; 2200 size -= max_data; 2201 2202 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED; 2203 tx_desc->buffer_addr = cpu_to_le64(dma); 2204 } 2205 2206 if (likely(!data_len)) 2207 break; 2208 2209 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 2210 size, td_tag); 2211 2212 tx_desc++; 2213 i++; 2214 2215 if (i == tx_ring->count) { 2216 tx_desc = IAVF_TX_DESC(tx_ring, 0); 2217 i = 0; 2218 } 2219 2220 size = skb_frag_size(frag); 2221 data_len -= size; 2222 2223 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 2224 DMA_TO_DEVICE); 2225 2226 tx_bi = &tx_ring->tx_bi[i]; 2227 } 2228 2229 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 2230 2231 i++; 2232 if (i == tx_ring->count) 2233 i = 0; 2234 2235 tx_ring->next_to_use = i; 2236 2237 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED); 2238 2239 /* write last descriptor with RS and EOP bits */ 2240 td_cmd |= IAVF_TXD_CMD; 2241 tx_desc->cmd_type_offset_bsz = 2242 build_ctob(td_cmd, td_offset, size, td_tag); 2243 2244 skb_tx_timestamp(skb); 2245 2246 /* Force memory writes to complete before letting h/w know there 2247 * are new descriptors to fetch. 2248 * 2249 * We also use this memory barrier to make certain all of the 2250 * status bits have been updated before next_to_watch is written. 2251 */ 2252 wmb(); 2253 2254 /* set next_to_watch value indicating a packet is present */ 2255 first->next_to_watch = tx_desc; 2256 2257 /* notify HW of packet */ 2258 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 2259 writel(i, tx_ring->tail); 2260 } 2261 2262 return; 2263 2264 dma_error: 2265 dev_info(tx_ring->dev, "TX DMA map failed\n"); 2266 2267 /* clear dma mappings for failed tx_bi map */ 2268 for (;;) { 2269 tx_bi = &tx_ring->tx_bi[i]; 2270 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi); 2271 if (tx_bi == first) 2272 break; 2273 if (i == 0) 2274 i = tx_ring->count; 2275 i--; 2276 } 2277 2278 tx_ring->next_to_use = i; 2279 } 2280 2281 /** 2282 * iavf_xmit_frame_ring - Sends buffer on Tx ring 2283 * @skb: send buffer 2284 * @tx_ring: ring to send buffer on 2285 * 2286 * Returns NETDEV_TX_OK if sent, else an error code 2287 **/ 2288 static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, 2289 struct iavf_ring *tx_ring) 2290 { 2291 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT; 2292 u32 cd_tunneling = 0, cd_l2tag2 = 0; 2293 struct iavf_tx_buffer *first; 2294 u32 td_offset = 0; 2295 u32 tx_flags = 0; 2296 __be16 protocol; 2297 u32 td_cmd = 0; 2298 u8 hdr_len = 0; 2299 int tso, count; 2300 2301 /* prefetch the data, we'll need it later */ 2302 prefetch(skb->data); 2303 2304 iavf_trace(xmit_frame_ring, skb, tx_ring); 2305 2306 count = iavf_xmit_descriptor_count(skb); 2307 if (iavf_chk_linearize(skb, count)) { 2308 if (__skb_linearize(skb)) { 2309 dev_kfree_skb_any(skb); 2310 return NETDEV_TX_OK; 2311 } 2312 count = iavf_txd_use_count(skb->len); 2313 tx_ring->tx_stats.tx_linearize++; 2314 } 2315 2316 /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD, 2317 * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD, 2318 * + 4 desc gap to avoid the cache line where head is, 2319 * + 1 desc for context descriptor, 2320 * otherwise try next time 2321 */ 2322 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) { 2323 tx_ring->tx_stats.tx_busy++; 2324 return NETDEV_TX_BUSY; 2325 } 2326 2327 /* record the location of the first descriptor for this packet */ 2328 first = &tx_ring->tx_bi[tx_ring->next_to_use]; 2329 first->skb = skb; 2330 first->bytecount = skb->len; 2331 first->gso_segs = 1; 2332 2333 /* prepare the xmit flags */ 2334 iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags); 2335 if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { 2336 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 << 2337 IAVF_TXD_CTX_QW1_CMD_SHIFT; 2338 cd_l2tag2 = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags); 2339 } 2340 2341 /* obtain protocol of skb */ 2342 protocol = vlan_get_protocol(skb); 2343 2344 /* setup IPv4/IPv6 offloads */ 2345 if (protocol == htons(ETH_P_IP)) 2346 tx_flags |= IAVF_TX_FLAGS_IPV4; 2347 else if (protocol == htons(ETH_P_IPV6)) 2348 tx_flags |= IAVF_TX_FLAGS_IPV6; 2349 2350 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss); 2351 2352 if (tso < 0) 2353 goto out_drop; 2354 else if (tso) 2355 tx_flags |= IAVF_TX_FLAGS_TSO; 2356 2357 /* Always offload the checksum, since it's in the data descriptor */ 2358 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, 2359 tx_ring, &cd_tunneling); 2360 if (tso < 0) 2361 goto out_drop; 2362 2363 /* always enable CRC insertion offload */ 2364 td_cmd |= IAVF_TX_DESC_CMD_ICRC; 2365 2366 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, 2367 cd_tunneling, cd_l2tag2); 2368 2369 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, 2370 td_cmd, td_offset); 2371 2372 return NETDEV_TX_OK; 2373 2374 out_drop: 2375 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring); 2376 dev_kfree_skb_any(first->skb); 2377 first->skb = NULL; 2378 return NETDEV_TX_OK; 2379 } 2380 2381 /** 2382 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer 2383 * @skb: send buffer 2384 * @netdev: network interface device structure 2385 * 2386 * Returns NETDEV_TX_OK if sent, else an error code 2387 **/ 2388 netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2389 { 2390 struct iavf_adapter *adapter = netdev_priv(netdev); 2391 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; 2392 2393 /* hardware can't handle really short frames, hardware padding works 2394 * beyond this point 2395 */ 2396 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) { 2397 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len)) 2398 return NETDEV_TX_OK; 2399 skb->len = IAVF_MIN_TX_LEN; 2400 skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN); 2401 } 2402 2403 return iavf_xmit_frame_ring(skb, tx_ring); 2404 } 2405