// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
                           struct ionic_cq_info *cq_info, void *cb_arg);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
                                  ionic_desc_cb cb_func, void *cb_arg)
{
        DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);

        ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
                                  ionic_desc_cb cb_func, void *cb_arg)
{
        ionic_q_post(q, ring_dbell, cb_func, cb_arg);

        DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
        return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
                                          unsigned int len, bool frags)
{
        struct ionic_lif *lif = q->lif;
        struct ionic_rx_stats *stats;
        struct net_device *netdev;
        struct sk_buff *skb;

        netdev = lif->netdev;
        stats = q_to_rx_stats(q);

        if (frags)
                skb = napi_get_frags(&q_to_qcq(q)->napi);
        else
                skb = netdev_alloc_skb_ip_align(netdev, len);

        if (unlikely(!skb)) {
                net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
                                     netdev->name, q->name);
                stats->alloc_err++;
                return NULL;
        }

        return skb;
}

static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
                                      struct ionic_desc_info *desc_info,
                                      struct ionic_cq_info *cq_info)
{
        struct ionic_rxq_comp *comp = cq_info->cq_desc;
        struct device *dev = q->lif->ionic->dev;
        struct ionic_page_info *page_info;
        struct sk_buff *skb;
        unsigned int i;
        u16 frag_len;
        u16 len;

        page_info = &desc_info->pages[0];
        len = le16_to_cpu(comp->len);

        prefetch(page_address(page_info->page) + NET_IP_ALIGN);

        skb = ionic_rx_skb_alloc(q, len, true);
        if (unlikely(!skb))
                return NULL;

        i = comp->num_sg_elems + 1;
        do {
                if (unlikely(!page_info->page)) {
                        struct napi_struct *napi = &q_to_qcq(q)->napi;

                        napi->skb = NULL;
                        dev_kfree_skb(skb);
                        return NULL;
                }

                frag_len = min(len, (u16)PAGE_SIZE);
                len -= frag_len;

                dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
                               PAGE_SIZE, DMA_FROM_DEVICE);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                page_info->page, 0, frag_len, PAGE_SIZE);
                page_info->page = NULL;
                page_info++;
                i--;
        } while (i > 0);

        return skb;
}

static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
                                          struct ionic_desc_info *desc_info,
                                          struct ionic_cq_info *cq_info)
{
        struct ionic_rxq_comp *comp = cq_info->cq_desc;
        struct device *dev = q->lif->ionic->dev;
        struct ionic_page_info *page_info;
        struct sk_buff *skb;
        u16 len;

        page_info = &desc_info->pages[0];
        len = le16_to_cpu(comp->len);

        skb = ionic_rx_skb_alloc(q, len, false);
        if (unlikely(!skb))
                return NULL;

        if (unlikely(!page_info->page)) {
                dev_kfree_skb(skb);
                return NULL;
        }

        dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
                                len, DMA_FROM_DEVICE);
        skb_copy_to_linear_data(skb, page_address(page_info->page), len);
        dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
                                   len, DMA_FROM_DEVICE);

        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, q->lif->netdev);

        return skb;
}
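
/* Added descriptive comment: per-completion Rx processing.  Frames are
 * dropped on a bad completion status or while the queue is resetting;
 * otherwise frames at or below rx_copybreak are copied into a small
 * linear skb so the Rx page can be recycled, and larger frames are
 * passed up as page frags through napi_gro_frags().
 */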
static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
                           struct ionic_cq_info *cq_info, void *cb_arg)
{
        struct ionic_rxq_comp *comp = cq_info->cq_desc;
        struct ionic_qcq *qcq = q_to_qcq(q);
        struct ionic_rx_stats *stats;
        struct net_device *netdev;
        struct sk_buff *skb;

        stats = q_to_rx_stats(q);
        netdev = q->lif->netdev;

        if (comp->status) {
                stats->dropped++;
                return;
        }

        /* no packet processing while resetting */
        if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
                stats->dropped++;
                return;
        }

        stats->pkts++;
        stats->bytes += le16_to_cpu(comp->len);

        if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
                skb = ionic_rx_copybreak(q, desc_info, cq_info);
        else
                skb = ionic_rx_frags(q, desc_info, cq_info);

        if (unlikely(!skb)) {
                stats->dropped++;
                return;
        }

        skb_record_rx_queue(skb, q->index);

        if (likely(netdev->features & NETIF_F_RXHASH)) {
                switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
                case IONIC_PKT_TYPE_IPV4:
                case IONIC_PKT_TYPE_IPV6:
                        skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
                                     PKT_HASH_TYPE_L3);
                        break;
                case IONIC_PKT_TYPE_IPV4_TCP:
                case IONIC_PKT_TYPE_IPV6_TCP:
                case IONIC_PKT_TYPE_IPV4_UDP:
                case IONIC_PKT_TYPE_IPV6_UDP:
                        skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
                                     PKT_HASH_TYPE_L4);
                        break;
                }
        }

        if (likely(netdev->features & NETIF_F_RXCSUM)) {
                if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
                        skb->ip_summed = CHECKSUM_COMPLETE;
                        skb->csum = (__wsum)le16_to_cpu(comp->csum);
                        stats->csum_complete++;
                }
        } else {
                stats->csum_none++;
        }

        if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
                     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
                     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
                stats->csum_error++;

        if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(comp->vlan_tci));
        }

        if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
                napi_gro_receive(&qcq->napi, skb);
        else
                napi_gro_frags(&qcq->napi);
}

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
        struct ionic_rxq_comp *comp = cq_info->cq_desc;
        struct ionic_queue *q = cq->bound_q;
        struct ionic_desc_info *desc_info;

        if (!color_match(comp->pkt_type_color, cq->done_color))
                return false;

        /* check for empty queue */
        if (q->tail->index == q->head->index)
                return false;

        desc_info = q->tail;
        if (desc_info->index != le16_to_cpu(comp->comp_index))
                return false;

        q->tail = desc_info->next;

        /* clean the related q entry, only one per qc completion */
        ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

        desc_info->cb = NULL;
        desc_info->cb_arg = NULL;

        return true;
}

static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
{
        u32 work_done = 0;

        while (ionic_rx_service(rxcq, rxcq->tail)) {
                if (rxcq->tail->last)
                        rxcq->done_color = !rxcq->done_color;
                rxcq->tail = rxcq->tail->next;
                DEBUG_STATS_CQE_CNT(rxcq);

                if (++work_done >= limit)
                        break;
        }

        return work_done;
}
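
/* Added descriptive comment: service any Rx completions the device has
 * already posted and return the credits to the interrupt block; unlike
 * the napi path, no ring refill is done here.
 */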
void ionic_rx_flush(struct ionic_cq *cq)
{
        struct ionic_dev *idev = &cq->lif->ionic->idev;
        u32 work_done;

        work_done = ionic_rx_walk_cq(cq, cq->num_descs);

        if (work_done)
                ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
                                   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
                                        dma_addr_t *dma_addr)
{
        struct ionic_lif *lif = q->lif;
        struct ionic_rx_stats *stats;
        struct net_device *netdev;
        struct device *dev;
        struct page *page;

        netdev = lif->netdev;
        dev = lif->ionic->dev;
        stats = q_to_rx_stats(q);
        page = alloc_page(GFP_ATOMIC);
        if (unlikely(!page)) {
                net_err_ratelimited("%s: Page alloc failed on %s!\n",
                                    netdev->name, q->name);
                stats->alloc_err++;
                return NULL;
        }

        *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, *dma_addr))) {
                __free_page(page);
                net_err_ratelimited("%s: DMA single map failed on %s!\n",
                                    netdev->name, q->name);
                stats->dma_map_err++;
                return NULL;
        }

        return page;
}

static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
                               dma_addr_t dma_addr)
{
        struct ionic_lif *lif = q->lif;
        struct net_device *netdev;
        struct device *dev;

        netdev = lif->netdev;
        dev = lif->ionic->dev;

        if (unlikely(!page)) {
                net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
                                    netdev->name, q->name);
                return;
        }

        dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

        __free_page(page);
}

#define IONIC_RX_RING_DOORBELL_STRIDE		((1 << 5) - 1)
#define IONIC_RX_RING_HEAD_BUF_SZ		2048
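
/* Added descriptive comment: refill the Rx ring with page-sized buffers.
 * Descriptors whose pages are still attached are simply re-posted
 * (recycled); otherwise new pages are allocated and DMA-mapped, with any
 * length beyond the first page described by SG elements.  The doorbell is
 * rung once every (IONIC_RX_RING_DOORBELL_STRIDE + 1) descriptors rather
 * than on every post.
 */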
void ionic_rx_fill(struct ionic_queue *q)
{
        struct net_device *netdev = q->lif->netdev;
        struct ionic_desc_info *desc_info;
        struct ionic_page_info *page_info;
        struct ionic_rxq_sg_desc *sg_desc;
        struct ionic_rxq_sg_elem *sg_elem;
        struct ionic_rxq_desc *desc;
        unsigned int remain_len;
        unsigned int seg_len;
        unsigned int nfrags;
        bool ring_doorbell;
        unsigned int i, j;
        unsigned int len;

        len = netdev->mtu + ETH_HLEN;
        nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

        for (i = ionic_q_space_avail(q); i; i--) {
                remain_len = len;
                desc_info = q->head;
                desc = desc_info->desc;
                sg_desc = desc_info->sg_desc;
                page_info = &desc_info->pages[0];

                if (page_info->page) { /* recycle the buffer */
                        ring_doorbell = ((q->head->index + 1) &
                                        IONIC_RX_RING_DOORBELL_STRIDE) == 0;
                        ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
                        continue;
                }

                /* fill main descriptor - pages[0] */
                desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
                                              IONIC_RXQ_DESC_OPCODE_SIMPLE;
                desc_info->npages = nfrags;
                page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
                if (unlikely(!page_info->page)) {
                        desc->addr = 0;
                        desc->len = 0;
                        return;
                }
                desc->addr = cpu_to_le64(page_info->dma_addr);
                seg_len = min_t(unsigned int, PAGE_SIZE, len);
                desc->len = cpu_to_le16(seg_len);
                remain_len -= seg_len;
                page_info++;

                /* fill sg descriptors - pages[1..n] */
                for (j = 0; j < nfrags - 1; j++) {
                        if (page_info->page) /* recycle the sg buffer */
                                continue;

                        sg_elem = &sg_desc->elems[j];
                        page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
                        if (unlikely(!page_info->page)) {
                                sg_elem->addr = 0;
                                sg_elem->len = 0;
                                return;
                        }
                        sg_elem->addr = cpu_to_le64(page_info->dma_addr);
                        seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
                        sg_elem->len = cpu_to_le16(seg_len);
                        remain_len -= seg_len;
                        page_info++;
                }

                ring_doorbell = ((q->head->index + 1) &
                                IONIC_RX_RING_DOORBELL_STRIDE) == 0;
                ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
        }
}

static void ionic_rx_fill_cb(void *arg)
{
        ionic_rx_fill(arg);
}

void ionic_rx_empty(struct ionic_queue *q)
{
        struct ionic_desc_info *cur;
        struct ionic_rxq_desc *desc;
        unsigned int i;

        for (cur = q->tail; cur != q->head; cur = cur->next) {
                desc = cur->desc;
                desc->addr = 0;
                desc->len = 0;

                for (i = 0; i < cur->npages; i++) {
                        if (likely(cur->pages[i].page)) {
                                ionic_rx_page_free(q, cur->pages[i].page,
                                                   cur->pages[i].dma_addr);
                                cur->pages[i].page = NULL;
                                cur->pages[i].dma_addr = 0;
                        }
                }

                cur->cb_arg = NULL;
        }
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
        struct ionic_qcq *qcq = napi_to_qcq(napi);
        struct ionic_cq *rxcq = napi_to_cq(napi);
        unsigned int qi = rxcq->bound_q->index;
        struct ionic_dev *idev;
        struct ionic_lif *lif;
        struct ionic_cq *txcq;
        u32 work_done = 0;
        u32 flags = 0;

        lif = rxcq->bound_q->lif;
        idev = &lif->ionic->idev;
        txcq = &lif->txqcqs[qi].qcq->cq;

        ionic_tx_flush(txcq);

        work_done = ionic_rx_walk_cq(rxcq, budget);

        if (work_done)
                ionic_rx_fill_cb(rxcq->bound_q);

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                flags |= IONIC_INTR_CRED_UNMASK;
                DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
        }

        if (work_done || flags) {
                flags |= IONIC_INTR_CRED_RESET_COALESCE;
                ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
                                   work_done, flags);
        }

        DEBUG_STATS_NAPI_POLL(qcq, work_done);

        return work_done;
}
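
/* Added descriptive comment: Tx DMA mapping helpers.  They map the skb
 * linear data or a page frag for device access; mapping failures are
 * counted in the queue's dma_map_err stat and left for the caller to
 * detect.
 */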
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
{
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        struct device *dev = q->lif->ionic->dev;
        dma_addr_t dma_addr;

        dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                net_warn_ratelimited("%s: DMA single map failed on %s!\n",
                                     q->lif->netdev->name, q->name);
                stats->dma_map_err++;
                return 0;
        }
        return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
                                    size_t offset, size_t len)
{
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        struct device *dev = q->lif->ionic->dev;
        dma_addr_t dma_addr;

        dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
                                     q->lif->netdev->name, q->name);
                stats->dma_map_err++;
        }
        return dma_addr;
}

static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
                           struct ionic_cq_info *cq_info, void *cb_arg)
{
        struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
        struct ionic_txq_sg_elem *elem = sg_desc->elems;
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        struct ionic_txq_desc *desc = desc_info->desc;
        struct device *dev = q->lif->ionic->dev;
        u8 opcode, flags, nsge;
        u16 queue_index;
        unsigned int i;
        u64 addr;

        decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
                            &opcode, &flags, &nsge, &addr);

        /* use unmap_single only if either this is not TSO,
         * or this is first descriptor of a TSO
         */
        if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
            flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
                dma_unmap_single(dev, (dma_addr_t)addr,
                                 le16_to_cpu(desc->len), DMA_TO_DEVICE);
        else
                dma_unmap_page(dev, (dma_addr_t)addr,
                               le16_to_cpu(desc->len), DMA_TO_DEVICE);

        for (i = 0; i < nsge; i++, elem++)
                dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
                               le16_to_cpu(elem->len), DMA_TO_DEVICE);

        if (cb_arg) {
                struct sk_buff *skb = cb_arg;
                u32 len = skb->len;

                queue_index = skb_get_queue_mapping(skb);
                if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
                                                      queue_index))) {
                        netif_wake_subqueue(q->lif->netdev, queue_index);
                        q->wake++;
                }
                dev_kfree_skb_any(skb);
                stats->clean++;
                netdev_tx_completed_queue(q_to_ndq(q), 1, len);
        }
}

void ionic_tx_flush(struct ionic_cq *cq)
{
        struct ionic_txq_comp *comp = cq->tail->cq_desc;
        struct ionic_dev *idev = &cq->lif->ionic->idev;
        struct ionic_queue *q = cq->bound_q;
        struct ionic_desc_info *desc_info;
        unsigned int work_done = 0;

        /* walk the completed cq entries */
        while (work_done < cq->num_descs &&
               color_match(comp->color, cq->done_color)) {

                /* clean the related q entries, there could be
                 * several q entries completed for each cq completion
                 */
                do {
                        desc_info = q->tail;
                        q->tail = desc_info->next;
                        ionic_tx_clean(q, desc_info, cq->tail,
                                       desc_info->cb_arg);
                        desc_info->cb = NULL;
                        desc_info->cb_arg = NULL;
                } while (desc_info->index != le16_to_cpu(comp->comp_index));

                if (cq->tail->last)
                        cq->done_color = !cq->done_color;

                cq->tail = cq->tail->next;
                comp = cq->tail->cq_desc;
                DEBUG_STATS_CQE_CNT(cq);

                work_done++;
        }

        if (work_done)
                ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
                                   work_done, 0);
}
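
/* Added descriptive comment: for TSO the innermost TCP checksum must be
 * seeded with the pseudo-header sum computed with the IP length set to
 * zero, so the device can add in each segment's length; the _inner_
 * variant does the same for encapsulated (tunneled) packets.
 */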
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
        int err;

        err = skb_cow_head(skb, 0);
        if (err)
                return err;

        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
                inner_ip_hdr(skb)->check = 0;
                inner_tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
                                           inner_ip_hdr(skb)->daddr,
                                           0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
                inner_tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
                                         &inner_ipv6_hdr(skb)->daddr,
                                         0, IPPROTO_TCP, 0);
        }

        return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
        int err;

        err = skb_cow_head(skb, 0);
        if (err)
                return err;

        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                           ip_hdr(skb)->daddr,
                                           0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
                tcp_v6_gso_csum_prep(skb);
        }

        return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
                              struct sk_buff *skb,
                              dma_addr_t addr, u8 nsge, u16 len,
                              unsigned int hdrlen, unsigned int mss,
                              bool outer_csum,
                              u16 vlan_tci, bool has_vlan,
                              bool start, bool done)
{
        u8 flags = 0;
        u64 cmd;

        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
        flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
        flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
        flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

        cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
        desc->cmd = cpu_to_le64(cmd);
        desc->len = cpu_to_le16(len);
        desc->vlan_tci = cpu_to_le16(vlan_tci);
        desc->hdr_len = cpu_to_le16(hdrlen);
        desc->mss = cpu_to_le16(mss);

        if (done) {
                skb_tx_timestamp(skb);
                netdev_tx_sent_queue(q_to_ndq(q), skb->len);
                ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
        } else {
                ionic_txq_post(q, false, ionic_tx_clean, NULL);
        }
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
                                                struct ionic_txq_sg_elem **elem)
{
        struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
        struct ionic_txq_desc *desc = q->head->desc;

        *elem = sg_desc->elems;
        return desc;
}
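
/* Added descriptive comment: build the chain of TSO descriptors.  The
 * linear data is carved into an initial (hdrlen + mss) piece followed by
 * mss-sized pieces; frags are then carved to fill out the current segment
 * and to start new mss-sized ones, using SG elements when a segment spans
 * buffers.  If any DMA mapping fails, the partially built chain is
 * unwound at err_out_abort and -ENOMEM is returned.
 */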
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        struct ionic_desc_info *abort = q->head;
        struct device *dev = q->lif->ionic->dev;
        struct ionic_desc_info *rewind = abort;
        struct ionic_txq_sg_elem *elem;
        struct ionic_txq_desc *desc;
        unsigned int frag_left = 0;
        unsigned int offset = 0;
        unsigned int len_left;
        dma_addr_t desc_addr;
        unsigned int hdrlen;
        unsigned int nfrags;
        unsigned int seglen;
        u64 total_bytes = 0;
        u64 total_pkts = 0;
        unsigned int left;
        unsigned int len;
        unsigned int mss;
        skb_frag_t *frag;
        bool start, done;
        bool outer_csum;
        bool has_vlan;
        u16 desc_len;
        u8 desc_nsge;
        u16 vlan_tci;
        bool encap;
        int err;

        mss = skb_shinfo(skb)->gso_size;
        nfrags = skb_shinfo(skb)->nr_frags;
        len_left = skb->len - skb_headlen(skb);
        outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
                     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
        has_vlan = !!skb_vlan_tag_present(skb);
        vlan_tci = skb_vlan_tag_get(skb);
        encap = skb->encapsulation;

        /* Preload inner-most TCP csum field with IP pseudo hdr
         * calculated with IP length set to zero. HW will later
         * add in length to each TCP segment resulting from the TSO.
         */

        if (encap)
                err = ionic_tx_tcp_inner_pseudo_csum(skb);
        else
                err = ionic_tx_tcp_pseudo_csum(skb);
        if (err)
                return err;

        if (encap)
                hdrlen = skb_inner_transport_header(skb) - skb->data +
                         inner_tcp_hdrlen(skb);
        else
                hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

        seglen = hdrlen + mss;
        left = skb_headlen(skb);

        desc = ionic_tx_tso_next(q, &elem);
        start = true;

        /* Chop skb->data up into desc segments */

        while (left > 0) {
                len = min(seglen, left);
                frag_left = seglen - len;
                desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
                if (dma_mapping_error(dev, desc_addr))
                        goto err_out_abort;
                desc_len = len;
                desc_nsge = 0;
                left -= len;
                offset += len;
                if (nfrags > 0 && frag_left > 0)
                        continue;
                done = (nfrags == 0 && left == 0);
                ionic_tx_tso_post(q, desc, skb,
                                  desc_addr, desc_nsge, desc_len,
                                  hdrlen, mss,
                                  outer_csum,
                                  vlan_tci, has_vlan,
                                  start, done);
                total_pkts++;
                total_bytes += start ? len : len + hdrlen;
                desc = ionic_tx_tso_next(q, &elem);
                start = false;
                seglen = mss;
        }

        /* Chop skb frags into desc segments */

        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                offset = 0;
                left = skb_frag_size(frag);
                len_left -= left;
                nfrags--;
                stats->frags++;

                while (left > 0) {
                        if (frag_left > 0) {
                                len = min(frag_left, left);
                                frag_left -= len;
                                elem->addr =
                                    cpu_to_le64(ionic_tx_map_frag(q, frag,
                                                                  offset, len));
                                if (dma_mapping_error(dev, elem->addr))
                                        goto err_out_abort;
                                elem->len = cpu_to_le16(len);
                                elem++;
                                desc_nsge++;
                                left -= len;
                                offset += len;
                                if (nfrags > 0 && frag_left > 0)
                                        continue;
                                done = (nfrags == 0 && left == 0);
                                ionic_tx_tso_post(q, desc, skb, desc_addr,
                                                  desc_nsge, desc_len,
                                                  hdrlen, mss, outer_csum,
                                                  vlan_tci, has_vlan,
                                                  start, done);
                                total_pkts++;
                                total_bytes += start ? len : len + hdrlen;
                                desc = ionic_tx_tso_next(q, &elem);
                                start = false;
                        } else {
                                len = min(mss, left);
                                frag_left = mss - len;
                                desc_addr = ionic_tx_map_frag(q, frag,
                                                              offset, len);
                                if (dma_mapping_error(dev, desc_addr))
                                        goto err_out_abort;
                                desc_len = len;
                                desc_nsge = 0;
                                left -= len;
                                offset += len;
                                if (nfrags > 0 && frag_left > 0)
                                        continue;
                                done = (nfrags == 0 && left == 0);
                                ionic_tx_tso_post(q, desc, skb, desc_addr,
                                                  desc_nsge, desc_len,
                                                  hdrlen, mss, outer_csum,
                                                  vlan_tci, has_vlan,
                                                  start, done);
                                total_pkts++;
                                total_bytes += start ? len : len + hdrlen;
                                desc = ionic_tx_tso_next(q, &elem);
                                start = false;
                        }
                }
        }

        stats->pkts += total_pkts;
        stats->bytes += total_bytes;
        stats->tso++;

        return 0;

err_out_abort:
        while (rewind->desc != q->head->desc) {
                ionic_tx_clean(q, rewind, NULL, NULL);
                rewind = rewind->next;
        }
        q->head = abort;

        return -ENOMEM;
}
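
/* Added descriptive comment: first descriptor for a CHECKSUM_PARTIAL
 * packet; the device inserts the checksum using the skb's
 * csum_start/csum_offset.
 */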
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        struct ionic_txq_desc *desc = q->head->desc;
        struct device *dev = q->lif->ionic->dev;
        dma_addr_t dma_addr;
        bool has_vlan;
        u8 flags = 0;
        bool encap;
        u64 cmd;

        has_vlan = !!skb_vlan_tag_present(skb);
        encap = skb->encapsulation;

        dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
        if (dma_mapping_error(dev, dma_addr))
                return -ENOMEM;

        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

        cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
                                  flags, skb_shinfo(skb)->nr_frags, dma_addr);
        desc->cmd = cpu_to_le64(cmd);
        desc->len = cpu_to_le16(skb_headlen(skb));
        desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
        desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
        desc->csum_offset = cpu_to_le16(skb->csum_offset);

        if (skb->csum_not_inet)
                stats->crc32_csum++;
        else
                stats->csum++;

        return 0;
}
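
/* Added descriptive comment: first descriptor for a packet that needs no
 * checksum offload.
 */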
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        struct ionic_txq_desc *desc = q->head->desc;
        struct device *dev = q->lif->ionic->dev;
        dma_addr_t dma_addr;
        bool has_vlan;
        u8 flags = 0;
        bool encap;
        u64 cmd;

        has_vlan = !!skb_vlan_tag_present(skb);
        encap = skb->encapsulation;

        dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
        if (dma_mapping_error(dev, dma_addr))
                return -ENOMEM;

        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

        cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
                                  flags, skb_shinfo(skb)->nr_frags, dma_addr);
        desc->cmd = cpu_to_le64(cmd);
        desc->len = cpu_to_le16(skb_headlen(skb));
        desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));

        stats->no_csum++;

        return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
        struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
        unsigned int len_left = skb->len - skb_headlen(skb);
        struct ionic_txq_sg_elem *elem = sg_desc->elems;
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        struct device *dev = q->lif->ionic->dev;
        dma_addr_t dma_addr;
        skb_frag_t *frag;
        u16 len;

        for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
                len = skb_frag_size(frag);
                elem->len = cpu_to_le16(len);
                dma_addr = ionic_tx_map_frag(q, frag, 0, len);
                if (dma_mapping_error(dev, dma_addr))
                        return -ENOMEM;
                elem->addr = cpu_to_le64(dma_addr);
                len_left -= len;
                stats->frags++;
        }

        return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        int err;

        /* set up the initial descriptor */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                err = ionic_tx_calc_csum(q, skb);
        else
                err = ionic_tx_calc_no_csum(q, skb);
        if (err)
                return err;

        /* add frags */
        err = ionic_tx_skb_frags(q, skb);
        if (err)
                return err;

        skb_tx_timestamp(skb);
        stats->pkts++;
        stats->bytes += skb->len;

        netdev_tx_sent_queue(q_to_ndq(q), skb->len);
        ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

        return 0;
}

static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
        int err;

        /* If TSO, need roundup(skb->len/mss) descs */
        if (skb_is_gso(skb))
                return (skb->len / skb_shinfo(skb)->gso_size) + 1;

        /* If non-TSO, just need 1 desc and nr_frags sg elems */
        if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
                return 1;

        /* Too many frags, so linearize */
        err = skb_linearize(skb);
        if (err)
                return err;

        stats->linearize++;

        /* Need 1 desc and zero sg elems */
        return 1;
}

static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
        int stopped = 0;

        if (unlikely(!ionic_q_has_space(q, ndescs))) {
                netif_stop_subqueue(q->lif->netdev, q->index);
                q->stop++;
                stopped = 1;

                /* Might race with ionic_tx_clean, check again */
                smp_rmb();
                if (ionic_q_has_space(q, ndescs)) {
                        netif_wake_subqueue(q->lif->netdev, q->index);
                        stopped = 0;
                }
        }

        return stopped;
}
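
/* Added descriptive comment: .ndo_start_xmit handler.  Drops the skb if
 * the LIF is down, picks the Tx queue, stops the subqueue when
 * descriptors run short, and hands the skb to the TSO or non-TSO path.
 */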
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        u16 queue_index = skb_get_queue_mapping(skb);
        struct ionic_lif *lif = netdev_priv(netdev);
        struct ionic_queue *q;
        int ndescs;
        int err;

        if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (unlikely(!lif_to_txqcq(lif, queue_index)))
                queue_index = 0;
        q = lif_to_txq(lif, queue_index);

        ndescs = ionic_tx_descs_needed(q, skb);
        if (ndescs < 0)
                goto err_out_drop;

        if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
                return NETDEV_TX_BUSY;

        if (skb_is_gso(skb))
                err = ionic_tx_tso(q, skb);
        else
                err = ionic_tx(q, skb);

        if (err)
                goto err_out_drop;

        /* Stop the queue if there aren't descriptors for the next packet.
         * Since our SG lists per descriptor take care of most of the possible
         * fragmentation, we don't need to have many descriptors available.
         */
        ionic_maybe_stop_tx(q, 4);

        return NETDEV_TX_OK;

err_out_drop:
        q->stop++;
        q->drop++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}