/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "qede_ptp.h"

#include <linux/qed/qed_if.h>
#include "qede.h"
/*********************************
 * Content also used by slowpath *
 *********************************/

int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;

	/* In case lazy-allocation is allowed, postpone allocation until the
	 * end of the NAPI run. We'd still need to make sure the Rx ring has
	 * sufficient buffers to guarantee an additional Rx interrupt.
	 */
	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
		rxq->filled_buffers--;
		return 0;
	}

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
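	 * The page is later carved into rx_buf_seg_size chunks by
	 * qede_realloc_rx_buffer(), so this single mapping covers every
	 * buffer that will be posted from this page.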
	 */
	mapping = dma_map_page(rxq->dev, data, 0,
			       PAGE_SIZE, rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	WARN_ON(!rx_bd);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));

	rxq->sw_rx_prod++;
	rxq->filled_buffers++;

	return 0;
}

/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(txq->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;
}

static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation) {
		rc |= XMIT_ENC;
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;

			rc |= XMIT_LSO;
			return rc;
		}
	}

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		      ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		     << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping)))
		return -ENOMEM;

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);
	else
		return (skb_transport_header(skb) +
			tcp_hdrlen(skb) - skb->data);
}

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
			 struct sw_rx_data *metadata, u16 padding, u16 length)
{
	struct qede_tx_queue *txq = fp->xdp_tx;
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct eth_tx_1st_bd *first_bd;

	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
		txq->stopped_cnt++;
		return -ENOMEM;
	}

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
	first_bd->data.bitfields |=
		(length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
		ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
	first_bd->data.nbds = 1;

	/* We can safely ignore the offset, as it's 0 for XDP */
	BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);

	/* Synchronize the buffer back to device, as program [probably]
	 * has changed it.
	 */
	dma_sync_single_for_device(&edev->pdev->dev,
				   metadata->mapping + padding,
				   length, PCI_DMA_TODEVICE);

	txq->sw_tx_ring.pages[idx] = metadata->data;
	txq->sw_tx_prod++;

	/* Mark the fastpath for future XDP doorbell */
	fp->xdp_xmit = 1;

	return 0;
}

int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct eth_tx_1st_bd *bd;
	u16 hw_bd_cons;

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

		dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
				 PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
						  NUM_TX_BDS_MAX]);

		txq->sw_tx_cons++;
		txq->xmit_pkts++;
	}
}

static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
		txq->xmit_pkts++;
	}

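	/* Report completed work to BQL (byte queue limits); this pairs with
	 * netdev_tx_sent_queue() in qede_start_xmit().
	 */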
	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

/* This function reuses the buffer(from an offset) from
 * consumer index to producer index in the bd ring
 */
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* In case of allocation failures reuse buffers
 * from consumer index to produce buffers for firmware
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, rxq->data_direction);
	} else {
		/* Increment refcount of the page as we don't want
		 * network stack to take the ownership of the page
		 * which can be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
	}

	return 0;
}

void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	enum rss_hash_type htype;
	u32 hash = 0;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	if (htype) {
		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
			     (htype == RSS_HASH_TYPE_IPV6)) ?
			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		hash = le32_to_cpu(rss_hash);
	}
	skb_set_hash(skb, hash, hash_type);
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
		skb->csum_level = 1;
		skb->encapsulation = 1;
	}
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);
	rxq->rcv_pkts++;
}

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	     PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data, current_bd->page_offset,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, 1);

	return -ENOMEM;
}

static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}

static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}

static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *replace_buf = &tpa_info->buffer;
	dma_addr_t mapping = tpa_info->buffer_mapping;
	struct sw_rx_data *sw_rx_data_cons;
	struct sw_rx_data *sw_rx_data_prod;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	/* Use pre-allocated replacement buffer - we can't release the agg.
	 * start until it's over and we don't want to risk allocation failing
	 * here, so re-allocate when aggregation will be over.
	 */
	sw_rx_data_prod->mapping = replace_buf->mapping;

	sw_rx_data_prod->data = replace_buf->data;
	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
	sw_rx_data_prod->page_offset = replace_buf->page_offset;

	rxq->sw_rx_prod++;

	/* move partial skb from cons to pool (don't unmap yet)
	 * save mapping, in case we drop the packet later on.
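	 * The consumed buffer is stashed in tpa_info->buffer; qede_tpa_end()
	 * copies the packet headers out of it, and the saved mapping is what
	 * allows it to be released if the aggregation is dropped.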
	 */
	tpa_info->buffer = *sw_rx_data_cons;
	mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
			   le32_to_cpu(rx_bd_cons->addr.lo));

	tpa_info->buffer_mapping = mapping;
	rxq->sw_rx_cons++;

	/* set tpa state to start only if we are able to allocate skb
	 * for this aggregation, otherwise mark as error and aggregation will
	 * be dropped
	 */
	tpa_info->skb = netdev_alloc_skb(edev->ndev,
					 le16_to_cpu(cqe->len_on_first_bd));
	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	/* Start filling in the aggregation info */
	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
	tpa_info->frag_id = 0;
	tpa_info->state = QEDE_AGG_STATE_START;

	/* Store some information from first CQE */
	tpa_info->start_cqe_placement_offset = cqe->placement_offset;
	tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));

	if (unlikely(cqe->ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
	}
}

#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* FW can send a single MTU sized packet from gro flow
	 * due to aggregation timeout/last segment etc. which
	 * is not expected to be a gro packet. If a skb has zero
	 * frags then simply push it in the stack as non gso skb.
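	 * Clearing gso_type/gso_size below makes the stack treat it as a
	 * regular (non-GSO) packet.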
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_reset_network_header(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

static void qede_tpa_end(struct qede_dev *edev,
			 struct qede_fastpath *fp,
			 struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA end with more than a single len_list entry\n");

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto err;

	/* Sanity */
	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	memcpy(skb->data,
	       page_address(tpa_info->buffer.data) +
	       tpa_info->start_cqe_placement_offset +
	       tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->state = QEDE_AGG_STATE_NONE;

	return;
err:
	tpa_info->state = QEDE_AGG_STATE_NONE;
	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
}

static u8 qede_check_notunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static u8 qede_check_csum(u16 flag)
{
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
				      u16 flag)
{
	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;

	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
		return true;

	return false;
}

/* Return true iff packet is to be passed to stack */
static bool qede_rx_xdp(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct qede_rx_queue *rxq,
			struct bpf_prog *prog,
			struct sw_rx_data *bd,
			struct eth_fast_path_rx_reg_cqe *cqe)
{
	u16 len = le16_to_cpu(cqe->len_on_first_bd);
	struct xdp_buff xdp;
	enum xdp_action act;

	xdp.data = page_address(bd->data) + cqe->placement_offset;
	xdp.data_end = xdp.data + len;

	/* Queues always have a full reset currently, so for the time
	 * being until there's atomic program replace just mark read
	 * side for map helpers.
	 */
	rcu_read_lock();
	act = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	if (act == XDP_PASS)
		return true;

	/* Count number of packets not to be passed to stack */
	rxq->xdp_no_pass++;

	switch (act) {
	case XDP_TX:
		/* We need the replacement buffer before transmit. */
		if (qede_alloc_rx_buffer(rxq, true)) {
			qede_recycle_rx_bd_ring(rxq, 1);
			trace_xdp_exception(edev->ndev, prog, act);
			return false;
		}

		/* Now if there's a transmission problem, we'd still have to
		 * throw current buffer, as replacement was already allocated.
		 */
		if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
			dma_unmap_page(rxq->dev, bd->mapping,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
			__free_page(bd->data);
			trace_xdp_exception(edev->ndev, prog, act);
		}

		/* Regardless, we've consumed an Rx BD */
		qede_rx_bd_ring_consume(rxq);
		return false;

	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:
		trace_xdp_exception(edev->ndev, prog, act);
	case XDP_DROP:
		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
	}

	return false;
}

static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
					    struct qede_rx_queue *rxq,
					    struct sw_rx_data *bd, u16 len,
					    u16 pad)
{
	unsigned int offset = bd->page_offset;
	struct skb_frag_struct *frag;
	struct page *page = bd->data;
	unsigned int pull_len;
	struct sk_buff *skb;
	unsigned char *va;

	/* Allocate a new SKB with a sufficiently large header len */
	skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* Copy data into SKB - if it's small, we can simply copy it and
	 * re-use the already allocated & mapped memory.
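	 * Packets up to edev->rx_copybreak bytes are copied into the freshly
	 * allocated linear area and the original page is recycled right away,
	 * avoiding the page-ref and re-mapping work done below.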
	 */
	if (len + pad <= edev->rx_copybreak) {
		memcpy(skb_put(skb, len),
		       page_address(page) + pad + offset, len);
		qede_reuse_page(rxq, bd);
		goto out;
	}

	frag = &skb_shinfo(skb)->frags[0];

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			page, pad + offset, len, rxq->rx_buf_seg_size);

	va = skb_frag_address(frag);
	pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);

	/* Align the pull_len to optimize memcpy */
	memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));

	/* Correct the skb & frag sizes offset after the pull */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;

	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		/* Incr page ref count to reuse on allocation failure so
		 * that it doesn't get freed while freeing SKB [as it's
		 * already mapped there].
		 */
		page_ref_inc(page);
		dev_kfree_skb_any(skb);
		return NULL;
	}

out:
	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);
	return skb;
}

static int qede_rx_build_jumbo(struct qede_dev *edev,
			       struct qede_rx_queue *rxq,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_reg_cqe *cqe,
			       u16 first_bd_len)
{
	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
	struct sw_rx_data *bd;
	u16 bd_cons_idx;
	u8 num_frags;

	pkt_len -= first_bd_len;

	/* We've already used one BD for the SKB. Now take care of the rest */
	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
		u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
			       pkt_len;

		if (unlikely(!cur_size)) {
			DP_ERR(edev,
			       "Still got %d BDs for mapping jumbo, but length became 0\n",
			       num_frags);
			goto out;
		}

		/* We need a replacement buffer for each BD */
		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
			goto out;

		/* Now that we've allocated the replacement buffer,
		 * we can safely consume the next BD and map it to the SKB.
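		 * Each consumed page is unmapped and attached to the SKB as
		 * a frag of at most rx_buf_size bytes.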
		 */
		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		bd = &rxq->sw_rx_ring[bd_cons_idx];
		qede_rx_bd_ring_consume(rxq);

		dma_unmap_page(rxq->dev, bd->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
				   bd->data, 0, cur_size);

		skb->truesize += PAGE_SIZE;
		skb->data_len += cur_size;
		skb->len += cur_size;
		pkt_len -= cur_size;
	}

	if (unlikely(pkt_len))
		DP_ERR(edev,
		       "Mapped all BDs of jumbo, but still have %d bytes\n",
		       pkt_len);

out:
	return num_frags;
}

static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
				   struct qede_fastpath *fp,
				   struct qede_rx_queue *rxq,
				   union eth_rx_cqe *cqe,
				   enum eth_rx_cqe_type type)
{
	switch (type) {
	case ETH_RX_CQE_TYPE_TPA_START:
		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_CONT:
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_END:
		qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
		return 1;
	default:
		return 0;
	}
}

static int qede_rx_process_cqe(struct qede_dev *edev,
			       struct qede_fastpath *fp,
			       struct qede_rx_queue *rxq)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	u16 len, pad, bd_cons_idx, parse_flag;
	enum eth_rx_cqe_type cqe_type;
	union eth_rx_cqe *cqe;
	struct sw_rx_data *bd;
	struct sk_buff *skb;
	__le16 flags;
	u8 csum_flag;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
	cqe_type = cqe->fast_path_regular.type;

	/* Process an unlikely slowpath event */
	if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
		struct eth_slow_path_rx_cqe *sp_cqe;

		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
		edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
		return 0;
	}

	/* Handle TPA cqes */
	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);

	/* Get the data from the SW ring; Consume it only after it's evident
	 * we wouldn't recycle it.
	 */
	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	bd = &rxq->sw_rx_ring[bd_cons_idx];

	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	pad = fp_cqe->placement_offset;

	/* Run eBPF program if one is attached */
	if (xdp_prog)
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
			return 1;

	/* If this is an error packet then drop it */
	flags = cqe->fast_path_regular.pars_flags.flags;
	parse_flag = le16_to_cpu(flags);

	csum_flag = qede_check_csum(parse_flag);
	if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
			rxq->rx_ip_frags++;
		} else {
			DP_NOTICE(edev,
				  "CQE has error, flags = %x, dropping incoming packet\n",
				  parse_flag);
			rxq->rx_hw_errors++;
			qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
			return 0;
		}
	}

	/* Basic validation passed; Need to prepare an SKB. This would also
	 * guarantee to finally consume the first BD upon success.
	 */
	skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
	if (!skb) {
		rxq->rx_alloc_errors++;
		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
		return 0;
	}

	/* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed
	 * by a single cqe.
	 */
	if (fp_cqe->bd_num > 1) {
		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
							 fp_cqe, len);

		if (unlikely(unmapped_frags > 0)) {
			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
			dev_kfree_skb_any(skb);
			return 0;
		}
	}

	/* The SKB contains all the data. Now prepare meta-magic */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
	qede_set_skb_csum(skb, csum_flag);
	skb_record_rx_queue(skb, rxq->rxq_id);
	qede_ptp_record_rx_ts(edev, cqe, skb);

	/* SKB is prepared - pass it to stack */
	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));

	return 1;
}

static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_dev *edev = fp->edev;
	u16 hw_comp_cons, sw_comp_cons;
	int work_done = 0;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
		qede_rx_process_cqe(edev, fp, rxq);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		work_done++;
	}

	/* Allocate replacement buffers */
	while (rxq->num_rx_buffers - rxq->filled_buffers)
		if (qede_alloc_rx_buffer(rxq, false))
			break;

	/* Update producers */
	qede_update_rx_prod(edev, rxq);

	return work_done;
}

static bool qede_poll_is_more_work(struct qede_fastpath *fp)
{
	qed_sb_update_sb_idx(fp->sb_info);

	/* *_has_*_work() reads the status block, thus we need to ensure that
	 * status block indices have been actually read (qed_sb_update_sb_idx)
	 * prior to this check (*_has_*_work) so that we won't write the
	 * "newer" value of the status block to HW (if there was a DMA right
	 * after qede_has_rx_work and if there is no rmb, the memory reading
	 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
	 * In this case there will never be another interrupt until there is
	 * another update of the status block, while there is still unhandled
	 * work.
	 */
	rmb();

	if (likely(fp->type & QEDE_FASTPATH_RX))
		if (qede_has_rx_work(fp->rxq))
			return true;

	if (fp->type & QEDE_FASTPATH_XDP)
		if (qede_txq_has_work(fp->xdp_tx))
			return true;

	if (likely(fp->type & QEDE_FASTPATH_TX))
		if (qede_txq_has_work(fp->txq))
			return true;

	return false;
}

/*********************
 * NDO & API related *
 *********************/
int qede_poll(struct napi_struct *napi, int budget)
{
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						napi);
	struct qede_dev *edev = fp->edev;
	int rx_work_done = 0;

	if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
		qede_tx_int(edev, fp->txq);

	if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
		qede_xdp_tx_int(edev, fp->xdp_tx);

	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
			qede_has_rx_work(fp->rxq)) ?
			qede_rx_int(fp, budget) : 0;
	if (rx_work_done < budget) {
		if (!qede_poll_is_more_work(fp)) {
			napi_complete_done(napi, rx_work_done);

			/* Update and reenable interrupts */
			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
		} else {
			rx_work_done = budget;
		}
	}

	if (fp->xdp_xmit) {
		u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

		fp->xdp_xmit = 0;
		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(fp->xdp_tx);
	}

	return rx_work_done;
}

irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}

/* Main transmit function */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
	txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(skb, xmit_type)) {
		if (skb_linearize(skb)) {
			DP_NOTICE(edev,
				  "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		qede_ptp_tx_ts(edev, skb);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(txq->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			first_bd->data.bitfields |=
				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
		}

		/* Legacy FW had flipped behavior in regard to this bit -
		 * I.e., needed to set to prevent FW from touching encapsulated
		 * packets when it didn't need to.
		 */
		if (unlikely(txq->is_legacy))
			first_bd->data.bitfields ^=
				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;

			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;

				first_bd->data.bd_flags.bitfields |= 1 << tmp;
			}
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
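		 * The first BD is then trimmed to carry only the headers and
		 * the remaining linear payload moves to the next data BD.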
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	} else {
		first_bd->data.bitfields |=
			(skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
	}

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */
	txq->sw_tx_prod++;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		if (skb->xmit_more)
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);
		txq->stopped_cnt++;
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
		     (MAX_SKB_FRAGS + 1)) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48

netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	if (skb->encapsulation) {
		u8 l4_proto = 0;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features;
		}

		/* Disable offloads for geneve tunnels, as HW can't parse
		 * the geneve header which has option length greater than 32B.
		 */
		if ((l4_proto == IPPROTO_UDP) &&
		    ((skb_inner_mac_header(skb) -
		      skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
			return features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_GSO_MASK);
	}

	return features;
}