1 /* 2 * This file is part of the Chelsio T4 Ethernet driver for Linux. 3 * 4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/skbuff.h> 36 #include <linux/netdevice.h> 37 #include <linux/etherdevice.h> 38 #include <linux/if_vlan.h> 39 #include <linux/ip.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/jiffies.h> 42 #include <linux/prefetch.h> 43 #include <linux/export.h> 44 #include <net/xfrm.h> 45 #include <net/ipv6.h> 46 #include <net/tcp.h> 47 #include <net/busy_poll.h> 48 #ifdef CONFIG_CHELSIO_T4_FCOE 49 #include <scsi/fc/fc_fcoe.h> 50 #endif /* CONFIG_CHELSIO_T4_FCOE */ 51 #include "cxgb4.h" 52 #include "t4_regs.h" 53 #include "t4_values.h" 54 #include "t4_msg.h" 55 #include "t4fw_api.h" 56 #include "cxgb4_ptp.h" 57 #include "cxgb4_uld.h" 58 59 /* 60 * Rx buffer size. We use largish buffers if possible but settle for single 61 * pages under memory shortage. 62 */ 63 #if PAGE_SHIFT >= 16 64 # define FL_PG_ORDER 0 65 #else 66 # define FL_PG_ORDER (16 - PAGE_SHIFT) 67 #endif 68 69 /* RX_PULL_LEN should be <= RX_COPY_THRES */ 70 #define RX_COPY_THRES 256 71 #define RX_PULL_LEN 128 72 73 /* 74 * Main body length for sk_buffs used for Rx Ethernet packets with fragments. 75 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. 76 */ 77 #define RX_PKT_SKB_LEN 512 78 79 /* 80 * Max number of Tx descriptors we clean up at a time. Should be modest as 81 * freeing skbs isn't cheap and it happens while holding locks. We just need 82 * to free packets faster than they arrive, we eventually catch up and keep 83 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. 84 */ 85 #define MAX_TX_RECLAIM 16 86 87 /* 88 * Max number of Rx buffers we replenish at a time. Again keep this modest, 89 * allocating buffers isn't cheap either. 90 */ 91 #define MAX_RX_REFILL 16U 92 93 /* 94 * Period of the Rx queue check timer. This timer is infrequent as it has 95 * something to do only when the system experiences severe memory shortage. 96 */ 97 #define RX_QCHECK_PERIOD (HZ / 2) 98 99 /* 100 * Period of the Tx queue check timer. 
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues. We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array. We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings. All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File. We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK 1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list. The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);

#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adap: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers. Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {		/* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ?
hw_cidx + q->size : hw_cidx; 401 } 402 403 /** 404 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors 405 * @adap: the adapter 406 * @q: the Tx queue to reclaim completed descriptors from 407 * @unmap: whether the buffers should be unmapped for DMA 408 * 409 * Reclaims Tx descriptors that the SGE has indicated it has processed, 410 * and frees the associated buffers if possible. Called with the Tx 411 * queue locked. 412 */ 413 inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, 414 bool unmap) 415 { 416 int avail = reclaimable(q); 417 418 if (avail) { 419 /* 420 * Limit the amount of clean up work we do at a time to keep 421 * the Tx lock hold time O(1). 422 */ 423 if (avail > MAX_TX_RECLAIM) 424 avail = MAX_TX_RECLAIM; 425 426 free_tx_desc(adap, q, avail, unmap); 427 q->in_use -= avail; 428 } 429 } 430 EXPORT_SYMBOL(cxgb4_reclaim_completed_tx); 431 432 static inline int get_buf_size(struct adapter *adapter, 433 const struct rx_sw_desc *d) 434 { 435 struct sge *s = &adapter->sge; 436 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; 437 int buf_size; 438 439 switch (rx_buf_size_idx) { 440 case RX_SMALL_PG_BUF: 441 buf_size = PAGE_SIZE; 442 break; 443 444 case RX_LARGE_PG_BUF: 445 buf_size = PAGE_SIZE << s->fl_pg_order; 446 break; 447 448 case RX_SMALL_MTU_BUF: 449 buf_size = FL_MTU_SMALL_BUFSIZE(adapter); 450 break; 451 452 case RX_LARGE_MTU_BUF: 453 buf_size = FL_MTU_LARGE_BUFSIZE(adapter); 454 break; 455 456 default: 457 BUG_ON(1); 458 } 459 460 return buf_size; 461 } 462 463 /** 464 * free_rx_bufs - free the Rx buffers on an SGE free list 465 * @adap: the adapter 466 * @q: the SGE free list to free buffers from 467 * @n: how many buffers to free 468 * 469 * Release the next @n buffers on an SGE free-buffer Rx queue. The 470 * buffers must be made inaccessible to HW before calling this function. 471 */ 472 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) 473 { 474 while (n--) { 475 struct rx_sw_desc *d = &q->sdesc[q->cidx]; 476 477 if (is_buf_mapped(d)) 478 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 479 get_buf_size(adap, d), 480 PCI_DMA_FROMDEVICE); 481 put_page(d->page); 482 d->page = NULL; 483 if (++q->cidx == q->size) 484 q->cidx = 0; 485 q->avail--; 486 } 487 } 488 489 /** 490 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list 491 * @adap: the adapter 492 * @q: the SGE free list 493 * 494 * Unmap the current buffer on an SGE free-buffer Rx queue. The 495 * buffer must be made inaccessible to HW before calling this function. 496 * 497 * This is similar to @free_rx_bufs above but does not free the buffer. 498 * Do note that the FL still loses any further access to the buffer. 499 */ 500 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) 501 { 502 struct rx_sw_desc *d = &q->sdesc[q->cidx]; 503 504 if (is_buf_mapped(d)) 505 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 506 get_buf_size(adap, d), PCI_DMA_FROMDEVICE); 507 d->page = NULL; 508 if (++q->cidx == q->size) 509 q->cidx = 0; 510 q->avail--; 511 } 512 513 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) 514 { 515 if (q->pend_cred >= 8) { 516 u32 val = adap->params.arch.sge_fl_db; 517 518 if (is_t4(adap->params.chip)) 519 val |= PIDX_V(q->pend_cred / 8); 520 else 521 val |= PIDX_T5_V(q->pend_cred / 8); 522 523 /* Make sure all memory writes to the Free List queue are 524 * committed before we tell the hardware about them. 
525 */ 526 wmb(); 527 528 /* If we don't have access to the new User Doorbell (T5+), use 529 * the old doorbell mechanism; otherwise use the new BAR2 530 * mechanism. 531 */ 532 if (unlikely(q->bar2_addr == NULL)) { 533 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), 534 val | QID_V(q->cntxt_id)); 535 } else { 536 writel(val | QID_V(q->bar2_qid), 537 q->bar2_addr + SGE_UDB_KDOORBELL); 538 539 /* This Write memory Barrier will force the write to 540 * the User Doorbell area to be flushed. 541 */ 542 wmb(); 543 } 544 q->pend_cred &= 7; 545 } 546 } 547 548 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, 549 dma_addr_t mapping) 550 { 551 sd->page = pg; 552 sd->dma_addr = mapping; /* includes size low bits */ 553 } 554 555 /** 556 * refill_fl - refill an SGE Rx buffer ring 557 * @adap: the adapter 558 * @q: the ring to refill 559 * @n: the number of new buffers to allocate 560 * @gfp: the gfp flags for the allocations 561 * 562 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, 563 * allocated with the supplied gfp flags. The caller must assure that 564 * @n does not exceed the queue's capacity. If afterwards the queue is 565 * found critically low mark it as starving in the bitmap of starving FLs. 566 * 567 * Returns the number of buffers allocated. 568 */ 569 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, 570 gfp_t gfp) 571 { 572 struct sge *s = &adap->sge; 573 struct page *pg; 574 dma_addr_t mapping; 575 unsigned int cred = q->avail; 576 __be64 *d = &q->desc[q->pidx]; 577 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 578 int node; 579 580 #ifdef CONFIG_DEBUG_FS 581 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) 582 goto out; 583 #endif 584 585 gfp |= __GFP_NOWARN; 586 node = dev_to_node(adap->pdev_dev); 587 588 if (s->fl_pg_order == 0) 589 goto alloc_small_pages; 590 591 /* 592 * Prefer large buffers 593 */ 594 while (n) { 595 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); 596 if (unlikely(!pg)) { 597 q->large_alloc_failed++; 598 break; /* fall back to single pages */ 599 } 600 601 mapping = dma_map_page(adap->pdev_dev, pg, 0, 602 PAGE_SIZE << s->fl_pg_order, 603 PCI_DMA_FROMDEVICE); 604 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { 605 __free_pages(pg, s->fl_pg_order); 606 q->mapping_err++; 607 goto out; /* do not try small pages for this error */ 608 } 609 mapping |= RX_LARGE_PG_BUF; 610 *d++ = cpu_to_be64(mapping); 611 612 set_rx_sw_desc(sd, pg, mapping); 613 sd++; 614 615 q->avail++; 616 if (++q->pidx == q->size) { 617 q->pidx = 0; 618 sd = q->sdesc; 619 d = q->desc; 620 } 621 n--; 622 } 623 624 alloc_small_pages: 625 while (n--) { 626 pg = alloc_pages_node(node, gfp, 0); 627 if (unlikely(!pg)) { 628 q->alloc_failed++; 629 break; 630 } 631 632 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, 633 PCI_DMA_FROMDEVICE); 634 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { 635 put_page(pg); 636 q->mapping_err++; 637 goto out; 638 } 639 *d++ = cpu_to_be64(mapping); 640 641 set_rx_sw_desc(sd, pg, mapping); 642 sd++; 643 644 q->avail++; 645 if (++q->pidx == q->size) { 646 q->pidx = 0; 647 sd = q->sdesc; 648 d = q->desc; 649 } 650 } 651 652 out: cred = q->avail - cred; 653 q->pend_cred += cred; 654 ring_fl_db(adap, q); 655 656 if (unlikely(fl_starving(adap, q))) { 657 smp_wmb(); 658 q->low++; 659 set_bit(q->cntxt_id - adap->sge.egr_start, 660 adap->sge.starving_fl); 661 } 662 663 return cred; 664 } 665 666 static inline void __refill_fl(struct 
adapter *adap, struct sge_fl *fl) 667 { 668 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), 669 GFP_ATOMIC); 670 } 671 672 /** 673 * alloc_ring - allocate resources for an SGE descriptor ring 674 * @dev: the PCI device's core device 675 * @nelem: the number of descriptors 676 * @elem_size: the size of each descriptor 677 * @sw_size: the size of the SW state associated with each ring element 678 * @phys: the physical address of the allocated ring 679 * @metadata: address of the array holding the SW state for the ring 680 * @stat_size: extra space in HW ring for status information 681 * @node: preferred node for memory allocations 682 * 683 * Allocates resources for an SGE descriptor ring, such as Tx queues, 684 * free buffer lists, or response queues. Each SGE ring requires 685 * space for its HW descriptors plus, optionally, space for the SW state 686 * associated with each HW entry (the metadata). The function returns 687 * three values: the virtual address for the HW ring (the return value 688 * of the function), the bus address of the HW ring, and the address 689 * of the SW ring. 690 */ 691 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, 692 size_t sw_size, dma_addr_t *phys, void *metadata, 693 size_t stat_size, int node) 694 { 695 size_t len = nelem * elem_size + stat_size; 696 void *s = NULL; 697 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); 698 699 if (!p) 700 return NULL; 701 if (sw_size) { 702 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node); 703 704 if (!s) { 705 dma_free_coherent(dev, len, p, *phys); 706 return NULL; 707 } 708 } 709 if (metadata) 710 *(void **)metadata = s; 711 memset(p, 0, len); 712 return p; 713 } 714 715 /** 716 * sgl_len - calculates the size of an SGL of the given capacity 717 * @n: the number of SGL entries 718 * 719 * Calculates the number of flits needed for a scatter/gather list that 720 * can hold the given number of entries. 721 */ 722 static inline unsigned int sgl_len(unsigned int n) 723 { 724 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA 725 * addresses. The DSGL Work Request starts off with a 32-bit DSGL 726 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, 727 * repeated sequences of { Length[i], Length[i+1], Address[i], 728 * Address[i+1] } (this ensures that all addresses are on 64-bit 729 * boundaries). If N is even, then Length[N+1] should be set to 0 and 730 * Address[N+1] is omitted. 731 * 732 * The following calculation incorporates all of the above. It's 733 * somewhat hard to follow but, briefly: the "+2" accounts for the 734 * first two flits which include the DSGL header, Length0 and 735 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 736 * flits for every pair of the remaining N) +1 if (n-1) is odd; and 737 * finally the "+((n-1)&1)" adds the one remaining flit needed if 738 * (n-1) is odd ... 739 */ 740 n--; 741 return (3 * n) / 2 + (n & 1) + 2; 742 } 743 744 /** 745 * flits_to_desc - returns the num of Tx descriptors for the given flits 746 * @n: the number of flits 747 * 748 * Returns the number of Tx descriptors needed for the supplied number 749 * of flits. 750 */ 751 static inline unsigned int flits_to_desc(unsigned int n) 752 { 753 BUG_ON(n > SGE_MAX_WR_LEN / 8); 754 return DIV_ROUND_UP(n, 8); 755 } 756 757 /** 758 * is_eth_imm - can an Ethernet packet be sent as immediate data? 759 * @skb: the packet 760 * 761 * Returns whether an Ethernet packet is small enough to fit as 762 * immediate data. 
Return value corresponds to headroom required. 763 */ 764 static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) 765 { 766 int hdrlen = 0; 767 768 if (skb->encapsulation && skb_shinfo(skb)->gso_size && 769 chip_ver > CHELSIO_T5) { 770 hdrlen = sizeof(struct cpl_tx_tnl_lso); 771 hdrlen += sizeof(struct cpl_tx_pkt_core); 772 } else { 773 hdrlen = skb_shinfo(skb)->gso_size ? 774 sizeof(struct cpl_tx_pkt_lso_core) : 0; 775 hdrlen += sizeof(struct cpl_tx_pkt); 776 } 777 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) 778 return hdrlen; 779 return 0; 780 } 781 782 /** 783 * calc_tx_flits - calculate the number of flits for a packet Tx WR 784 * @skb: the packet 785 * 786 * Returns the number of flits needed for a Tx WR for the given Ethernet 787 * packet, including the needed WR and CPL headers. 788 */ 789 static inline unsigned int calc_tx_flits(const struct sk_buff *skb, 790 unsigned int chip_ver) 791 { 792 unsigned int flits; 793 int hdrlen = is_eth_imm(skb, chip_ver); 794 795 /* If the skb is small enough, we can pump it out as a work request 796 * with only immediate data. In that case we just have to have the 797 * TX Packet header plus the skb data in the Work Request. 798 */ 799 800 if (hdrlen) 801 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); 802 803 /* Otherwise, we're going to have to construct a Scatter gather list 804 * of the skb body and fragments. We also include the flits necessary 805 * for the TX Packet Work Request and CPL. We always have a firmware 806 * Write Header (incorporated as part of the cpl_tx_pkt_lso and 807 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL 808 * message or, if we're doing a Large Send Offload, an LSO CPL message 809 * with an embedded TX Packet Write CPL message. 810 */ 811 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); 812 if (skb_shinfo(skb)->gso_size) { 813 if (skb->encapsulation && chip_ver > CHELSIO_T5) 814 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + 815 sizeof(struct cpl_tx_tnl_lso); 816 else 817 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + 818 sizeof(struct cpl_tx_pkt_lso_core); 819 820 hdrlen += sizeof(struct cpl_tx_pkt_core); 821 flits += (hdrlen / sizeof(__be64)); 822 } else { 823 flits += (sizeof(struct fw_eth_tx_pkt_wr) + 824 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); 825 } 826 return flits; 827 } 828 829 /** 830 * calc_tx_descs - calculate the number of Tx descriptors for a packet 831 * @skb: the packet 832 * 833 * Returns the number of Tx descriptors needed for the given Ethernet 834 * packet, including the needed WR and CPL headers. 835 */ 836 static inline unsigned int calc_tx_descs(const struct sk_buff *skb, 837 unsigned int chip_ver) 838 { 839 return flits_to_desc(calc_tx_flits(skb, chip_ver)); 840 } 841 842 /** 843 * cxgb4_write_sgl - populate a scatter/gather list for a packet 844 * @skb: the packet 845 * @q: the Tx queue we are writing into 846 * @sgl: starting location for writing the SGL 847 * @end: points right after the end of the SGL 848 * @start: start offset into skb main-body data to include in the SGL 849 * @addr: the list of bus addresses for the SGL elements 850 * 851 * Generates a gather list for the buffers that make up a packet. 852 * The caller must provide adequate space for the SGL that will be written. 853 * The SGL includes all of the packet's page fragments and the data in its 854 * main body except for the first @start bytes. @sgl must be 16-byte 855 * aligned and within a Tx descriptor with available space. 
@end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL. For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)		/* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);

/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space. For a coalesced WR the SGE fetches data from the FIFO
 * instead of from the host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
955 */ 956 spin_lock_irqsave(&q->db_lock, flags); 957 if (!q->db_disabled) 958 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), 959 QID_V(q->cntxt_id) | val); 960 else 961 q->db_pidx_inc += n; 962 q->db_pidx = q->pidx; 963 spin_unlock_irqrestore(&q->db_lock, flags); 964 } else { 965 u32 val = PIDX_T5_V(n); 966 967 /* T4 and later chips share the same PIDX field offset within 968 * the doorbell, but T5 and later shrank the field in order to 969 * gain a bit for Doorbell Priority. The field was absurdly 970 * large in the first place (14 bits) so we just use the T5 971 * and later limits and warn if a Queue ID is too large. 972 */ 973 WARN_ON(val & DBPRIO_F); 974 975 /* If we're only writing a single TX Descriptor and we can use 976 * Inferred QID registers, we can use the Write Combining 977 * Gather Buffer; otherwise we use the simple doorbell. 978 */ 979 if (n == 1 && q->bar2_qid == 0) { 980 int index = (q->pidx 981 ? (q->pidx - 1) 982 : (q->size - 1)); 983 u64 *wr = (u64 *)&q->desc[index]; 984 985 cxgb_pio_copy((u64 __iomem *) 986 (q->bar2_addr + SGE_UDB_WCDOORBELL), 987 wr); 988 } else { 989 writel(val | QID_V(q->bar2_qid), 990 q->bar2_addr + SGE_UDB_KDOORBELL); 991 } 992 993 /* This Write Memory Barrier will force the write to the User 994 * Doorbell area to be flushed. This is needed to prevent 995 * writes on different CPUs for the same queue from hitting 996 * the adapter out of order. This is required when some Work 997 * Requests take the Write Combine Gather Buffer path (user 998 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some 999 * take the traditional path where we simply increment the 1000 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the 1001 * hardware DMA read the actual Work Request. 1002 */ 1003 wmb(); 1004 } 1005 } 1006 EXPORT_SYMBOL(cxgb4_ring_tx_db); 1007 1008 /** 1009 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors 1010 * @skb: the packet 1011 * @q: the Tx queue where the packet will be inlined 1012 * @pos: starting position in the Tx queue where to inline the packet 1013 * 1014 * Inline a packet's contents directly into Tx descriptors, starting at 1015 * the given position within the Tx DMA ring. 1016 * Most of the complexity of this operation is dealing with wrap arounds 1017 * in the middle of the packet we want to inline. 
1018 */ 1019 void cxgb4_inline_tx_skb(const struct sk_buff *skb, 1020 const struct sge_txq *q, void *pos) 1021 { 1022 int left = (void *)q->stat - pos; 1023 u64 *p; 1024 1025 if (likely(skb->len <= left)) { 1026 if (likely(!skb->data_len)) 1027 skb_copy_from_linear_data(skb, pos, skb->len); 1028 else 1029 skb_copy_bits(skb, 0, pos, skb->len); 1030 pos += skb->len; 1031 } else { 1032 skb_copy_bits(skb, 0, pos, left); 1033 skb_copy_bits(skb, left, q->desc, skb->len - left); 1034 pos = (void *)q->desc + (skb->len - left); 1035 } 1036 1037 /* 0-pad to multiple of 16 */ 1038 p = PTR_ALIGN(pos, 8); 1039 if ((uintptr_t)p & 8) 1040 *p = 0; 1041 } 1042 EXPORT_SYMBOL(cxgb4_inline_tx_skb); 1043 1044 static void *inline_tx_skb_header(const struct sk_buff *skb, 1045 const struct sge_txq *q, void *pos, 1046 int length) 1047 { 1048 u64 *p; 1049 int left = (void *)q->stat - pos; 1050 1051 if (likely(length <= left)) { 1052 memcpy(pos, skb->data, length); 1053 pos += length; 1054 } else { 1055 memcpy(pos, skb->data, left); 1056 memcpy(q->desc, skb->data + left, length - left); 1057 pos = (void *)q->desc + (length - left); 1058 } 1059 /* 0-pad to multiple of 16 */ 1060 p = PTR_ALIGN(pos, 8); 1061 if ((uintptr_t)p & 8) { 1062 *p = 0; 1063 return p + 1; 1064 } 1065 return p; 1066 } 1067 1068 /* 1069 * Figure out what HW csum a packet wants and return the appropriate control 1070 * bits. 1071 */ 1072 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) 1073 { 1074 int csum_type; 1075 bool inner_hdr_csum = false; 1076 u16 proto, ver; 1077 1078 if (skb->encapsulation && 1079 (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)) 1080 inner_hdr_csum = true; 1081 1082 if (inner_hdr_csum) { 1083 ver = inner_ip_hdr(skb)->version; 1084 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : 1085 inner_ipv6_hdr(skb)->nexthdr; 1086 } else { 1087 ver = ip_hdr(skb)->version; 1088 proto = (ver == 4) ? ip_hdr(skb)->protocol : 1089 ipv6_hdr(skb)->nexthdr; 1090 } 1091 1092 if (ver == 4) { 1093 if (proto == IPPROTO_TCP) 1094 csum_type = TX_CSUM_TCPIP; 1095 else if (proto == IPPROTO_UDP) 1096 csum_type = TX_CSUM_UDPIP; 1097 else { 1098 nocsum: /* 1099 * unknown protocol, disable HW csum 1100 * and hope a bad packet is detected 1101 */ 1102 return TXPKT_L4CSUM_DIS_F; 1103 } 1104 } else { 1105 /* 1106 * this doesn't work with extension headers 1107 */ 1108 if (proto == IPPROTO_TCP) 1109 csum_type = TX_CSUM_TCPIP6; 1110 else if (proto == IPPROTO_UDP) 1111 csum_type = TX_CSUM_UDPIP6; 1112 else 1113 goto nocsum; 1114 } 1115 1116 if (likely(csum_type >= TX_CSUM_TCPIP)) { 1117 int eth_hdr_len, l4_len; 1118 u64 hdr_len; 1119 1120 if (inner_hdr_csum) { 1121 /* This allows checksum offload for all encapsulated 1122 * packets like GRE etc.. 
1123 */ 1124 l4_len = skb_inner_network_header_len(skb); 1125 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; 1126 } else { 1127 l4_len = skb_network_header_len(skb); 1128 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; 1129 } 1130 hdr_len = TXPKT_IPHDR_LEN_V(l4_len); 1131 1132 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) 1133 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); 1134 else 1135 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); 1136 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; 1137 } else { 1138 int start = skb_transport_offset(skb); 1139 1140 return TXPKT_CSUM_TYPE_V(csum_type) | 1141 TXPKT_CSUM_START_V(start) | 1142 TXPKT_CSUM_LOC_V(start + skb->csum_offset); 1143 } 1144 } 1145 1146 static void eth_txq_stop(struct sge_eth_txq *q) 1147 { 1148 netif_tx_stop_queue(q->txq); 1149 q->q.stops++; 1150 } 1151 1152 static inline void txq_advance(struct sge_txq *q, unsigned int n) 1153 { 1154 q->in_use += n; 1155 q->pidx += n; 1156 if (q->pidx >= q->size) 1157 q->pidx -= q->size; 1158 } 1159 1160 #ifdef CONFIG_CHELSIO_T4_FCOE 1161 static inline int 1162 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, 1163 const struct port_info *pi, u64 *cntrl) 1164 { 1165 const struct cxgb_fcoe *fcoe = &pi->fcoe; 1166 1167 if (!(fcoe->flags & CXGB_FCOE_ENABLED)) 1168 return 0; 1169 1170 if (skb->protocol != htons(ETH_P_FCOE)) 1171 return 0; 1172 1173 skb_reset_mac_header(skb); 1174 skb->mac_len = sizeof(struct ethhdr); 1175 1176 skb_set_network_header(skb, skb->mac_len); 1177 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); 1178 1179 if (!cxgb_fcoe_sof_eof_supported(adap, skb)) 1180 return -ENOTSUPP; 1181 1182 /* FC CRC offload */ 1183 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) | 1184 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F | 1185 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) | 1186 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) | 1187 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END); 1188 return 0; 1189 } 1190 #endif /* CONFIG_CHELSIO_T4_FCOE */ 1191 1192 /* Returns tunnel type if hardware supports offloading of the same. 1193 * It is called only for T5 and onwards. 
1194 */ 1195 enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb) 1196 { 1197 u8 l4_hdr = 0; 1198 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; 1199 struct port_info *pi = netdev_priv(skb->dev); 1200 struct adapter *adapter = pi->adapter; 1201 1202 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 1203 skb->inner_protocol != htons(ETH_P_TEB)) 1204 return tnl_type; 1205 1206 switch (vlan_get_protocol(skb)) { 1207 case htons(ETH_P_IP): 1208 l4_hdr = ip_hdr(skb)->protocol; 1209 break; 1210 case htons(ETH_P_IPV6): 1211 l4_hdr = ipv6_hdr(skb)->nexthdr; 1212 break; 1213 default: 1214 return tnl_type; 1215 } 1216 1217 switch (l4_hdr) { 1218 case IPPROTO_UDP: 1219 if (adapter->vxlan_port == udp_hdr(skb)->dest) 1220 tnl_type = TX_TNL_TYPE_VXLAN; 1221 else if (adapter->geneve_port == udp_hdr(skb)->dest) 1222 tnl_type = TX_TNL_TYPE_GENEVE; 1223 break; 1224 default: 1225 return tnl_type; 1226 } 1227 1228 return tnl_type; 1229 } 1230 1231 static inline void t6_fill_tnl_lso(struct sk_buff *skb, 1232 struct cpl_tx_tnl_lso *tnl_lso, 1233 enum cpl_tx_tnl_lso_type tnl_type) 1234 { 1235 u32 val; 1236 int in_eth_xtra_len; 1237 int l3hdr_len = skb_network_header_len(skb); 1238 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1239 const struct skb_shared_info *ssi = skb_shinfo(skb); 1240 bool v6 = (ip_hdr(skb)->version == 6); 1241 1242 val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) | 1243 CPL_TX_TNL_LSO_FIRST_F | 1244 CPL_TX_TNL_LSO_LAST_F | 1245 (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) | 1246 CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) | 1247 CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) | 1248 (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) | 1249 CPL_TX_TNL_LSO_IPLENSETOUT_F | 1250 (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F); 1251 tnl_lso->op_to_IpIdSplitOut = htonl(val); 1252 1253 tnl_lso->IpIdOffsetOut = 0; 1254 1255 /* Get the tunnel header length */ 1256 val = skb_inner_mac_header(skb) - skb_mac_header(skb); 1257 in_eth_xtra_len = skb_inner_network_header(skb) - 1258 skb_inner_mac_header(skb) - ETH_HLEN; 1259 1260 switch (tnl_type) { 1261 case TX_TNL_TYPE_VXLAN: 1262 case TX_TNL_TYPE_GENEVE: 1263 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 1264 htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F | 1265 CPL_TX_TNL_LSO_UDPLENSETOUT_F); 1266 break; 1267 default: 1268 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; 1269 break; 1270 } 1271 1272 tnl_lso->UdpLenSetOut_to_TnlHdrLen |= 1273 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) | 1274 CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type)); 1275 1276 tnl_lso->r1 = 0; 1277 1278 val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) | 1279 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | 1280 CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) | 1281 CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4); 1282 tnl_lso->Flow_to_TcpHdrLen = htonl(val); 1283 1284 tnl_lso->IpIdOffset = htons(0); 1285 1286 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); 1287 tnl_lso->TCPSeqOffset = htonl(0); 1288 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); 1289 } 1290 1291 /** 1292 * t4_eth_xmit - add a packet to an Ethernet Tx queue 1293 * @skb: the packet 1294 * @dev: the egress net device 1295 * 1296 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. 
1297 */ 1298 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1299 { 1300 u32 wr_mid, ctrl0, op; 1301 u64 cntrl, *end, *sgl; 1302 int qidx, credits; 1303 unsigned int flits, ndesc; 1304 struct adapter *adap; 1305 struct sge_eth_txq *q; 1306 const struct port_info *pi; 1307 struct fw_eth_tx_pkt_wr *wr; 1308 struct cpl_tx_pkt_core *cpl; 1309 const struct skb_shared_info *ssi; 1310 dma_addr_t addr[MAX_SKB_FRAGS + 1]; 1311 bool immediate = false; 1312 int len, max_pkt_len; 1313 bool ptp_enabled = is_ptp_enabled(skb, dev); 1314 unsigned int chip_ver; 1315 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; 1316 1317 #ifdef CONFIG_CHELSIO_T4_FCOE 1318 int err; 1319 #endif /* CONFIG_CHELSIO_T4_FCOE */ 1320 1321 /* 1322 * The chip min packet length is 10 octets but play safe and reject 1323 * anything shorter than an Ethernet header. 1324 */ 1325 if (unlikely(skb->len < ETH_HLEN)) { 1326 out_free: dev_kfree_skb_any(skb); 1327 return NETDEV_TX_OK; 1328 } 1329 1330 /* Discard the packet if the length is greater than mtu */ 1331 max_pkt_len = ETH_HLEN + dev->mtu; 1332 if (skb_vlan_tagged(skb)) 1333 max_pkt_len += VLAN_HLEN; 1334 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) 1335 goto out_free; 1336 1337 pi = netdev_priv(dev); 1338 adap = pi->adapter; 1339 ssi = skb_shinfo(skb); 1340 #ifdef CONFIG_CHELSIO_IPSEC_INLINE 1341 if (xfrm_offload(skb) && !ssi->gso_size) 1342 return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev); 1343 #endif /* CHELSIO_IPSEC_INLINE */ 1344 1345 qidx = skb_get_queue_mapping(skb); 1346 if (ptp_enabled) { 1347 spin_lock(&adap->ptp_lock); 1348 if (!(adap->ptp_tx_skb)) { 1349 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1350 adap->ptp_tx_skb = skb_get(skb); 1351 } else { 1352 spin_unlock(&adap->ptp_lock); 1353 goto out_free; 1354 } 1355 q = &adap->sge.ptptxq; 1356 } else { 1357 q = &adap->sge.ethtxq[qidx + pi->first_qset]; 1358 } 1359 skb_tx_timestamp(skb); 1360 1361 cxgb4_reclaim_completed_tx(adap, &q->q, true); 1362 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 1363 1364 #ifdef CONFIG_CHELSIO_T4_FCOE 1365 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); 1366 if (unlikely(err == -ENOTSUPP)) { 1367 if (ptp_enabled) 1368 spin_unlock(&adap->ptp_lock); 1369 goto out_free; 1370 } 1371 #endif /* CONFIG_CHELSIO_T4_FCOE */ 1372 1373 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); 1374 flits = calc_tx_flits(skb, chip_ver); 1375 ndesc = flits_to_desc(flits); 1376 credits = txq_avail(&q->q) - ndesc; 1377 1378 if (unlikely(credits < 0)) { 1379 eth_txq_stop(q); 1380 dev_err(adap->pdev_dev, 1381 "%s: Tx ring %u full while queue awake!\n", 1382 dev->name, qidx); 1383 if (ptp_enabled) 1384 spin_unlock(&adap->ptp_lock); 1385 return NETDEV_TX_BUSY; 1386 } 1387 1388 if (is_eth_imm(skb, chip_ver)) 1389 immediate = true; 1390 1391 if (skb->encapsulation && chip_ver > CHELSIO_T5) 1392 tnl_type = cxgb_encap_offload_supported(skb); 1393 1394 if (!immediate && 1395 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { 1396 q->mapping_err++; 1397 if (ptp_enabled) 1398 spin_unlock(&adap->ptp_lock); 1399 goto out_free; 1400 } 1401 1402 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); 1403 if (unlikely(credits < ETHTXQ_STOP_THRES)) { 1404 eth_txq_stop(q); 1405 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; 1406 } 1407 1408 wr = (void *)&q->q.desc[q->q.pidx]; 1409 wr->equiq_to_len16 = htonl(wr_mid); 1410 wr->r3 = cpu_to_be64(0); 1411 end = (u64 *)wr + flits; 1412 1413 len = immediate ? 
skb->len : 0; 1414 if (ssi->gso_size) { 1415 struct cpl_tx_pkt_lso *lso = (void *)wr; 1416 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; 1417 int l3hdr_len = skb_network_header_len(skb); 1418 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1419 struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); 1420 1421 if (tnl_type) 1422 len += sizeof(*tnl_lso); 1423 else 1424 len += sizeof(*lso); 1425 1426 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | 1427 FW_WR_IMMDLEN_V(len)); 1428 if (tnl_type) { 1429 struct iphdr *iph = ip_hdr(skb); 1430 1431 t6_fill_tnl_lso(skb, tnl_lso, tnl_type); 1432 cpl = (void *)(tnl_lso + 1); 1433 /* Driver is expected to compute partial checksum that 1434 * does not include the IP Total Length. 1435 */ 1436 if (iph->version == 4) { 1437 iph->check = 0; 1438 iph->tot_len = 0; 1439 iph->check = (u16)(~ip_fast_csum((u8 *)iph, 1440 iph->ihl)); 1441 } 1442 if (skb->ip_summed == CHECKSUM_PARTIAL) 1443 cntrl = hwcsum(adap->params.chip, skb); 1444 } else { 1445 lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | 1446 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | 1447 LSO_IPV6_V(v6) | 1448 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | 1449 LSO_IPHDR_LEN_V(l3hdr_len / 4) | 1450 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); 1451 lso->c.ipid_ofst = htons(0); 1452 lso->c.mss = htons(ssi->gso_size); 1453 lso->c.seqno_offset = htonl(0); 1454 if (is_t4(adap->params.chip)) 1455 lso->c.len = htonl(skb->len); 1456 else 1457 lso->c.len = 1458 htonl(LSO_T5_XFER_SIZE_V(skb->len)); 1459 cpl = (void *)(lso + 1); 1460 1461 if (CHELSIO_CHIP_VERSION(adap->params.chip) 1462 <= CHELSIO_T5) 1463 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1464 else 1465 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); 1466 1467 cntrl |= TXPKT_CSUM_TYPE_V(v6 ? 1468 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 1469 TXPKT_IPHDR_LEN_V(l3hdr_len); 1470 } 1471 sgl = (u64 *)(cpl + 1); /* sgl start here */ 1472 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { 1473 /* If current position is already at the end of the 1474 * txq, reset the current to point to start of the queue 1475 * and update the end ptr as well. 
1476 */ 1477 if (sgl == (u64 *)q->q.stat) { 1478 int left = (u8 *)end - (u8 *)q->q.stat; 1479 1480 end = (void *)q->q.desc + left; 1481 sgl = (void *)q->q.desc; 1482 } 1483 } 1484 q->tso++; 1485 q->tx_cso += ssi->gso_segs; 1486 } else { 1487 len += sizeof(*cpl); 1488 if (ptp_enabled) 1489 op = FW_PTP_TX_PKT_WR; 1490 else 1491 op = FW_ETH_TX_PKT_WR; 1492 wr->op_immdlen = htonl(FW_WR_OP_V(op) | 1493 FW_WR_IMMDLEN_V(len)); 1494 cpl = (void *)(wr + 1); 1495 sgl = (u64 *)(cpl + 1); 1496 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1497 cntrl = hwcsum(adap->params.chip, skb) | 1498 TXPKT_IPCSUM_DIS_F; 1499 q->tx_cso++; 1500 } 1501 } 1502 1503 if (skb_vlan_tag_present(skb)) { 1504 q->vlan_ins++; 1505 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 1506 #ifdef CONFIG_CHELSIO_T4_FCOE 1507 if (skb->protocol == htons(ETH_P_FCOE)) 1508 cntrl |= TXPKT_VLAN_V( 1509 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); 1510 #endif /* CONFIG_CHELSIO_T4_FCOE */ 1511 } 1512 1513 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | 1514 TXPKT_PF_V(adap->pf); 1515 if (ptp_enabled) 1516 ctrl0 |= TXPKT_TSTAMP_F; 1517 #ifdef CONFIG_CHELSIO_T4_DCB 1518 if (is_t4(adap->params.chip)) 1519 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); 1520 else 1521 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); 1522 #endif 1523 cpl->ctrl0 = htonl(ctrl0); 1524 cpl->pack = htons(0); 1525 cpl->len = htons(skb->len); 1526 cpl->ctrl1 = cpu_to_be64(cntrl); 1527 1528 if (immediate) { 1529 cxgb4_inline_tx_skb(skb, &q->q, sgl); 1530 dev_consume_skb_any(skb); 1531 } else { 1532 int last_desc; 1533 1534 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr); 1535 skb_orphan(skb); 1536 1537 last_desc = q->q.pidx + ndesc - 1; 1538 if (last_desc >= q->q.size) 1539 last_desc -= q->q.size; 1540 q->q.sdesc[last_desc].skb = skb; 1541 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); 1542 } 1543 1544 txq_advance(&q->q, ndesc); 1545 1546 cxgb4_ring_tx_db(adap, &q->q, ndesc); 1547 if (ptp_enabled) 1548 spin_unlock(&adap->ptp_lock); 1549 return NETDEV_TX_OK; 1550 } 1551 1552 /** 1553 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 1554 * @q: the SGE control Tx queue 1555 * 1556 * This is a variant of cxgb4_reclaim_completed_tx() that is used 1557 * for Tx queues that send only immediate data (presently just 1558 * the control queues) and thus do not have any sk_buffs to release. 1559 */ 1560 static inline void reclaim_completed_tx_imm(struct sge_txq *q) 1561 { 1562 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); 1563 int reclaim = hw_cidx - q->cidx; 1564 1565 if (reclaim < 0) 1566 reclaim += q->size; 1567 1568 q->in_use -= reclaim; 1569 q->cidx = hw_cidx; 1570 } 1571 1572 /** 1573 * is_imm - check whether a packet can be sent as immediate data 1574 * @skb: the packet 1575 * 1576 * Returns true if a packet can be sent as a WR with immediate data. 1577 */ 1578 static inline int is_imm(const struct sk_buff *skb) 1579 { 1580 return skb->len <= MAX_CTRL_WR_LEN; 1581 } 1582 1583 /** 1584 * ctrlq_check_stop - check if a control queue is full and should stop 1585 * @q: the queue 1586 * @wr: most recent WR written to the queue 1587 * 1588 * Check if a control queue has become full and should be stopped. 1589 * We clean up control queue descriptors very lazily, only when we are out. 1590 * If the queue is still full after reclaiming any completed descriptors 1591 * we suspend it and have the last WR wake it up. 
1592 */ 1593 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) 1594 { 1595 reclaim_completed_tx_imm(&q->q); 1596 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 1597 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); 1598 q->q.stops++; 1599 q->full = 1; 1600 } 1601 } 1602 1603 /** 1604 * ctrl_xmit - send a packet through an SGE control Tx queue 1605 * @q: the control queue 1606 * @skb: the packet 1607 * 1608 * Send a packet through an SGE control Tx queue. Packets sent through 1609 * a control queue must fit entirely as immediate data. 1610 */ 1611 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) 1612 { 1613 unsigned int ndesc; 1614 struct fw_wr_hdr *wr; 1615 1616 if (unlikely(!is_imm(skb))) { 1617 WARN_ON(1); 1618 dev_kfree_skb(skb); 1619 return NET_XMIT_DROP; 1620 } 1621 1622 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); 1623 spin_lock(&q->sendq.lock); 1624 1625 if (unlikely(q->full)) { 1626 skb->priority = ndesc; /* save for restart */ 1627 __skb_queue_tail(&q->sendq, skb); 1628 spin_unlock(&q->sendq.lock); 1629 return NET_XMIT_CN; 1630 } 1631 1632 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 1633 cxgb4_inline_tx_skb(skb, &q->q, wr); 1634 1635 txq_advance(&q->q, ndesc); 1636 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) 1637 ctrlq_check_stop(q, wr); 1638 1639 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 1640 spin_unlock(&q->sendq.lock); 1641 1642 kfree_skb(skb); 1643 return NET_XMIT_SUCCESS; 1644 } 1645 1646 /** 1647 * restart_ctrlq - restart a suspended control queue 1648 * @data: the control queue to restart 1649 * 1650 * Resumes transmission on a suspended Tx control queue. 1651 */ 1652 static void restart_ctrlq(unsigned long data) 1653 { 1654 struct sk_buff *skb; 1655 unsigned int written = 0; 1656 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; 1657 1658 spin_lock(&q->sendq.lock); 1659 reclaim_completed_tx_imm(&q->q); 1660 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ 1661 1662 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { 1663 struct fw_wr_hdr *wr; 1664 unsigned int ndesc = skb->priority; /* previously saved */ 1665 1666 written += ndesc; 1667 /* Write descriptors and free skbs outside the lock to limit 1668 * wait times. q->full is still set so new skbs will be queued. 1669 */ 1670 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 1671 txq_advance(&q->q, ndesc); 1672 spin_unlock(&q->sendq.lock); 1673 1674 cxgb4_inline_tx_skb(skb, &q->q, wr); 1675 kfree_skb(skb); 1676 1677 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { 1678 unsigned long old = q->q.stops; 1679 1680 ctrlq_check_stop(q, wr); 1681 if (q->q.stops != old) { /* suspended anew */ 1682 spin_lock(&q->sendq.lock); 1683 goto ringdb; 1684 } 1685 } 1686 if (written > 16) { 1687 cxgb4_ring_tx_db(q->adap, &q->q, written); 1688 written = 0; 1689 } 1690 spin_lock(&q->sendq.lock); 1691 } 1692 q->full = 0; 1693 ringdb: 1694 if (written) 1695 cxgb4_ring_tx_db(q->adap, &q->q, written); 1696 spin_unlock(&q->sendq.lock); 1697 } 1698 1699 /** 1700 * t4_mgmt_tx - send a management message 1701 * @adap: the adapter 1702 * @skb: the packet containing the management message 1703 * 1704 * Send a management message through control queue 0. 
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}

/**
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));

	if (opcode == FW_CRYPTO_LOOKASIDE_WR)
		return skb->len <= SGE_MAX_WR_LEN;
	else
		return skb->len <= MAX_IMM_TX_PKT_LEN;
}

/**
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;		/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

/**
 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 * @q: the queue to stop
 *
 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 * inability to map packets. A periodic timer attempts to restart
 * queues so marked.
 */
static void txq_stop_maperr(struct sge_uld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}

/**
 * ofldtxq_stop - stop an offload Tx queue that has become full
 * @q: the queue to stop
 * @wr: the Work Request causing the queue to become full
 *
 * Stops an offload Tx queue that has become full and modifies the packet
 * being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
{
	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
	q->q.stops++;
	q->full = 1;
}

/**
 * service_ofldq - service/restart a suspended offload queue
 * @q: the offload queue
 *
 * Services an offload Tx queue by moving packets from its Pending Send
 * Queue to the Hardware TX ring. The function starts and ends with the
 * Send Queue locked, but drops the lock while putting the skb at the
 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
 * allows more skbs to be added to the Send Queue by other threads.
 * The packet being processed at the head of the Pending Send Queue is
 * left on the queue in case we experience DMA Mapping errors, etc.
 * and need to give up and restart later.
 *
 * service_ofldq() can be thought of as a task which opportunistically
 * uses other threads' execution contexts. We use the Offload Queue
 * boolean "service_ofldq_running" to make sure that only one instance
 * is ever running at a time ...
1805 */ 1806 static void service_ofldq(struct sge_uld_txq *q) 1807 { 1808 u64 *pos, *before, *end; 1809 int credits; 1810 struct sk_buff *skb; 1811 struct sge_txq *txq; 1812 unsigned int left; 1813 unsigned int written = 0; 1814 unsigned int flits, ndesc; 1815 1816 /* If another thread is currently in service_ofldq() processing the 1817 * Pending Send Queue then there's nothing to do. Otherwise, flag 1818 * that we're doing the work and continue. Examining/modifying 1819 * the Offload Queue boolean "service_ofldq_running" must be done 1820 * while holding the Pending Send Queue Lock. 1821 */ 1822 if (q->service_ofldq_running) 1823 return; 1824 q->service_ofldq_running = true; 1825 1826 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { 1827 /* We drop the lock while we're working with the skb at the 1828 * head of the Pending Send Queue. This allows more skbs to 1829 * be added to the Pending Send Queue while we're working on 1830 * this one. We don't need to lock to guard the TX Ring 1831 * updates because only one thread of execution is ever 1832 * allowed into service_ofldq() at a time. 1833 */ 1834 spin_unlock(&q->sendq.lock); 1835 1836 cxgb4_reclaim_completed_tx(q->adap, &q->q, false); 1837 1838 flits = skb->priority; /* previously saved */ 1839 ndesc = flits_to_desc(flits); 1840 credits = txq_avail(&q->q) - ndesc; 1841 BUG_ON(credits < 0); 1842 if (unlikely(credits < TXQ_STOP_THRES)) 1843 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); 1844 1845 pos = (u64 *)&q->q.desc[q->q.pidx]; 1846 if (is_ofld_imm(skb)) 1847 cxgb4_inline_tx_skb(skb, &q->q, pos); 1848 else if (cxgb4_map_skb(q->adap->pdev_dev, skb, 1849 (dma_addr_t *)skb->head)) { 1850 txq_stop_maperr(q); 1851 spin_lock(&q->sendq.lock); 1852 break; 1853 } else { 1854 int last_desc, hdr_len = skb_transport_offset(skb); 1855 1856 /* The WR headers may not fit within one descriptor. 1857 * So we need to deal with wrap-around here. 1858 */ 1859 before = (u64 *)pos; 1860 end = (u64 *)pos + flits; 1861 txq = &q->q; 1862 pos = (void *)inline_tx_skb_header(skb, &q->q, 1863 (void *)pos, 1864 hdr_len); 1865 if (before > (u64 *)pos) { 1866 left = (u8 *)end - (u8 *)txq->stat; 1867 end = (void *)txq->desc + left; 1868 } 1869 1870 /* If current position is already at the end of the 1871 * ofld queue, reset the current to point to 1872 * start of the queue and update the end ptr as well. 1873 */ 1874 if (pos == (u64 *)txq->stat) { 1875 left = (u8 *)end - (u8 *)txq->stat; 1876 end = (void *)txq->desc + left; 1877 pos = (void *)txq->desc; 1878 } 1879 1880 cxgb4_write_sgl(skb, &q->q, (void *)pos, 1881 end, hdr_len, 1882 (dma_addr_t *)skb->head); 1883 #ifdef CONFIG_NEED_DMA_MAP_STATE 1884 skb->dev = q->adap->port[0]; 1885 skb->destructor = deferred_unmap_destructor; 1886 #endif 1887 last_desc = q->q.pidx + ndesc - 1; 1888 if (last_desc >= q->q.size) 1889 last_desc -= q->q.size; 1890 q->q.sdesc[last_desc].skb = skb; 1891 } 1892 1893 txq_advance(&q->q, ndesc); 1894 written += ndesc; 1895 if (unlikely(written > 32)) { 1896 cxgb4_ring_tx_db(q->adap, &q->q, written); 1897 written = 0; 1898 } 1899 1900 /* Reacquire the Pending Send Queue Lock so we can unlink the 1901 * skb we've just successfully transferred to the TX Ring and 1902 * loop for the next skb which may be at the head of the 1903 * Pending Send Queue. 
1904 */ 1905 spin_lock(&q->sendq.lock); 1906 __skb_unlink(skb, &q->sendq); 1907 if (is_ofld_imm(skb)) 1908 kfree_skb(skb); 1909 } 1910 if (likely(written)) 1911 cxgb4_ring_tx_db(q->adap, &q->q, written); 1912 1913 /* Indicate that no thread is processing the Pending Send Queue 1914 * currently. 1915 */ 1916 q->service_ofldq_running = false; 1917 } 1918 1919 /** 1920 * ofld_xmit - send a packet through an offload queue 1921 * @q: the Tx offload queue 1922 * @skb: the packet 1923 * 1924 * Send an offload packet through an SGE offload queue. 1925 */ 1926 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) 1927 { 1928 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ 1929 spin_lock(&q->sendq.lock); 1930 1931 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If 1932 * that results in this new skb being the only one on the queue, start 1933 * servicing it. If there are other skbs already on the list, then 1934 * either the queue is currently being processed or it's been stopped 1935 * for some reason and it'll be restarted at a later time. Restart 1936 * paths are triggered by events like experiencing a DMA Mapping Error 1937 * or filling the Hardware TX Ring. 1938 */ 1939 __skb_queue_tail(&q->sendq, skb); 1940 if (q->sendq.qlen == 1) 1941 service_ofldq(q); 1942 1943 spin_unlock(&q->sendq.lock); 1944 return NET_XMIT_SUCCESS; 1945 } 1946 1947 /** 1948 * restart_ofldq - restart a suspended offload queue 1949 * @data: the offload queue to restart 1950 * 1951 * Resumes transmission on a suspended Tx offload queue. 1952 */ 1953 static void restart_ofldq(unsigned long data) 1954 { 1955 struct sge_uld_txq *q = (struct sge_uld_txq *)data; 1956 1957 spin_lock(&q->sendq.lock); 1958 q->full = 0; /* the queue actually is completely empty now */ 1959 service_ofldq(q); 1960 spin_unlock(&q->sendq.lock); 1961 } 1962 1963 /** 1964 * skb_txq - return the Tx queue an offload packet should use 1965 * @skb: the packet 1966 * 1967 * Returns the Tx queue an offload packet should use as indicated by bits 1968 * 1-15 in the packet's queue_mapping. 1969 */ 1970 static inline unsigned int skb_txq(const struct sk_buff *skb) 1971 { 1972 return skb->queue_mapping >> 1; 1973 } 1974 1975 /** 1976 * is_ctrl_pkt - return whether an offload packet is a control packet 1977 * @skb: the packet 1978 * 1979 * Returns whether an offload packet should use an OFLD or a CTRL 1980 * Tx queue as indicated by bit 0 in the packet's queue_mapping. 1981 */ 1982 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) 1983 { 1984 return skb->queue_mapping & 1; 1985 } 1986 1987 static inline int uld_send(struct adapter *adap, struct sk_buff *skb, 1988 unsigned int tx_uld_type) 1989 { 1990 struct sge_uld_txq_info *txq_info; 1991 struct sge_uld_txq *txq; 1992 unsigned int idx = skb_txq(skb); 1993 1994 if (unlikely(is_ctrl_pkt(skb))) { 1995 /* Single ctrl queue is a requirement for LE workaround path */ 1996 if (adap->tids.nsftids) 1997 idx = 0; 1998 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); 1999 } 2000 2001 txq_info = adap->sge.uld_txq_info[tx_uld_type]; 2002 if (unlikely(!txq_info)) { 2003 WARN_ON(true); 2004 return NET_XMIT_DROP; 2005 } 2006 2007 txq = &txq_info->uldtxq[idx]; 2008 return ofld_xmit(txq, skb); 2009 } 2010 2011 /** 2012 * t4_ofld_send - send an offload packet 2013 * @adap: the adapter 2014 * @skb: the packet 2015 * 2016 * Sends an offload packet.
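(A hedged sketch of this encoding follows.)
 */

/* Self-contained illustration (assumed helper names, not a driver API)
 * of the queue_mapping convention implemented by skb_txq() and
 * is_ctrl_pkt() above: bit 0 selects CTRL vs. OFLD, bits 1-15 carry the
 * queue index.
 */
#include <stdint.h>

static inline uint16_t ofld_qmap_encode(uint16_t queue, int is_ctrl)
{
	return (uint16_t)((queue << 1) | (is_ctrl & 1));
}

static inline uint16_t ofld_qmap_queue(uint16_t mapping)
{
	return mapping >> 1;	/* mirrors skb_txq() */
}

static inline int ofld_qmap_is_ctrl(uint16_t mapping)
{
	return mapping & 1;	/* mirrors is_ctrl_pkt() */
}

/* t4_ofld_send(), continued: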
We use the packet queue_mapping to select the 2017 * appropriate Tx queue as follows: bit 0 indicates whether the packet 2018 * should be sent as regular or control, bits 1-15 select the queue. 2019 */ 2020 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) 2021 { 2022 int ret; 2023 2024 local_bh_disable(); 2025 ret = uld_send(adap, skb, CXGB4_TX_OFLD); 2026 local_bh_enable(); 2027 return ret; 2028 } 2029 2030 /** 2031 * cxgb4_ofld_send - send an offload packet 2032 * @dev: the net device 2033 * @skb: the packet 2034 * 2035 * Sends an offload packet. This is an exported version of t4_ofld_send(), 2036 * intended for ULDs. 2037 */ 2038 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) 2039 { 2040 return t4_ofld_send(netdev2adap(dev), skb); 2041 } 2042 EXPORT_SYMBOL(cxgb4_ofld_send); 2043 2044 static void *inline_tx_header(const void *src, 2045 const struct sge_txq *q, 2046 void *pos, int length) 2047 { 2048 int left = (void *)q->stat - pos; 2049 u64 *p; 2050 2051 if (likely(length <= left)) { 2052 memcpy(pos, src, length); 2053 pos += length; 2054 } else { 2055 memcpy(pos, src, left); 2056 memcpy(q->desc, src + left, length - left); 2057 pos = (void *)q->desc + (length - left); 2058 } 2059 /* 0-pad to multiple of 16 */ 2060 p = PTR_ALIGN(pos, 8); 2061 if ((uintptr_t)p & 8) { 2062 *p = 0; 2063 return p + 1; 2064 } 2065 return p; 2066 } 2067 2068 /** 2069 * ofld_xmit_direct - copy a WR into an offload queue 2070 * @q: the Tx offload queue 2071 * @src: location of WR 2072 * @len: WR length 2073 * 2074 * Copy an immediate WR into an uncontended SGE offload queue. 2075 */ 2076 static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, 2077 unsigned int len) 2078 { 2079 unsigned int ndesc; 2080 int credits; 2081 u64 *pos; 2082 2083 /* Use the lower limit as the cut-off */ 2084 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { 2085 WARN_ON(1); 2086 return NET_XMIT_DROP; 2087 } 2088 2089 /* Don't return NET_XMIT_CN here as the current 2090 * implementation doesn't queue the request 2091 * using an skb when the following conditions are not met 2092 */ 2093 if (!spin_trylock(&q->sendq.lock)) 2094 return NET_XMIT_DROP; 2095 2096 if (q->full || !skb_queue_empty(&q->sendq) || 2097 q->service_ofldq_running) { 2098 spin_unlock(&q->sendq.lock); 2099 return NET_XMIT_DROP; 2100 } 2101 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); 2102 credits = txq_avail(&q->q) - ndesc; 2103 pos = (u64 *)&q->q.desc[q->q.pidx]; 2104 2105 /* ofldtxq_stop modifies WR header in-situ */ 2106 inline_tx_header(src, &q->q, pos, len); 2107 if (unlikely(credits < TXQ_STOP_THRES)) 2108 ofldtxq_stop(q, (struct fw_wr_hdr *)pos); 2109 txq_advance(&q->q, ndesc); 2110 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 2111 2112 spin_unlock(&q->sendq.lock); 2113 return NET_XMIT_SUCCESS; 2114 } 2115 2116 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, 2117 const void *src, unsigned int len) 2118 { 2119 struct sge_uld_txq_info *txq_info; 2120 struct sge_uld_txq *txq; 2121 struct adapter *adap; 2122 int ret; 2123 2124 adap = netdev2adap(dev); 2125 2126 local_bh_disable(); 2127 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 2128 if (unlikely(!txq_info)) { 2129 WARN_ON(true); 2130 local_bh_enable(); 2131 return NET_XMIT_DROP; 2132 } 2133 txq = &txq_info->uldtxq[idx]; 2134 2135 ret = ofld_xmit_direct(txq, src, len); 2136 local_bh_enable(); 2137 return net_xmit_eval(ret); 2138 } 2139 EXPORT_SYMBOL(cxgb4_immdata_send); 2140 2141 /** 2142 * t4_crypto_send - send a crypto packet 2143 * @adap: the adapter 2144 * @skb: the packet
2145 * 2146 * Sends a crypto packet. We use the packet queue_mapping to select the 2147 * appropriate Tx queue as follows: bit 0 indicates whether the packet 2148 * should be sent as regular or control, bits 1-15 select the queue. 2149 */ 2150 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) 2151 { 2152 int ret; 2153 2154 local_bh_disable(); 2155 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO); 2156 local_bh_enable(); 2157 return ret; 2158 } 2159 2160 /** 2161 * cxgb4_crypto_send - send a crypto packet 2162 * @dev: the net device 2163 * @skb: the packet 2164 * 2165 * Sends a crypto packet. This is an exported version of t4_crypto_send(), 2166 * intended for ULDs. 2167 */ 2168 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) 2169 { 2170 return t4_crypto_send(netdev2adap(dev), skb); 2171 } 2172 EXPORT_SYMBOL(cxgb4_crypto_send); 2173 2174 static inline void copy_frags(struct sk_buff *skb, 2175 const struct pkt_gl *gl, unsigned int offset) 2176 { 2177 int i; 2178 2179 /* usually there's just one frag */ 2180 __skb_fill_page_desc(skb, 0, gl->frags[0].page, 2181 gl->frags[0].offset + offset, 2182 gl->frags[0].size - offset); 2183 skb_shinfo(skb)->nr_frags = gl->nfrags; 2184 for (i = 1; i < gl->nfrags; i++) 2185 __skb_fill_page_desc(skb, i, gl->frags[i].page, 2186 gl->frags[i].offset, 2187 gl->frags[i].size); 2188 2189 /* get a reference to the last page; we don't own it */ 2190 get_page(gl->frags[gl->nfrags - 1].page); 2191 } 2192 2193 /** 2194 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list 2195 * @gl: the gather list 2196 * @skb_len: size of sk_buff main body if it carries fragments 2197 * @pull_len: amount of data to move to the sk_buff's main body 2198 * 2199 * Builds an sk_buff from the given packet gather list. Returns the 2200 * sk_buff or %NULL if sk_buff allocation failed. 2201 */ 2202 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, 2203 unsigned int skb_len, unsigned int pull_len) 2204 { 2205 struct sk_buff *skb; 2206 2207 /* 2208 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer 2209 * size, which is expected since buffers are at least PAGE_SIZEd. 2210 * In this case packets up to RX_COPY_THRES have only one fragment. 2211 */ 2212 if (gl->tot_len <= RX_COPY_THRES) { 2213 skb = dev_alloc_skb(gl->tot_len); 2214 if (unlikely(!skb)) 2215 goto out; 2216 __skb_put(skb, gl->tot_len); 2217 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); 2218 } else { 2219 skb = dev_alloc_skb(skb_len); 2220 if (unlikely(!skb)) 2221 goto out; 2222 __skb_put(skb, pull_len); 2223 skb_copy_to_linear_data(skb, gl->va, pull_len); 2224 2225 copy_frags(skb, gl, pull_len); 2226 skb->len = gl->tot_len; 2227 skb->data_len = skb->len - pull_len; 2228 skb->truesize += skb->data_len; 2229 } 2230 out: return skb; 2231 } 2232 EXPORT_SYMBOL(cxgb4_pktgl_to_skb); 2233 2234 /** 2235 * t4_pktgl_free - free a packet gather list 2236 * @gl: the gather list 2237 * 2238 * Releases the pages of a packet gather list. We do not own the last 2239 * page on the list and do not free it. 2240 */ 2241 static void t4_pktgl_free(const struct pkt_gl *gl) 2242 { 2243 int n; 2244 const struct page_frag *p; 2245 2246 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) 2247 put_page(p->page); 2248 } 2249 2250 /* 2251 * Process an MPS trace packet. Give it an unused protocol number so it won't 2252 * be delivered to anyone and send it to the stack for capture.
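 * (handle_trace_pkt() follows after a short illustrative aside.)
 */

/* Hedged, self-contained sketch (with a made-up constant standing in for
 * RX_COPY_THRES; not driver code) of the copy-vs-fragment policy used by
 * cxgb4_pktgl_to_skb() above: short packets are copied whole into the
 * skb's linear area, longer ones keep only pull_len bytes linear and
 * reference the page fragments for the remainder.
 */
#include <stddef.h>

#define SKETCH_COPY_THRES 256	/* assumed stand-in for RX_COPY_THRES */

static size_t linear_bytes(size_t tot_len, size_t pull_len)
{
	return tot_len <= SKETCH_COPY_THRES ? tot_len : pull_len;
}

/* handle_trace_pkt() implementation: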
2253 */ 2254 static noinline int handle_trace_pkt(struct adapter *adap, 2255 const struct pkt_gl *gl) 2256 { 2257 struct sk_buff *skb; 2258 2259 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); 2260 if (unlikely(!skb)) { 2261 t4_pktgl_free(gl); 2262 return 0; 2263 } 2264 2265 if (is_t4(adap->params.chip)) 2266 __skb_pull(skb, sizeof(struct cpl_trace_pkt)); 2267 else 2268 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); 2269 2270 skb_reset_mac_header(skb); 2271 skb->protocol = htons(0xffff); 2272 skb->dev = adap->port[0]; 2273 netif_receive_skb(skb); 2274 return 0; 2275 } 2276 2277 /** 2278 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp 2279 * @adap: the adapter 2280 * @hwtstamps: time stamp structure to update 2281 * @sgetstamp: 60-bit IQE timestamp 2282 * 2283 * Every ingress queue entry carries a 60-bit timestamp in Core Clock 2284 * ticks; convert it to ktime_t and assign it to the hwtstamp structure. 2285 */ 2286 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, 2287 struct skb_shared_hwtstamps *hwtstamps, 2288 u64 sgetstamp) 2289 { 2290 u64 ns; 2291 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); 2292 2293 ns = div_u64(tmp, adap->params.vpd.cclk); 2294 2295 memset(hwtstamps, 0, sizeof(*hwtstamps)); 2296 hwtstamps->hwtstamp = ns_to_ktime(ns); 2297 } 2298 2299 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 2300 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) 2301 { 2302 struct adapter *adapter = rxq->rspq.adap; 2303 struct sge *s = &adapter->sge; 2304 struct port_info *pi; 2305 int ret; 2306 struct sk_buff *skb; 2307 2308 skb = napi_get_frags(&rxq->rspq.napi); 2309 if (unlikely(!skb)) { 2310 t4_pktgl_free(gl); 2311 rxq->stats.rx_drops++; 2312 return; 2313 } 2314 2315 copy_frags(skb, gl, s->pktshift); 2316 if (tnl_hdr_len) 2317 skb->csum_level = 1; 2318 skb->len = gl->tot_len - s->pktshift; 2319 skb->data_len = skb->len; 2320 skb->truesize += skb->data_len; 2321 skb->ip_summed = CHECKSUM_UNNECESSARY; 2322 skb_record_rx_queue(skb, rxq->rspq.idx); 2323 pi = netdev_priv(skb->dev); 2324 if (pi->rxtstamp) 2325 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), 2326 gl->sgetstamp); 2327 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) 2328 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 2329 PKT_HASH_TYPE_L3); 2330 2331 if (unlikely(pkt->vlan_ex)) { 2332 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 2333 rxq->stats.vlan_ex++; 2334 } 2335 ret = napi_gro_frags(&rxq->rspq.napi); 2336 if (ret == GRO_HELD) 2337 rxq->stats.lro_pkts++; 2338 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) 2339 rxq->stats.lro_merged++; 2340 rxq->stats.pkts++; 2341 rxq->stats.rx_cso++; 2342 } 2343 2344 enum { 2345 RX_NON_PTP_PKT = 0, 2346 RX_PTP_PKT_SUC = 1, 2347 RX_PTP_PKT_ERR = 2 2348 }; 2349 2350 /** 2351 * t4_systim_to_hwstamp - read hardware time stamp 2352 * @adapter: the adapter 2353 * @skb: the packet 2354 * 2355 * Read the Time Stamp from an MPS packet and insert it into the skb, 2356 * which is then forwarded to the PTP application. 2357 */ 2358 static noinline int t4_systim_to_hwstamp(struct adapter *adapter, 2359 struct sk_buff *skb) 2360 { 2361 struct skb_shared_hwtstamps *hwtstamps; 2362 struct cpl_rx_mps_pkt *cpl = NULL; 2363 unsigned char *data; 2364 int offset; 2365 2366 cpl = (struct cpl_rx_mps_pkt *)skb->data; 2367 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & 2368 X_CPL_RX_MPS_PKT_TYPE_PTP)) 2369 return RX_PTP_PKT_ERR; 2370 2371 data = skb->data + sizeof(*cpl); 2372 skb_pull(skb, 2 * sizeof(u64) +
sizeof(struct cpl_rx_mps_pkt)); 2373 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; 2374 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) 2375 return RX_PTP_PKT_ERR; 2376 2377 hwtstamps = skb_hwtstamps(skb); 2378 memset(hwtstamps, 0, sizeof(*hwtstamps)); 2379 hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); 2380 2381 return RX_PTP_PKT_SUC; 2382 } 2383 2384 /** 2385 * t4_rx_hststamp - receive a PTP Event Message 2386 * @adapter: the adapter 2387 * @rsp: the response queue descriptor holding the RX_PKT message 2388 * @rxq: the Ethernet Rx queue receiving the packet 2389 * @skb: the packet 2390 * If PTP is enabled and this is an MPS packet, read the HW timestamp 2391 */ 2392 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, 2393 struct sge_eth_rxq *rxq, struct sk_buff *skb) 2394 { 2395 int ret; 2396 2397 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) && 2398 !is_t4(adapter->params.chip))) { 2399 ret = t4_systim_to_hwstamp(adapter, skb); 2400 if (ret == RX_PTP_PKT_ERR) { 2401 kfree_skb(skb); 2402 rxq->stats.rx_drops++; 2403 } 2404 return ret; 2405 } 2406 return RX_NON_PTP_PKT; 2407 } 2408 2409 /** 2410 * t4_tx_hststamp - Loopback PTP Transmit Event Message 2411 * @adapter: the adapter 2412 * @skb: the packet 2413 * @dev: the ingress net device 2414 * 2415 * Read the hardware timestamp for the loopback PTP Tx event message. 2416 */ 2417 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, 2418 struct net_device *dev) 2419 { 2420 struct port_info *pi = netdev_priv(dev); 2421 2422 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { 2423 cxgb4_ptp_read_hwstamp(adapter, pi); 2424 kfree_skb(skb); 2425 return 0; 2426 } 2427 return 1; 2428 } 2429 2430 /** 2431 * t4_ethrx_handler - process an ingress ethernet packet 2432 * @q: the response queue that received the packet 2433 * @rsp: the response queue descriptor holding the RX_PKT message 2434 * @si: the gather list of packet fragments 2435 * 2436 * Process an ingress ethernet packet and deliver it to the stack. 2437 */ 2438 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, 2439 const struct pkt_gl *si) 2440 { 2441 bool csum_ok; 2442 struct sk_buff *skb; 2443 const struct cpl_rx_pkt *pkt; 2444 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 2445 struct adapter *adapter = q->adap; 2446 struct sge *s = &q->adap->sge; 2447 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
2448 CPL_TRACE_PKT : CPL_TRACE_PKT_T5; 2449 u16 err_vec, tnl_hdr_len = 0; 2450 struct port_info *pi; 2451 int ret = 0; 2452 2453 if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) 2454 return handle_trace_pkt(q->adap, si); 2455 2456 pkt = (const struct cpl_rx_pkt *)rsp; 2457 /* Compressed error vector is enabled for T6 only */ 2458 if (q->adap->params.tp.rx_pkt_encap) { 2459 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); 2460 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); 2461 } else { 2462 err_vec = be16_to_cpu(pkt->err_vec); 2463 } 2464 2465 csum_ok = pkt->csum_calc && !err_vec && 2466 (q->netdev->features & NETIF_F_RXCSUM); 2467 if (((pkt->l2info & htonl(RXF_TCP_F)) || 2468 tnl_hdr_len) && 2469 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 2470 do_gro(rxq, si, pkt, tnl_hdr_len); 2471 return 0; 2472 } 2473 2474 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); 2475 if (unlikely(!skb)) { 2476 t4_pktgl_free(si); 2477 rxq->stats.rx_drops++; 2478 return 0; 2479 } 2480 pi = netdev_priv(q->netdev); 2481 2482 /* Handle PTP Event Rx packet */ 2483 if (unlikely(pi->ptp_enable)) { 2484 ret = t4_rx_hststamp(adapter, rsp, rxq, skb); 2485 if (ret == RX_PTP_PKT_ERR) 2486 return 0; 2487 } 2488 if (likely(!ret)) 2489 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ 2490 2491 /* Handle the PTP Event Tx Loopback packet */ 2492 if (unlikely(pi->ptp_enable && !ret && 2493 (pkt->l2info & htonl(RXF_UDP_F)) && 2494 cxgb4_ptp_is_ptp_rx(skb))) { 2495 if (!t4_tx_hststamp(adapter, skb, q->netdev)) 2496 return 0; 2497 } 2498 2499 skb->protocol = eth_type_trans(skb, q->netdev); 2500 skb_record_rx_queue(skb, q->idx); 2501 if (skb->dev->features & NETIF_F_RXHASH) 2502 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 2503 PKT_HASH_TYPE_L3); 2504 2505 rxq->stats.pkts++; 2506 2507 if (pi->rxtstamp) 2508 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), 2509 si->sgetstamp); 2510 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { 2511 if (!pkt->ip_frag) { 2512 skb->ip_summed = CHECKSUM_UNNECESSARY; 2513 rxq->stats.rx_cso++; 2514 } else if (pkt->l2info & htonl(RXF_IP_F)) { 2515 __sum16 c = (__force __sum16)pkt->csum; 2516 skb->csum = csum_unfold(c); 2517 2518 if (tnl_hdr_len) { 2519 skb->ip_summed = CHECKSUM_UNNECESSARY; 2520 skb->csum_level = 1; 2521 } else { 2522 skb->ip_summed = CHECKSUM_COMPLETE; 2523 } 2524 rxq->stats.rx_cso++; 2525 } 2526 } else { 2527 skb_checksum_none_assert(skb); 2528 #ifdef CONFIG_CHELSIO_T4_FCOE 2529 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ 2530 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) 2531 2532 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { 2533 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && 2534 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { 2535 if (q->adap->params.tp.rx_pkt_encap) 2536 csum_ok = err_vec & 2537 T6_COMPR_RXERR_SUM_F; 2538 else 2539 csum_ok = err_vec & RXERR_CSUM_F; 2540 if (!csum_ok) 2541 skb->ip_summed = CHECKSUM_UNNECESSARY; 2542 } 2543 } 2544 2545 #undef CPL_RX_PKT_FLAGS 2546 #endif /* CONFIG_CHELSIO_T4_FCOE */ 2547 } 2548 2549 if (unlikely(pkt->vlan_ex)) { 2550 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 2551 rxq->stats.vlan_ex++; 2552 } 2553 skb_mark_napi_id(skb, &q->napi); 2554 netif_receive_skb(skb); 2555 return 0; 2556 } 2557 2558 /** 2559 * restore_rx_bufs - put back a packet's Rx buffers 2560 * @si: the packet gather list 2561 * @q: the SGE free list 2562 * @frags: number of FL buffers to restore 2563 * 2564 * Puts back on an FL the Rx buffers 
associated with @si. The buffers 2565 * have already been unmapped and are left unmapped; we mark them so to 2566 * prevent further unmapping attempts. 2567 * 2568 * This function undoes a series of unmap_rx_buf() calls when we find out 2569 * that the current packet can't be processed right away after all and we 2570 * need to come back to it later. This is a very rare event and there's 2571 * no effort to make this particularly efficient. 2572 */ 2573 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, 2574 int frags) 2575 { 2576 struct rx_sw_desc *d; 2577 2578 while (frags--) { 2579 if (q->cidx == 0) 2580 q->cidx = q->size - 1; 2581 else 2582 q->cidx--; 2583 d = &q->sdesc[q->cidx]; 2584 d->page = si->frags[frags].page; 2585 d->dma_addr |= RX_UNMAPPED_BUF; 2586 q->avail++; 2587 } 2588 } 2589 2590 /** 2591 * is_new_response - check if a response is newly written 2592 * @r: the response descriptor 2593 * @q: the response queue 2594 * 2595 * Returns true if a response descriptor contains a yet unprocessed 2596 * response. 2597 */ 2598 static inline bool is_new_response(const struct rsp_ctrl *r, 2599 const struct sge_rspq *q) 2600 { 2601 return (r->type_gen >> RSPD_GEN_S) == q->gen; 2602 } 2603 2604 /** 2605 * rspq_next - advance to the next entry in a response queue 2606 * @q: the queue 2607 * 2608 * Updates the state of a response queue to advance it to the next entry. 2609 */ 2610 static inline void rspq_next(struct sge_rspq *q) 2611 { 2612 q->cur_desc = (void *)q->cur_desc + q->iqe_len; 2613 if (unlikely(++q->cidx == q->size)) { 2614 q->cidx = 0; 2615 q->gen ^= 1; 2616 q->cur_desc = q->desc; 2617 } 2618 } 2619 2620 /** 2621 * process_responses - process responses from an SGE response queue 2622 * @q: the ingress queue to process 2623 * @budget: how many responses can be processed in this round 2624 * 2625 * Process responses from an SGE response queue up to the supplied budget. 2626 * Responses include received packets as well as control messages from FW 2627 * or HW. 2628 * 2629 * Additionally choose the interrupt holdoff time for the next interrupt 2630 * on this queue. If the system is under memory shortage use a fairly 2631 * long delay to help recovery.
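 * (A stand-alone model of the generation-bit handshake appears below.)
 */

/* Illustrative model (not driver code) of the generation-bit scheme used
 * by is_new_response()/rspq_next() above: the consumer's gen flag flips
 * each time its index wraps, so a descriptor is "new" only while its gen
 * bit matches the consumer's current generation.
 */
#include <stdbool.h>

struct ring_model {
	unsigned int cidx, size;
	unsigned int gen;	/* current generation, 0 or 1 */
};

static bool model_is_new(const struct ring_model *r, unsigned int desc_gen)
{
	return desc_gen == r->gen;	/* mirrors is_new_response() */
}

static void model_next(struct ring_model *r)
{
	if (++r->cidx == r->size) {	/* mirrors rspq_next() */
		r->cidx = 0;
		r->gen ^= 1;
	}
}

/* process_responses() implementation: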
2632 */ 2633 static int process_responses(struct sge_rspq *q, int budget) 2634 { 2635 int ret, rsp_type; 2636 int budget_left = budget; 2637 const struct rsp_ctrl *rc; 2638 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 2639 struct adapter *adapter = q->adap; 2640 struct sge *s = &adapter->sge; 2641 2642 while (likely(budget_left)) { 2643 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 2644 if (!is_new_response(rc, q)) { 2645 if (q->flush_handler) 2646 q->flush_handler(q); 2647 break; 2648 } 2649 2650 dma_rmb(); 2651 rsp_type = RSPD_TYPE_G(rc->type_gen); 2652 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { 2653 struct page_frag *fp; 2654 struct pkt_gl si; 2655 const struct rx_sw_desc *rsd; 2656 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; 2657 2658 if (len & RSPD_NEWBUF_F) { 2659 if (likely(q->offset > 0)) { 2660 free_rx_bufs(q->adap, &rxq->fl, 1); 2661 q->offset = 0; 2662 } 2663 len = RSPD_LEN_G(len); 2664 } 2665 si.tot_len = len; 2666 2667 /* gather packet fragments */ 2668 for (frags = 0, fp = si.frags; ; frags++, fp++) { 2669 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 2670 bufsz = get_buf_size(adapter, rsd); 2671 fp->page = rsd->page; 2672 fp->offset = q->offset; 2673 fp->size = min(bufsz, len); 2674 len -= fp->size; 2675 if (!len) 2676 break; 2677 unmap_rx_buf(q->adap, &rxq->fl); 2678 } 2679 2680 si.sgetstamp = SGE_TIMESTAMP_G( 2681 be64_to_cpu(rc->last_flit)); 2682 /* 2683 * Last buffer remains mapped so explicitly make it 2684 * coherent for CPU access. 2685 */ 2686 dma_sync_single_for_cpu(q->adap->pdev_dev, 2687 get_buf_addr(rsd), 2688 fp->size, DMA_FROM_DEVICE); 2689 2690 si.va = page_address(si.frags[0].page) + 2691 si.frags[0].offset; 2692 prefetch(si.va); 2693 2694 si.nfrags = frags + 1; 2695 ret = q->handler(q, q->cur_desc, &si); 2696 if (likely(ret == 0)) 2697 q->offset += ALIGN(fp->size, s->fl_align); 2698 else 2699 restore_rx_bufs(&si, &rxq->fl, frags); 2700 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { 2701 ret = q->handler(q, q->cur_desc, NULL); 2702 } else { 2703 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); 2704 } 2705 2706 if (unlikely(ret)) { 2707 /* couldn't process descriptor, back off for recovery */ 2708 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); 2709 break; 2710 } 2711 2712 rspq_next(q); 2713 budget_left--; 2714 } 2715 2716 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) 2717 __refill_fl(q->adap, &rxq->fl); 2718 return budget - budget_left; 2719 } 2720 2721 /** 2722 * napi_rx_handler - the NAPI handler for Rx processing 2723 * @napi: the napi instance 2724 * @budget: how many packets we can process in this round 2725 * 2726 * Handler for new data events when using NAPI. This does not need any 2727 * locking or protection from interrupts as data interrupts are off at 2728 * this point and other adapter interrupts do not interfere (the latter 2729 * is not a concern at all with MSI-X as non-data interrupts then have 2730 * a separate handler).
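 * (A hedged sketch of the adaptive holdoff step appears below.)
 */

/* Illustrative helper (not driver code) showing the adaptive interrupt
 * holdoff adjustment performed in napi_rx_handler() below: the timer
 * index steps up when a poll consumed more than its packet quota,
 * steps down otherwise, and is clamped to the valid range.
 */
static int adjust_timer_index(int idx, int work_done, int quota, int max_idx)
{
	idx += (work_done > quota) ? 1 : -1;
	if (idx < 0)
		idx = 0;
	if (idx > max_idx)
		idx = max_idx;
	return idx;
}

/* napi_rx_handler():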
2731 */ 2732 static int napi_rx_handler(struct napi_struct *napi, int budget) 2733 { 2734 unsigned int params; 2735 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); 2736 int work_done; 2737 u32 val; 2738 2739 work_done = process_responses(q, budget); 2740 if (likely(work_done < budget)) { 2741 int timer_index; 2742 2743 napi_complete_done(napi, work_done); 2744 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); 2745 2746 if (q->adaptive_rx) { 2747 if (work_done > max(timer_pkt_quota[timer_index], 2748 MIN_NAPI_WORK)) 2749 timer_index = (timer_index + 1); 2750 else 2751 timer_index = timer_index - 1; 2752 2753 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); 2754 q->next_intr_params = 2755 QINTR_TIMER_IDX_V(timer_index) | 2756 QINTR_CNT_EN_V(0); 2757 params = q->next_intr_params; 2758 } else { 2759 params = q->next_intr_params; 2760 q->next_intr_params = q->intr_params; 2761 } 2762 } else 2763 params = QINTR_TIMER_IDX_V(7); 2764 2765 val = CIDXINC_V(work_done) | SEINTARM_V(params); 2766 2767 /* If we don't have access to the new User GTS (T5+), use the old 2768 * doorbell mechanism; otherwise use the new BAR2 mechanism. 2769 */ 2770 if (unlikely(q->bar2_addr == NULL)) { 2771 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), 2772 val | INGRESSQID_V((u32)q->cntxt_id)); 2773 } else { 2774 writel(val | INGRESSQID_V(q->bar2_qid), 2775 q->bar2_addr + SGE_UDB_GTS); 2776 wmb(); 2777 } 2778 return work_done; 2779 } 2780 2781 /* 2782 * The MSI-X interrupt handler for an SGE response queue. 2783 */ 2784 irqreturn_t t4_sge_intr_msix(int irq, void *cookie) 2785 { 2786 struct sge_rspq *q = cookie; 2787 2788 napi_schedule(&q->napi); 2789 return IRQ_HANDLED; 2790 } 2791 2792 /* 2793 * Process the indirect interrupt entries in the interrupt queue and kick off 2794 * NAPI for each queue that has generated an entry. 2795 */ 2796 static unsigned int process_intrq(struct adapter *adap) 2797 { 2798 unsigned int credits; 2799 const struct rsp_ctrl *rc; 2800 struct sge_rspq *q = &adap->sge.intrq; 2801 u32 val; 2802 2803 spin_lock(&adap->sge.intrq_lock); 2804 for (credits = 0; ; credits++) { 2805 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 2806 if (!is_new_response(rc, q)) 2807 break; 2808 2809 dma_rmb(); 2810 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { 2811 unsigned int qid = ntohl(rc->pldbuflen_qid); 2812 2813 qid -= adap->sge.ingr_start; 2814 napi_schedule(&adap->sge.ingr_map[qid]->napi); 2815 } 2816 2817 rspq_next(q); 2818 } 2819 2820 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); 2821 2822 /* If we don't have access to the new User GTS (T5+), use the old 2823 * doorbell mechanism; otherwise use the new BAR2 mechanism. 2824 */ 2825 if (unlikely(q->bar2_addr == NULL)) { 2826 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), 2827 val | INGRESSQID_V(q->cntxt_id)); 2828 } else { 2829 writel(val | INGRESSQID_V(q->bar2_qid), 2830 q->bar2_addr + SGE_UDB_GTS); 2831 wmb(); 2832 } 2833 spin_unlock(&adap->sge.intrq_lock); 2834 return credits; 2835 } 2836 2837 /* 2838 * The MSI interrupt handler, which handles data events from SGE response queues 2839 * as well as error and other async events as they all use the same MSI vector. 2840 */ 2841 static irqreturn_t t4_intr_msi(int irq, void *cookie) 2842 { 2843 struct adapter *adap = cookie; 2844 2845 if (adap->flags & MASTER_PF) 2846 t4_slow_intr_handler(adap); 2847 process_intrq(adap); 2848 return IRQ_HANDLED; 2849 } 2850 2851 /* 2852 * Interrupt handler for legacy INTx interrupts. 
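 * (Note the single '|' in the handler below: bitwise OR is deliberate,
 * so that process_intrq() always runs even when the slow-path handler
 * already reported work; '||' would short-circuit it away.)
 */

/* Minimal stand-alone illustration of that evaluation-order point (a
 * sketch, not driver code): with '|', both operands are always
 * evaluated.
 */
static int run_both(int (*a)(void), int (*b)(void))
{
	return (a() != 0) | (b() != 0);
}

/* t4_intr_intx():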
2853 * Handles data events from SGE response queues as well as error and other 2854 * async events as they all use the same interrupt line. 2855 */ 2856 static irqreturn_t t4_intr_intx(int irq, void *cookie) 2857 { 2858 struct adapter *adap = cookie; 2859 2860 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); 2861 if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | 2862 process_intrq(adap)) 2863 return IRQ_HANDLED; 2864 return IRQ_NONE; /* probably shared interrupt */ 2865 } 2866 2867 /** 2868 * t4_intr_handler - select the top-level interrupt handler 2869 * @adap: the adapter 2870 * 2871 * Selects the top-level interrupt handler based on the type of interrupts 2872 * (MSI-X, MSI, or INTx). 2873 */ 2874 irq_handler_t t4_intr_handler(struct adapter *adap) 2875 { 2876 if (adap->flags & USING_MSIX) 2877 return t4_sge_intr_msix; 2878 if (adap->flags & USING_MSI) 2879 return t4_intr_msi; 2880 return t4_intr_intx; 2881 } 2882 2883 static void sge_rx_timer_cb(struct timer_list *t) 2884 { 2885 unsigned long m; 2886 unsigned int i; 2887 struct adapter *adap = from_timer(adap, t, sge.rx_timer); 2888 struct sge *s = &adap->sge; 2889 2890 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 2891 for (m = s->starving_fl[i]; m; m &= m - 1) { 2892 struct sge_eth_rxq *rxq; 2893 unsigned int id = __ffs(m) + i * BITS_PER_LONG; 2894 struct sge_fl *fl = s->egr_map[id]; 2895 2896 clear_bit(id, s->starving_fl); 2897 smp_mb__after_atomic(); 2898 2899 if (fl_starving(adap, fl)) { 2900 rxq = container_of(fl, struct sge_eth_rxq, fl); 2901 if (napi_reschedule(&rxq->rspq.napi)) 2902 fl->starving++; 2903 else 2904 set_bit(id, s->starving_fl); 2905 } 2906 } 2907 /* The remainder of the SGE RX Timer Callback routine is dedicated to 2908 * global Master PF activities like checking for chip ingress stalls, 2909 * etc. 2910 */ 2911 if (!(adap->flags & MASTER_PF)) 2912 goto done; 2913 2914 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); 2915 2916 done: 2917 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); 2918 } 2919 2920 static void sge_tx_timer_cb(struct timer_list *t) 2921 { 2922 unsigned long m; 2923 unsigned int i, budget; 2924 struct adapter *adap = from_timer(adap, t, sge.tx_timer); 2925 struct sge *s = &adap->sge; 2926 2927 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) 2928 for (m = s->txq_maperr[i]; m; m &= m - 1) { 2929 unsigned long id = __ffs(m) + i * BITS_PER_LONG; 2930 struct sge_uld_txq *txq = s->egr_map[id]; 2931 2932 clear_bit(id, s->txq_maperr); 2933 tasklet_schedule(&txq->qresume_tsk); 2934 } 2935 2936 if (!is_t4(adap->params.chip)) { 2937 struct sge_eth_txq *q = &s->ptptxq; 2938 int avail; 2939 2940 spin_lock(&adap->ptp_lock); 2941 avail = reclaimable(&q->q); 2942 2943 if (avail) { 2944 free_tx_desc(adap, &q->q, avail, false); 2945 q->q.in_use -= avail; 2946 } 2947 spin_unlock(&adap->ptp_lock); 2948 } 2949 2950 budget = MAX_TIMER_TX_RECLAIM; 2951 i = s->ethtxq_rover; 2952 do { 2953 struct sge_eth_txq *q = &s->ethtxq[i]; 2954 2955 if (q->q.in_use && 2956 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && 2957 __netif_tx_trylock(q->txq)) { 2958 int avail = reclaimable(&q->q); 2959 2960 if (avail) { 2961 if (avail > budget) 2962 avail = budget; 2963 2964 free_tx_desc(adap, &q->q, avail, true); 2965 q->q.in_use -= avail; 2966 budget -= avail; 2967 } 2968 __netif_tx_unlock(q->txq); 2969 } 2970 2971 if (++i >= s->ethqsets) 2972 i = 0; 2973 } while (budget && i != s->ethtxq_rover); 2974 s->ethtxq_rover = i; 2975 mod_timer(&s->tx_timer, jiffies + (budget ? 
TX_QCHECK_PERIOD : 2)); 2976 } 2977 2978 /** 2979 * bar2_address - return the BAR2 address for an SGE Queue's Registers 2980 * @adapter: the adapter 2981 * @qid: the SGE Queue ID 2982 * @qtype: the SGE Queue Type (Egress or Ingress) 2983 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 2984 * 2985 * Returns the BAR2 address for the SGE Queue Registers associated with 2986 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also 2987 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE 2988 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" 2989 * Registers are supported (e.g. the Write Combining Doorbell Buffer). 2990 */ 2991 static void __iomem *bar2_address(struct adapter *adapter, 2992 unsigned int qid, 2993 enum t4_bar2_qtype qtype, 2994 unsigned int *pbar2_qid) 2995 { 2996 u64 bar2_qoffset; 2997 int ret; 2998 2999 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0, 3000 &bar2_qoffset, pbar2_qid); 3001 if (ret) 3002 return NULL; 3003 3004 return adapter->bar2 + bar2_qoffset; 3005 } 3006 3007 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 3008 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map 3009 */ 3010 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 3011 struct net_device *dev, int intr_idx, 3012 struct sge_fl *fl, rspq_handler_t hnd, 3013 rspq_flush_handler_t flush_hnd, int cong) 3014 { 3015 int ret, flsz = 0; 3016 struct fw_iq_cmd c; 3017 struct sge *s = &adap->sge; 3018 struct port_info *pi = netdev_priv(dev); 3019 int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING); 3020 3021 /* Size needs to be a multiple of 16, including the status entry. */ 3022 iq->size = roundup(iq->size, 16); 3023 3024 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, 3025 &iq->phys_addr, NULL, 0, 3026 dev_to_node(adap->pdev_dev)); 3027 if (!iq->desc) 3028 return -ENOMEM; 3029 3030 memset(&c, 0, sizeof(c)); 3031 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | 3032 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 3033 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); 3034 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | 3035 FW_LEN16(c)); 3036 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | 3037 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | 3038 FW_IQ_CMD_IQANDST_V(intr_idx < 0) | 3039 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) | 3040 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : 3041 -intr_idx - 1)); 3042 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | 3043 FW_IQ_CMD_IQGTSMODE_F | 3044 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | 3045 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); 3046 c.iqsize = htons(iq->size); 3047 c.iqaddr = cpu_to_be64(iq->phys_addr); 3048 if (cong >= 0) 3049 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F); 3050 3051 if (fl) { 3052 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); 3053 3054 /* Allocate the ring for the hardware free list (with space 3055 * for its status page) along with the associated software 3056 * descriptor ring. The free list size needs to be a multiple 3057 * of the Egress Queue Unit and at least 2 Egress Units larger 3058 * than the SGE's Egress Congestion Threshold 3059 * (fl_starve_thres - 1).
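 *
 * (Worked example with illustrative numbers: if fl_starve_thres were
 * 1024, the floor below is 1024 - 1 + 2 * 8 = 1039 entries, and the
 * roundup() that follows raises it to 1040, the next multiple of 8.)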
3060 */ 3061 if (fl->size < s->fl_starve_thres - 1 + 2 * 8) 3062 fl->size = s->fl_starve_thres - 1 + 2 * 8; 3063 fl->size = roundup(fl->size, 8); 3064 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 3065 sizeof(struct rx_sw_desc), &fl->addr, 3066 &fl->sdesc, s->stat_len, 3067 dev_to_node(adap->pdev_dev)); 3068 if (!fl->desc) 3069 goto fl_nomem; 3070 3071 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); 3072 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | 3073 FW_IQ_CMD_FL0FETCHRO_V(relaxed) | 3074 FW_IQ_CMD_FL0DATARO_V(relaxed) | 3075 FW_IQ_CMD_FL0PADEN_F); 3076 if (cong >= 0) 3077 c.iqns_to_fl0congen |= 3078 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | 3079 FW_IQ_CMD_FL0CONGCIF_F | 3080 FW_IQ_CMD_FL0CONGEN_F); 3081 /* In T6, for egress queue type FL there is internal overhead 3082 * of 16B for header going into FLM module. Hence the maximum 3083 * allowed burst size is 448 bytes. For T4/T5, the hardware 3084 * doesn't coalesce fetch requests if more than 64 bytes of 3085 * Free List pointers are provided, so we use a 128-byte Fetch 3086 * Burst Minimum there (T6 implements coalescing so we can use 3087 * the smaller 64-byte value there). 3088 */ 3089 c.fl0dcaen_to_fl0cidxfthresh = 3090 htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? 3091 FETCHBURSTMIN_128B_X : 3092 FETCHBURSTMIN_64B_X) | 3093 FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? 3094 FETCHBURSTMAX_512B_X : 3095 FETCHBURSTMAX_256B_X)); 3096 c.fl0size = htons(flsz); 3097 c.fl0addr = cpu_to_be64(fl->addr); 3098 } 3099 3100 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 3101 if (ret) 3102 goto err; 3103 3104 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); 3105 iq->cur_desc = iq->desc; 3106 iq->cidx = 0; 3107 iq->gen = 1; 3108 iq->next_intr_params = iq->intr_params; 3109 iq->cntxt_id = ntohs(c.iqid); 3110 iq->abs_id = ntohs(c.physiqid); 3111 iq->bar2_addr = bar2_address(adap, 3112 iq->cntxt_id, 3113 T4_BAR2_QTYPE_INGRESS, 3114 &iq->bar2_qid); 3115 iq->size--; /* subtract status entry */ 3116 iq->netdev = dev; 3117 iq->handler = hnd; 3118 iq->flush_handler = flush_hnd; 3119 3120 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); 3121 skb_queue_head_init(&iq->lro_mgr.lroq); 3122 3123 /* set offset to -1 to distinguish ingress queues without FL */ 3124 iq->offset = fl ? 0 : -1; 3125 3126 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; 3127 3128 if (fl) { 3129 fl->cntxt_id = ntohs(c.fl0id); 3130 fl->avail = fl->pend_cred = 0; 3131 fl->pidx = fl->cidx = 0; 3132 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 3133 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; 3134 3135 /* Note, we must initialize the BAR2 Free List User Doorbell 3136 * information before refilling the Free List! 3137 */ 3138 fl->bar2_addr = bar2_address(adap, 3139 fl->cntxt_id, 3140 T4_BAR2_QTYPE_EGRESS, 3141 &fl->bar2_qid); 3142 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); 3143 } 3144 3145 /* For T5 and later we attempt to set up the Congestion Manager values 3146 * of the new RX Ethernet Queue. This should really be handled by 3147 * firmware because it's more complex than any host driver wants to 3148 * get involved with and it's different per chip and this is almost 3149 * certainly wrong. Firmware would be wrong as well, but it would be 3150 * a lot easier to fix in one place ... For now we do something very 3151 * simple (and hopefully less wrong). 
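 *
 * (For illustration, with made-up values: cong = 0x5 selects channels
 * 0 and 2, and with cng_ch_bits_log = 2 the loop below computes
 * ch_map = (1 << (0 << 2)) | (1 << (2 << 2)) = 0x001 | 0x100 = 0x101.)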
3152 */ 3153 if (!is_t4(adap->params.chip) && cong >= 0) { 3154 u32 param, val, ch_map = 0; 3155 int i; 3156 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; 3157 3158 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 3159 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 3160 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); 3161 if (cong == 0) { 3162 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); 3163 } else { 3164 val = 3165 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); 3166 for (i = 0; i < 4; i++) { 3167 if (cong & (1 << i)) 3168 ch_map |= 1 << (i << cng_ch_bits_log); 3169 } 3170 val |= CONMCTXT_CNGCHMAP_V(ch_map); 3171 } 3172 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 3173 &param, &val); 3174 if (ret) 3175 dev_warn(adap->pdev_dev, "Failed to set Congestion" 3176 " Manager Context for Ingress Queue %d: %d\n", 3177 iq->cntxt_id, -ret); 3178 } 3179 3180 return 0; 3181 3182 fl_nomem: 3183 ret = -ENOMEM; 3184 err: 3185 if (iq->desc) { 3186 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, 3187 iq->desc, iq->phys_addr); 3188 iq->desc = NULL; 3189 } 3190 if (fl && fl->desc) { 3191 kfree(fl->sdesc); 3192 fl->sdesc = NULL; 3193 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), 3194 fl->desc, fl->addr); 3195 fl->desc = NULL; 3196 } 3197 return ret; 3198 } 3199 3200 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 3201 { 3202 q->cntxt_id = id; 3203 q->bar2_addr = bar2_address(adap, 3204 q->cntxt_id, 3205 T4_BAR2_QTYPE_EGRESS, 3206 &q->bar2_qid); 3207 q->in_use = 0; 3208 q->cidx = q->pidx = 0; 3209 q->stops = q->restarts = 0; 3210 q->stat = (void *)&q->desc[q->size]; 3211 spin_lock_init(&q->db_lock); 3212 adap->sge.egr_map[id - adap->sge.egr_start] = q; 3213 } 3214 3215 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 3216 struct net_device *dev, struct netdev_queue *netdevq, 3217 unsigned int iqid) 3218 { 3219 int ret, nentries; 3220 struct fw_eq_eth_cmd c; 3221 struct sge *s = &adap->sge; 3222 struct port_info *pi = netdev_priv(dev); 3223 3224 /* Add status entries */ 3225 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 3226 3227 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 3228 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 3229 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, 3230 netdev_queue_numa_node_read(netdevq)); 3231 if (!txq->q.desc) 3232 return -ENOMEM; 3233 3234 memset(&c, 0, sizeof(c)); 3235 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | 3236 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 3237 FW_EQ_ETH_CMD_PFN_V(adap->pf) | 3238 FW_EQ_ETH_CMD_VFN_V(0)); 3239 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | 3240 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); 3241 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | 3242 FW_EQ_ETH_CMD_VIID_V(pi->viid)); 3243 c.fetchszm_to_iqid = 3244 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 3245 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | 3246 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); 3247 c.dcaen_to_eqsize = 3248 htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | 3249 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 3250 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 3251 FW_EQ_ETH_CMD_EQSIZE_V(nentries)); 3252 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 3253 3254 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 3255 if (ret) { 3256 kfree(txq->q.sdesc); 3257 txq->q.sdesc = NULL; 3258 dma_free_coherent(adap->pdev_dev, 3259 nentries * sizeof(struct tx_desc), 3260 txq->q.desc, txq->q.phys_addr); 3261
txq->q.desc = NULL; 3262 return ret; 3263 } 3264 3265 txq->q.q_type = CXGB4_TXQ_ETH; 3266 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); 3267 txq->txq = netdevq; 3268 txq->tso = txq->tx_cso = txq->vlan_ins = 0; 3269 txq->mapping_err = 0; 3270 return 0; 3271 } 3272 3273 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, 3274 struct net_device *dev, unsigned int iqid, 3275 unsigned int cmplqid) 3276 { 3277 int ret, nentries; 3278 struct fw_eq_ctrl_cmd c; 3279 struct sge *s = &adap->sge; 3280 struct port_info *pi = netdev_priv(dev); 3281 3282 /* Add status entries */ 3283 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 3284 3285 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, 3286 sizeof(struct tx_desc), 0, &txq->q.phys_addr, 3287 NULL, 0, dev_to_node(adap->pdev_dev)); 3288 if (!txq->q.desc) 3289 return -ENOMEM; 3290 3291 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | 3292 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 3293 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | 3294 FW_EQ_CTRL_CMD_VFN_V(0)); 3295 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | 3296 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); 3297 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); 3298 c.physeqid_pkd = htonl(0); 3299 c.fetchszm_to_iqid = 3300 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 3301 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | 3302 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); 3303 c.dcaen_to_eqsize = 3304 htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | 3305 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 3306 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 3307 FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); 3308 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 3309 3310 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 3311 if (ret) { 3312 dma_free_coherent(adap->pdev_dev, 3313 nentries * sizeof(struct tx_desc), 3314 txq->q.desc, txq->q.phys_addr); 3315 txq->q.desc = NULL; 3316 return ret; 3317 } 3318 3319 txq->q.q_type = CXGB4_TXQ_CTRL; 3320 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); 3321 txq->adap = adap; 3322 skb_queue_head_init(&txq->sendq); 3323 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); 3324 txq->full = 0; 3325 return 0; 3326 } 3327 3328 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, 3329 unsigned int cmplqid) 3330 { 3331 u32 param, val; 3332 3333 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | 3334 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) | 3335 FW_PARAMS_PARAM_YZ_V(eqid)); 3336 val = cmplqid; 3337 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); 3338 } 3339 3340 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, 3341 struct net_device *dev, unsigned int iqid, 3342 unsigned int uld_type) 3343 { 3344 int ret, nentries; 3345 struct fw_eq_ofld_cmd c; 3346 struct sge *s = &adap->sge; 3347 struct port_info *pi = netdev_priv(dev); 3348 int cmd = FW_EQ_OFLD_CMD; 3349 3350 /* Add status entries */ 3351 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 3352 3353 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 3354 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 3355 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, 3356 NUMA_NO_NODE); 3357 if (!txq->q.desc) 3358 return -ENOMEM; 3359 3360 memset(&c, 0, sizeof(c)); 3361 if (unlikely(uld_type == CXGB4_TX_CRYPTO)) 3362 cmd = FW_EQ_CTRL_CMD; 3363 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | 3364 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3365 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | 3366 FW_EQ_OFLD_CMD_VFN_V(0)); 3367 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | 3368 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); 3369 c.fetchszm_to_iqid = 3370 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | 3371 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | 3372 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); 3373 c.dcaen_to_eqsize = 3374 htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | 3375 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | 3376 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | 3377 FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); 3378 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 3379 3380 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 3381 if (ret) { 3382 kfree(txq->q.sdesc); 3383 txq->q.sdesc = NULL; 3384 dma_free_coherent(adap->pdev_dev, 3385 nentries * sizeof(struct tx_desc), 3386 txq->q.desc, txq->q.phys_addr); 3387 txq->q.desc = NULL; 3388 return ret; 3389 } 3390 3391 txq->q.q_type = CXGB4_TXQ_ULD; 3392 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); 3393 txq->adap = adap; 3394 skb_queue_head_init(&txq->sendq); 3395 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); 3396 txq->full = 0; 3397 txq->mapping_err = 0; 3398 return 0; 3399 } 3400 3401 void free_txq(struct adapter *adap, struct sge_txq *q) 3402 { 3403 struct sge *s = &adap->sge; 3404 3405 dma_free_coherent(adap->pdev_dev, 3406 q->size * sizeof(struct tx_desc) + s->stat_len, 3407 q->desc, q->phys_addr); 3408 q->cntxt_id = 0; 3409 q->sdesc = NULL; 3410 q->desc = NULL; 3411 } 3412 3413 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, 3414 struct sge_fl *fl) 3415 { 3416 struct sge *s = &adap->sge; 3417 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 3418 3419 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; 3420 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 3421 rq->cntxt_id, fl_id, 0xffff); 3422 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 3423 rq->desc, rq->phys_addr); 3424 netif_napi_del(&rq->napi); 3425 rq->netdev = NULL; 3426 rq->cntxt_id = rq->abs_id = 0; 3427 rq->desc = NULL; 3428 3429 if (fl) { 3430 free_rx_bufs(adap, fl, fl->avail); 3431 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, 3432 fl->desc, fl->addr); 3433 kfree(fl->sdesc); 3434 fl->sdesc = NULL; 3435 fl->cntxt_id = 0; 3436 fl->desc = NULL; 3437 } 3438 } 3439 3440 /** 3441 * t4_free_ofld_rxqs - free a block of consecutive Rx queues 3442 * @adap: the adapter 3443 * @n: number of queues 3444 * @q: pointer to first queue 3445 * 3446 * Release the resources of a consecutive block of offload Rx queues. 3447 */ 3448 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) 3449 { 3450 for ( ; n; n--, q++) 3451 if (q->rspq.desc) 3452 free_rspq_fl(adap, &q->rspq, 3453 q->fl.size ? &q->fl : NULL); 3454 } 3455 3456 /** 3457 * t4_free_sge_resources - free SGE resources 3458 * @adap: the adapter 3459 * 3460 * Frees resources used by the SGE queue sets. 3461 */ 3462 void t4_free_sge_resources(struct adapter *adap) 3463 { 3464 int i; 3465 struct sge_eth_rxq *eq; 3466 struct sge_eth_txq *etq; 3467 3468 /* stop all Rx queues in order to start them draining */ 3469 for (i = 0; i < adap->sge.ethqsets; i++) { 3470 eq = &adap->sge.ethrxq[i]; 3471 if (eq->rspq.desc) 3472 t4_iq_stop(adap, adap->mbox, adap->pf, 0, 3473 FW_IQ_TYPE_FL_INT_CAP, 3474 eq->rspq.cntxt_id, 3475 eq->fl.size ? 
eq->fl.cntxt_id : 0xffff, 3476 0xffff); 3477 } 3478 3479 /* clean up Ethernet Tx/Rx queues */ 3480 for (i = 0; i < adap->sge.ethqsets; i++) { 3481 eq = &adap->sge.ethrxq[i]; 3482 if (eq->rspq.desc) 3483 free_rspq_fl(adap, &eq->rspq, 3484 eq->fl.size ? &eq->fl : NULL); 3485 3486 etq = &adap->sge.ethtxq[i]; 3487 if (etq->q.desc) { 3488 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 3489 etq->q.cntxt_id); 3490 __netif_tx_lock_bh(etq->txq); 3491 free_tx_desc(adap, &etq->q, etq->q.in_use, true); 3492 __netif_tx_unlock_bh(etq->txq); 3493 kfree(etq->q.sdesc); 3494 free_txq(adap, &etq->q); 3495 } 3496 } 3497 3498 /* clean up control Tx queues */ 3499 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { 3500 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; 3501 3502 if (cq->q.desc) { 3503 tasklet_kill(&cq->qresume_tsk); 3504 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, 3505 cq->q.cntxt_id); 3506 __skb_queue_purge(&cq->sendq); 3507 free_txq(adap, &cq->q); 3508 } 3509 } 3510 3511 if (adap->sge.fw_evtq.desc) 3512 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); 3513 3514 if (adap->sge.intrq.desc) 3515 free_rspq_fl(adap, &adap->sge.intrq, NULL); 3516 3517 if (!is_t4(adap->params.chip)) { 3518 etq = &adap->sge.ptptxq; 3519 if (etq->q.desc) { 3520 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 3521 etq->q.cntxt_id); 3522 spin_lock_bh(&adap->ptp_lock); 3523 free_tx_desc(adap, &etq->q, etq->q.in_use, true); 3524 spin_unlock_bh(&adap->ptp_lock); 3525 kfree(etq->q.sdesc); 3526 free_txq(adap, &etq->q); 3527 } 3528 } 3529 3530 /* clear the reverse egress queue map */ 3531 memset(adap->sge.egr_map, 0, 3532 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); 3533 } 3534 3535 void t4_sge_start(struct adapter *adap) 3536 { 3537 adap->sge.ethtxq_rover = 0; 3538 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); 3539 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); 3540 } 3541 3542 /** 3543 * t4_sge_stop - disable SGE operation 3544 * @adap: the adapter 3545 * 3546 * Stop tasklets and timers associated with the DMA engine. Note that 3547 * this is effective only if measures have been taken to disable any HW 3548 * events that may restart them. 
3549 */ 3550 void t4_sge_stop(struct adapter *adap) 3551 { 3552 int i; 3553 struct sge *s = &adap->sge; 3554 3555 if (in_interrupt()) /* actions below require waiting */ 3556 return; 3557 3558 if (s->rx_timer.function) 3559 del_timer_sync(&s->rx_timer); 3560 if (s->tx_timer.function) 3561 del_timer_sync(&s->tx_timer); 3562 3563 if (is_offload(adap)) { 3564 struct sge_uld_txq_info *txq_info; 3565 3566 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; 3567 if (txq_info) { 3568 struct sge_uld_txq *txq; 3569 for_each_ofldtxq(&adap->sge, i) { 3570 txq = &txq_info->uldtxq[i]; 3571 if (txq->q.desc) 3572 tasklet_kill(&txq->qresume_tsk); 3573 } 3574 } 3575 } 3576 3577 if (is_pci_uld(adap)) { 3578 struct sge_uld_txq_info *txq_info; 3579 3580 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; 3581 if (txq_info) { 3582 struct sge_uld_txq *txq; 3583 for_each_ofldtxq(&adap->sge, i) { 3584 txq = &txq_info->uldtxq[i]; 3585 if (txq->q.desc) 3586 tasklet_kill(&txq->qresume_tsk); 3587 } 3588 } 3589 } 3590 3591 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { 3592 struct sge_ctrl_txq *cq = &s->ctrlq[i]; 3593 3594 if (cq->q.desc) 3595 tasklet_kill(&cq->qresume_tsk); 3596 } 3597 } 3598 3599 /** 3600 * t4_sge_init_soft - grab core SGE values needed by SGE code 3601 * @adap: the adapter 3602 * 3603 * Grab the SGE operating parameters that we need to do our job, and 3604 * make sure we can live with them. 3605 */ 3606 3607 static int t4_sge_init_soft(struct adapter *adap) 3608 { 3609 struct sge *s = &adap->sge; 3610 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; 3611 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; 3612 u32 ingress_rx_threshold; 3613 3614 /* 3615 * Verify that CPL messages are going to the Ingress Queue for 3616 * process_responses() and that only packet data is going to the 3617 * Free Lists. 3618 */ 3619 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != 3620 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { 3621 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); 3622 return -EINVAL; 3623 } 3624 3625 /* 3626 * Validate the Host Buffer Register Array indices that we want to 3627 * use ... 3628 * 3629 * XXX Note that we should really read through the Host Buffer Size 3630 * XXX register array and find the indices of the Buffer Sizes which 3631 * XXX meet our needs! 3632 */ 3633 #define READ_FL_BUF(x) \ 3634 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) 3635 3636 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); 3637 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); 3638 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); 3639 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); 3640 3641 /* We only bother using the Large Page logic if the Large Page Buffer 3642 * is larger than our Page Size Buffer. 3643 */ 3644 if (fl_large_pg <= fl_small_pg) 3645 fl_large_pg = 0; 3646 3647 #undef READ_FL_BUF 3648 3649 /* The Page Size Buffer must be exactly equal to our Page Size and the 3650 * Large Page Size Buffer should be 0 (per above) or a power of 2.
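 *
 * (The test below uses the usual identity: for x != 0, x & (x - 1)
 * clears the lowest set bit and so is zero exactly when x is a power
 * of 2; x == 0 passes as well, matching the "should be 0" case.)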
3651 */ 3652 if (fl_small_pg != PAGE_SIZE || 3653 (fl_large_pg & (fl_large_pg-1)) != 0) { 3654 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 3655 fl_small_pg, fl_large_pg); 3656 return -EINVAL; 3657 } 3658 if (fl_large_pg) 3659 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; 3660 3661 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || 3662 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { 3663 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", 3664 fl_small_mtu, fl_large_mtu); 3665 return -EINVAL; 3666 } 3667 3668 /* 3669 * Retrieve our RX interrupt holdoff timer values and counter 3670 * threshold values from the SGE parameters. 3671 */ 3672 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); 3673 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); 3674 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); 3675 s->timer_val[0] = core_ticks_to_us(adap, 3676 TIMERVALUE0_G(timer_value_0_and_1)); 3677 s->timer_val[1] = core_ticks_to_us(adap, 3678 TIMERVALUE1_G(timer_value_0_and_1)); 3679 s->timer_val[2] = core_ticks_to_us(adap, 3680 TIMERVALUE2_G(timer_value_2_and_3)); 3681 s->timer_val[3] = core_ticks_to_us(adap, 3682 TIMERVALUE3_G(timer_value_2_and_3)); 3683 s->timer_val[4] = core_ticks_to_us(adap, 3684 TIMERVALUE4_G(timer_value_4_and_5)); 3685 s->timer_val[5] = core_ticks_to_us(adap, 3686 TIMERVALUE5_G(timer_value_4_and_5)); 3687 3688 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); 3689 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); 3690 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); 3691 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); 3692 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); 3693 3694 return 0; 3695 } 3696 3697 /** 3698 * t4_sge_init - initialize SGE 3699 * @adap: the adapter 3700 * 3701 * Perform low-level SGE code initialization needed every time after a 3702 * chip reset. 3703 */ 3704 int t4_sge_init(struct adapter *adap) 3705 { 3706 struct sge *s = &adap->sge; 3707 u32 sge_control, sge_conm_ctrl; 3708 int ret, egress_threshold; 3709 3710 /* 3711 * Ingress Padding Boundary and Egress Status Page Size are set up by 3712 * t4_fixup_host_params(). 3713 */ 3714 sge_control = t4_read_reg(adap, SGE_CONTROL_A); 3715 s->pktshift = PKTSHIFT_G(sge_control); 3716 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; 3717 3718 s->fl_align = t4_fl_pkt_align(adap); 3719 ret = t4_sge_init_soft(adap); 3720 if (ret < 0) 3721 return ret; 3722 3723 /* 3724 * A FL with <= fl_starve_thres buffers is starving and a periodic 3725 * timer will attempt to refill it. This needs to be larger than the 3726 * SGE's Egress Congestion Threshold. If it isn't, then we can get 3727 * stuck waiting for new packets while the SGE is waiting for us to 3728 * give it more Free List entries. (Note that the SGE's Egress 3729 * Congestion Threshold is in units of 2 Free List pointers.) For T4, 3730 * there was only a single field to control this. For T5 there's the 3731 * original field which now only applies to Unpacked Mode Free List 3732 * buffers and a new field which only applies to Packed Mode Free List 3733 * buffers. 
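 *
 * (Worked example with an illustrative value: if the hardware reported
 * an Egress Congestion Threshold of 64, i.e. 128 Free List pointers,
 * the code below would set fl_starve_thres = 2 * 64 + 1 = 129.)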
3734 */ 3735 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); 3736 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { 3737 case CHELSIO_T4: 3738 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); 3739 break; 3740 case CHELSIO_T5: 3741 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); 3742 break; 3743 case CHELSIO_T6: 3744 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl); 3745 break; 3746 default: 3747 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", 3748 CHELSIO_CHIP_VERSION(adap->params.chip)); 3749 return -EINVAL; 3750 } 3751 s->fl_starve_thres = 2 * egress_threshold + 1; 3752 3753 t4_idma_monitor_init(adap, &s->idma_monitor); 3754 3755 /* Set up timers used for recurring callbacks to process RX and TX 3756 * administrative tasks. 3757 */ 3758 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); 3759 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); 3760 3761 spin_lock_init(&s->intrq_lock); 3762 3763 return 0; 3764 } 3765
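/* Appendix-style sketch (not driver code): the rounded tick-to-nanosecond
 * conversion performed by cxgb4_sgetim_to_hwtstamp() above, assuming (as
 * the driver's VPD parameters suggest) that cclk is the Core Clock in kHz.
 * Adding cclk / 2 before dividing rounds to nearest rather than truncating.
 */
#include <stdint.h>

static uint64_t sge_ticks_to_ns(uint64_t ticks, uint32_t cclk_khz)
{
	/* ticks / (cclk_khz * 1000 Hz) seconds == ticks * 1e6 / cclk_khz ns */
	return (ticks * 1000000ULL + cclk_khz / 2) / cclk_khz;
}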