1 /* 2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet 3 * driver for Linux. 4 * 5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/skbuff.h> 37 #include <linux/netdevice.h> 38 #include <linux/etherdevice.h> 39 #include <linux/if_vlan.h> 40 #include <linux/ip.h> 41 #include <net/ipv6.h> 42 #include <net/tcp.h> 43 #include <linux/dma-mapping.h> 44 #include <linux/prefetch.h> 45 46 #include "t4vf_common.h" 47 #include "t4vf_defs.h" 48 49 #include "../cxgb4/t4_regs.h" 50 #include "../cxgb4/t4fw_api.h" 51 #include "../cxgb4/t4_msg.h" 52 53 /* 54 * Decoded Adapter Parameters. 55 */ 56 static u32 FL_PG_ORDER; /* large page allocation size */ 57 static u32 STAT_LEN; /* length of status page at ring end */ 58 static u32 PKTSHIFT; /* padding between CPL and packet data */ 59 static u32 FL_ALIGN; /* response queue message alignment */ 60 61 /* 62 * Constants ... 63 */ 64 enum { 65 /* 66 * Egress Queue sizes, producer and consumer indices are all in units 67 * of Egress Context Units bytes. Note that as far as the hardware is 68 * concerned, the free list is an Egress Queue (the host produces free 69 * buffers which the hardware consumes) and free list entries are 70 * 64-bit PCI DMA addresses. 71 */ 72 EQ_UNIT = SGE_EQ_IDXSIZE, 73 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), 74 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), 75 76 /* 77 * Max number of TX descriptors we clean up at a time. Should be 78 * modest as freeing skbs isn't cheap and it happens while holding 79 * locks. We just need to free packets faster than they arrive, we 80 * eventually catch up and keep the amortized cost reasonable. 81 */ 82 MAX_TX_RECLAIM = 16, 83 84 /* 85 * Max number of Rx buffers we replenish at a time. Again keep this 86 * modest, allocating buffers isn't cheap either. 87 */ 88 MAX_RX_REFILL = 16, 89 90 /* 91 * Period of the Rx queue check timer. This timer is infrequent as it 92 * has something to do only when the system experiences severe memory 93 * shortage. 
94 */ 95 RX_QCHECK_PERIOD = (HZ / 2), 96 97 /* 98 * Period of the TX queue check timer and the maximum number of TX 99 * descriptors to be reclaimed by the TX timer. 100 */ 101 TX_QCHECK_PERIOD = (HZ / 2), 102 MAX_TIMER_TX_RECLAIM = 100, 103 104 /* 105 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic 106 * timer will attempt to refill it. 107 */ 108 FL_STARVE_THRES = 4, 109 110 /* 111 * Suspend an Ethernet TX queue with fewer available descriptors than 112 * this. We always want to have room for a maximum sized packet: 113 * inline immediate data + MAX_SKB_FRAGS. This is the same as 114 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS 115 * (see that function and its helpers for a description of the 116 * calculation). 117 */ 118 ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1, 119 ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 + 120 ((ETHTXQ_MAX_FRAGS-1) & 1) + 121 2), 122 ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + 123 sizeof(struct cpl_tx_pkt_lso_core) + 124 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), 125 ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR, 126 127 ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT), 128 129 /* 130 * Max TX descriptor space we allow for an Ethernet packet to be 131 * inlined into a WR. This is limited by the maximum value which 132 * we can specify for immediate data in the firmware Ethernet TX 133 * Work Request. 134 */ 135 MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, 136 137 /* 138 * Max size of a WR sent through a control TX queue. 139 */ 140 MAX_CTRL_WR_LEN = 256, 141 142 /* 143 * Maximum amount of data which we'll ever need to inline into a 144 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN). 145 */ 146 MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN 147 ? MAX_IMM_TX_PKT_LEN 148 : MAX_CTRL_WR_LEN), 149 150 /* 151 * For incoming packets less than RX_COPY_THRES, we copy the data into 152 * an skb rather than referencing the data. We allocate enough 153 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes 154 * of the data (header). 155 */ 156 RX_COPY_THRES = 256, 157 RX_PULL_LEN = 128, 158 159 /* 160 * Main body length for sk_buffs used for RX Ethernet packets with 161 * fragments. Should be >= RX_PULL_LEN but possibly bigger to give 162 * pskb_may_pull() some room. 163 */ 164 RX_SKB_LEN = 512, 165 }; 166 167 /* 168 * Software state per TX descriptor. 169 */ 170 struct tx_sw_desc { 171 struct sk_buff *skb; /* socket buffer of TX data source */ 172 struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */ 173 }; 174 175 /* 176 * Software state per RX Free List descriptor. We keep track of the allocated 177 * FL page, its size, and its PCI DMA address (if the page is mapped). The FL 178 * page size and its PCI DMA mapped state are stored in the low bits of the 179 * PCI DMA address as per below. 180 */ 181 struct rx_sw_desc { 182 struct page *page; /* Free List page buffer */ 183 dma_addr_t dma_addr; /* PCI DMA address (if mapped) */ 184 /* and flags (see below) */ 185 }; 186 187 /* 188 * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the 189 * SGE also uses the low 4 bits to determine the size of the buffer. It uses 190 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array. 191 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4 192 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving 193 * to the SGE. 
Thus, our software state of "is the buffer mapped for DMA" is 194 * maintained in an inverse sense so the hardware never sees that bit high. 195 */ 196 enum { 197 RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */ 198 RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ 199 }; 200 201 /** 202 * get_buf_addr - return DMA buffer address of software descriptor 203 * @sdesc: pointer to the software buffer descriptor 204 * 205 * Return the DMA buffer address of a software descriptor (stripping out 206 * our low-order flag bits). 207 */ 208 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc) 209 { 210 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); 211 } 212 213 /** 214 * is_buf_mapped - is buffer mapped for DMA? 215 * @sdesc: pointer to the software buffer descriptor 216 * 217 * Determine whether the buffer associated with a software descriptor in 218 * mapped for DMA or not. 219 */ 220 static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc) 221 { 222 return !(sdesc->dma_addr & RX_UNMAPPED_BUF); 223 } 224 225 /** 226 * need_skb_unmap - does the platform need unmapping of sk_buffs? 227 * 228 * Returns true if the platform needs sk_buff unmapping. The compiler 229 * optimizes away unnecessary code if this returns true. 230 */ 231 static inline int need_skb_unmap(void) 232 { 233 #ifdef CONFIG_NEED_DMA_MAP_STATE 234 return 1; 235 #else 236 return 0; 237 #endif 238 } 239 240 /** 241 * txq_avail - return the number of available slots in a TX queue 242 * @tq: the TX queue 243 * 244 * Returns the number of available descriptors in a TX queue. 245 */ 246 static inline unsigned int txq_avail(const struct sge_txq *tq) 247 { 248 return tq->size - 1 - tq->in_use; 249 } 250 251 /** 252 * fl_cap - return the capacity of a Free List 253 * @fl: the Free List 254 * 255 * Returns the capacity of a Free List. The capacity is less than the 256 * size because an Egress Queue Index Unit worth of descriptors needs to 257 * be left unpopulated, otherwise the Producer and Consumer indices PIDX 258 * and CIDX will match and the hardware will think the FL is empty. 259 */ 260 static inline unsigned int fl_cap(const struct sge_fl *fl) 261 { 262 return fl->size - FL_PER_EQ_UNIT; 263 } 264 265 /** 266 * fl_starving - return whether a Free List is starving. 267 * @fl: the Free List 268 * 269 * Tests specified Free List to see whether the number of buffers 270 * available to the hardware has falled below our "starvation" 271 * threshold. 272 */ 273 static inline bool fl_starving(const struct sge_fl *fl) 274 { 275 return fl->avail - fl->pend_cred <= FL_STARVE_THRES; 276 } 277 278 /** 279 * map_skb - map an skb for DMA to the device 280 * @dev: the egress net device 281 * @skb: the packet to map 282 * @addr: a pointer to the base of the DMA mapping array 283 * 284 * Map an skb for DMA to the device and return an array of DMA addresses. 
285 */ 286 static int map_skb(struct device *dev, const struct sk_buff *skb, 287 dma_addr_t *addr) 288 { 289 const skb_frag_t *fp, *end; 290 const struct skb_shared_info *si; 291 292 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 293 if (dma_mapping_error(dev, *addr)) 294 goto out_err; 295 296 si = skb_shinfo(skb); 297 end = &si->frags[si->nr_frags]; 298 for (fp = si->frags; fp < end; fp++) { 299 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp), 300 DMA_TO_DEVICE); 301 if (dma_mapping_error(dev, *addr)) 302 goto unwind; 303 } 304 return 0; 305 306 unwind: 307 while (fp-- > si->frags) 308 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); 309 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); 310 311 out_err: 312 return -ENOMEM; 313 } 314 315 static void unmap_sgl(struct device *dev, const struct sk_buff *skb, 316 const struct ulptx_sgl *sgl, const struct sge_txq *tq) 317 { 318 const struct ulptx_sge_pair *p; 319 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 320 321 if (likely(skb_headlen(skb))) 322 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), 323 be32_to_cpu(sgl->len0), DMA_TO_DEVICE); 324 else { 325 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), 326 be32_to_cpu(sgl->len0), DMA_TO_DEVICE); 327 nfrags--; 328 } 329 330 /* 331 * the complexity below is because of the possibility of a wrap-around 332 * in the middle of an SGL 333 */ 334 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { 335 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { 336 unmap: 337 dma_unmap_page(dev, be64_to_cpu(p->addr[0]), 338 be32_to_cpu(p->len[0]), DMA_TO_DEVICE); 339 dma_unmap_page(dev, be64_to_cpu(p->addr[1]), 340 be32_to_cpu(p->len[1]), DMA_TO_DEVICE); 341 p++; 342 } else if ((u8 *)p == (u8 *)tq->stat) { 343 p = (const struct ulptx_sge_pair *)tq->desc; 344 goto unmap; 345 } else if ((u8 *)p + 8 == (u8 *)tq->stat) { 346 const __be64 *addr = (const __be64 *)tq->desc; 347 348 dma_unmap_page(dev, be64_to_cpu(addr[0]), 349 be32_to_cpu(p->len[0]), DMA_TO_DEVICE); 350 dma_unmap_page(dev, be64_to_cpu(addr[1]), 351 be32_to_cpu(p->len[1]), DMA_TO_DEVICE); 352 p = (const struct ulptx_sge_pair *)&addr[2]; 353 } else { 354 const __be64 *addr = (const __be64 *)tq->desc; 355 356 dma_unmap_page(dev, be64_to_cpu(p->addr[0]), 357 be32_to_cpu(p->len[0]), DMA_TO_DEVICE); 358 dma_unmap_page(dev, be64_to_cpu(addr[0]), 359 be32_to_cpu(p->len[1]), DMA_TO_DEVICE); 360 p = (const struct ulptx_sge_pair *)&addr[1]; 361 } 362 } 363 if (nfrags) { 364 __be64 addr; 365 366 if ((u8 *)p == (u8 *)tq->stat) 367 p = (const struct ulptx_sge_pair *)tq->desc; 368 addr = ((u8 *)p + 16 <= (u8 *)tq->stat 369 ? p->addr[0] 370 : *(const __be64 *)tq->desc); 371 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), 372 DMA_TO_DEVICE); 373 } 374 } 375 376 /** 377 * free_tx_desc - reclaims TX descriptors and their buffers 378 * @adapter: the adapter 379 * @tq: the TX queue to reclaim descriptors from 380 * @n: the number of descriptors to reclaim 381 * @unmap: whether the buffers should be unmapped for DMA 382 * 383 * Reclaims TX descriptors from an SGE TX queue and frees the associated 384 * TX buffers. Called with the TX queue lock held. 
385 */ 386 static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, 387 unsigned int n, bool unmap) 388 { 389 struct tx_sw_desc *sdesc; 390 unsigned int cidx = tq->cidx; 391 struct device *dev = adapter->pdev_dev; 392 393 const int need_unmap = need_skb_unmap() && unmap; 394 395 sdesc = &tq->sdesc[cidx]; 396 while (n--) { 397 /* 398 * If we kept a reference to the original TX skb, we need to 399 * unmap it from PCI DMA space (if required) and free it. 400 */ 401 if (sdesc->skb) { 402 if (need_unmap) 403 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); 404 kfree_skb(sdesc->skb); 405 sdesc->skb = NULL; 406 } 407 408 sdesc++; 409 if (++cidx == tq->size) { 410 cidx = 0; 411 sdesc = tq->sdesc; 412 } 413 } 414 tq->cidx = cidx; 415 } 416 417 /* 418 * Return the number of reclaimable descriptors in a TX queue. 419 */ 420 static inline int reclaimable(const struct sge_txq *tq) 421 { 422 int hw_cidx = be16_to_cpu(tq->stat->cidx); 423 int reclaimable = hw_cidx - tq->cidx; 424 if (reclaimable < 0) 425 reclaimable += tq->size; 426 return reclaimable; 427 } 428 429 /** 430 * reclaim_completed_tx - reclaims completed TX descriptors 431 * @adapter: the adapter 432 * @tq: the TX queue to reclaim completed descriptors from 433 * @unmap: whether the buffers should be unmapped for DMA 434 * 435 * Reclaims TX descriptors that the SGE has indicated it has processed, 436 * and frees the associated buffers if possible. Called with the TX 437 * queue locked. 438 */ 439 static inline void reclaim_completed_tx(struct adapter *adapter, 440 struct sge_txq *tq, 441 bool unmap) 442 { 443 int avail = reclaimable(tq); 444 445 if (avail) { 446 /* 447 * Limit the amount of clean up work we do at a time to keep 448 * the TX lock hold time O(1). 449 */ 450 if (avail > MAX_TX_RECLAIM) 451 avail = MAX_TX_RECLAIM; 452 453 free_tx_desc(adapter, tq, avail, unmap); 454 tq->in_use -= avail; 455 } 456 } 457 458 /** 459 * get_buf_size - return the size of an RX Free List buffer. 460 * @sdesc: pointer to the software buffer descriptor 461 */ 462 static inline int get_buf_size(const struct rx_sw_desc *sdesc) 463 { 464 return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) 465 ? (PAGE_SIZE << FL_PG_ORDER) 466 : PAGE_SIZE; 467 } 468 469 /** 470 * free_rx_bufs - free RX buffers on an SGE Free List 471 * @adapter: the adapter 472 * @fl: the SGE Free List to free buffers from 473 * @n: how many buffers to free 474 * 475 * Release the next @n buffers on an SGE Free List RX queue. The 476 * buffers must be made inaccessible to hardware before calling this 477 * function. 478 */ 479 static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) 480 { 481 while (n--) { 482 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; 483 484 if (is_buf_mapped(sdesc)) 485 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), 486 get_buf_size(sdesc), PCI_DMA_FROMDEVICE); 487 put_page(sdesc->page); 488 sdesc->page = NULL; 489 if (++fl->cidx == fl->size) 490 fl->cidx = 0; 491 fl->avail--; 492 } 493 } 494 495 /** 496 * unmap_rx_buf - unmap the current RX buffer on an SGE Free List 497 * @adapter: the adapter 498 * @fl: the SGE Free List 499 * 500 * Unmap the current buffer on an SGE Free List RX queue. The 501 * buffer must be made inaccessible to HW before calling this function. 502 * 503 * This is similar to @free_rx_bufs above but does not free the buffer. 504 * Do note that the FL still loses any further access to the buffer. 
505 * This is used predominantly to "transfer ownership" of an FL buffer 506 * to another entity (typically an skb's fragment list). 507 */ 508 static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) 509 { 510 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; 511 512 if (is_buf_mapped(sdesc)) 513 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), 514 get_buf_size(sdesc), PCI_DMA_FROMDEVICE); 515 sdesc->page = NULL; 516 if (++fl->cidx == fl->size) 517 fl->cidx = 0; 518 fl->avail--; 519 } 520 521 /** 522 * ring_fl_db - righ doorbell on free list 523 * @adapter: the adapter 524 * @fl: the Free List whose doorbell should be rung ... 525 * 526 * Tell the Scatter Gather Engine that there are new free list entries 527 * available. 528 */ 529 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) 530 { 531 /* 532 * The SGE keeps track of its Producer and Consumer Indices in terms 533 * of Egress Queue Units so we can only tell it about integral numbers 534 * of multiples of Free List Entries per Egress Queue Units ... 535 */ 536 if (fl->pend_cred >= FL_PER_EQ_UNIT) { 537 wmb(); 538 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 539 DBPRIO | 540 QID(fl->cntxt_id) | 541 PIDX(fl->pend_cred / FL_PER_EQ_UNIT)); 542 fl->pend_cred %= FL_PER_EQ_UNIT; 543 } 544 } 545 546 /** 547 * set_rx_sw_desc - initialize software RX buffer descriptor 548 * @sdesc: pointer to the softwore RX buffer descriptor 549 * @page: pointer to the page data structure backing the RX buffer 550 * @dma_addr: PCI DMA address (possibly with low-bit flags) 551 */ 552 static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page, 553 dma_addr_t dma_addr) 554 { 555 sdesc->page = page; 556 sdesc->dma_addr = dma_addr; 557 } 558 559 /* 560 * Support for poisoning RX buffers ... 561 */ 562 #define POISON_BUF_VAL -1 563 564 static inline void poison_buf(struct page *page, size_t sz) 565 { 566 #if POISON_BUF_VAL >= 0 567 memset(page_address(page), POISON_BUF_VAL, sz); 568 #endif 569 } 570 571 /** 572 * refill_fl - refill an SGE RX buffer ring 573 * @adapter: the adapter 574 * @fl: the Free List ring to refill 575 * @n: the number of new buffers to allocate 576 * @gfp: the gfp flags for the allocations 577 * 578 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, 579 * allocated with the supplied gfp flags. The caller must assure that 580 * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN 581 * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number 582 * of buffers allocated. If afterwards the queue is found critically low, 583 * mark it as starving in the bitmap of starving FLs. 584 */ 585 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, 586 int n, gfp_t gfp) 587 { 588 struct page *page; 589 dma_addr_t dma_addr; 590 unsigned int cred = fl->avail; 591 __be64 *d = &fl->desc[fl->pidx]; 592 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; 593 594 /* 595 * Sanity: ensure that the result of adding n Free List buffers 596 * won't result in wrapping the SGE's Producer Index around to 597 * it's Consumer Index thereby indicating an empty Free List ... 598 */ 599 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); 600 601 /* 602 * If we support large pages, prefer large buffers and fail over to 603 * small pages if we can't allocate large pages to satisfy the refill. 604 * If we don't support large pages, drop directly into the small page 605 * allocation code. 
606 */ 607 if (FL_PG_ORDER == 0) 608 goto alloc_small_pages; 609 610 while (n) { 611 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 612 FL_PG_ORDER); 613 if (unlikely(!page)) { 614 /* 615 * We've failed inour attempt to allocate a "large 616 * page". Fail over to the "small page" allocation 617 * below. 618 */ 619 fl->large_alloc_failed++; 620 break; 621 } 622 poison_buf(page, PAGE_SIZE << FL_PG_ORDER); 623 624 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, 625 PAGE_SIZE << FL_PG_ORDER, 626 PCI_DMA_FROMDEVICE); 627 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { 628 /* 629 * We've run out of DMA mapping space. Free up the 630 * buffer and return with what we've managed to put 631 * into the free list. We don't want to fail over to 632 * the small page allocation below in this case 633 * because DMA mapping resources are typically 634 * critical resources once they become scarse. 635 */ 636 __free_pages(page, FL_PG_ORDER); 637 goto out; 638 } 639 dma_addr |= RX_LARGE_BUF; 640 *d++ = cpu_to_be64(dma_addr); 641 642 set_rx_sw_desc(sdesc, page, dma_addr); 643 sdesc++; 644 645 fl->avail++; 646 if (++fl->pidx == fl->size) { 647 fl->pidx = 0; 648 sdesc = fl->sdesc; 649 d = fl->desc; 650 } 651 n--; 652 } 653 654 alloc_small_pages: 655 while (n--) { 656 page = __netdev_alloc_page(adapter->port[0], 657 gfp | __GFP_NOWARN); 658 if (unlikely(!page)) { 659 fl->alloc_failed++; 660 break; 661 } 662 poison_buf(page, PAGE_SIZE); 663 664 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, 665 PCI_DMA_FROMDEVICE); 666 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { 667 netdev_free_page(adapter->port[0], page); 668 break; 669 } 670 *d++ = cpu_to_be64(dma_addr); 671 672 set_rx_sw_desc(sdesc, page, dma_addr); 673 sdesc++; 674 675 fl->avail++; 676 if (++fl->pidx == fl->size) { 677 fl->pidx = 0; 678 sdesc = fl->sdesc; 679 d = fl->desc; 680 } 681 } 682 683 out: 684 /* 685 * Update our accounting state to incorporate the new Free List 686 * buffers, tell the hardware about them and return the number of 687 * bufers which we were able to allocate. 688 */ 689 cred = fl->avail - cred; 690 fl->pend_cred += cred; 691 ring_fl_db(adapter, fl); 692 693 if (unlikely(fl_starving(fl))) { 694 smp_wmb(); 695 set_bit(fl->cntxt_id, adapter->sge.starving_fl); 696 } 697 698 return cred; 699 } 700 701 /* 702 * Refill a Free List to its capacity or the Maximum Refill Increment, 703 * whichever is smaller ... 704 */ 705 static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) 706 { 707 refill_fl(adapter, fl, 708 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), 709 GFP_ATOMIC); 710 } 711 712 /** 713 * alloc_ring - allocate resources for an SGE descriptor ring 714 * @dev: the PCI device's core device 715 * @nelem: the number of descriptors 716 * @hwsize: the size of each hardware descriptor 717 * @swsize: the size of each software descriptor 718 * @busaddrp: the physical PCI bus address of the allocated ring 719 * @swringp: return address pointer for software ring 720 * @stat_size: extra space in hardware ring for status information 721 * 722 * Allocates resources for an SGE descriptor ring, such as TX queues, 723 * free buffer lists, response queues, etc. Each SGE ring requires 724 * space for its hardware descriptors plus, optionally, space for software 725 * state associated with each hardware entry (the metadata). 
The function 726 * returns three values: the virtual address for the hardware ring (the 727 * return value of the function), the PCI bus address of the hardware 728 * ring (in *busaddrp), and the address of the software ring (in swringp). 729 * Both the hardware and software rings are returned zeroed out. 730 */ 731 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, 732 size_t swsize, dma_addr_t *busaddrp, void *swringp, 733 size_t stat_size) 734 { 735 /* 736 * Allocate the hardware ring and PCI DMA bus address space for said. 737 */ 738 size_t hwlen = nelem * hwsize + stat_size; 739 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 740 741 if (!hwring) 742 return NULL; 743 744 /* 745 * If the caller wants a software ring, allocate it and return a 746 * pointer to it in *swringp. 747 */ 748 BUG_ON((swsize != 0) != (swringp != NULL)); 749 if (swsize) { 750 void *swring = kcalloc(nelem, swsize, GFP_KERNEL); 751 752 if (!swring) { 753 dma_free_coherent(dev, hwlen, hwring, *busaddrp); 754 return NULL; 755 } 756 *(void **)swringp = swring; 757 } 758 759 /* 760 * Zero out the hardware ring and return its address as our function 761 * value. 762 */ 763 memset(hwring, 0, hwlen); 764 return hwring; 765 } 766 767 /** 768 * sgl_len - calculates the size of an SGL of the given capacity 769 * @n: the number of SGL entries 770 * 771 * Calculates the number of flits (8-byte units) needed for a Direct 772 * Scatter/Gather List that can hold the given number of entries. 773 */ 774 static inline unsigned int sgl_len(unsigned int n) 775 { 776 /* 777 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA 778 * addresses. The DSGL Work Request starts off with a 32-bit DSGL 779 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, 780 * repeated sequences of { Length[i], Length[i+1], Address[i], 781 * Address[i+1] } (this ensures that all addresses are on 64-bit 782 * boundaries). If N is even, then Length[N+1] should be set to 0 and 783 * Address[N+1] is omitted. 784 * 785 * The following calculation incorporates all of the above. It's 786 * somewhat hard to follow but, briefly: the "+2" accounts for the 787 * first two flits which include the DSGL header, Length0 and 788 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 789 * flits for every pair of the remaining N) +1 if (n-1) is odd; and 790 * finally the "+((n-1)&1)" adds the one remaining flit needed if 791 * (n-1) is odd ... 792 */ 793 n--; 794 return (3 * n) / 2 + (n & 1) + 2; 795 } 796 797 /** 798 * flits_to_desc - returns the num of TX descriptors for the given flits 799 * @flits: the number of flits 800 * 801 * Returns the number of TX descriptors needed for the supplied number 802 * of flits. 803 */ 804 static inline unsigned int flits_to_desc(unsigned int flits) 805 { 806 BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64)); 807 return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT); 808 } 809 810 /** 811 * is_eth_imm - can an Ethernet packet be sent as immediate data? 812 * @skb: the packet 813 * 814 * Returns whether an Ethernet packet is small enough to fit completely as 815 * immediate data. 816 */ 817 static inline int is_eth_imm(const struct sk_buff *skb) 818 { 819 /* 820 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request 821 * which does not accommodate immediate data. We could dike out all 822 * of the support code for immediate data but that would tie our hands 823 * too much if we ever want to enhace the firmware. 
It would also 824 * create more differences between the PF and VF Drivers. 825 */ 826 return false; 827 } 828 829 /** 830 * calc_tx_flits - calculate the number of flits for a packet TX WR 831 * @skb: the packet 832 * 833 * Returns the number of flits needed for a TX Work Request for the 834 * given Ethernet packet, including the needed WR and CPL headers. 835 */ 836 static inline unsigned int calc_tx_flits(const struct sk_buff *skb) 837 { 838 unsigned int flits; 839 840 /* 841 * If the skb is small enough, we can pump it out as a work request 842 * with only immediate data. In that case we just have to have the 843 * TX Packet header plus the skb data in the Work Request. 844 */ 845 if (is_eth_imm(skb)) 846 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 847 sizeof(__be64)); 848 849 /* 850 * Otherwise, we're going to have to construct a Scatter gather list 851 * of the skb body and fragments. We also include the flits necessary 852 * for the TX Packet Work Request and CPL. We always have a firmware 853 * Write Header (incorporated as part of the cpl_tx_pkt_lso and 854 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL 855 * message or, if we're doing a Large Send Offload, an LSO CPL message 856 * with an embeded TX Packet Write CPL message. 857 */ 858 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); 859 if (skb_shinfo(skb)->gso_size) 860 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + 861 sizeof(struct cpl_tx_pkt_lso_core) + 862 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); 863 else 864 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + 865 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); 866 return flits; 867 } 868 869 /** 870 * write_sgl - populate a Scatter/Gather List for a packet 871 * @skb: the packet 872 * @tq: the TX queue we are writing into 873 * @sgl: starting location for writing the SGL 874 * @end: points right after the end of the SGL 875 * @start: start offset into skb main-body data to include in the SGL 876 * @addr: the list of DMA bus addresses for the SGL elements 877 * 878 * Generates a Scatter/Gather List for the buffers that make up a packet. 879 * The caller must provide adequate space for the SGL that will be written. 880 * The SGL includes all of the packet's page fragments and the data in its 881 * main body except for the first @start bytes. @pos must be 16-byte 882 * aligned and within a TX descriptor with available space. @end points 883 * write after the end of the SGL but does not account for any potential 884 * wrap around, i.e., @end > @tq->stat. 885 */ 886 static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, 887 struct ulptx_sgl *sgl, u64 *end, unsigned int start, 888 const dma_addr_t *addr) 889 { 890 unsigned int i, len; 891 struct ulptx_sge_pair *to; 892 const struct skb_shared_info *si = skb_shinfo(skb); 893 unsigned int nfrags = si->nr_frags; 894 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; 895 896 len = skb_headlen(skb) - start; 897 if (likely(len)) { 898 sgl->len0 = htonl(len); 899 sgl->addr0 = cpu_to_be64(addr[0] + start); 900 nfrags++; 901 } else { 902 sgl->len0 = htonl(skb_frag_size(&si->frags[0])); 903 sgl->addr0 = cpu_to_be64(addr[1]); 904 } 905 906 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | 907 ULPTX_NSGE(nfrags)); 908 if (likely(--nfrags == 0)) 909 return; 910 /* 911 * Most of the complexity below deals with the possibility we hit the 912 * end of the queue in the middle of writing the SGL. For this case 913 * only we create the SGL in a temporary buffer and then copy it. 
914 */ 915 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; 916 917 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { 918 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 919 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); 920 to->addr[0] = cpu_to_be64(addr[i]); 921 to->addr[1] = cpu_to_be64(addr[++i]); 922 } 923 if (nfrags) { 924 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 925 to->len[1] = cpu_to_be32(0); 926 to->addr[0] = cpu_to_be64(addr[i + 1]); 927 } 928 if (unlikely((u8 *)end > (u8 *)tq->stat)) { 929 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1; 930 931 if (likely(part0)) 932 memcpy(sgl->sge, buf, part0); 933 part1 = (u8 *)end - (u8 *)tq->stat; 934 memcpy(tq->desc, (u8 *)buf + part0, part1); 935 end = (void *)tq->desc + part1; 936 } 937 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 938 *(u64 *)end = 0; 939 } 940 941 /** 942 * check_ring_tx_db - check and potentially ring a TX queue's doorbell 943 * @adapter: the adapter 944 * @tq: the TX queue 945 * @n: number of new descriptors to give to HW 946 * 947 * Ring the doorbel for a TX queue. 948 */ 949 static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, 950 int n) 951 { 952 /* 953 * Warn if we write doorbells with the wrong priority and write 954 * descriptors before telling HW. 955 */ 956 WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO); 957 wmb(); 958 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 959 QID(tq->cntxt_id) | PIDX(n)); 960 } 961 962 /** 963 * inline_tx_skb - inline a packet's data into TX descriptors 964 * @skb: the packet 965 * @tq: the TX queue where the packet will be inlined 966 * @pos: starting position in the TX queue to inline the packet 967 * 968 * Inline a packet's contents directly into TX descriptors, starting at 969 * the given position within the TX DMA ring. 970 * Most of the complexity of this operation is dealing with wrap arounds 971 * in the middle of the packet we want to inline. 972 */ 973 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, 974 void *pos) 975 { 976 u64 *p; 977 int left = (void *)tq->stat - pos; 978 979 if (likely(skb->len <= left)) { 980 if (likely(!skb->data_len)) 981 skb_copy_from_linear_data(skb, pos, skb->len); 982 else 983 skb_copy_bits(skb, 0, pos, skb->len); 984 pos += skb->len; 985 } else { 986 skb_copy_bits(skb, 0, pos, left); 987 skb_copy_bits(skb, left, tq->desc, skb->len - left); 988 pos = (void *)tq->desc + (skb->len - left); 989 } 990 991 /* 0-pad to multiple of 16 */ 992 p = PTR_ALIGN(pos, 8); 993 if ((uintptr_t)p & 8) 994 *p = 0; 995 } 996 997 /* 998 * Figure out what HW csum a packet wants and return the appropriate control 999 * bits. 
1000 */ 1001 static u64 hwcsum(const struct sk_buff *skb) 1002 { 1003 int csum_type; 1004 const struct iphdr *iph = ip_hdr(skb); 1005 1006 if (iph->version == 4) { 1007 if (iph->protocol == IPPROTO_TCP) 1008 csum_type = TX_CSUM_TCPIP; 1009 else if (iph->protocol == IPPROTO_UDP) 1010 csum_type = TX_CSUM_UDPIP; 1011 else { 1012 nocsum: 1013 /* 1014 * unknown protocol, disable HW csum 1015 * and hope a bad packet is detected 1016 */ 1017 return TXPKT_L4CSUM_DIS; 1018 } 1019 } else { 1020 /* 1021 * this doesn't work with extension headers 1022 */ 1023 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; 1024 1025 if (ip6h->nexthdr == IPPROTO_TCP) 1026 csum_type = TX_CSUM_TCPIP6; 1027 else if (ip6h->nexthdr == IPPROTO_UDP) 1028 csum_type = TX_CSUM_UDPIP6; 1029 else 1030 goto nocsum; 1031 } 1032 1033 if (likely(csum_type >= TX_CSUM_TCPIP)) 1034 return TXPKT_CSUM_TYPE(csum_type) | 1035 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | 1036 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); 1037 else { 1038 int start = skb_transport_offset(skb); 1039 1040 return TXPKT_CSUM_TYPE(csum_type) | 1041 TXPKT_CSUM_START(start) | 1042 TXPKT_CSUM_LOC(start + skb->csum_offset); 1043 } 1044 } 1045 1046 /* 1047 * Stop an Ethernet TX queue and record that state change. 1048 */ 1049 static void txq_stop(struct sge_eth_txq *txq) 1050 { 1051 netif_tx_stop_queue(txq->txq); 1052 txq->q.stops++; 1053 } 1054 1055 /* 1056 * Advance our software state for a TX queue by adding n in use descriptors. 1057 */ 1058 static inline void txq_advance(struct sge_txq *tq, unsigned int n) 1059 { 1060 tq->in_use += n; 1061 tq->pidx += n; 1062 if (tq->pidx >= tq->size) 1063 tq->pidx -= tq->size; 1064 } 1065 1066 /** 1067 * t4vf_eth_xmit - add a packet to an Ethernet TX queue 1068 * @skb: the packet 1069 * @dev: the egress net device 1070 * 1071 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. 1072 */ 1073 int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1074 { 1075 u32 wr_mid; 1076 u64 cntrl, *end; 1077 int qidx, credits; 1078 unsigned int flits, ndesc; 1079 struct adapter *adapter; 1080 struct sge_eth_txq *txq; 1081 const struct port_info *pi; 1082 struct fw_eth_tx_pkt_vm_wr *wr; 1083 struct cpl_tx_pkt_core *cpl; 1084 const struct skb_shared_info *ssi; 1085 dma_addr_t addr[MAX_SKB_FRAGS + 1]; 1086 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + 1087 sizeof(wr->ethmacsrc) + 1088 sizeof(wr->ethtype) + 1089 sizeof(wr->vlantci)); 1090 1091 /* 1092 * The chip minimum packet length is 10 octets but the firmware 1093 * command that we are using requires that we copy the Ethernet header 1094 * (including the VLAN tag) into the header so we reject anything 1095 * smaller than that ... 1096 */ 1097 if (unlikely(skb->len < fw_hdr_copy_len)) 1098 goto out_free; 1099 1100 /* 1101 * Figure out which TX Queue we're going to use. 1102 */ 1103 pi = netdev_priv(dev); 1104 adapter = pi->adapter; 1105 qidx = skb_get_queue_mapping(skb); 1106 BUG_ON(qidx >= pi->nqsets); 1107 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; 1108 1109 /* 1110 * Take this opportunity to reclaim any TX Descriptors whose DMA 1111 * transfers have completed. 1112 */ 1113 reclaim_completed_tx(adapter, &txq->q, true); 1114 1115 /* 1116 * Calculate the number of flits and TX Descriptors we're going to 1117 * need along with how many TX Descriptors will be left over after 1118 * we inject our Work Request. 
1119 */ 1120 flits = calc_tx_flits(skb); 1121 ndesc = flits_to_desc(flits); 1122 credits = txq_avail(&txq->q) - ndesc; 1123 1124 if (unlikely(credits < 0)) { 1125 /* 1126 * Not enough room for this packet's Work Request. Stop the 1127 * TX Queue and return a "busy" condition. The queue will get 1128 * started later on when the firmware informs us that space 1129 * has opened up. 1130 */ 1131 txq_stop(txq); 1132 dev_err(adapter->pdev_dev, 1133 "%s: TX ring %u full while queue awake!\n", 1134 dev->name, qidx); 1135 return NETDEV_TX_BUSY; 1136 } 1137 1138 if (!is_eth_imm(skb) && 1139 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { 1140 /* 1141 * We need to map the skb into PCI DMA space (because it can't 1142 * be in-lined directly into the Work Request) and the mapping 1143 * operation failed. Record the error and drop the packet. 1144 */ 1145 txq->mapping_err++; 1146 goto out_free; 1147 } 1148 1149 wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); 1150 if (unlikely(credits < ETHTXQ_STOP_THRES)) { 1151 /* 1152 * After we're done injecting the Work Request for this 1153 * packet, we'll be below our "stop threshold" so stop the TX 1154 * Queue now and schedule a request for an SGE Egress Queue 1155 * Update message. The queue will get started later on when 1156 * the firmware processes this Work Request and sends us an 1157 * Egress Queue Status Update message indicating that space 1158 * has opened up. 1159 */ 1160 txq_stop(txq); 1161 wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; 1162 } 1163 1164 /* 1165 * Start filling in our Work Request. Note that we do _not_ handle 1166 * the WR Header wrapping around the TX Descriptor Ring. If our 1167 * maximum header size ever exceeds one TX Descriptor, we'll need to 1168 * do something else here. 1169 */ 1170 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); 1171 wr = (void *)&txq->q.desc[txq->q.pidx]; 1172 wr->equiq_to_len16 = cpu_to_be32(wr_mid); 1173 wr->r3[0] = cpu_to_be64(0); 1174 wr->r3[1] = cpu_to_be64(0); 1175 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); 1176 end = (u64 *)wr + flits; 1177 1178 /* 1179 * If this is a Large Send Offload packet we'll put in an LSO CPL 1180 * message with an encapsulated TX Packet CPL message. Otherwise we 1181 * just use a TX Packet CPL message. 1182 */ 1183 ssi = skb_shinfo(skb); 1184 if (ssi->gso_size) { 1185 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 1186 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; 1187 int l3hdr_len = skb_network_header_len(skb); 1188 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1189 1190 wr->op_immdlen = 1191 cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 1192 FW_WR_IMMDLEN(sizeof(*lso) + 1193 sizeof(*cpl))); 1194 /* 1195 * Fill in the LSO CPL message. 1196 */ 1197 lso->lso_ctrl = 1198 cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) | 1199 LSO_FIRST_SLICE | 1200 LSO_LAST_SLICE | 1201 LSO_IPV6(v6) | 1202 LSO_ETHHDR_LEN(eth_xtra_len/4) | 1203 LSO_IPHDR_LEN(l3hdr_len/4) | 1204 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); 1205 lso->ipid_ofst = cpu_to_be16(0); 1206 lso->mss = cpu_to_be16(ssi->gso_size); 1207 lso->seqno_offset = cpu_to_be32(0); 1208 lso->len = cpu_to_be32(skb->len); 1209 1210 /* 1211 * Set up TX Packet CPL pointer, control word and perform 1212 * accounting. 1213 */ 1214 cpl = (void *)(lso + 1); 1215 cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 1216 TXPKT_IPHDR_LEN(l3hdr_len) | 1217 TXPKT_ETHHDR_LEN(eth_xtra_len)); 1218 txq->tso++; 1219 txq->tx_cso += ssi->gso_segs; 1220 } else { 1221 int len; 1222 1223 len = is_eth_imm(skb) ? 
skb->len + sizeof(*cpl) : sizeof(*cpl); 1224 wr->op_immdlen = 1225 cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 1226 FW_WR_IMMDLEN(len)); 1227 1228 /* 1229 * Set up TX Packet CPL pointer, control word and perform 1230 * accounting. 1231 */ 1232 cpl = (void *)(wr + 1); 1233 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1234 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; 1235 txq->tx_cso++; 1236 } else 1237 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; 1238 } 1239 1240 /* 1241 * If there's a VLAN tag present, add that to the list of things to 1242 * do in this Work Request. 1243 */ 1244 if (vlan_tx_tag_present(skb)) { 1245 txq->vlan_ins++; 1246 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); 1247 } 1248 1249 /* 1250 * Fill in the TX Packet CPL message header. 1251 */ 1252 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) | 1253 TXPKT_INTF(pi->port_id) | 1254 TXPKT_PF(0)); 1255 cpl->pack = cpu_to_be16(0); 1256 cpl->len = cpu_to_be16(skb->len); 1257 cpl->ctrl1 = cpu_to_be64(cntrl); 1258 1259 #ifdef T4_TRACE 1260 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], 1261 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u", 1262 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); 1263 #endif 1264 1265 /* 1266 * Fill in the body of the TX Packet CPL message with either in-lined 1267 * data or a Scatter/Gather List. 1268 */ 1269 if (is_eth_imm(skb)) { 1270 /* 1271 * In-line the packet's data and free the skb since we don't 1272 * need it any longer. 1273 */ 1274 inline_tx_skb(skb, &txq->q, cpl + 1); 1275 dev_kfree_skb(skb); 1276 } else { 1277 /* 1278 * Write the skb's Scatter/Gather list into the TX Packet CPL 1279 * message and retain a pointer to the skb so we can free it 1280 * later when its DMA completes. (We store the skb pointer 1281 * in the Software Descriptor corresponding to the last TX 1282 * Descriptor used by the Work Request.) 1283 * 1284 * The retained skb will be freed when the corresponding TX 1285 * Descriptors are reclaimed after their DMAs complete. 1286 * However, this could take quite a while since, in general, 1287 * the hardware is set up to be lazy about sending DMA 1288 * completion notifications to us and we mostly perform TX 1289 * reclaims in the transmit routine. 1290 * 1291 * This is good for performamce but means that we rely on new 1292 * TX packets arriving to run the destructors of completed 1293 * packets, which open up space in their sockets' send queues. 1294 * Sometimes we do not get such new packets causing TX to 1295 * stall. A single UDP transmitter is a good example of this 1296 * situation. We have a clean up timer that periodically 1297 * reclaims completed packets but it doesn't run often enough 1298 * (nor do we want it to) to prevent lengthy stalls. A 1299 * solution to this problem is to run the destructor early, 1300 * after the packet is queued but before it's DMAd. A con is 1301 * that we lie to socket memory accounting, but the amount of 1302 * extra memory is reasonable (limited by the number of TX 1303 * descriptors), the packets do actually get freed quickly by 1304 * new packets almost always, and for protocols like TCP that 1305 * wait for acks to really free up the data the extra memory 1306 * is even less. On the positive side we run the destructors 1307 * on the sending CPU rather than on a potentially different 1308 * completing CPU, usually a good thing. 1309 * 1310 * Run the destructor before telling the DMA engine about the 1311 * packet to make sure it doesn't complete and get freed 1312 * prematurely. 
1313 */ 1314 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); 1315 struct sge_txq *tq = &txq->q; 1316 int last_desc; 1317 1318 /* 1319 * If the Work Request header was an exact multiple of our TX 1320 * Descriptor length, then it's possible that the starting SGL 1321 * pointer lines up exactly with the end of our TX Descriptor 1322 * ring. If that's the case, wrap around to the beginning 1323 * here ... 1324 */ 1325 if (unlikely((void *)sgl == (void *)tq->stat)) { 1326 sgl = (void *)tq->desc; 1327 end = (void *)((void *)tq->desc + 1328 ((void *)end - (void *)tq->stat)); 1329 } 1330 1331 write_sgl(skb, tq, sgl, end, 0, addr); 1332 skb_orphan(skb); 1333 1334 last_desc = tq->pidx + ndesc - 1; 1335 if (last_desc >= tq->size) 1336 last_desc -= tq->size; 1337 tq->sdesc[last_desc].skb = skb; 1338 tq->sdesc[last_desc].sgl = sgl; 1339 } 1340 1341 /* 1342 * Advance our internal TX Queue state, tell the hardware about 1343 * the new TX descriptors and return success. 1344 */ 1345 txq_advance(&txq->q, ndesc); 1346 dev->trans_start = jiffies; 1347 ring_tx_db(adapter, &txq->q, ndesc); 1348 return NETDEV_TX_OK; 1349 1350 out_free: 1351 /* 1352 * An error of some sort happened. Free the TX skb and tell the 1353 * OS that we've "dealt" with the packet ... 1354 */ 1355 dev_kfree_skb(skb); 1356 return NETDEV_TX_OK; 1357 } 1358 1359 /** 1360 * copy_frags - copy fragments from gather list into skb_shared_info 1361 * @skb: destination skb 1362 * @gl: source internal packet gather list 1363 * @offset: packet start offset in first page 1364 * 1365 * Copy an internal packet gather list into a Linux skb_shared_info 1366 * structure. 1367 */ 1368 static inline void copy_frags(struct sk_buff *skb, 1369 const struct pkt_gl *gl, 1370 unsigned int offset) 1371 { 1372 int i; 1373 1374 /* usually there's just one frag */ 1375 __skb_fill_page_desc(skb, 0, gl->frags[0].page, 1376 gl->frags[0].offset + offset, 1377 gl->frags[0].size - offset); 1378 skb_shinfo(skb)->nr_frags = gl->nfrags; 1379 for (i = 1; i < gl->nfrags; i++) 1380 __skb_fill_page_desc(skb, i, gl->frags[i].page, 1381 gl->frags[i].offset, 1382 gl->frags[i].size); 1383 1384 /* get a reference to the last page, we don't own it */ 1385 get_page(gl->frags[gl->nfrags - 1].page); 1386 } 1387 1388 /** 1389 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list 1390 * @gl: the gather list 1391 * @skb_len: size of sk_buff main body if it carries fragments 1392 * @pull_len: amount of data to move to the sk_buff's main body 1393 * 1394 * Builds an sk_buff from the given packet gather list. Returns the 1395 * sk_buff or %NULL if sk_buff allocation failed. 1396 */ 1397 struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, 1398 unsigned int skb_len, unsigned int pull_len) 1399 { 1400 struct sk_buff *skb; 1401 1402 /* 1403 * If the ingress packet is small enough, allocate an skb large enough 1404 * for all of the data and copy it inline. Otherwise, allocate an skb 1405 * with enough room to pull in the header and reference the rest of 1406 * the data via the skb fragment list. 1407 * 1408 * Below we rely on RX_COPY_THRES being less than the smallest Rx 1409 * buff! size, which is expected since buffers are at least 1410 * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one 1411 * fragment. 
1412 */ 1413 if (gl->tot_len <= RX_COPY_THRES) { 1414 /* small packets have only one fragment */ 1415 skb = alloc_skb(gl->tot_len, GFP_ATOMIC); 1416 if (unlikely(!skb)) 1417 goto out; 1418 __skb_put(skb, gl->tot_len); 1419 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); 1420 } else { 1421 skb = alloc_skb(skb_len, GFP_ATOMIC); 1422 if (unlikely(!skb)) 1423 goto out; 1424 __skb_put(skb, pull_len); 1425 skb_copy_to_linear_data(skb, gl->va, pull_len); 1426 1427 copy_frags(skb, gl, pull_len); 1428 skb->len = gl->tot_len; 1429 skb->data_len = skb->len - pull_len; 1430 skb->truesize += skb->data_len; 1431 } 1432 1433 out: 1434 return skb; 1435 } 1436 1437 /** 1438 * t4vf_pktgl_free - free a packet gather list 1439 * @gl: the gather list 1440 * 1441 * Releases the pages of a packet gather list. We do not own the last 1442 * page on the list and do not free it. 1443 */ 1444 void t4vf_pktgl_free(const struct pkt_gl *gl) 1445 { 1446 int frag; 1447 1448 frag = gl->nfrags - 1; 1449 while (frag--) 1450 put_page(gl->frags[frag].page); 1451 } 1452 1453 /** 1454 * do_gro - perform Generic Receive Offload ingress packet processing 1455 * @rxq: ingress RX Ethernet Queue 1456 * @gl: gather list for ingress packet 1457 * @pkt: CPL header for last packet fragment 1458 * 1459 * Perform Generic Receive Offload (GRO) ingress packet processing. 1460 * We use the standard Linux GRO interfaces for this. 1461 */ 1462 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 1463 const struct cpl_rx_pkt *pkt) 1464 { 1465 int ret; 1466 struct sk_buff *skb; 1467 1468 skb = napi_get_frags(&rxq->rspq.napi); 1469 if (unlikely(!skb)) { 1470 t4vf_pktgl_free(gl); 1471 rxq->stats.rx_drops++; 1472 return; 1473 } 1474 1475 copy_frags(skb, gl, PKTSHIFT); 1476 skb->len = gl->tot_len - PKTSHIFT; 1477 skb->data_len = skb->len; 1478 skb->truesize += skb->data_len; 1479 skb->ip_summed = CHECKSUM_UNNECESSARY; 1480 skb_record_rx_queue(skb, rxq->rspq.idx); 1481 1482 if (pkt->vlan_ex) 1483 __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); 1484 ret = napi_gro_frags(&rxq->rspq.napi); 1485 1486 if (ret == GRO_HELD) 1487 rxq->stats.lro_pkts++; 1488 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) 1489 rxq->stats.lro_merged++; 1490 rxq->stats.pkts++; 1491 rxq->stats.rx_cso++; 1492 } 1493 1494 /** 1495 * t4vf_ethrx_handler - process an ingress ethernet packet 1496 * @rspq: the response queue that received the packet 1497 * @rsp: the response queue descriptor holding the RX_PKT message 1498 * @gl: the gather list of packet fragments 1499 * 1500 * Process an ingress ethernet packet and deliver it to the stack. 1501 */ 1502 int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, 1503 const struct pkt_gl *gl) 1504 { 1505 struct sk_buff *skb; 1506 const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; 1507 bool csum_ok = pkt->csum_calc && !pkt->err_vec; 1508 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1509 1510 /* 1511 * If this is a good TCP packet and we have Generic Receive Offload 1512 * enabled, handle the packet in the GRO path. 1513 */ 1514 if ((pkt->l2info & cpu_to_be32(RXF_TCP)) && 1515 (rspq->netdev->features & NETIF_F_GRO) && csum_ok && 1516 !pkt->ip_frag) { 1517 do_gro(rxq, gl, pkt); 1518 return 0; 1519 } 1520 1521 /* 1522 * Convert the Packet Gather List into an skb. 
1523 */ 1524 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); 1525 if (unlikely(!skb)) { 1526 t4vf_pktgl_free(gl); 1527 rxq->stats.rx_drops++; 1528 return 0; 1529 } 1530 __skb_pull(skb, PKTSHIFT); 1531 skb->protocol = eth_type_trans(skb, rspq->netdev); 1532 skb_record_rx_queue(skb, rspq->idx); 1533 rxq->stats.pkts++; 1534 1535 if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) && 1536 !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { 1537 if (!pkt->ip_frag) 1538 skb->ip_summed = CHECKSUM_UNNECESSARY; 1539 else { 1540 __sum16 c = (__force __sum16)pkt->csum; 1541 skb->csum = csum_unfold(c); 1542 skb->ip_summed = CHECKSUM_COMPLETE; 1543 } 1544 rxq->stats.rx_cso++; 1545 } else 1546 skb_checksum_none_assert(skb); 1547 1548 if (pkt->vlan_ex) { 1549 rxq->stats.vlan_ex++; 1550 __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); 1551 } 1552 1553 netif_receive_skb(skb); 1554 1555 return 0; 1556 } 1557 1558 /** 1559 * is_new_response - check if a response is newly written 1560 * @rc: the response control descriptor 1561 * @rspq: the response queue 1562 * 1563 * Returns true if a response descriptor contains a yet unprocessed 1564 * response. 1565 */ 1566 static inline bool is_new_response(const struct rsp_ctrl *rc, 1567 const struct sge_rspq *rspq) 1568 { 1569 return RSPD_GEN(rc->type_gen) == rspq->gen; 1570 } 1571 1572 /** 1573 * restore_rx_bufs - put back a packet's RX buffers 1574 * @gl: the packet gather list 1575 * @fl: the SGE Free List 1576 * @nfrags: how many fragments in @si 1577 * 1578 * Called when we find out that the current packet, @si, can't be 1579 * processed right away for some reason. This is a very rare event and 1580 * there's no effort to make this suspension/resumption process 1581 * particularly efficient. 1582 * 1583 * We implement the suspension by putting all of the RX buffers associated 1584 * with the current packet back on the original Free List. The buffers 1585 * have already been unmapped and are left unmapped, we mark them as 1586 * unmapped in order to prevent further unmapping attempts. (Effectively 1587 * this function undoes the series of @unmap_rx_buf calls which were done 1588 * to create the current packet's gather list.) This leaves us ready to 1589 * restart processing of the packet the next time we start processing the 1590 * RX Queue ... 1591 */ 1592 static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl, 1593 int frags) 1594 { 1595 struct rx_sw_desc *sdesc; 1596 1597 while (frags--) { 1598 if (fl->cidx == 0) 1599 fl->cidx = fl->size - 1; 1600 else 1601 fl->cidx--; 1602 sdesc = &fl->sdesc[fl->cidx]; 1603 sdesc->page = gl->frags[frags].page; 1604 sdesc->dma_addr |= RX_UNMAPPED_BUF; 1605 fl->avail++; 1606 } 1607 } 1608 1609 /** 1610 * rspq_next - advance to the next entry in a response queue 1611 * @rspq: the queue 1612 * 1613 * Updates the state of a response queue to advance it to the next entry. 1614 */ 1615 static inline void rspq_next(struct sge_rspq *rspq) 1616 { 1617 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; 1618 if (unlikely(++rspq->cidx == rspq->size)) { 1619 rspq->cidx = 0; 1620 rspq->gen ^= 1; 1621 rspq->cur_desc = rspq->desc; 1622 } 1623 } 1624 1625 /** 1626 * process_responses - process responses from an SGE response queue 1627 * @rspq: the ingress response queue to process 1628 * @budget: how many responses can be processed in this round 1629 * 1630 * Process responses from a Scatter Gather Engine response queue up to 1631 * the supplied budget. 
Responses include received packets as well as 1632 * control messages from firmware or hardware. 1633 * 1634 * Additionally choose the interrupt holdoff time for the next interrupt 1635 * on this queue. If the system is under memory shortage use a fairly 1636 * long delay to help recovery. 1637 */ 1638 int process_responses(struct sge_rspq *rspq, int budget) 1639 { 1640 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1641 int budget_left = budget; 1642 1643 while (likely(budget_left)) { 1644 int ret, rsp_type; 1645 const struct rsp_ctrl *rc; 1646 1647 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); 1648 if (!is_new_response(rc, rspq)) 1649 break; 1650 1651 /* 1652 * Figure out what kind of response we've received from the 1653 * SGE. 1654 */ 1655 rmb(); 1656 rsp_type = RSPD_TYPE(rc->type_gen); 1657 if (likely(rsp_type == RSP_TYPE_FLBUF)) { 1658 struct page_frag *fp; 1659 struct pkt_gl gl; 1660 const struct rx_sw_desc *sdesc; 1661 u32 bufsz, frag; 1662 u32 len = be32_to_cpu(rc->pldbuflen_qid); 1663 1664 /* 1665 * If we get a "new buffer" message from the SGE we 1666 * need to move on to the next Free List buffer. 1667 */ 1668 if (len & RSPD_NEWBUF) { 1669 /* 1670 * We get one "new buffer" message when we 1671 * first start up a queue so we need to ignore 1672 * it when our offset into the buffer is 0. 1673 */ 1674 if (likely(rspq->offset > 0)) { 1675 free_rx_bufs(rspq->adapter, &rxq->fl, 1676 1); 1677 rspq->offset = 0; 1678 } 1679 len = RSPD_LEN(len); 1680 } 1681 gl.tot_len = len; 1682 1683 /* 1684 * Gather packet fragments. 1685 */ 1686 for (frag = 0, fp = gl.frags; /**/; frag++, fp++) { 1687 BUG_ON(frag >= MAX_SKB_FRAGS); 1688 BUG_ON(rxq->fl.avail == 0); 1689 sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; 1690 bufsz = get_buf_size(sdesc); 1691 fp->page = sdesc->page; 1692 fp->offset = rspq->offset; 1693 fp->size = min(bufsz, len); 1694 len -= fp->size; 1695 if (!len) 1696 break; 1697 unmap_rx_buf(rspq->adapter, &rxq->fl); 1698 } 1699 gl.nfrags = frag+1; 1700 1701 /* 1702 * Last buffer remains mapped so explicitly make it 1703 * coherent for CPU access and start preloading first 1704 * cache line ... 1705 */ 1706 dma_sync_single_for_cpu(rspq->adapter->pdev_dev, 1707 get_buf_addr(sdesc), 1708 fp->size, DMA_FROM_DEVICE); 1709 gl.va = (page_address(gl.frags[0].page) + 1710 gl.frags[0].offset); 1711 prefetch(gl.va); 1712 1713 /* 1714 * Hand the new ingress packet to the handler for 1715 * this Response Queue. 1716 */ 1717 ret = rspq->handler(rspq, rspq->cur_desc, &gl); 1718 if (likely(ret == 0)) 1719 rspq->offset += ALIGN(fp->size, FL_ALIGN); 1720 else 1721 restore_rx_bufs(&gl, &rxq->fl, frag); 1722 } else if (likely(rsp_type == RSP_TYPE_CPL)) { 1723 ret = rspq->handler(rspq, rspq->cur_desc, NULL); 1724 } else { 1725 WARN_ON(rsp_type > RSP_TYPE_CPL); 1726 ret = 0; 1727 } 1728 1729 if (unlikely(ret)) { 1730 /* 1731 * Couldn't process descriptor, back off for recovery. 1732 * We use the SGE's last timer which has the longest 1733 * interrupt coalescing value ... 1734 */ 1735 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; 1736 rspq->next_intr_params = 1737 QINTR_TIMER_IDX(NOMEM_TIMER_IDX); 1738 break; 1739 } 1740 1741 rspq_next(rspq); 1742 budget_left--; 1743 } 1744 1745 /* 1746 * If this is a Response Queue with an associated Free List and 1747 * at least two Egress Queue units available in the Free List 1748 * for new buffer pointers, refill the Free List. 
1749 */ 1750 if (rspq->offset >= 0 && 1751 rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) 1752 __refill_fl(rspq->adapter, &rxq->fl); 1753 return budget - budget_left; 1754 } 1755 1756 /** 1757 * napi_rx_handler - the NAPI handler for RX processing 1758 * @napi: the napi instance 1759 * @budget: how many packets we can process in this round 1760 * 1761 * Handler for new data events when using NAPI. This does not need any 1762 * locking or protection from interrupts as data interrupts are off at 1763 * this point and other adapter interrupts do not interfere (the latter 1764 * is not a concern at all with MSI-X as non-data interrupts then have 1765 * a separate handler). 1766 */ 1767 static int napi_rx_handler(struct napi_struct *napi, int budget) 1768 { 1769 unsigned int intr_params; 1770 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); 1771 int work_done = process_responses(rspq, budget); 1772 1773 if (likely(work_done < budget)) { 1774 napi_complete(napi); 1775 intr_params = rspq->next_intr_params; 1776 rspq->next_intr_params = rspq->intr_params; 1777 } else 1778 intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX); 1779 1780 if (unlikely(work_done == 0)) 1781 rspq->unhandled_irqs++; 1782 1783 t4_write_reg(rspq->adapter, 1784 T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 1785 CIDXINC(work_done) | 1786 INGRESSQID((u32)rspq->cntxt_id) | 1787 SEINTARM(intr_params)); 1788 return work_done; 1789 } 1790 1791 /* 1792 * The MSI-X interrupt handler for an SGE response queue for the NAPI case 1793 * (i.e., response queue serviced by NAPI polling). 1794 */ 1795 irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie) 1796 { 1797 struct sge_rspq *rspq = cookie; 1798 1799 napi_schedule(&rspq->napi); 1800 return IRQ_HANDLED; 1801 } 1802 1803 /* 1804 * Process the indirect interrupt entries in the interrupt queue and kick off 1805 * NAPI for each queue that has generated an entry. 1806 */ 1807 static unsigned int process_intrq(struct adapter *adapter) 1808 { 1809 struct sge *s = &adapter->sge; 1810 struct sge_rspq *intrq = &s->intrq; 1811 unsigned int work_done; 1812 1813 spin_lock(&adapter->sge.intrq_lock); 1814 for (work_done = 0; ; work_done++) { 1815 const struct rsp_ctrl *rc; 1816 unsigned int qid, iq_idx; 1817 struct sge_rspq *rspq; 1818 1819 /* 1820 * Grab the next response from the interrupt queue and bail 1821 * out if it's not a new response. 1822 */ 1823 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc)); 1824 if (!is_new_response(rc, intrq)) 1825 break; 1826 1827 /* 1828 * If the response isn't a forwarded interrupt message, issue an 1829 * error and go on to the next response message. This should 1830 * never happen ... 1831 */ 1832 rmb(); 1833 if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) { 1834 dev_err(adapter->pdev_dev, 1835 "Unexpected INTRQ response type %d\n", 1836 RSPD_TYPE(rc->type_gen)); 1837 continue; 1838 } 1839 1840 /* 1841 * Extract the Queue ID from the interrupt message and perform 1842 * sanity checking to make sure it really refers to one of our 1843 * Ingress Queues which is active and matches the queue's ID. 1844 * None of these error conditions should ever happen so we may 1845 * want to make them fatal and/or conditionalize them under 1846 * DEBUG.
1847 */ 1848 qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid)); 1849 iq_idx = IQ_IDX(s, qid); 1850 if (unlikely(iq_idx >= MAX_INGQ)) { 1851 dev_err(adapter->pdev_dev, 1852 "Ingress QID %d out of range\n", qid); 1853 continue; 1854 } 1855 rspq = s->ingr_map[iq_idx]; 1856 if (unlikely(rspq == NULL)) { 1857 dev_err(adapter->pdev_dev, 1858 "Ingress QID %d RSPQ=NULL\n", qid); 1859 continue; 1860 } 1861 if (unlikely(rspq->abs_id != qid)) { 1862 dev_err(adapter->pdev_dev, 1863 "Ingress QID %d refers to RSPQ %d\n", 1864 qid, rspq->abs_id); 1865 continue; 1866 } 1867 1868 /* 1869 * Schedule NAPI processing on the indicated Response Queue 1870 * and move on to the next entry in the Forwarded Interrupt 1871 * Queue. 1872 */ 1873 napi_schedule(&rspq->napi); 1874 rspq_next(intrq); 1875 } 1876 1877 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 1878 CIDXINC(work_done) | 1879 INGRESSQID(intrq->cntxt_id) | 1880 SEINTARM(intrq->intr_params)); 1881 1882 spin_unlock(&adapter->sge.intrq_lock); 1883 1884 return work_done; 1885 } 1886 1887 /* 1888 * The MSI interrupt handler handles data events from SGE response queues as 1889 * well as error and other async events as they all use the same MSI vector. 1890 */ 1891 irqreturn_t t4vf_intr_msi(int irq, void *cookie) 1892 { 1893 struct adapter *adapter = cookie; 1894 1895 process_intrq(adapter); 1896 return IRQ_HANDLED; 1897 } 1898 1899 /** 1900 * t4vf_intr_handler - select the top-level interrupt handler 1901 * @adapter: the adapter 1902 * 1903 * Selects the top-level interrupt handler based on the type of interrupts 1904 * (MSI-X or MSI). 1905 */ 1906 irq_handler_t t4vf_intr_handler(struct adapter *adapter) 1907 { 1908 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); 1909 if (adapter->flags & USING_MSIX) 1910 return t4vf_sge_intr_msix; 1911 else 1912 return t4vf_intr_msi; 1913 } 1914 1915 /** 1916 * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues 1917 * @data: the adapter 1918 * 1919 * Runs periodically from a timer to perform maintenance of SGE RX queues. 1920 * 1921 * a) Replenishes RX queues that have run out due to memory shortage. 1922 * Normally new RX buffers are added when existing ones are consumed but 1923 * when out of memory a queue can become empty. We schedule NAPI to do 1924 * the actual refill. 1925 */ 1926 static void sge_rx_timer_cb(unsigned long data) 1927 { 1928 struct adapter *adapter = (struct adapter *)data; 1929 struct sge *s = &adapter->sge; 1930 unsigned int i; 1931 1932 /* 1933 * Scan the "Starving Free Lists" flag array looking for any Free 1934 * Lists in need of more free buffers. If we find one and it's not 1935 * being actively polled, then bump its "starving" counter and attempt 1936 * to refill it. If we're successful in adding enough buffers to push 1937 * the Free List over the starving threshold, then we can clear its 1938 * "starving" status. 1939 */ 1940 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) { 1941 unsigned long m; 1942 1943 for (m = s->starving_fl[i]; m; m &= m - 1) { 1944 unsigned int id = __ffs(m) + i * BITS_PER_LONG; 1945 struct sge_fl *fl = s->egr_map[id]; 1946 1947 clear_bit(id, s->starving_fl); 1948 smp_mb__after_clear_bit(); 1949 1950 /* 1951 * Since we are accessing fl without a lock there's a 1952 * small probability of a false positive where we 1953 * schedule napi but the FL is no longer starving. 1954 * No biggie. 
1955 */ 1956 if (fl_starving(fl)) { 1957 struct sge_eth_rxq *rxq; 1958 1959 rxq = container_of(fl, struct sge_eth_rxq, fl); 1960 if (napi_reschedule(&rxq->rspq.napi)) 1961 fl->starving++; 1962 else 1963 set_bit(id, s->starving_fl); 1964 } 1965 } 1966 } 1967 1968 /* 1969 * Reschedule the next scan for starving Free Lists ... 1970 */ 1971 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); 1972 } 1973 1974 /** 1975 * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues 1976 * @data: the adapter 1977 * 1978 * Runs periodically from a timer to perform maintenance of SGE TX queues. 1979 * 1980 * b) Reclaims completed Tx packets for the Ethernet queues. Normally 1981 * packets are cleaned up by new Tx packets; this timer cleans up packets 1982 * when no new packets are being submitted. This is essential for pktgen, 1983 * at least. 1984 */ 1985 static void sge_tx_timer_cb(unsigned long data) 1986 { 1987 struct adapter *adapter = (struct adapter *)data; 1988 struct sge *s = &adapter->sge; 1989 unsigned int i, budget; 1990 1991 budget = MAX_TIMER_TX_RECLAIM; 1992 i = s->ethtxq_rover; 1993 do { 1994 struct sge_eth_txq *txq = &s->ethtxq[i]; 1995 1996 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { 1997 int avail = reclaimable(&txq->q); 1998 1999 if (avail > budget) 2000 avail = budget; 2001 2002 free_tx_desc(adapter, &txq->q, avail, true); 2003 txq->q.in_use -= avail; 2004 __netif_tx_unlock(txq->txq); 2005 2006 budget -= avail; 2007 if (!budget) 2008 break; 2009 } 2010 2011 i++; 2012 if (i >= s->ethqsets) 2013 i = 0; 2014 } while (i != s->ethtxq_rover); 2015 s->ethtxq_rover = i; 2016 2017 /* 2018 * If we found too many reclaimable packets, schedule a timer in the 2019 * near future to continue where we left off. Otherwise the next timer 2020 * will be at its normal interval. 2021 */ 2022 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); 2023 } 2024 2025 /** 2026 * t4vf_sge_alloc_rxq - allocate an SGE RX Queue 2027 * @adapter: the adapter 2028 * @rspq: pointer to the new rxq's Response Queue to be filled in 2029 * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue 2030 * @dev: the network device associated with the new rspq 2031 * @intr_dest: MSI-X vector index (overridden in MSI mode) 2032 * @fl: pointer to the new rxq's Free List to be filled in 2033 * @hnd: the interrupt handler to invoke for the rspq 2034 */ 2035 int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, 2036 bool iqasynch, struct net_device *dev, 2037 int intr_dest, 2038 struct sge_fl *fl, rspq_handler_t hnd) 2039 { 2040 struct port_info *pi = netdev_priv(dev); 2041 struct fw_iq_cmd cmd, rpl; 2042 int ret, iqandst, flsz = 0; 2043 2044 /* 2045 * If we're using MSI interrupts and we're not initializing the 2046 * Forwarded Interrupt Queue itself, then set up this queue for 2047 * indirect interrupts to the Forwarded Interrupt Queue. Obviously 2048 * the Forwarded Interrupt Queue must be set up before any other 2049 * ingress queue ... 2050 */ 2051 if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) { 2052 iqandst = SGE_INTRDST_IQ; 2053 intr_dest = adapter->sge.intrq.abs_id; 2054 } else 2055 iqandst = SGE_INTRDST_PCI; 2056 2057 /* 2058 * Allocate the hardware ring for the Response Queue. The size needs 2059 * to be a multiple of 16 which includes the mandatory status entry 2060 * (regardless of whether the Status Page capabilities are enabled or 2061 * not).
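 * (For example, assuming a typical 64-byte ingress queue entry, a
 * requested size of 1000 entries is rounded up to 1008 and the ring
 * then needs 1008 * rspq->iqe_len bytes of coherent DMA memory; the
 * actual entry size is whatever iqe_len was configured to.)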
2062 */ 2063 rspq->size = roundup(rspq->size, 16); 2064 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, 2065 0, &rspq->phys_addr, NULL, 0); 2066 if (!rspq->desc) 2067 return -ENOMEM; 2068 2069 /* 2070 * Fill in the Ingress Queue Command. Note: Ideally this code would 2071 * be in t4vf_hw.c but there are so many parameters and dependencies 2072 * on our Linux SGE state that we would end up having to pass tons of 2073 * parameters. We'll have to think about how this might be migrated 2074 * into OS-independent common code ... 2075 */ 2076 memset(&cmd, 0, sizeof(cmd)); 2077 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | 2078 FW_CMD_REQUEST | 2079 FW_CMD_WRITE | 2080 FW_CMD_EXEC); 2081 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | 2082 FW_IQ_CMD_IQSTART(1) | 2083 FW_LEN16(cmd)); 2084 cmd.type_to_iqandstindex = 2085 cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2086 FW_IQ_CMD_IQASYNCH(iqasynch) | 2087 FW_IQ_CMD_VIID(pi->viid) | 2088 FW_IQ_CMD_IQANDST(iqandst) | 2089 FW_IQ_CMD_IQANUS(1) | 2090 FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | 2091 FW_IQ_CMD_IQANDSTINDEX(intr_dest)); 2092 cmd.iqdroprss_to_iqesize = 2093 cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | 2094 FW_IQ_CMD_IQGTSMODE | 2095 FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | 2096 FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); 2097 cmd.iqsize = cpu_to_be16(rspq->size); 2098 cmd.iqaddr = cpu_to_be64(rspq->phys_addr); 2099 2100 if (fl) { 2101 /* 2102 * Allocate the ring for the hardware free list (with space 2103 * for its status page) along with the associated software 2104 * descriptor ring. The free list size needs to be a multiple 2105 * of the Egress Queue Unit. 2106 */ 2107 fl->size = roundup(fl->size, FL_PER_EQ_UNIT); 2108 fl->desc = alloc_ring(adapter->pdev_dev, fl->size, 2109 sizeof(__be64), sizeof(struct rx_sw_desc), 2110 &fl->addr, &fl->sdesc, STAT_LEN); 2111 if (!fl->desc) { 2112 ret = -ENOMEM; 2113 goto err; 2114 } 2115 2116 /* 2117 * Calculate the size of the hardware free list ring plus 2118 * Status Page (which the SGE will place after the end of the 2119 * free list ring) in Egress Queue Units. 2120 */ 2121 flsz = (fl->size / FL_PER_EQ_UNIT + 2122 STAT_LEN / EQ_UNIT); 2123 2124 /* 2125 * Fill in all the relevant firmware Ingress Queue Command 2126 * fields for the free list. 2127 */ 2128 cmd.iqns_to_fl0congen = 2129 cpu_to_be32( 2130 FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | 2131 FW_IQ_CMD_FL0PACKEN | 2132 FW_IQ_CMD_FL0PADEN); 2133 cmd.fl0dcaen_to_fl0cidxfthresh = 2134 cpu_to_be16( 2135 FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | 2136 FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); 2137 cmd.fl0size = cpu_to_be16(flsz); 2138 cmd.fl0addr = cpu_to_be64(fl->addr); 2139 } 2140 2141 /* 2142 * Issue the firmware Ingress Queue Command and extract the results if 2143 * it completes successfully. 2144 */ 2145 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 2146 if (ret) 2147 goto err; 2148 2149 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64); 2150 rspq->cur_desc = rspq->desc; 2151 rspq->cidx = 0; 2152 rspq->gen = 1; 2153 rspq->next_intr_params = rspq->intr_params; 2154 rspq->cntxt_id = be16_to_cpu(rpl.iqid); 2155 rspq->abs_id = be16_to_cpu(rpl.physiqid); 2156 rspq->size--; /* subtract status entry */ 2157 rspq->adapter = adapter; 2158 rspq->netdev = dev; 2159 rspq->handler = hnd; 2160 2161 /* set offset to -1 to distinguish ingress queues without FL */ 2162 rspq->offset = fl ? 
0 : -1; 2163 2164 if (fl) { 2165 fl->cntxt_id = be16_to_cpu(rpl.fl0id); 2166 fl->avail = 0; 2167 fl->pend_cred = 0; 2168 fl->pidx = 0; 2169 fl->cidx = 0; 2170 fl->alloc_failed = 0; 2171 fl->large_alloc_failed = 0; 2172 fl->starving = 0; 2173 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); 2174 } 2175 2176 return 0; 2177 2178 err: 2179 /* 2180 * An error occurred. Clean up our partial allocation state and 2181 * return the error. 2182 */ 2183 if (rspq->desc) { 2184 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len, 2185 rspq->desc, rspq->phys_addr); 2186 rspq->desc = NULL; 2187 } 2188 if (fl && fl->desc) { 2189 kfree(fl->sdesc); 2190 fl->sdesc = NULL; 2191 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT, 2192 fl->desc, fl->addr); 2193 fl->desc = NULL; 2194 } 2195 return ret; 2196 } 2197 2198 /** 2199 * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue 2200 * @adapter: the adapter 2201 * @txq: pointer to the new txq to be filled in 2202 * @devq: the network TX queue associated with the new txq 2203 * @iqid: the relative ingress queue ID to which events relating to 2204 * the new txq should be directed 2205 */ 2206 int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, 2207 struct net_device *dev, struct netdev_queue *devq, 2208 unsigned int iqid) 2209 { 2210 int ret, nentries; 2211 struct fw_eq_eth_cmd cmd, rpl; 2212 struct port_info *pi = netdev_priv(dev); 2213 2214 /* 2215 * Calculate the size of the hardware TX Queue (including the Status 2216 * Page on the end of the TX Queue) in units of TX Descriptors. 2217 */ 2218 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2219 2220 /* 2221 * Allocate the hardware ring for the TX ring (with space for its 2222 * status page) along with the associated software descriptor ring. 2223 */ 2224 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, 2225 sizeof(struct tx_desc), 2226 sizeof(struct tx_sw_desc), 2227 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); 2228 if (!txq->q.desc) 2229 return -ENOMEM; 2230 2231 /* 2232 * Fill in the Egress Queue Command. Note: As with the direct use of 2233 * the firmware Ingress Queue Command above in our RXQ allocation 2234 * routine, ideally, this code would be in t4vf_hw.c. Again, we'll 2235 * have to see if there's some reasonable way to parameterize it 2236 * into the common code ... 2237 */ 2238 memset(&cmd, 0, sizeof(cmd)); 2239 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | 2240 FW_CMD_REQUEST | 2241 FW_CMD_WRITE | 2242 FW_CMD_EXEC); 2243 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | 2244 FW_EQ_ETH_CMD_EQSTART | 2245 FW_LEN16(cmd)); 2246 cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid)); 2247 cmd.fetchszm_to_iqid = 2248 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | 2249 FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | 2250 FW_EQ_ETH_CMD_IQID(iqid)); 2251 cmd.dcaen_to_eqsize = 2252 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) | 2253 FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) | 2254 FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) | 2255 FW_EQ_ETH_CMD_EQSIZE(nentries)); 2256 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); 2257 2258 /* 2259 * Issue the firmware Egress Queue Command and extract the results if 2260 * it completes successfully. 2261 */ 2262 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 2263 if (ret) { 2264 /* 2265 * The firmware Egress Queue Command failed for some reason. 2266 * Free up our partial allocation state and return the error.
2267 */ 2268 kfree(txq->q.sdesc); 2269 txq->q.sdesc = NULL; 2270 dma_free_coherent(adapter->pdev_dev, 2271 nentries * sizeof(struct tx_desc), 2272 txq->q.desc, txq->q.phys_addr); 2273 txq->q.desc = NULL; 2274 return ret; 2275 } 2276 2277 txq->q.in_use = 0; 2278 txq->q.cidx = 0; 2279 txq->q.pidx = 0; 2280 txq->q.stat = (void *)&txq->q.desc[txq->q.size]; 2281 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); 2282 txq->q.abs_id = 2283 FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); 2284 txq->txq = devq; 2285 txq->tso = 0; 2286 txq->tx_cso = 0; 2287 txq->vlan_ins = 0; 2288 txq->q.stops = 0; 2289 txq->q.restarts = 0; 2290 txq->mapping_err = 0; 2291 return 0; 2292 } 2293 2294 /* 2295 * Free the DMA map resources associated with a TX queue. 2296 */ 2297 static void free_txq(struct adapter *adapter, struct sge_txq *tq) 2298 { 2299 dma_free_coherent(adapter->pdev_dev, 2300 tq->size * sizeof(*tq->desc) + STAT_LEN, 2301 tq->desc, tq->phys_addr); 2302 tq->cntxt_id = 0; 2303 tq->sdesc = NULL; 2304 tq->desc = NULL; 2305 } 2306 2307 /* 2308 * Free the resources associated with a response queue (possibly including a 2309 * free list). 2310 */ 2311 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, 2312 struct sge_fl *fl) 2313 { 2314 unsigned int flid = fl ? fl->cntxt_id : 0xffff; 2315 2316 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, 2317 rspq->cntxt_id, flid, 0xffff); 2318 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len, 2319 rspq->desc, rspq->phys_addr); 2320 netif_napi_del(&rspq->napi); 2321 rspq->netdev = NULL; 2322 rspq->cntxt_id = 0; 2323 rspq->abs_id = 0; 2324 rspq->desc = NULL; 2325 2326 if (fl) { 2327 free_rx_bufs(adapter, fl, fl->avail); 2328 dma_free_coherent(adapter->pdev_dev, 2329 fl->size * sizeof(*fl->desc) + STAT_LEN, 2330 fl->desc, fl->addr); 2331 kfree(fl->sdesc); 2332 fl->sdesc = NULL; 2333 fl->cntxt_id = 0; 2334 fl->desc = NULL; 2335 } 2336 } 2337 2338 /** 2339 * t4vf_free_sge_resources - free SGE resources 2340 * @adapter: the adapter 2341 * 2342 * Frees resources used by the SGE queue sets. 2343 */ 2344 void t4vf_free_sge_resources(struct adapter *adapter) 2345 { 2346 struct sge *s = &adapter->sge; 2347 struct sge_eth_rxq *rxq = s->ethrxq; 2348 struct sge_eth_txq *txq = s->ethtxq; 2349 struct sge_rspq *evtq = &s->fw_evtq; 2350 struct sge_rspq *intrq = &s->intrq; 2351 int qs; 2352 2353 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { 2354 if (rxq->rspq.desc) 2355 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); 2356 if (txq->q.desc) { 2357 t4vf_eth_eq_free(adapter, txq->q.cntxt_id); 2358 free_tx_desc(adapter, &txq->q, txq->q.in_use, true); 2359 kfree(txq->q.sdesc); 2360 free_txq(adapter, &txq->q); 2361 } 2362 } 2363 if (evtq->desc) 2364 free_rspq_fl(adapter, evtq, NULL); 2365 if (intrq->desc) 2366 free_rspq_fl(adapter, intrq, NULL); 2367 } 2368 2369 /** 2370 * t4vf_sge_start - enable SGE operation 2371 * @adapter: the adapter 2372 * 2373 * Start tasklets and timers associated with the DMA engine. 2374 */ 2375 void t4vf_sge_start(struct adapter *adapter) 2376 { 2377 adapter->sge.ethtxq_rover = 0; 2378 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); 2379 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); 2380 } 2381 2382 /** 2383 * t4vf_sge_stop - disable SGE operation 2384 * @adapter: the adapter 2385 * 2386 * Stop tasklets and timers associated with the DMA engine. 
Note that 2387 * this is effective only if measures have been taken to disable any HW 2388 * events that may restart them. 2389 */ 2390 void t4vf_sge_stop(struct adapter *adapter) 2391 { 2392 struct sge *s = &adapter->sge; 2393 2394 if (s->rx_timer.function) 2395 del_timer_sync(&s->rx_timer); 2396 if (s->tx_timer.function) 2397 del_timer_sync(&s->tx_timer); 2398 } 2399 2400 /** 2401 * t4vf_sge_init - initialize SGE 2402 * @adapter: the adapter 2403 * 2404 * Performs SGE initialization needed every time after a chip reset. 2405 * We do not initialize any of the queue sets here, instead the driver 2406 * top-level must request those individually. We also do not enable DMA 2407 * here, that should be done after the queues have been set up. 2408 */ 2409 int t4vf_sge_init(struct adapter *adapter) 2410 { 2411 struct sge_params *sge_params = &adapter->params.sge; 2412 u32 fl0 = sge_params->sge_fl_buffer_size[0]; 2413 u32 fl1 = sge_params->sge_fl_buffer_size[1]; 2414 struct sge *s = &adapter->sge; 2415 2416 /* 2417 * Start by vetting the basic SGE parameters which have been set up by 2418 * the Physical Function Driver. Ideally we should be able to deal 2419 * with _any_ configuration. Practice is different ... 2420 */ 2421 if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { 2422 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", 2423 fl0, fl1); 2424 return -EINVAL; 2425 } 2426 if ((sge_params->sge_control & RXPKTCPLMODE) == 0) { 2427 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); 2428 return -EINVAL; 2429 } 2430 2431 /* 2432 * Now translate the adapter parameters into our internal forms. 2433 */ 2434 if (fl1) 2435 FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; 2436 STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64); 2437 PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); 2438 FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + 2439 SGE_INGPADBOUNDARY_SHIFT); 2440 2441 /* 2442 * Set up tasklet timers. 2443 */ 2444 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter); 2445 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter); 2446 2447 /* 2448 * Initialize Forwarded Interrupt Queue lock. 2449 */ 2450 spin_lock_init(&s->intrq_lock); 2451 2452 return 0; 2453 } 2454
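/*
 * Illustrative usage sketch (not part of the driver): the main driver is
 * expected to call into this file roughly in the order shown below. The
 * arguments here (rx_handler, msix_vec, qs, iqid, rxq, txq) are
 * placeholders for this sketch, not a statement of the real call sites.
 *
 *	t4vf_sge_init(adapter);
 *	t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, dev,
 *			   msix_vec, &rxq->fl, rx_handler);
 *	t4vf_sge_alloc_eth_txq(adapter, txq, dev,
 *			       netdev_get_tx_queue(dev, qs), iqid);
 *	t4vf_sge_start(adapter);
 *	...
 *	t4vf_sge_stop(adapter);
 *	t4vf_free_sge_resources(adapter);
 */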