Lines Matching +full:hw +full:- +full:gro

2  * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
17 * - Redistributions of source code must retain the above
21 * - Redistributions in binary form must reproduce the above
43 #include <linux/dma-mapping.h>
63 * 64-bit PCI DMA addresses.
106 ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
107 ((ETHTXQ_MAX_FRAGS-1) & 1) +
140 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
189 * get_buf_addr - return DMA buffer address of software descriptor
193 * our low-order flag bits).
197 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); in get_buf_addr()
201 * is_buf_mapped - is buffer mapped for DMA?
209 return !(sdesc->dma_addr & RX_UNMAPPED_BUF); in is_buf_mapped()
213 * need_skb_unmap - does the platform need unmapping of sk_buffs?
228 * txq_avail - return the number of available slots in a TX queue
235 return tq->size - 1 - tq->in_use; in txq_avail()
239 * fl_cap - return the capacity of a Free List
249 return fl->size - FL_PER_EQ_UNIT; in fl_cap()
253 * fl_starving - return whether a Free List is starving.
264 const struct sge *s = &adapter->sge; in fl_starving()
266 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
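The three helpers above boil down to simple ring arithmetic: a TX queue always keeps one descriptor unused so that pidx == cidx means "empty", a Free List reserves one Egress Queue unit of entries for the status page, and a list counts as starving once the buffers the hardware has actually been told about (avail minus the credits still pending a doorbell) fall to the threshold. A minimal userspace sketch of that arithmetic, using hypothetical toy_* types and an assumed FL_PER_EQ_UNIT value rather than the driver's definitions:

#include <stdbool.h>

#define FL_PER_EQ_UNIT 8            /* assumed value, for illustration only */

struct toy_txq { unsigned int size, in_use; };
struct toy_fl  { unsigned int size, avail, pend_cred; };

/* one descriptor is always left unused so pidx == cidx means "empty" */
static unsigned int toy_txq_avail(const struct toy_txq *tq)
{
        return tq->size - 1 - tq->in_use;
}

/* the last EQ unit of the ring holds the status page, not buffers */
static unsigned int toy_fl_cap(const struct toy_fl *fl)
{
        return fl->size - FL_PER_EQ_UNIT;
}

/* starving: buffers already advertised to HW (avail minus credits not yet
 * pushed via the doorbell) have fallen to the starvation threshold
 */
static bool toy_fl_starving(const struct toy_fl *fl, unsigned int thres)
{
        return fl->avail - fl->pend_cred <= thres;
}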
270 * map_skb - map an skb for DMA to the device
283 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in map_skb()
288 end = &si->frags[si->nr_frags]; in map_skb()
289 for (fp = si->frags; fp < end; fp++) { in map_skb()
298 while (fp-- > si->frags) in map_skb()
299 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); in map_skb()
300 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); in map_skb()
303 return -ENOMEM; in map_skb()
310 unsigned int nfrags = skb_shinfo(skb)->nr_frags; in unmap_sgl()
313 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), in unmap_sgl()
314 be32_to_cpu(sgl->len0), DMA_TO_DEVICE); in unmap_sgl()
316 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), in unmap_sgl()
317 be32_to_cpu(sgl->len0), DMA_TO_DEVICE); in unmap_sgl()
318 nfrags--; in unmap_sgl()
322 * the complexity below is because of the possibility of a wrap-around in unmap_sgl()
325 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { in unmap_sgl()
326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { in unmap_sgl()
328 dma_unmap_page(dev, be64_to_cpu(p->addr[0]), in unmap_sgl()
329 be32_to_cpu(p->len[0]), DMA_TO_DEVICE); in unmap_sgl()
330 dma_unmap_page(dev, be64_to_cpu(p->addr[1]), in unmap_sgl()
331 be32_to_cpu(p->len[1]), DMA_TO_DEVICE); in unmap_sgl()
333 } else if ((u8 *)p == (u8 *)tq->stat) { in unmap_sgl()
334 p = (const struct ulptx_sge_pair *)tq->desc; in unmap_sgl()
336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) { in unmap_sgl()
337 const __be64 *addr = (const __be64 *)tq->desc; in unmap_sgl()
340 be32_to_cpu(p->len[0]), DMA_TO_DEVICE); in unmap_sgl()
342 be32_to_cpu(p->len[1]), DMA_TO_DEVICE); in unmap_sgl()
345 const __be64 *addr = (const __be64 *)tq->desc; in unmap_sgl()
347 dma_unmap_page(dev, be64_to_cpu(p->addr[0]), in unmap_sgl()
348 be32_to_cpu(p->len[0]), DMA_TO_DEVICE); in unmap_sgl()
350 be32_to_cpu(p->len[1]), DMA_TO_DEVICE); in unmap_sgl()
357 if ((u8 *)p == (u8 *)tq->stat) in unmap_sgl()
358 p = (const struct ulptx_sge_pair *)tq->desc; in unmap_sgl()
359 addr = ((u8 *)p + 16 <= (u8 *)tq->stat in unmap_sgl()
360 ? p->addr[0] in unmap_sgl()
361 : *(const __be64 *)tq->desc); in unmap_sgl()
362 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), in unmap_sgl()
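The unmap loop above has to follow a Scatter/Gather List that can wrap past the status page at tq->stat and continue at tq->desc, including the awkward case where a single address/length pair straddles that boundary; that is what the chain of pointer comparisons against tq->stat handles. A simplified sketch of the same wrap-around walk over a plain byte ring, with hypothetical names and a generic 16-byte record instead of the real ulptx_sge_pair layout:

#include <stdint.h>
#include <string.h>

struct toy_rec { uint64_t addr; uint64_t len; };

/* Read 'nrecs' 16-byte records from a ring that ends at ring_end and wraps
 * back to ring_start; records that straddle the boundary are reassembled
 * into a local copy, mirroring the special cases in unmap_sgl() above.
 */
static void walk_ring(const uint8_t *ring_start, const uint8_t *ring_end,
                      const uint8_t *pos, unsigned int nrecs,
                      void (*visit)(const struct toy_rec *))
{
        while (nrecs--) {
                struct toy_rec rec;

                if (pos + sizeof(rec) <= ring_end) {
                        memcpy(&rec, pos, sizeof(rec));
                        pos += sizeof(rec);
                } else {
                        /* record straddles the wrap point */
                        size_t part0 = (size_t)(ring_end - pos);

                        memcpy(&rec, pos, part0);
                        memcpy((uint8_t *)&rec + part0, ring_start,
                               sizeof(rec) - part0);
                        pos = ring_start + (sizeof(rec) - part0);
                }
                if (pos == ring_end)        /* landed exactly on the end */
                        pos = ring_start;
                visit(&rec);
        }
}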
368 * free_tx_desc - reclaims TX descriptors and their buffers
381 unsigned int cidx = tq->cidx; in free_tx_desc()
382 struct device *dev = adapter->pdev_dev; in free_tx_desc()
386 sdesc = &tq->sdesc[cidx]; in free_tx_desc()
387 while (n--) { in free_tx_desc()
392 if (sdesc->skb) { in free_tx_desc()
394 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); in free_tx_desc()
395 dev_consume_skb_any(sdesc->skb); in free_tx_desc()
396 sdesc->skb = NULL; in free_tx_desc()
400 if (++cidx == tq->size) { in free_tx_desc()
402 sdesc = tq->sdesc; in free_tx_desc()
405 tq->cidx = cidx; in free_tx_desc()
413 int hw_cidx = be16_to_cpu(tq->stat->cidx); in reclaimable()
414 int reclaimable = hw_cidx - tq->cidx; in reclaimable()
416 reclaimable += tq->size; in reclaimable()
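reclaimable() compares the consumer index the hardware last wrote into the status page (tq->stat->cidx) with the driver's own cidx; when the raw difference goes negative the hardware index has simply wrapped, so one ring size is added back. A tiny standalone version of that calculation, with hypothetical parameter names:

/* number of TX descriptors HW has finished with but SW has not yet freed */
static int toy_reclaimable(unsigned int hw_cidx, unsigned int sw_cidx,
                           unsigned int ring_size)
{
        int reclaimable = (int)hw_cidx - (int)sw_cidx;

        if (reclaimable < 0)            /* hw_cidx wrapped past sw_cidx */
                reclaimable += ring_size;
        return reclaimable;
}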
421 * reclaim_completed_tx - reclaims completed TX descriptors
445 tq->in_use -= avail; in reclaim_completed_tx()
450 * get_buf_size - return the size of an RX Free List buffer.
457 const struct sge *s = &adapter->sge; in get_buf_size()
459 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) in get_buf_size()
460 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); in get_buf_size()
464 * free_rx_bufs - free RX buffers on an SGE Free List
475 while (n--) { in free_rx_bufs()
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; in free_rx_bufs()
479 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), in free_rx_bufs()
482 put_page(sdesc->page); in free_rx_bufs()
483 sdesc->page = NULL; in free_rx_bufs()
484 if (++fl->cidx == fl->size) in free_rx_bufs()
485 fl->cidx = 0; in free_rx_bufs()
486 fl->avail--; in free_rx_bufs()
491 * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
496 * buffer must be made inaccessible to HW before calling this function.
505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; in unmap_rx_buf()
508 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), in unmap_rx_buf()
511 sdesc->page = NULL; in unmap_rx_buf()
512 if (++fl->cidx == fl->size) in unmap_rx_buf()
513 fl->cidx = 0; in unmap_rx_buf()
514 fl->avail--; in unmap_rx_buf()
518 * ring_fl_db - ring doorbell on free list in ring_fl_db()

527 u32 val = adapter->params.arch.sge_fl_db; in ring_fl_db()
533 if (fl->pend_cred >= FL_PER_EQ_UNIT) { in ring_fl_db()
534 if (is_t4(adapter->params.chip)) in ring_fl_db()
535 val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); in ring_fl_db()
537 val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT); in ring_fl_db()
548 if (unlikely(fl->bar2_addr == NULL)) { in ring_fl_db()
551 QID_V(fl->cntxt_id) | val); in ring_fl_db()
553 writel(val | QID_V(fl->bar2_qid), in ring_fl_db()
554 fl->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
561 fl->pend_cred %= FL_PER_EQ_UNIT; in ring_fl_db()
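ring_fl_db() only tells the hardware about new Free List buffers in whole Egress Queue units: the pending credit count is converted to units for the PIDX field (PIDX_V on T4, PIDX_T5_V on later chips), the doorbell goes either through the kernel register or the queue's BAR2 window, and any leftover credits stay in pend_cred for the next call. A small sketch of just that credit bookkeeping, with hypothetical names and an assumed FL_PER_EQ_UNIT:

#define FL_PER_EQ_UNIT 8                /* assumed value, for illustration only */

struct toy_fl_db { unsigned int pend_cred; };

/* returns the PIDX increment (in EQ units) to put into the doorbell, or 0
 * if there is not yet a full unit's worth of new buffers
 */
static unsigned int toy_fl_doorbell_units(struct toy_fl_db *fl)
{
        unsigned int units = fl->pend_cred / FL_PER_EQ_UNIT;

        if (units)
                fl->pend_cred %= FL_PER_EQ_UNIT;  /* keep the remainder */
        return units;
}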
566 * set_rx_sw_desc - initialize software RX buffer descriptor
569 * @dma_addr: PCI DMA address (possibly with low-bit flags)
574 sdesc->page = page; in set_rx_sw_desc()
575 sdesc->dma_addr = dma_addr; in set_rx_sw_desc()
581 #define POISON_BUF_VAL -1
591 * refill_fl - refill an SGE RX buffer ring
597 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
599 * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
607 struct sge *s = &adapter->sge; in refill_fl()
610 unsigned int cred = fl->avail; in refill_fl()
611 __be64 *d = &fl->desc[fl->pidx]; in refill_fl()
612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; in refill_fl()
619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); in refill_fl()
629 if (s->fl_pg_order == 0) in refill_fl()
633 page = __dev_alloc_pages(gfp, s->fl_pg_order); in refill_fl()
640 fl->large_alloc_failed++; in refill_fl()
643 poison_buf(page, PAGE_SIZE << s->fl_pg_order); in refill_fl()
645 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, in refill_fl()
646 PAGE_SIZE << s->fl_pg_order, in refill_fl()
648 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { in refill_fl()
657 __free_pages(page, s->fl_pg_order); in refill_fl()
666 fl->avail++; in refill_fl()
667 if (++fl->pidx == fl->size) { in refill_fl()
668 fl->pidx = 0; in refill_fl()
669 sdesc = fl->sdesc; in refill_fl()
670 d = fl->desc; in refill_fl()
672 n--; in refill_fl()
676 while (n--) { in refill_fl()
679 fl->alloc_failed++; in refill_fl()
684 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, in refill_fl()
686 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { in refill_fl()
695 fl->avail++; in refill_fl()
696 if (++fl->pidx == fl->size) { in refill_fl()
697 fl->pidx = 0; in refill_fl()
698 sdesc = fl->sdesc; in refill_fl()
699 d = fl->desc; in refill_fl()
709 cred = fl->avail - cred; in refill_fl()
710 fl->pend_cred += cred; in refill_fl()
715 set_bit(fl->cntxt_id, adapter->sge.starving_fl); in refill_fl()
728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
733 * alloc_ring - allocate resources for an SGE descriptor ring
783 * sgl_len - calculates the size of an SGL of the given capacity
786 * Calculates the number of flits (8-byte units) needed for a Direct
792 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA in sgl_len()
793 * addresses. The DSGL Work Request starts off with a 32-bit DSGL in sgl_len()
796 * Address[i+1] } (this ensures that all addresses are on 64-bit in sgl_len()
803 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 in sgl_len()
804 * flits for every pair of the remaining N) +1 if (n-1) is odd; and in sgl_len()
805 * finally the "+((n-1)&1)" adds the one remaining flit needed if in sgl_len()
806 * (n-1) is odd ... in sgl_len()
808 n--; in sgl_len()
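The comment above gives the flit count for a Direct SGL holding n addresses: the "+2" covers the DSGL header, Length0 and Address0, every remaining pair of addresses costs three flits, and an odd leftover address costs one more. A standalone version of the formula plus one worked value; the toy_ name is illustrative only:

/* flits (8-byte units) needed for a DSGL holding n DMA addresses */
static unsigned int toy_sgl_len(unsigned int n)
{
        n--;                            /* Address0 is covered by the "+2" */
        return (3 * n) / 2 + (n & 1) + 2;
}

/* e.g. n = 4: (3*3)/2 + (3&1) + 2 = 4 + 1 + 2 = 7 flits */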
813 * flits_to_desc - returns the number of TX descriptors for the given flits in flits_to_desc()
826 * is_eth_imm - can an Ethernet packet be sent as immediate data?
845 * calc_tx_flits - calculate the number of flits for a packet TX WR
861 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), in calc_tx_flits()
873 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in calc_tx_flits()
874 if (skb_shinfo(skb)->gso_size) in calc_tx_flits()
885 * write_sgl - populate a Scatter/Gather List for a packet
890 * @start: start offset into skb main-body data to include in the SGL
896 * main body except for the first @start bytes. @pos must be 16-byte
899 * wrap around, i.e., @end > @tq->stat.
908 unsigned int nfrags = si->nr_frags; in write_sgl()
911 len = skb_headlen(skb) - start; in write_sgl()
913 sgl->len0 = htonl(len); in write_sgl()
914 sgl->addr0 = cpu_to_be64(addr[0] + start); in write_sgl()
917 sgl->len0 = htonl(skb_frag_size(&si->frags[0])); in write_sgl()
918 sgl->addr0 = cpu_to_be64(addr[1]); in write_sgl()
921 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in write_sgl()
923 if (likely(--nfrags == 0)) in write_sgl()
930 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; in write_sgl()
932 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { in write_sgl()
933 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in write_sgl()
934 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); in write_sgl()
935 to->addr[0] = cpu_to_be64(addr[i]); in write_sgl()
936 to->addr[1] = cpu_to_be64(addr[++i]); in write_sgl()
939 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in write_sgl()
940 to->len[1] = cpu_to_be32(0); in write_sgl()
941 to->addr[0] = cpu_to_be64(addr[i + 1]); in write_sgl()
943 if (unlikely((u8 *)end > (u8 *)tq->stat)) { in write_sgl()
944 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1; in write_sgl()
947 memcpy(sgl->sge, buf, part0); in write_sgl()
948 part1 = (u8 *)end - (u8 *)tq->stat; in write_sgl()
949 memcpy(tq->desc, (u8 *)buf + part0, part1); in write_sgl()
950 end = (void *)tq->desc + part1; in write_sgl()
952 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ in write_sgl()
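When the SGL would run past the status page, write_sgl() builds it in an on-stack bounce buffer and then splits the copy: part0 bytes go up to tq->stat and the remaining part1 bytes continue at the start of the descriptor ring. A sketch of that split copy over plain byte buffers, with hypothetical names:

#include <stdint.h>
#include <string.h>

/* copy 'len' bytes of SGL payload into a ring that ends at ring_end and
 * wraps back to ring_start; 'pos' is where the SGL begins in the ring
 */
static void toy_copy_with_wrap(uint8_t *ring_start, uint8_t *ring_end,
                               uint8_t *pos, const uint8_t *src, size_t len)
{
        size_t part0 = (size_t)(ring_end - pos);

        if (len <= part0) {             /* no wrap needed */
                memcpy(pos, src, len);
                return;
        }
        memcpy(pos, src, part0);                      /* up to the end */
        memcpy(ring_start, src + part0, len - part0); /* rest at the start */
}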
957 * ring_tx_db - check and potentially ring a TX queue's doorbell
960 * @n: number of new descriptors to give to HW
975 if (unlikely(tq->bar2_addr == NULL)) { in ring_tx_db()
979 QID_V(tq->cntxt_id) | val); in ring_tx_db()
995 if (n == 1 && tq->bar2_qid == 0) { in ring_tx_db()
996 unsigned int index = (tq->pidx in ring_tx_db()
997 ? (tq->pidx - 1) in ring_tx_db()
998 : (tq->size - 1)); in ring_tx_db()
999 __be64 *src = (__be64 *)&tq->desc[index]; in ring_tx_db()
1000 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr + in ring_tx_db()
1006 * Combined transfer on the PCI-E Bus. If the Write in ring_tx_db()
1021 count--; in ring_tx_db()
1024 writel(val | QID_V(tq->bar2_qid), in ring_tx_db()
1025 tq->bar2_addr + SGE_UDB_KDOORBELL); in ring_tx_db()
1042 * inline_tx_skb - inline a packet's data into TX descriptors
1056 int left = (void *)tq->stat - pos; in inline_tx_skb()
1058 if (likely(skb->len <= left)) { in inline_tx_skb()
1059 if (likely(!skb->data_len)) in inline_tx_skb()
1060 skb_copy_from_linear_data(skb, pos, skb->len); in inline_tx_skb()
1062 skb_copy_bits(skb, 0, pos, skb->len); in inline_tx_skb()
1063 pos += skb->len; in inline_tx_skb()
1066 skb_copy_bits(skb, left, tq->desc, skb->len - left); in inline_tx_skb()
1067 pos = (void *)tq->desc + (skb->len - left); in inline_tx_skb()
1070 /* 0-pad to multiple of 16 */ in inline_tx_skb()
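Both the inline path and the SGL path finish by zero-padding to a 16-byte boundary so the next write starts on a full pair of flits; the "& 8" test works because everything written so far is already 8-byte (flit) aligned, so the only possible shortfall is a single flit. A sketch of that padding rule under the same alignment assumption:

#include <stdint.h>
#include <string.h>

/* zero-pad an in-line copy so it ends on a 16-byte boundary; returns the
 * number of pad bytes written (0 or 8, assuming 8-byte alignment already)
 */
static size_t toy_pad_to_16(uint8_t *pos)
{
        if ((uintptr_t)pos & 8) {       /* odd number of flits so far */
                memset(pos, 0, 8);
                return 8;
        }
        return 0;
}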
1077 * Figure out what HW csum a packet wants and return the appropriate control
1085 if (iph->version == 4) { in hwcsum()
1086 if (iph->protocol == IPPROTO_TCP) in hwcsum()
1088 else if (iph->protocol == IPPROTO_UDP) in hwcsum()
1093 * unknown protocol, disable HW csum in hwcsum()
1104 if (ip6h->nexthdr == IPPROTO_TCP) in hwcsum()
1106 else if (ip6h->nexthdr == IPPROTO_UDP) in hwcsum()
1114 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; in hwcsum()
1126 TXPKT_CSUM_LOC_V(start + skb->csum_offset); in hwcsum()
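hwcsum() picks the checksum offload type from the IP version and L4 protocol, and falls back to software checksumming (bumping a counter) for anything else; for the offloaded case the returned control word also encodes where the checksum calculation starts and where to store the result (skb->csum_offset). A reduced decision helper with hypothetical enum names, not the hardware's actual TXPKT encoding:

enum toy_csum { TOY_CSUM_NONE, TOY_CSUM_TCPIP, TOY_CSUM_UDPIP,
                TOY_CSUM_TCPIP6, TOY_CSUM_UDPIP6 };

/* pick a checksum offload type from (ip_version, l4_proto); 6 = TCP, 17 = UDP */
static enum toy_csum toy_pick_csum(int ip_version, int l4_proto)
{
        int v6 = (ip_version == 6);

        if (l4_proto == 6)              /* IPPROTO_TCP */
                return v6 ? TOY_CSUM_TCPIP6 : TOY_CSUM_TCPIP;
        if (l4_proto == 17)             /* IPPROTO_UDP */
                return v6 ? TOY_CSUM_UDPIP6 : TOY_CSUM_UDPIP;
        return TOY_CSUM_NONE;           /* unknown protocol: no HW csum */
}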
1135 netif_tx_stop_queue(txq->txq); in txq_stop()
1136 txq->q.stops++; in txq_stop()
1144 tq->in_use += n; in txq_advance()
1145 tq->pidx += n; in txq_advance()
1146 if (tq->pidx >= tq->size) in txq_advance()
1147 tq->pidx -= tq->size; in txq_advance()
1151 * t4vf_eth_xmit - add a packet to an Ethernet TX queue
1170 const size_t fw_hdr_copy_len = sizeof(wr->firmware); in t4vf_eth_xmit()
1178 if (unlikely(skb->len < fw_hdr_copy_len)) in t4vf_eth_xmit()
1182 max_pkt_len = ETH_HLEN + dev->mtu; in t4vf_eth_xmit()
1185 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) in t4vf_eth_xmit()
1192 adapter = pi->adapter; in t4vf_eth_xmit()
1194 BUG_ON(qidx >= pi->nqsets); in t4vf_eth_xmit()
1195 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1197 if (pi->vlan_id && !skb_vlan_tag_present(skb)) in t4vf_eth_xmit()
1199 pi->vlan_id); in t4vf_eth_xmit()
1205 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit()
1214 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit()
1224 dev_err(adapter->pdev_dev, in t4vf_eth_xmit()
1226 dev->name, qidx); in t4vf_eth_xmit()
1231 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { in t4vf_eth_xmit()
1234 * be in-lined directly into the Work Request) and the mapping in t4vf_eth_xmit()
1237 txq->mapping_err++; in t4vf_eth_xmit()
1263 wr = (void *)&txq->q.desc[txq->q.pidx]; in t4vf_eth_xmit()
1264 wr->equiq_to_len16 = cpu_to_be32(wr_mid); in t4vf_eth_xmit()
1265 wr->r3[0] = cpu_to_be32(0); in t4vf_eth_xmit()
1266 wr->r3[1] = cpu_to_be32(0); in t4vf_eth_xmit()
1267 skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len); in t4vf_eth_xmit()
1276 if (ssi->gso_size) { in t4vf_eth_xmit()
1278 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; in t4vf_eth_xmit()
1280 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in t4vf_eth_xmit()
1282 wr->op_immdlen = in t4vf_eth_xmit()
1289 lso->lso_ctrl = in t4vf_eth_xmit()
1296 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in t4vf_eth_xmit()
1297 lso->ipid_ofst = cpu_to_be16(0); in t4vf_eth_xmit()
1298 lso->mss = cpu_to_be16(ssi->gso_size); in t4vf_eth_xmit()
1299 lso->seqno_offset = cpu_to_be32(0); in t4vf_eth_xmit()
1300 if (is_t4(adapter->params.chip)) in t4vf_eth_xmit()
1301 lso->len = cpu_to_be32(skb->len); in t4vf_eth_xmit()
1303 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); in t4vf_eth_xmit()
1311 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) in t4vf_eth_xmit()
1319 txq->tso++; in t4vf_eth_xmit()
1320 txq->tx_cso += ssi->gso_segs; in t4vf_eth_xmit()
1324 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); in t4vf_eth_xmit()
1325 wr->op_immdlen = in t4vf_eth_xmit()
1334 if (skb->ip_summed == CHECKSUM_PARTIAL) { in t4vf_eth_xmit()
1335 cntrl = hwcsum(adapter->params.chip, skb) | in t4vf_eth_xmit()
1337 txq->tx_cso++; in t4vf_eth_xmit()
1347 txq->vlan_ins++; in t4vf_eth_xmit()
1354 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in t4vf_eth_xmit()
1355 TXPKT_INTF_V(pi->port_id) | in t4vf_eth_xmit()
1357 cpl->pack = cpu_to_be16(0); in t4vf_eth_xmit()
1358 cpl->len = cpu_to_be16(skb->len); in t4vf_eth_xmit()
1359 cpl->ctrl1 = cpu_to_be64(cntrl); in t4vf_eth_xmit()
1362 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], in t4vf_eth_xmit()
1364 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); in t4vf_eth_xmit()
1368 * Fill in the body of the TX Packet CPL message with either in-lined in t4vf_eth_xmit()
1373 * In-line the packet's data and free the skb since we don't in t4vf_eth_xmit()
1376 inline_tx_skb(skb, &txq->q, cpl + 1); in t4vf_eth_xmit()
1417 struct sge_txq *tq = &txq->q; in t4vf_eth_xmit()
1427 if (unlikely((void *)sgl == (void *)tq->stat)) { in t4vf_eth_xmit()
1428 sgl = (void *)tq->desc; in t4vf_eth_xmit()
1429 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat)); in t4vf_eth_xmit()
1435 last_desc = tq->pidx + ndesc - 1; in t4vf_eth_xmit()
1436 if (last_desc >= tq->size) in t4vf_eth_xmit()
1437 last_desc -= tq->size; in t4vf_eth_xmit()
1438 tq->sdesc[last_desc].skb = skb; in t4vf_eth_xmit()
1439 tq->sdesc[last_desc].sgl = sgl; in t4vf_eth_xmit()
1446 txq_advance(&txq->q, ndesc); in t4vf_eth_xmit()
1448 ring_tx_db(adapter, &txq->q, ndesc); in t4vf_eth_xmit()
1461 * copy_frags - copy fragments from gather list into skb_shared_info
1476 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
1477 gl->frags[0].offset + offset, in copy_frags()
1478 gl->frags[0].size - offset); in copy_frags()
1479 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
1480 for (i = 1; i < gl->nfrags; i++) in copy_frags()
1481 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
1482 gl->frags[i].offset, in copy_frags()
1483 gl->frags[i].size); in copy_frags()
1486 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
1490 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1515 if (gl->tot_len <= RX_COPY_THRES) { in t4vf_pktgl_to_skb()
1517 skb = alloc_skb(gl->tot_len, GFP_ATOMIC); in t4vf_pktgl_to_skb()
1520 __skb_put(skb, gl->tot_len); in t4vf_pktgl_to_skb()
1521 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); in t4vf_pktgl_to_skb()
1527 skb_copy_to_linear_data(skb, gl->va, pull_len); in t4vf_pktgl_to_skb()
1530 skb->len = gl->tot_len; in t4vf_pktgl_to_skb()
1531 skb->data_len = skb->len - pull_len; in t4vf_pktgl_to_skb()
1532 skb->truesize += skb->data_len; in t4vf_pktgl_to_skb()
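t4vf_pktgl_to_skb() copies small packets (up to RX_COPY_THRES bytes) entirely into a linear skb, while larger packets get only RX_PULL_LEN header bytes copied and keep their page fragments attached, which is why skb->data_len ends up as the total length minus the pulled header. A sketch of that size decision, with the two thresholds treated as assumed values:

#include <stdbool.h>

#define RX_COPY_THRES 256               /* assumed value, for illustration only */
#define RX_PULL_LEN   128               /* assumed value, for illustration only */

/* decide how many bytes to copy into the skb's linear area */
static unsigned int toy_rx_pull_len(unsigned int tot_len, bool *copy_all)
{
        if (tot_len <= RX_COPY_THRES) {
                *copy_all = true;       /* small packet: no page frags */
                return tot_len;
        }
        *copy_all = false;              /* headers linear, payload in frags */
        return RX_PULL_LEN;
}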
1540 * t4vf_pktgl_free - free a packet gather list
1550 frag = gl->nfrags - 1; in t4vf_pktgl_free()
1551 while (frag--) in t4vf_pktgl_free()
1552 put_page(gl->frags[frag].page); in t4vf_pktgl_free()
1556 * do_gro - perform Generic Receive Offload ingress packet processing
1561 * Perform Generic Receive Offload (GRO) ingress packet processing.
1562 * We use the standard Linux GRO interfaces for this.
1567 struct adapter *adapter = rxq->rspq.adapter; in do_gro()
1568 struct sge *s = &adapter->sge; in do_gro()
1573 skb = napi_get_frags(&rxq->rspq.napi); in do_gro()
1576 rxq->stats.rx_drops++; in do_gro()
1580 copy_frags(skb, gl, s->pktshift); in do_gro()
1581 skb->len = gl->tot_len - s->pktshift; in do_gro()
1582 skb->data_len = skb->len; in do_gro()
1583 skb->truesize += skb->data_len; in do_gro()
1584 skb->ip_summed = CHECKSUM_UNNECESSARY; in do_gro()
1585 skb_record_rx_queue(skb, rxq->rspq.idx); in do_gro()
1586 pi = netdev_priv(skb->dev); in do_gro()
1588 if (pkt->vlan_ex && !pi->vlan_id) { in do_gro()
1590 be16_to_cpu(pkt->vlan)); in do_gro()
1591 rxq->stats.vlan_ex++; in do_gro()
1593 ret = napi_gro_frags(&rxq->rspq.napi); in do_gro()
1596 rxq->stats.lro_pkts++; in do_gro()
1598 rxq->stats.lro_merged++; in do_gro()
1599 rxq->stats.pkts++; in do_gro()
1600 rxq->stats.rx_cso++; in do_gro()
1604 * t4vf_ethrx_handler - process an ingress ethernet packet
1616 bool csum_ok = pkt->csum_calc && !pkt->err_vec && in t4vf_ethrx_handler()
1617 (rspq->netdev->features & NETIF_F_RXCSUM); in t4vf_ethrx_handler()
1619 struct adapter *adapter = rspq->adapter; in t4vf_ethrx_handler()
1620 struct sge *s = &adapter->sge; in t4vf_ethrx_handler()
1625 * enabled, handle the packet in the GRO path. in t4vf_ethrx_handler()
1627 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) && in t4vf_ethrx_handler()
1628 (rspq->netdev->features & NETIF_F_GRO) && csum_ok && in t4vf_ethrx_handler()
1629 !pkt->ip_frag) { in t4vf_ethrx_handler()
1640 rxq->stats.rx_drops++; in t4vf_ethrx_handler()
1643 __skb_pull(skb, s->pktshift); in t4vf_ethrx_handler()
1644 skb->protocol = eth_type_trans(skb, rspq->netdev); in t4vf_ethrx_handler()
1645 skb_record_rx_queue(skb, rspq->idx); in t4vf_ethrx_handler()
1646 pi = netdev_priv(skb->dev); in t4vf_ethrx_handler()
1647 rxq->stats.pkts++; in t4vf_ethrx_handler()
1649 if (csum_ok && !pkt->err_vec && in t4vf_ethrx_handler()
1650 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) { in t4vf_ethrx_handler()
1651 if (!pkt->ip_frag) { in t4vf_ethrx_handler()
1652 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4vf_ethrx_handler()
1653 rxq->stats.rx_cso++; in t4vf_ethrx_handler()
1654 } else if (pkt->l2info & htonl(RXF_IP_F)) { in t4vf_ethrx_handler()
1655 __sum16 c = (__force __sum16)pkt->csum; in t4vf_ethrx_handler()
1656 skb->csum = csum_unfold(c); in t4vf_ethrx_handler()
1657 skb->ip_summed = CHECKSUM_COMPLETE; in t4vf_ethrx_handler()
1658 rxq->stats.rx_cso++; in t4vf_ethrx_handler()
1663 if (pkt->vlan_ex && !pi->vlan_id) { in t4vf_ethrx_handler()
1664 rxq->stats.vlan_ex++; in t4vf_ethrx_handler()
1666 be16_to_cpu(pkt->vlan)); in t4vf_ethrx_handler()
1675 * is_new_response - check if a response is newly written
1685 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen; in is_new_response()
1689 * restore_rx_bufs - put back a packet's RX buffers
1713 while (frags--) { in restore_rx_bufs()
1714 if (fl->cidx == 0) in restore_rx_bufs()
1715 fl->cidx = fl->size - 1; in restore_rx_bufs()
1717 fl->cidx--; in restore_rx_bufs()
1718 sdesc = &fl->sdesc[fl->cidx]; in restore_rx_bufs()
1719 sdesc->page = gl->frags[frags].page; in restore_rx_bufs()
1720 sdesc->dma_addr |= RX_UNMAPPED_BUF; in restore_rx_bufs()
1721 fl->avail++; in restore_rx_bufs()
1726 * rspq_next - advance to the next entry in a response queue
1733 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; in rspq_next()
1734 if (unlikely(++rspq->cidx == rspq->size)) { in rspq_next()
1735 rspq->cidx = 0; in rspq_next()
1736 rspq->gen ^= 1; in rspq_next()
1737 rspq->cur_desc = rspq->desc; in rspq_next()
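Ownership of response queue entries is tracked with a generation bit: an entry is new only while its gen bit matches the queue's current gen, and rspq_next() flips rspq->gen every time cidx wraps back to zero so entries left over from the previous pass stop looking valid. A compact sketch of the same protocol over a plain ring, with hypothetical names:

#include <stdbool.h>

struct toy_rspq {
        unsigned int cidx, size, gen;   /* gen is 0 or 1 */
};

/* is the gen bit of the descriptor at cidx the one we currently expect? */
static bool toy_is_new(const struct toy_rspq *q, unsigned int desc_gen)
{
        return (desc_gen & 0x1) == q->gen;
}

/* advance to the next entry, flipping gen when the index wraps */
static void toy_rspq_next(struct toy_rspq *q)
{
        if (++q->cidx == q->size) {
                q->cidx = 0;
                q->gen ^= 1;
        }
}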
1742 * process_responses - process responses from an SGE response queue
1757 struct adapter *adapter = rspq->adapter; in process_responses()
1758 struct sge *s = &adapter->sge; in process_responses()
1765 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); in process_responses()
1774 rsp_type = RSPD_TYPE_G(rc->type_gen); in process_responses()
1780 u32 len = be32_to_cpu(rc->pldbuflen_qid); in process_responses()
1792 if (likely(rspq->offset > 0)) { in process_responses()
1793 free_rx_bufs(rspq->adapter, &rxq->fl, in process_responses()
1795 rspq->offset = 0; in process_responses()
1806 BUG_ON(rxq->fl.avail == 0); in process_responses()
1807 sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; in process_responses()
1809 fp->page = sdesc->page; in process_responses()
1810 fp->offset = rspq->offset; in process_responses()
1811 fp->size = min(bufsz, len); in process_responses()
1812 len -= fp->size; in process_responses()
1815 unmap_rx_buf(rspq->adapter, &rxq->fl); in process_responses()
1824 dma_sync_single_for_cpu(rspq->adapter->pdev_dev, in process_responses()
1826 fp->size, DMA_FROM_DEVICE); in process_responses()
1835 ret = rspq->handler(rspq, rspq->cur_desc, &gl); in process_responses()
1837 rspq->offset += ALIGN(fp->size, s->fl_align); in process_responses()
1839 restore_rx_bufs(&gl, &rxq->fl, frag); in process_responses()
1841 ret = rspq->handler(rspq, rspq->cur_desc, NULL); in process_responses()
1853 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; in process_responses()
1854 rspq->next_intr_params = in process_responses()
1860 budget_left--; in process_responses()
1868 if (rspq->offset >= 0 && in process_responses()
1869 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) in process_responses()
1870 __refill_fl(rspq->adapter, &rxq->fl); in process_responses()
1871 return budget - budget_left; in process_responses()
1875 * napi_rx_handler - the NAPI handler for RX processing
1882 * is not a concern at all with MSI-X as non-data interrupts then have in napi_rx_handler()
1894 intr_params = rspq->next_intr_params; in napi_rx_handler()
1895 rspq->next_intr_params = rspq->intr_params; in napi_rx_handler()
1900 rspq->unhandled_irqs++; in napi_rx_handler()
1906 if (unlikely(!rspq->bar2_addr)) { in napi_rx_handler()
1907 t4_write_reg(rspq->adapter, in napi_rx_handler()
1909 val | INGRESSQID_V((u32)rspq->cntxt_id)); in napi_rx_handler()
1911 writel(val | INGRESSQID_V(rspq->bar2_qid), in napi_rx_handler()
1912 rspq->bar2_addr + SGE_UDB_GTS); in napi_rx_handler()
1919 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1926 napi_schedule(&rspq->napi); in t4vf_sge_intr_msix()
1936 struct sge *s = &adapter->sge; in process_intrq()
1937 struct sge_rspq *intrq = &s->intrq; in process_intrq()
1941 spin_lock(&adapter->sge.intrq_lock); in process_intrq()
1951 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc)); in process_intrq()
1961 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) { in process_intrq()
1962 dev_err(adapter->pdev_dev, in process_intrq()
1964 RSPD_TYPE_G(rc->type_gen)); in process_intrq()
1976 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid)); in process_intrq()
1979 dev_err(adapter->pdev_dev, in process_intrq()
1983 rspq = s->ingr_map[iq_idx]; in process_intrq()
1985 dev_err(adapter->pdev_dev, in process_intrq()
1989 if (unlikely(rspq->abs_id != qid)) { in process_intrq()
1990 dev_err(adapter->pdev_dev, in process_intrq()
1992 qid, rspq->abs_id); in process_intrq()
2001 napi_schedule(&rspq->napi); in process_intrq()
2005 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params); in process_intrq()
2009 if (unlikely(!intrq->bar2_addr)) { in process_intrq()
2011 val | INGRESSQID_V(intrq->cntxt_id)); in process_intrq()
2013 writel(val | INGRESSQID_V(intrq->bar2_qid), in process_intrq()
2014 intrq->bar2_addr + SGE_UDB_GTS); in process_intrq()
2018 spin_unlock(&adapter->sge.intrq_lock); in process_intrq()
2036 * t4vf_intr_handler - select the top-level interrupt handler
2039 * Selects the top-level interrupt handler based on the type of interrupts
2040 * (MSI-X or MSI).
2044 BUG_ON((adapter->flags & in t4vf_intr_handler()
2046 if (adapter->flags & CXGB4VF_USING_MSIX) in t4vf_intr_handler()
2053 * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
2066 struct sge *s = &adapter->sge; in sge_rx_timer_cb()
2077 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) { in sge_rx_timer_cb()
2080 for (m = s->starving_fl[i]; m; m &= m - 1) { in sge_rx_timer_cb()
2082 struct sge_fl *fl = s->egr_map[id]; in sge_rx_timer_cb()
2084 clear_bit(id, s->starving_fl); in sge_rx_timer_cb()
2097 if (napi_schedule(&rxq->rspq.napi)) in sge_rx_timer_cb()
2098 fl->starving++; in sge_rx_timer_cb()
2100 set_bit(id, s->starving_fl); in sge_rx_timer_cb()
2108 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); in sge_rx_timer_cb()
2112 * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
2125 struct sge *s = &adapter->sge; in sge_tx_timer_cb()
2129 i = s->ethtxq_rover; in sge_tx_timer_cb()
2131 struct sge_eth_txq *txq = &s->ethtxq[i]; in sge_tx_timer_cb()
2133 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { in sge_tx_timer_cb()
2134 int avail = reclaimable(&txq->q); in sge_tx_timer_cb()
2139 free_tx_desc(adapter, &txq->q, avail, true); in sge_tx_timer_cb()
2140 txq->q.in_use -= avail; in sge_tx_timer_cb()
2141 __netif_tx_unlock(txq->txq); in sge_tx_timer_cb()
2143 budget -= avail; in sge_tx_timer_cb()
2149 if (i >= s->ethqsets) in sge_tx_timer_cb()
2151 } while (i != s->ethtxq_rover); in sge_tx_timer_cb()
2152 s->ethtxq_rover = i; in sge_tx_timer_cb()
2159 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); in sge_tx_timer_cb()
2163 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2188 return adapter->bar2 + bar2_qoffset; in bar2_address()
2192 * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2197 * @intr_dest: MSI-X vector index (overridden in MSI mode) in t4vf_sge_alloc_rxq()
2206 struct sge *s = &adapter->sge; in t4vf_sge_alloc_rxq()
2210 int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING); in t4vf_sge_alloc_rxq()
2219 if ((adapter->flags & CXGB4VF_USING_MSI) && in t4vf_sge_alloc_rxq()
2220 rspq != &adapter->sge.intrq) { in t4vf_sge_alloc_rxq()
2222 intr_dest = adapter->sge.intrq.abs_id; in t4vf_sge_alloc_rxq()
2232 rspq->size = roundup(rspq->size, 16); in t4vf_sge_alloc_rxq()
2233 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, in t4vf_sge_alloc_rxq()
2234 0, &rspq->phys_addr, NULL, 0); in t4vf_sge_alloc_rxq()
2235 if (!rspq->desc) in t4vf_sge_alloc_rxq()
2236 return -ENOMEM; in t4vf_sge_alloc_rxq()
2243 * into OS-independent common code ... in t4vf_sge_alloc_rxq()
2256 FW_IQ_CMD_VIID_V(pi->viid) | in t4vf_sge_alloc_rxq()
2262 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) | in t4vf_sge_alloc_rxq()
2264 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) | in t4vf_sge_alloc_rxq()
2265 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4)); in t4vf_sge_alloc_rxq()
2266 cmd.iqsize = cpu_to_be16(rspq->size); in t4vf_sge_alloc_rxq()
2267 cmd.iqaddr = cpu_to_be64(rspq->phys_addr); in t4vf_sge_alloc_rxq()
2271 CHELSIO_CHIP_VERSION(adapter->params.chip); in t4vf_sge_alloc_rxq()
2278 * (fl_starve_thres - 1). in t4vf_sge_alloc_rxq()
2280 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT) in t4vf_sge_alloc_rxq()
2281 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT; in t4vf_sge_alloc_rxq()
2282 fl->size = roundup(fl->size, FL_PER_EQ_UNIT); in t4vf_sge_alloc_rxq()
2283 fl->desc = alloc_ring(adapter->pdev_dev, fl->size, in t4vf_sge_alloc_rxq()
2285 &fl->addr, &fl->sdesc, s->stat_len); in t4vf_sge_alloc_rxq()
2286 if (!fl->desc) { in t4vf_sge_alloc_rxq()
2287 ret = -ENOMEM; in t4vf_sge_alloc_rxq()
2296 flsz = (fl->size / FL_PER_EQ_UNIT + in t4vf_sge_alloc_rxq()
2297 s->stat_len / EQ_UNIT); in t4vf_sge_alloc_rxq()
2315 * Free List pointers are provided, so we use a 128-byte Fetch in t4vf_sge_alloc_rxq()
2317 * the smaller 64-byte value there). in t4vf_sge_alloc_rxq()
2328 cmd.fl0addr = cpu_to_be64(fl->addr); in t4vf_sge_alloc_rxq()
2339 netif_napi_add(dev, &rspq->napi, napi_rx_handler); in t4vf_sge_alloc_rxq()
2340 rspq->cur_desc = rspq->desc; in t4vf_sge_alloc_rxq()
2341 rspq->cidx = 0; in t4vf_sge_alloc_rxq()
2342 rspq->gen = 1; in t4vf_sge_alloc_rxq()
2343 rspq->next_intr_params = rspq->intr_params; in t4vf_sge_alloc_rxq()
2344 rspq->cntxt_id = be16_to_cpu(rpl.iqid); in t4vf_sge_alloc_rxq()
2345 rspq->bar2_addr = bar2_address(adapter, in t4vf_sge_alloc_rxq()
2346 rspq->cntxt_id, in t4vf_sge_alloc_rxq()
2348 &rspq->bar2_qid); in t4vf_sge_alloc_rxq()
2349 rspq->abs_id = be16_to_cpu(rpl.physiqid); in t4vf_sge_alloc_rxq()
2350 rspq->size--; /* subtract status entry */ in t4vf_sge_alloc_rxq()
2351 rspq->adapter = adapter; in t4vf_sge_alloc_rxq()
2352 rspq->netdev = dev; in t4vf_sge_alloc_rxq()
2353 rspq->handler = hnd; in t4vf_sge_alloc_rxq()
2355 /* set offset to -1 to distinguish ingress queues without FL */ in t4vf_sge_alloc_rxq()
2356 rspq->offset = fl ? 0 : -1; in t4vf_sge_alloc_rxq()
2359 fl->cntxt_id = be16_to_cpu(rpl.fl0id); in t4vf_sge_alloc_rxq()
2360 fl->avail = 0; in t4vf_sge_alloc_rxq()
2361 fl->pend_cred = 0; in t4vf_sge_alloc_rxq()
2362 fl->pidx = 0; in t4vf_sge_alloc_rxq()
2363 fl->cidx = 0; in t4vf_sge_alloc_rxq()
2364 fl->alloc_failed = 0; in t4vf_sge_alloc_rxq()
2365 fl->large_alloc_failed = 0; in t4vf_sge_alloc_rxq()
2366 fl->starving = 0; in t4vf_sge_alloc_rxq()
2371 fl->bar2_addr = bar2_address(adapter, in t4vf_sge_alloc_rxq()
2372 fl->cntxt_id, in t4vf_sge_alloc_rxq()
2374 &fl->bar2_qid); in t4vf_sge_alloc_rxq()
2386 if (rspq->desc) { in t4vf_sge_alloc_rxq()
2387 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len, in t4vf_sge_alloc_rxq()
2388 rspq->desc, rspq->phys_addr); in t4vf_sge_alloc_rxq()
2389 rspq->desc = NULL; in t4vf_sge_alloc_rxq()
2391 if (fl && fl->desc) { in t4vf_sge_alloc_rxq()
2392 kfree(fl->sdesc); in t4vf_sge_alloc_rxq()
2393 fl->sdesc = NULL; in t4vf_sge_alloc_rxq()
2394 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT, in t4vf_sge_alloc_rxq()
2395 fl->desc, fl->addr); in t4vf_sge_alloc_rxq()
2396 fl->desc = NULL; in t4vf_sge_alloc_rxq()
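In t4vf_sge_alloc_rxq() above (around source lines 2280-2297), the Free List is sized so it can never look permanently starved: it must hold at least fl_starve_thres - 1 plus two full Egress Queue units of buffers, is rounded up to a whole number of EQ units, and the size handed to the firmware (flsz) is expressed in EQ units plus however many units the status page occupies (stat_len / EQ_UNIT). A sketch of that sizing arithmetic with assumed unit sizes and hypothetical names:

#define FL_PER_EQ_UNIT 8                /* assumed value, for illustration only */
#define EQ_UNIT        64               /* assumed: bytes per EQ unit */

static unsigned int roundup_u(unsigned int x, unsigned int m)
{
        return ((x + m - 1) / m) * m;
}

/* size a Free List (in descriptors) and convert it to firmware EQ units */
static unsigned int toy_fl_size_to_flsz(unsigned int requested,
                                        unsigned int fl_starve_thres,
                                        unsigned int stat_len)
{
        unsigned int min_size = fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
        unsigned int size = requested < min_size ? min_size : requested;

        size = roundup_u(size, FL_PER_EQ_UNIT);
        return size / FL_PER_EQ_UNIT + stat_len / EQ_UNIT;
}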
2402 * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2414 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); in t4vf_sge_alloc_eth_txq()
2417 struct sge *s = &adapter->sge; in t4vf_sge_alloc_eth_txq()
2424 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4vf_sge_alloc_eth_txq()
2430 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, in t4vf_sge_alloc_eth_txq()
2433 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); in t4vf_sge_alloc_eth_txq()
2434 if (!txq->q.desc) in t4vf_sge_alloc_eth_txq()
2435 return -ENOMEM; in t4vf_sge_alloc_eth_txq()
2453 FW_EQ_ETH_CMD_VIID_V(pi->viid)); in t4vf_sge_alloc_eth_txq()
2456 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) | in t4vf_sge_alloc_eth_txq()
2466 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4vf_sge_alloc_eth_txq()
2478 kfree(txq->q.sdesc); in t4vf_sge_alloc_eth_txq()
2479 txq->q.sdesc = NULL; in t4vf_sge_alloc_eth_txq()
2480 dma_free_coherent(adapter->pdev_dev, in t4vf_sge_alloc_eth_txq()
2482 txq->q.desc, txq->q.phys_addr); in t4vf_sge_alloc_eth_txq()
2483 txq->q.desc = NULL; in t4vf_sge_alloc_eth_txq()
2487 txq->q.in_use = 0; in t4vf_sge_alloc_eth_txq()
2488 txq->q.cidx = 0; in t4vf_sge_alloc_eth_txq()
2489 txq->q.pidx = 0; in t4vf_sge_alloc_eth_txq()
2490 txq->q.stat = (void *)&txq->q.desc[txq->q.size]; in t4vf_sge_alloc_eth_txq()
2491 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd)); in t4vf_sge_alloc_eth_txq()
2492 txq->q.bar2_addr = bar2_address(adapter, in t4vf_sge_alloc_eth_txq()
2493 txq->q.cntxt_id, in t4vf_sge_alloc_eth_txq()
2495 &txq->q.bar2_qid); in t4vf_sge_alloc_eth_txq()
2496 txq->q.abs_id = in t4vf_sge_alloc_eth_txq()
2498 txq->txq = devq; in t4vf_sge_alloc_eth_txq()
2499 txq->tso = 0; in t4vf_sge_alloc_eth_txq()
2500 txq->tx_cso = 0; in t4vf_sge_alloc_eth_txq()
2501 txq->vlan_ins = 0; in t4vf_sge_alloc_eth_txq()
2502 txq->q.stops = 0; in t4vf_sge_alloc_eth_txq()
2503 txq->q.restarts = 0; in t4vf_sge_alloc_eth_txq()
2504 txq->mapping_err = 0; in t4vf_sge_alloc_eth_txq()
2513 struct sge *s = &adapter->sge; in free_txq()
2515 dma_free_coherent(adapter->pdev_dev, in free_txq()
2516 tq->size * sizeof(*tq->desc) + s->stat_len, in free_txq()
2517 tq->desc, tq->phys_addr); in free_txq()
2518 tq->cntxt_id = 0; in free_txq()
2519 tq->sdesc = NULL; in free_txq()
2520 tq->desc = NULL; in free_txq()
2530 struct sge *s = &adapter->sge; in free_rspq_fl()
2531 unsigned int flid = fl ? fl->cntxt_id : 0xffff; in free_rspq_fl()
2534 rspq->cntxt_id, flid, 0xffff); in free_rspq_fl()
2535 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len, in free_rspq_fl()
2536 rspq->desc, rspq->phys_addr); in free_rspq_fl()
2537 netif_napi_del(&rspq->napi); in free_rspq_fl()
2538 rspq->netdev = NULL; in free_rspq_fl()
2539 rspq->cntxt_id = 0; in free_rspq_fl()
2540 rspq->abs_id = 0; in free_rspq_fl()
2541 rspq->desc = NULL; in free_rspq_fl()
2544 free_rx_bufs(adapter, fl, fl->avail); in free_rspq_fl()
2545 dma_free_coherent(adapter->pdev_dev, in free_rspq_fl()
2546 fl->size * sizeof(*fl->desc) + s->stat_len, in free_rspq_fl()
2547 fl->desc, fl->addr); in free_rspq_fl()
2548 kfree(fl->sdesc); in free_rspq_fl()
2549 fl->sdesc = NULL; in free_rspq_fl()
2550 fl->cntxt_id = 0; in free_rspq_fl()
2551 fl->desc = NULL; in free_rspq_fl()
2556 * t4vf_free_sge_resources - free SGE resources
2563 struct sge *s = &adapter->sge; in t4vf_free_sge_resources()
2564 struct sge_eth_rxq *rxq = s->ethrxq; in t4vf_free_sge_resources()
2565 struct sge_eth_txq *txq = s->ethtxq; in t4vf_free_sge_resources()
2566 struct sge_rspq *evtq = &s->fw_evtq; in t4vf_free_sge_resources()
2567 struct sge_rspq *intrq = &s->intrq; in t4vf_free_sge_resources()
2570 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { in t4vf_free_sge_resources()
2571 if (rxq->rspq.desc) in t4vf_free_sge_resources()
2572 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); in t4vf_free_sge_resources()
2573 if (txq->q.desc) { in t4vf_free_sge_resources()
2574 t4vf_eth_eq_free(adapter, txq->q.cntxt_id); in t4vf_free_sge_resources()
2575 free_tx_desc(adapter, &txq->q, txq->q.in_use, true); in t4vf_free_sge_resources()
2576 kfree(txq->q.sdesc); in t4vf_free_sge_resources()
2577 free_txq(adapter, &txq->q); in t4vf_free_sge_resources()
2580 if (evtq->desc) in t4vf_free_sge_resources()
2582 if (intrq->desc) in t4vf_free_sge_resources()
2587 * t4vf_sge_start - enable SGE operation
2594 adapter->sge.ethtxq_rover = 0; in t4vf_sge_start()
2595 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); in t4vf_sge_start()
2596 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); in t4vf_sge_start()
2600 * t4vf_sge_stop - disable SGE operation
2604 * this is effective only if measures have been taken to disable any HW
2609 struct sge *s = &adapter->sge; in t4vf_sge_stop()
2611 if (s->rx_timer.function) in t4vf_sge_stop()
2612 del_timer_sync(&s->rx_timer); in t4vf_sge_stop()
2613 if (s->tx_timer.function) in t4vf_sge_stop()
2614 del_timer_sync(&s->tx_timer); in t4vf_sge_stop()
2618 * t4vf_sge_init - initialize SGE
2623 * top-level must request those individually. We also do not enable DMA
2628 struct sge_params *sge_params = &adapter->params.sge; in t4vf_sge_init()
2629 u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; in t4vf_sge_init()
2630 u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; in t4vf_sge_init()
2631 struct sge *s = &adapter->sge; in t4vf_sge_init()
2649 (fl_large_pg & (fl_large_pg - 1)) != 0) { in t4vf_sge_init()
2650 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", in t4vf_sge_init()
2652 return -EINVAL; in t4vf_sge_init()
2654 if ((sge_params->sge_control & RXPKTCPLMODE_F) != in t4vf_sge_init()
2656 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); in t4vf_sge_init()
2657 return -EINVAL; in t4vf_sge_init()
2664 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; in t4vf_sge_init()
2665 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) in t4vf_sge_init()
2667 s->pktshift = PKTSHIFT_G(sge_params->sge_control); in t4vf_sge_init()
2668 s->fl_align = t4vf_fl_pkt_align(adapter); in t4vf_sge_init()
2677 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { in t4vf_sge_init()
2679 s->fl_starve_thres = in t4vf_sge_init()
2680 EGRTHRESHOLD_G(sge_params->sge_congestion_control); in t4vf_sge_init()
2683 s->fl_starve_thres = in t4vf_sge_init()
2684 EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); in t4vf_sge_init()
2688 s->fl_starve_thres = in t4vf_sge_init()
2689 T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); in t4vf_sge_init()
2692 s->fl_starve_thres = s->fl_starve_thres * 2 + 1; in t4vf_sge_init()
2697 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); in t4vf_sge_init()
2698 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); in t4vf_sge_init()
2703 spin_lock_init(&s->intrq_lock); in t4vf_sge_init()
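The fl_starve_thres setup near the end of t4vf_sge_init() (source lines 2677-2692) reads a chip-specific congestion threshold field (EGRTHRESHOLD on T4, the packing variants on T5 and T6) and then applies the "* 2 + 1" adjustment before fl_starving() uses the result. A trivial worked version of that last step, with a hypothetical name:

/* final scaling applied to the congestion threshold read from the chip;
 * mirrors the "* 2 + 1" step above (toy_ name is illustrative only)
 */
static unsigned int toy_fl_starve_thres(unsigned int egress_threshold)
{
        return egress_threshold * 2 + 1;
}

/* e.g. a threshold field of 7 gives a starvation threshold of 15 buffers */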