Lines Matching +full:tx +full:- +full:termination +full:- +full:fix

4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-mapping.h>
68 # define FL_PG_ORDER (16 - PAGE_SHIFT)
82 * Max number of Tx descriptors we clean up at a time. Should be modest as
103 * Period of the Tx queue check timer.
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
115 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
124 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
130 * Max size of a WR sent through a control Tx queue.
141 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
151 struct sge *s = &adapter->sge; in fl_mtu_bufsize()
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); in fl_mtu_bufsize()
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
166 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; in get_buf_addr()
198 return !(d->dma_addr & RX_UNMAPPED_BUF); in is_buf_mapped()
202 * txq_avail - return the number of available slots in a Tx queue
203 * @q: the Tx queue
205 * Returns the number of descriptors in a Tx queue available to write new
210 return q->size - 1 - q->in_use; in txq_avail()
214 * fl_cap - return the capacity of a free-buffer list
217 * Returns the capacity of a free-buffer list. The capacity is less than
223 return fl->size - 8; /* 1 descriptor = 8 buffers */ in fl_cap()
227 * fl_starving - return whether a Free List is starving.
238 const struct sge *s = &adapter->sge; in fl_starving()
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
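
A minimal stand-alone sketch of the starvation test above, with hypothetical numbers; the helper name and values are illustrative, not part of the driver.

/* Only buffers already credited to the hardware, i.e. avail - pend_cred,
 * count toward the starvation check.
 */
static int fl_would_starve(unsigned int avail, unsigned int pend_cred,
			   unsigned int starve_thres)
{
	return avail - pend_cred <= starve_thres;
}

/* e.g. fl_would_starve(100, 24, 64) == 0: 76 hardware-visible buffers is
 * still above a 64-buffer threshold, so the list is not starving.
 */
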
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
254 end = &si->frags[si->nr_frags]; in cxgb4_map_skb()
256 for (fp = si->frags; fp < end; fp++) { in cxgb4_map_skb()
265 while (fp-- > si->frags) in cxgb4_map_skb()
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); in cxgb4_map_skb()
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
270 return -ENOMEM; in cxgb4_map_skb()
283 end = &si->frags[si->nr_frags]; in unmap_skb()
284 for (fp = si->frags; fp < end; fp++) in unmap_skb()
290 * deferred_unmap_destructor - unmap a packet when it is freed
293 * This is the packet destructor used for Tx packets that need to remain
294 * mapped until they are freed rather than until their Tx descriptors are
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); in deferred_unmap_destructor()
304 * free_tx_desc - reclaims Tx descriptors and their buffers
306 * @q: the Tx queue to reclaim descriptors from
310 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
311 * Tx buffers. Called with the Tx queue lock held.
316 unsigned int cidx = q->cidx; in free_tx_desc()
319 d = &q->sdesc[cidx]; in free_tx_desc()
320 while (n--) { in free_tx_desc()
321 if (d->skb) { /* an SGL is present */ in free_tx_desc()
322 if (unmap && d->addr[0]) { in free_tx_desc()
323 unmap_skb(adap->pdev_dev, d->skb, d->addr); in free_tx_desc()
324 memset(d->addr, 0, sizeof(d->addr)); in free_tx_desc()
326 dev_consume_skb_any(d->skb); in free_tx_desc()
327 d->skb = NULL; in free_tx_desc()
330 if (++cidx == q->size) { in free_tx_desc()
332 d = q->sdesc; in free_tx_desc()
335 q->cidx = cidx; in free_tx_desc()
339 * Return the number of reclaimable descriptors in a Tx queue.
343 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaimable()
344 hw_cidx -= q->cidx; in reclaimable()
345 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; in reclaimable()
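
A worked example of the wrap-around arithmetic in reclaimable() above; the function and values here are illustrative only.

/* hw_cidx is the consumer index the hardware last wrote back (q->stat->cidx
 * above), sw_cidx is the driver's own consumer index, and size is the ring
 * size in descriptors.
 */
static int reclaimable_example(int hw_cidx, int sw_cidx, int size)
{
	int n = hw_cidx - sw_cidx;

	return n < 0 ? n + size : n;	/* correct for ring wrap-around */
}

/* e.g. reclaimable_example(10, 1000, 1024) == 34: the hardware index has
 * wrapped past the end of the ring while the driver's has not yet.
 */
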
349 * reclaim_completed_tx - reclaims completed TX Descriptors
351 * @q: the Tx queue to reclaim completed descriptors from
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
355 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
356 * and frees the associated buffers if possible. If @maxreclaim == -1, then
357 * we'll use a default maximum. Called with the TX Queue locked.
367 * the Tx lock hold time O(1). in reclaim_completed_tx()
375 q->in_use -= reclaim; in reclaim_completed_tx()
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
384 * @q: the Tx queue to reclaim completed descriptors from
387 * Reclaims Tx descriptors that the SGE has indicated it has processed,
388 * and frees the associated buffers if possible. Called with the Tx
394 (void)reclaim_completed_tx(adap, q, -1, unmap); in cxgb4_reclaim_completed_tx()
401 struct sge *s = &adapter->sge; in get_buf_size()
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; in get_buf_size()
411 buf_size = PAGE_SIZE << s->fl_pg_order; in get_buf_size()
430 * free_rx_bufs - free the Rx buffers on an SGE free list
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
440 while (n--) { in free_rx_bufs()
441 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in free_rx_bufs()
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in free_rx_bufs()
447 put_page(d->page); in free_rx_bufs()
448 d->page = NULL; in free_rx_bufs()
449 if (++q->cidx == q->size) in free_rx_bufs()
450 q->cidx = 0; in free_rx_bufs()
451 q->avail--; in free_rx_bufs()
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
468 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in unmap_rx_buf()
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in unmap_rx_buf()
473 d->page = NULL; in unmap_rx_buf()
474 if (++q->cidx == q->size) in unmap_rx_buf()
475 q->cidx = 0; in unmap_rx_buf()
476 q->avail--; in unmap_rx_buf()
481 if (q->pend_cred >= 8) { in ring_fl_db()
482 u32 val = adap->params.arch.sge_fl_db; in ring_fl_db()
484 if (is_t4(adap->params.chip)) in ring_fl_db()
485 val |= PIDX_V(q->pend_cred / 8); in ring_fl_db()
487 val |= PIDX_T5_V(q->pend_cred / 8); in ring_fl_db()
498 if (unlikely(q->bar2_addr == NULL)) { in ring_fl_db()
500 val | QID_V(q->cntxt_id)); in ring_fl_db()
502 writel(val | QID_V(q->bar2_qid), in ring_fl_db()
503 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
510 q->pend_cred &= 7; in ring_fl_db()
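
The free-list doorbell above credits buffers to the hardware in units of 8; a small illustrative helper showing the same arithmetic (not part of the driver):

static unsigned int fl_db_units(unsigned int *pend_cred)
{
	unsigned int units = *pend_cred / 8;	/* groups of 8 buffers go into PIDX */

	*pend_cred &= 7;			/* carry the remainder forward */
	return units;
}

/* e.g. with *pend_cred == 37 this returns 4 (32 buffers are advertised)
 * and leaves 5 credits pending until at least 8 accumulate again.
 */
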
517 sd->page = pg; in set_rx_sw_desc()
518 sd->dma_addr = mapping; /* includes size low bits */ in set_rx_sw_desc()
522 * refill_fl - refill an SGE Rx buffer ring
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
538 struct sge *s = &adap->sge; in refill_fl()
541 unsigned int cred = q->avail; in refill_fl()
542 __be64 *d = &q->desc[q->pidx]; in refill_fl()
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) in refill_fl()
552 node = dev_to_node(adap->pdev_dev); in refill_fl()
554 if (s->fl_pg_order == 0) in refill_fl()
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); in refill_fl()
563 q->large_alloc_failed++; in refill_fl()
567 mapping = dma_map_page(adap->pdev_dev, pg, 0, in refill_fl()
568 PAGE_SIZE << s->fl_pg_order, in refill_fl()
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
571 __free_pages(pg, s->fl_pg_order); in refill_fl()
572 q->mapping_err++; in refill_fl()
581 q->avail++; in refill_fl()
582 if (++q->pidx == q->size) { in refill_fl()
583 q->pidx = 0; in refill_fl()
584 sd = q->sdesc; in refill_fl()
585 d = q->desc; in refill_fl()
587 n--; in refill_fl()
591 while (n--) { in refill_fl()
594 q->alloc_failed++; in refill_fl()
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, in refill_fl()
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
602 q->mapping_err++; in refill_fl()
610 q->avail++; in refill_fl()
611 if (++q->pidx == q->size) { in refill_fl()
612 q->pidx = 0; in refill_fl()
613 sd = q->sdesc; in refill_fl()
614 d = q->desc; in refill_fl()
618 out: cred = q->avail - cred; in refill_fl()
619 q->pend_cred += cred; in refill_fl()
624 q->low++; in refill_fl()
625 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
626 adap->sge.starving_fl); in refill_fl()
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
639 * alloc_ring - allocate resources for an SGE descriptor ring
649 * Allocates resources for an SGE descriptor ring, such as Tx queues,
681 * sgl_len - calculates the size of an SGL of the given capacity
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA in sgl_len()
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL in sgl_len()
693 * Address[i+1] } (this ensures that all addresses are on 64-bit in sgl_len()
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 in sgl_len()
701 * flits for every pair of the remaining N) +1 if (n-1) is odd; and in sgl_len()
702 * finally the "+((n-1)&1)" adds the one remaining flit needed if in sgl_len()
703 * (n-1) is odd ... in sgl_len()
705 n--; in sgl_len()
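
A stand-alone copy of the flit arithmetic the comment above describes (a flit is 8 bytes); the name is illustrative.

static unsigned int sgl_len_example(unsigned int n)
{
	/* "2" covers the 32-bit DSGL command, Length0 and Address0; each
	 * remaining pair of entries takes 3 flits; "(n & 1)" tops up the
	 * integer division when one unpaired entry is left over.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/* e.g. sgl_len_example(1) == 2, sgl_len_example(3) == 5 and
 * sgl_len_example(4) == 7 flits.
 */
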
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
713 * Returns the number of Tx descriptors needed for the supplied number
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size && in is_eth_imm()
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in is_eth_imm()
741 hdrlen = skb_shinfo(skb)->gso_size ? in is_eth_imm()
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) in is_eth_imm()
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
755 * Returns the number of flits needed for a Tx WR for the given Ethernet
766 * TX Packet header plus the skb data in the Work Request. in calc_tx_flits()
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); in calc_tx_flits()
774 * for the TX Packet Work Request and CPL. We always have a firmware in calc_tx_flits()
776 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL in calc_tx_flits()
778 * with an embedded TX Packet Write CPL message. in calc_tx_flits()
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in calc_tx_flits()
781 if (skb_shinfo(skb)->gso_size) { in calc_tx_flits()
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) { in calc_tx_flits()
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in calc_tx_flits()
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data, in calc_tx_flits()
807 * cxgb4_write_sgl - populate a scatter/gather list for a packet
809 * @q: the Tx queue we are writing into
812 * @start: start offset into skb main-body data to include in the SGL
818 * main body except for the first @start bytes. @sgl must be 16-byte
819 * aligned and within a Tx descriptor with available space. @end points
830 unsigned int nfrags = si->nr_frags; in cxgb4_write_sgl()
833 len = skb_headlen(skb) - start; in cxgb4_write_sgl()
835 sgl->len0 = htonl(len); in cxgb4_write_sgl()
836 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_sgl()
839 sgl->len0 = htonl(skb_frag_size(&si->frags[0])); in cxgb4_write_sgl()
840 sgl->addr0 = cpu_to_be64(addr[1]); in cxgb4_write_sgl()
843 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_sgl()
845 if (likely(--nfrags == 0)) in cxgb4_write_sgl()
852 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_sgl()
854 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { in cxgb4_write_sgl()
855 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
856 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); in cxgb4_write_sgl()
857 to->addr[0] = cpu_to_be64(addr[i]); in cxgb4_write_sgl()
858 to->addr[1] = cpu_to_be64(addr[++i]); in cxgb4_write_sgl()
861 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
862 to->len[1] = cpu_to_be32(0); in cxgb4_write_sgl()
863 to->addr[0] = cpu_to_be64(addr[i + 1]); in cxgb4_write_sgl()
865 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_sgl()
866 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_sgl()
869 memcpy(sgl->sge, buf, part0); in cxgb4_write_sgl()
870 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_sgl()
871 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_sgl()
872 end = (void *)q->desc + part1; in cxgb4_write_sgl()
874 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ in cxgb4_write_sgl()
879 /* cxgb4_write_partial_sgl - populate SGL for partial packet
881 * @q: the Tx queue we are writing into
906 frag_size = min(len, skb_linear_data_len - start); in cxgb4_write_partial_sgl()
907 sgl->len0 = htonl(frag_size); in cxgb4_write_partial_sgl()
908 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_partial_sgl()
909 len -= frag_size; in cxgb4_write_partial_sgl()
912 start -= skb_linear_data_len; in cxgb4_write_partial_sgl()
913 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
917 start -= frag_size; in cxgb4_write_partial_sgl()
919 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
923 frag_size = min(len, skb_frag_size(frag) - start); in cxgb4_write_partial_sgl()
924 sgl->len0 = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
925 sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start); in cxgb4_write_partial_sgl()
926 len -= frag_size; in cxgb4_write_partial_sgl()
941 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_partial_sgl()
948 frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); in cxgb4_write_partial_sgl()
949 to->len[i & 1] = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
950 to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]); in cxgb4_write_partial_sgl()
956 len -= frag_size; in cxgb4_write_partial_sgl()
963 to->len[1] = cpu_to_be32(0); in cxgb4_write_partial_sgl()
965 /* Copy from temporary buffer to Tx ring, in case we hit the in cxgb4_write_partial_sgl()
968 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_partial_sgl()
969 u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_partial_sgl()
972 memcpy(sgl->sge, buf, part0); in cxgb4_write_partial_sgl()
973 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_partial_sgl()
974 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_partial_sgl()
975 end = (void *)q->desc + part1; in cxgb4_write_partial_sgl()
978 /* 0-pad to multiple of 16 */ in cxgb4_write_partial_sgl()
982 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_partial_sgl()
999 count--; in cxgb_pio_copy()
1004 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
1006 * @q: the Tx queue
1009 * Ring the doorbell for a Tx queue.
1013 /* Make sure that all writes to the TX Descriptors are committed in cxgb4_ring_tx_db()
1021 if (unlikely(q->bar2_addr == NULL)) { in cxgb4_ring_tx_db()
1028 spin_lock_irqsave(&q->db_lock, flags); in cxgb4_ring_tx_db()
1029 if (!q->db_disabled) in cxgb4_ring_tx_db()
1031 QID_V(q->cntxt_id) | val); in cxgb4_ring_tx_db()
1033 q->db_pidx_inc += n; in cxgb4_ring_tx_db()
1034 q->db_pidx = q->pidx; in cxgb4_ring_tx_db()
1035 spin_unlock_irqrestore(&q->db_lock, flags); in cxgb4_ring_tx_db()
1047 /* If we're only writing a single TX Descriptor and we can use in cxgb4_ring_tx_db()
1051 if (n == 1 && q->bar2_qid == 0) { in cxgb4_ring_tx_db()
1052 int index = (q->pidx in cxgb4_ring_tx_db()
1053 ? (q->pidx - 1) in cxgb4_ring_tx_db()
1054 : (q->size - 1)); in cxgb4_ring_tx_db()
1055 u64 *wr = (u64 *)&q->desc[index]; in cxgb4_ring_tx_db()
1058 (q->bar2_addr + SGE_UDB_WCDOORBELL), in cxgb4_ring_tx_db()
1061 writel(val | QID_V(q->bar2_qid), in cxgb4_ring_tx_db()
1062 q->bar2_addr + SGE_UDB_KDOORBELL); in cxgb4_ring_tx_db()
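
A hedged summary of the doorbell selection visible above; the enum and helper are illustrative, not part of the driver API.

enum tx_db_path { DB_LEGACY_REG, DB_BAR2_WC, DB_BAR2_KDB };

static enum tx_db_path pick_tx_doorbell(const void *bar2_addr,
					unsigned int bar2_qid, int ndesc)
{
	if (!bar2_addr)				/* no BAR2 mapping: legacy doorbell register */
		return DB_LEGACY_REG;
	if (ndesc == 1 && bar2_qid == 0)	/* single descriptor: write-combined push */
		return DB_BAR2_WC;
	return DB_BAR2_KDB;			/* otherwise: BAR2 kernel doorbell */
}
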
1081 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1083 * @q: the Tx queue where the packet will be inlined
1084 * @pos: starting position in the Tx queue where to inline the packet
1086 * Inline a packet's contents directly into Tx descriptors, starting at
1087 * the given position within the Tx DMA ring.
1094 int left = (void *)q->stat - pos; in cxgb4_inline_tx_skb()
1097 if (likely(skb->len <= left)) { in cxgb4_inline_tx_skb()
1098 if (likely(!skb->data_len)) in cxgb4_inline_tx_skb()
1099 skb_copy_from_linear_data(skb, pos, skb->len); in cxgb4_inline_tx_skb()
1101 skb_copy_bits(skb, 0, pos, skb->len); in cxgb4_inline_tx_skb()
1102 pos += skb->len; in cxgb4_inline_tx_skb()
1105 skb_copy_bits(skb, left, q->desc, skb->len - left); in cxgb4_inline_tx_skb()
1106 pos = (void *)q->desc + (skb->len - left); in cxgb4_inline_tx_skb()
1109 /* 0-pad to multiple of 16 */ in cxgb4_inline_tx_skb()
1121 int left = (void *)q->stat - pos; in inline_tx_skb_header()
1124 memcpy(pos, skb->data, length); in inline_tx_skb_header()
1127 memcpy(pos, skb->data, left); in inline_tx_skb_header()
1128 memcpy(q->desc, skb->data + left, length - left); in inline_tx_skb_header()
1129 pos = (void *)q->desc + (length - left); in inline_tx_skb_header()
1131 /* 0-pad to multiple of 16 */ in inline_tx_skb_header()
1150 if (skb->encapsulation && in hwcsum()
1155 ver = inner_ip_hdr(skb)->version; in hwcsum()
1156 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : in hwcsum()
1157 inner_ipv6_hdr(skb)->nexthdr; in hwcsum()
1159 ver = ip_hdr(skb)->version; in hwcsum()
1160 proto = (ver == 4) ? ip_hdr(skb)->protocol : in hwcsum()
1161 ipv6_hdr(skb)->nexthdr; in hwcsum()
1197 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; in hwcsum()
1200 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; in hwcsum()
1214 TXPKT_CSUM_LOC_V(start + skb->csum_offset); in hwcsum()
1220 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1221 q->q.stops++; in eth_txq_stop()
1226 q->in_use += n; in txq_advance()
1227 q->pidx += n; in txq_advance()
1228 if (q->pidx >= q->size) in txq_advance()
1229 q->pidx -= q->size; in txq_advance()
1237 const struct cxgb_fcoe *fcoe = &pi->fcoe; in cxgb_fcoe_offload()
1239 if (!(fcoe->flags & CXGB_FCOE_ENABLED)) in cxgb_fcoe_offload()
1242 if (skb->protocol != htons(ETH_P_FCOE)) in cxgb_fcoe_offload()
1246 skb->mac_len = sizeof(struct ethhdr); in cxgb_fcoe_offload()
1248 skb_set_network_header(skb, skb->mac_len); in cxgb_fcoe_offload()
1249 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); in cxgb_fcoe_offload()
1252 return -ENOTSUPP; in cxgb_fcoe_offload()
1271 struct port_info *pi = netdev_priv(skb->dev); in cxgb_encap_offload_supported()
1272 struct adapter *adapter = pi->adapter; in cxgb_encap_offload_supported()
1274 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || in cxgb_encap_offload_supported()
1275 skb->inner_protocol != htons(ETH_P_TEB)) in cxgb_encap_offload_supported()
1280 l4_hdr = ip_hdr(skb)->protocol; in cxgb_encap_offload_supported()
1283 l4_hdr = ipv6_hdr(skb)->nexthdr; in cxgb_encap_offload_supported()
1291 if (adapter->vxlan_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1293 else if (adapter->geneve_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1310 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1312 bool v6 = (ip_hdr(skb)->version == 6); in t6_fill_tnl_lso()
1323 tnl_lso->op_to_IpIdSplitOut = htonl(val); in t6_fill_tnl_lso()
1325 tnl_lso->IpIdOffsetOut = 0; in t6_fill_tnl_lso()
1328 val = skb_inner_mac_header(skb) - skb_mac_header(skb); in t6_fill_tnl_lso()
1329 in_eth_xtra_len = skb_inner_network_header(skb) - in t6_fill_tnl_lso()
1330 skb_inner_mac_header(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1335 tnl_lso->UdpLenSetOut_to_TnlHdrLen = in t6_fill_tnl_lso()
1340 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; in t6_fill_tnl_lso()
1344 tnl_lso->UdpLenSetOut_to_TnlHdrLen |= in t6_fill_tnl_lso()
1348 tnl_lso->r1 = 0; in t6_fill_tnl_lso()
1351 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | in t6_fill_tnl_lso()
1354 tnl_lso->Flow_to_TcpHdrLen = htonl(val); in t6_fill_tnl_lso()
1356 tnl_lso->IpIdOffset = htons(0); in t6_fill_tnl_lso()
1358 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); in t6_fill_tnl_lso()
1359 tnl_lso->TCPSeqOffset = htonl(0); in t6_fill_tnl_lso()
1360 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); in t6_fill_tnl_lso()
1366 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in write_tso_wr()
1372 if (ssi->gso_type & SKB_GSO_TCPV6) in write_tso_wr()
1375 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | in write_tso_wr()
1380 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in write_tso_wr()
1381 lso->ipid_ofst = htons(0); in write_tso_wr()
1382 lso->mss = htons(ssi->gso_size); in write_tso_wr()
1383 lso->seqno_offset = htonl(0); in write_tso_wr()
1384 if (is_t4(adap->params.chip)) in write_tso_wr()
1385 lso->len = htonl(skb->len); in write_tso_wr()
1387 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); in write_tso_wr()
1393 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1395 * @eq: the Ethernet TX Queue
1396 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1398 * We're typically called here to update the state of an Ethernet TX
1399 * Queue with respect to the hardware's progress in consuming the TX
1402 * in regular timer-based Ethernet TX Queue maintenance.
1408 struct sge_txq *q = &eq->q; in t4_sge_eth_txq_egress_update()
1411 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1414 /* Reclaim pending completed TX Descriptors. */ in t4_sge_eth_txq_egress_update()
1415 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); in t4_sge_eth_txq_egress_update()
1417 hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in t4_sge_eth_txq_egress_update()
1418 hw_in_use = q->pidx - hw_cidx; in t4_sge_eth_txq_egress_update()
1420 hw_in_use += q->size; in t4_sge_eth_txq_egress_update()
1422 /* If the TX Queue is currently stopped and there's now more than half in t4_sge_eth_txq_egress_update()
1425 * currently buffered Coalesced TX Work Request. in t4_sge_eth_txq_egress_update()
1427 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1428 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1429 eq->q.restarts++; in t4_sge_eth_txq_egress_update()
1432 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
1446 if (unlikely(skb->len < min_pkt_len)) in cxgb4_validate_skb()
1447 return -EINVAL; in cxgb4_validate_skb()
1450 max_pkt_len = ETH_HLEN + dev->mtu; in cxgb4_validate_skb()
1455 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) in cxgb4_validate_skb()
1456 return -EINVAL; in cxgb4_validate_skb()
1464 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; in write_eo_udp_wr()
1465 wr->u.udpseg.ethlen = skb_network_offset(skb); in write_eo_udp_wr()
1466 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_udp_wr()
1467 wr->u.udpseg.udplen = sizeof(struct udphdr); in write_eo_udp_wr()
1468 wr->u.udpseg.rtplen = 0; in write_eo_udp_wr()
1469 wr->u.udpseg.r4 = 0; in write_eo_udp_wr()
1470 if (skb_shinfo(skb)->gso_size) in write_eo_udp_wr()
1471 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); in write_eo_udp_wr()
1473 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len); in write_eo_udp_wr()
1474 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; in write_eo_udp_wr()
1475 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_udp_wr()
1481 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1485 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1511 adap = pi->adapter; in cxgb4_eth_xmit()
1514 if (xfrm_offload(skb) && !ssi->gso_size) in cxgb4_eth_xmit()
1515 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev); in cxgb4_eth_xmit()
1520 (skb->len - skb_tcp_all_headers(skb))) in cxgb4_eth_xmit()
1521 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev); in cxgb4_eth_xmit()
1526 if (!(adap->ptp_tx_skb)) { in cxgb4_eth_xmit()
1527 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in cxgb4_eth_xmit()
1528 adap->ptp_tx_skb = skb_get(skb); in cxgb4_eth_xmit()
1532 q = &adap->sge.ptptxq; in cxgb4_eth_xmit()
1534 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in cxgb4_eth_xmit()
1538 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_eth_xmit()
1543 if (unlikely(ret == -EOPNOTSUPP)) in cxgb4_eth_xmit()
1547 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in cxgb4_eth_xmit()
1550 credits = txq_avail(&q->q) - ndesc; in cxgb4_eth_xmit()
1554 dev_err(adap->pdev_dev, in cxgb4_eth_xmit()
1555 "%s: Tx ring %u full while queue awake!\n", in cxgb4_eth_xmit()
1556 dev->name, qidx); in cxgb4_eth_xmit()
1563 if (skb->encapsulation && chip_ver > CHELSIO_T5) in cxgb4_eth_xmit()
1566 last_desc = q->q.pidx + ndesc - 1; in cxgb4_eth_xmit()
1567 if (last_desc >= q->q.size) in cxgb4_eth_xmit()
1568 last_desc -= q->q.size; in cxgb4_eth_xmit()
1569 sgl_sdesc = &q->q.sdesc[last_desc]; in cxgb4_eth_xmit()
1572 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { in cxgb4_eth_xmit()
1573 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_eth_xmit()
1574 q->mapping_err++; in cxgb4_eth_xmit()
1581 * packet, we'll be below our "stop threshold" so stop the TX in cxgb4_eth_xmit()
1593 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1594 eowr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1595 wr->equiq_to_len16 = htonl(wr_mid); in cxgb4_eth_xmit()
1596 wr->r3 = cpu_to_be64(0); in cxgb4_eth_xmit()
1597 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in cxgb4_eth_xmit()
1602 len = immediate ? skb->len : 0; in cxgb4_eth_xmit()
1604 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) { in cxgb4_eth_xmit()
1613 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_eth_xmit()
1623 if (iph->version == 4) { in cxgb4_eth_xmit()
1624 iph->check = 0; in cxgb4_eth_xmit()
1625 iph->tot_len = 0; in cxgb4_eth_xmit()
1626 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl); in cxgb4_eth_xmit()
1628 if (skb->ip_summed == CHECKSUM_PARTIAL) in cxgb4_eth_xmit()
1629 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1632 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1635 q->tso++; in cxgb4_eth_xmit()
1636 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1637 } else if (ssi->gso_size) { in cxgb4_eth_xmit()
1641 hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in cxgb4_eth_xmit()
1643 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in cxgb4_eth_xmit()
1646 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1649 sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start, in cxgb4_eth_xmit()
1652 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1653 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1656 q->uso++; in cxgb4_eth_xmit()
1657 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1663 wr->op_immdlen = htonl(FW_WR_OP_V(op) | in cxgb4_eth_xmit()
1667 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_eth_xmit()
1668 cntrl = hwcsum(adap->params.chip, skb) | in cxgb4_eth_xmit()
1670 q->tx_cso++; in cxgb4_eth_xmit()
1674 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { in cxgb4_eth_xmit()
1679 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1680 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1681 sgl = (void *)q->q.desc; in cxgb4_eth_xmit()
1685 q->vlan_ins++; in cxgb4_eth_xmit()
1688 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb4_eth_xmit()
1690 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); in cxgb4_eth_xmit()
1694 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | in cxgb4_eth_xmit()
1695 TXPKT_PF_V(adap->pf); in cxgb4_eth_xmit()
1699 if (is_t4(adap->params.chip)) in cxgb4_eth_xmit()
1700 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1702 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1704 cpl->ctrl0 = htonl(ctrl0); in cxgb4_eth_xmit()
1705 cpl->pack = htons(0); in cxgb4_eth_xmit()
1706 cpl->len = htons(skb->len); in cxgb4_eth_xmit()
1707 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_eth_xmit()
1710 cxgb4_inline_tx_skb(skb, &q->q, sgl); in cxgb4_eth_xmit()
1713 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off, in cxgb4_eth_xmit()
1714 sgl_sdesc->addr); in cxgb4_eth_xmit()
1716 sgl_sdesc->skb = skb; in cxgb4_eth_xmit()
1719 txq_advance(&q->q, ndesc); in cxgb4_eth_xmit()
1721 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_eth_xmit()
1735 * 64-bit PCI DMA addresses.
1747 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1765 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1768 * Returns the number of flits needed for a TX Work Request for the
1777 * TX Packet header plus the skb data in the Work Request. in t4vf_calc_tx_flits()
1780 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), in t4vf_calc_tx_flits()
1785 * for the TX Packet Work Request and CPL. We always have a firmware in t4vf_calc_tx_flits()
1787 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL in t4vf_calc_tx_flits()
1789 * with an embedded TX Packet Write CPL message. in t4vf_calc_tx_flits()
1791 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in t4vf_calc_tx_flits()
1792 if (skb_shinfo(skb)->gso_size) in t4vf_calc_tx_flits()
1803 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1807 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
1831 BUILD_BUG_ON(sizeof(wr->firmware) != in cxgb4_vf_eth_xmit()
1832 (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + in cxgb4_vf_eth_xmit()
1833 sizeof(wr->ethtype) + sizeof(wr->vlantci))); in cxgb4_vf_eth_xmit()
1834 fw_hdr_copy_len = sizeof(wr->firmware); in cxgb4_vf_eth_xmit()
1839 /* Figure out which TX Queue we're going to use. */ in cxgb4_vf_eth_xmit()
1841 adapter = pi->adapter; in cxgb4_vf_eth_xmit()
1843 WARN_ON(qidx >= pi->nqsets); in cxgb4_vf_eth_xmit()
1844 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1846 /* Take this opportunity to reclaim any TX Descriptors whose DMA in cxgb4_vf_eth_xmit()
1849 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1851 /* Calculate the number of flits and TX Descriptors we're going to in cxgb4_vf_eth_xmit()
1852 * need along with how many TX Descriptors will be left over after in cxgb4_vf_eth_xmit()
1857 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1861 * TX Queue and return a "busy" condition. The queue will get in cxgb4_vf_eth_xmit()
1866 dev_err(adapter->pdev_dev, in cxgb4_vf_eth_xmit()
1867 "%s: TX ring %u full while queue awake!\n", in cxgb4_vf_eth_xmit()
1868 dev->name, qidx); in cxgb4_vf_eth_xmit()
1872 last_desc = txq->q.pidx + ndesc - 1; in cxgb4_vf_eth_xmit()
1873 if (last_desc >= txq->q.size) in cxgb4_vf_eth_xmit()
1874 last_desc -= txq->q.size; in cxgb4_vf_eth_xmit()
1875 sgl_sdesc = &txq->q.sdesc[last_desc]; in cxgb4_vf_eth_xmit()
1878 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, in cxgb4_vf_eth_xmit()
1879 sgl_sdesc->addr) < 0)) { in cxgb4_vf_eth_xmit()
1881 * be in-lined directly into the Work Request) and the mapping in cxgb4_vf_eth_xmit()
1884 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_vf_eth_xmit()
1885 txq->mapping_err++; in cxgb4_vf_eth_xmit()
1889 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); in cxgb4_vf_eth_xmit()
1893 * packet, we'll be below our "stop threshold" so stop the TX in cxgb4_vf_eth_xmit()
1906 * the WR Header wrapping around the TX Descriptor Ring. If our in cxgb4_vf_eth_xmit()
1907 * maximum header size ever exceeds one TX Descriptor, we'll need to in cxgb4_vf_eth_xmit()
1911 wr = (void *)&txq->q.desc[txq->q.pidx]; in cxgb4_vf_eth_xmit()
1912 wr->equiq_to_len16 = cpu_to_be32(wr_mid); in cxgb4_vf_eth_xmit()
1913 wr->r3[0] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1914 wr->r3[1] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1915 skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len); in cxgb4_vf_eth_xmit()
1919 * message with an encapsulated TX Packet CPL message. Otherwise we in cxgb4_vf_eth_xmit()
1920 * just use a TX Packet CPL message. in cxgb4_vf_eth_xmit()
1923 if (ssi->gso_size) { in cxgb4_vf_eth_xmit()
1925 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; in cxgb4_vf_eth_xmit()
1927 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in cxgb4_vf_eth_xmit()
1929 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1934 lso->lso_ctrl = in cxgb4_vf_eth_xmit()
1941 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in cxgb4_vf_eth_xmit()
1942 lso->ipid_ofst = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
1943 lso->mss = cpu_to_be16(ssi->gso_size); in cxgb4_vf_eth_xmit()
1944 lso->seqno_offset = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1945 if (is_t4(adapter->params.chip)) in cxgb4_vf_eth_xmit()
1946 lso->len = cpu_to_be32(skb->len); in cxgb4_vf_eth_xmit()
1948 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); in cxgb4_vf_eth_xmit()
1950 /* Set up TX Packet CPL pointer, control word and perform in cxgb4_vf_eth_xmit()
1963 txq->tso++; in cxgb4_vf_eth_xmit()
1964 txq->tx_cso += ssi->gso_segs; in cxgb4_vf_eth_xmit()
1969 ? skb->len + sizeof(*cpl) in cxgb4_vf_eth_xmit()
1971 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1975 /* Set up TX Packet CPL pointer, control word and perform in cxgb4_vf_eth_xmit()
1979 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_vf_eth_xmit()
1980 cntrl = hwcsum(adapter->params.chip, skb) | in cxgb4_vf_eth_xmit()
1982 txq->tx_cso++; in cxgb4_vf_eth_xmit()
1992 txq->vlan_ins++; in cxgb4_vf_eth_xmit()
1996 /* Fill in the TX Packet CPL message header. */ in cxgb4_vf_eth_xmit()
1997 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in cxgb4_vf_eth_xmit()
1998 TXPKT_INTF_V(pi->port_id) | in cxgb4_vf_eth_xmit()
2000 cpl->pack = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
2001 cpl->len = cpu_to_be16(skb->len); in cxgb4_vf_eth_xmit()
2002 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_vf_eth_xmit()
2004 /* Fill in the body of the TX Packet CPL message with either in-lined in cxgb4_vf_eth_xmit()
2008 /* In-line the packet's data and free the skb since we don't in cxgb4_vf_eth_xmit()
2011 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); in cxgb4_vf_eth_xmit()
2014 /* Write the skb's Scatter/Gather list into the TX Packet CPL in cxgb4_vf_eth_xmit()
2017 * in the Software Descriptor corresponding to the last TX in cxgb4_vf_eth_xmit()
2020 * The retained skb will be freed when the corresponding TX in cxgb4_vf_eth_xmit()
2024 * completion notifications to us and we mostly perform TX in cxgb4_vf_eth_xmit()
2028 * TX packets arriving to run the destructors of completed in cxgb4_vf_eth_xmit()
2030 * Sometimes we do not get such new packets causing TX to in cxgb4_vf_eth_xmit()
2038 * extra memory is reasonable (limited by the number of TX in cxgb4_vf_eth_xmit()
2051 struct sge_txq *tq = &txq->q; in cxgb4_vf_eth_xmit()
2053 /* If the Work Request header was an exact multiple of our TX in cxgb4_vf_eth_xmit()
2055 * pointer lines up exactly with the end of our TX Descriptor in cxgb4_vf_eth_xmit()
2059 if (unlikely((void *)sgl == (void *)tq->stat)) { in cxgb4_vf_eth_xmit()
2060 sgl = (void *)tq->desc; in cxgb4_vf_eth_xmit()
2061 end = (void *)((void *)tq->desc + in cxgb4_vf_eth_xmit()
2062 ((void *)end - (void *)tq->stat)); in cxgb4_vf_eth_xmit()
2065 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); in cxgb4_vf_eth_xmit()
2067 sgl_sdesc->skb = skb; in cxgb4_vf_eth_xmit()
2070 /* Advance our internal TX Queue state, tell the hardware about in cxgb4_vf_eth_xmit()
2071 * the new TX descriptors and return success. in cxgb4_vf_eth_xmit()
2073 txq_advance(&txq->q, ndesc); in cxgb4_vf_eth_xmit()
2075 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); in cxgb4_vf_eth_xmit()
2079 /* An error of some sort happened. Free the TX skb and tell the in cxgb4_vf_eth_xmit()
2087 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
2088 * @q: the SGE control Tx queue
2091 * for Tx queues that send only immediate data (presently just
2096 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaim_completed_tx_imm()
2097 int reclaim = hw_cidx - q->cidx; in reclaim_completed_tx_imm()
2100 reclaim += q->size; in reclaim_completed_tx_imm()
2102 q->in_use -= reclaim; in reclaim_completed_tx_imm()
2103 q->cidx = hw_cidx; in reclaim_completed_tx_imm()
2111 val -= max; in eosw_txq_advance_index()
2121 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2122 while (ndesc--) { in cxgb4_eosw_txq_free_desc()
2123 if (d->skb) { in cxgb4_eosw_txq_free_desc()
2124 if (d->addr[0]) { in cxgb4_eosw_txq_free_desc()
2125 unmap_skb(adap->pdev_dev, d->skb, d->addr); in cxgb4_eosw_txq_free_desc()
2126 memset(d->addr, 0, sizeof(d->addr)); in cxgb4_eosw_txq_free_desc()
2128 dev_consume_skb_any(d->skb); in cxgb4_eosw_txq_free_desc()
2129 d->skb = NULL; in cxgb4_eosw_txq_free_desc()
2131 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, in cxgb4_eosw_txq_free_desc()
2132 eosw_txq->ndesc); in cxgb4_eosw_txq_free_desc()
2133 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2139 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); in eosw_txq_advance()
2140 eosw_txq->inuse += n; in eosw_txq_advance()
2146 if (eosw_txq->inuse == eosw_txq->ndesc) in eosw_txq_enqueue()
2147 return -ENOMEM; in eosw_txq_enqueue()
2149 eosw_txq->desc[eosw_txq->pidx].skb = skb; in eosw_txq_enqueue()
2155 return eosw_txq->desc[eosw_txq->last_pidx].skb; in eosw_txq_peek()
2165 if (skb_shinfo(skb)->gso_size && in ethofld_calc_tx_flits()
2166 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in ethofld_calc_tx_flits()
2174 if (skb_shinfo(skb)->nr_frags > 0) { in ethofld_calc_tx_flits()
2175 if (skb_headlen(skb) - hdr_len) in ethofld_calc_tx_flits()
2176 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); in ethofld_calc_tx_flits()
2178 nsgl = sgl_len(skb_shinfo(skb)->nr_frags); in ethofld_calc_tx_flits()
2179 } else if (skb->len - hdr_len) { in ethofld_calc_tx_flits()
2196 ver = ip_hdr(skb)->version; in write_eo_wr()
2197 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; in write_eo_wr()
2201 if (skb_shinfo(skb)->gso_size && in write_eo_wr()
2202 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in write_eo_wr()
2206 if (!eosw_txq->ncompl || in write_eo_wr()
2207 (eosw_txq->last_compl + wrlen16) >= in write_eo_wr()
2208 (adap->params.ofldq_wr_cred / 2)) { in write_eo_wr()
2210 eosw_txq->ncompl++; in write_eo_wr()
2211 eosw_txq->last_compl = 0; in write_eo_wr()
2214 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in write_eo_wr()
2217 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | in write_eo_wr()
2218 FW_WR_FLOWID_V(eosw_txq->hwtid)); in write_eo_wr()
2219 wr->r3 = 0; in write_eo_wr()
2223 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; in write_eo_wr()
2224 wr->u.tcpseg.ethlen = skb_network_offset(skb); in write_eo_wr()
2225 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_wr()
2226 wr->u.tcpseg.tcplen = tcp_hdrlen(skb); in write_eo_wr()
2227 wr->u.tcpseg.tsclk_tsoff = 0; in write_eo_wr()
2228 wr->u.tcpseg.r4 = 0; in write_eo_wr()
2229 wr->u.tcpseg.r5 = 0; in write_eo_wr()
2230 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_wr()
2232 if (ssi->gso_size) { in write_eo_wr()
2235 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); in write_eo_wr()
2238 wr->u.tcpseg.mss = cpu_to_be16(0xffff); in write_eo_wr()
2243 eosw_txq->cred -= wrlen16; in write_eo_wr()
2244 eosw_txq->last_compl += wrlen16; in write_eo_wr()
2265 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; in ethofld_hard_xmit()
2266 spin_lock(&eohw_txq->lock); in ethofld_hard_xmit()
2267 reclaim_completed_tx_imm(&eohw_txq->q); in ethofld_hard_xmit()
2269 d = &eosw_txq->desc[eosw_txq->last_pidx]; in ethofld_hard_xmit()
2270 skb = d->skb; in ethofld_hard_xmit()
2273 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; in ethofld_hard_xmit()
2274 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && in ethofld_hard_xmit()
2275 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { in ethofld_hard_xmit()
2276 hdr_len = skb->len; in ethofld_hard_xmit()
2279 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) in ethofld_hard_xmit()
2285 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in ethofld_hard_xmit()
2286 data_len = skb->len - hdr_len; in ethofld_hard_xmit()
2293 left = txq_avail(&eohw_txq->q) - ndesc; in ethofld_hard_xmit()
2300 * credits and invoke the Tx path again. in ethofld_hard_xmit()
2302 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { in ethofld_hard_xmit()
2303 ret = -ENOMEM; in ethofld_hard_xmit()
2309 eosw_txq->state = next_state; in ethofld_hard_xmit()
2310 eosw_txq->cred -= wrlen16; in ethofld_hard_xmit()
2311 eosw_txq->ncompl++; in ethofld_hard_xmit()
2312 eosw_txq->last_compl = 0; in ethofld_hard_xmit()
2317 cntrl = hwcsum(adap->params.chip, skb); in ethofld_hard_xmit()
2321 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in ethofld_hard_xmit()
2322 TXPKT_INTF_V(pi->tx_chan) | in ethofld_hard_xmit()
2323 TXPKT_PF_V(adap->pf)); in ethofld_hard_xmit()
2324 cpl->pack = 0; in ethofld_hard_xmit()
2325 cpl->len = cpu_to_be16(skb->len); in ethofld_hard_xmit()
2326 cpl->ctrl1 = cpu_to_be64(cntrl); in ethofld_hard_xmit()
2331 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, in ethofld_hard_xmit()
2334 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); in ethofld_hard_xmit()
2336 memset(d->addr, 0, sizeof(d->addr)); in ethofld_hard_xmit()
2337 eohw_txq->mapping_err++; in ethofld_hard_xmit()
2343 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2344 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2347 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { in ethofld_hard_xmit()
2352 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2354 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2355 sgl = (void *)eohw_txq->q.desc; in ethofld_hard_xmit()
2358 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, in ethofld_hard_xmit()
2359 d->addr); in ethofld_hard_xmit()
2362 if (skb_shinfo(skb)->gso_size) { in ethofld_hard_xmit()
2363 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in ethofld_hard_xmit()
2364 eohw_txq->uso++; in ethofld_hard_xmit()
2366 eohw_txq->tso++; in ethofld_hard_xmit()
2367 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; in ethofld_hard_xmit()
2368 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in ethofld_hard_xmit()
2369 eohw_txq->tx_cso++; in ethofld_hard_xmit()
2373 eohw_txq->vlan_ins++; in ethofld_hard_xmit()
2375 txq_advance(&eohw_txq->q, ndesc); in ethofld_hard_xmit()
2376 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); in ethofld_hard_xmit()
2377 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); in ethofld_hard_xmit()
2380 spin_unlock(&eohw_txq->lock); in ethofld_hard_xmit()
2389 switch (eosw_txq->state) { in ethofld_xmit()
2393 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in ethofld_xmit()
2395 pktcount += eosw_txq->ndesc; in ethofld_xmit()
2404 while (pktcount--) { in ethofld_xmit()
2407 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, in ethofld_xmit()
2408 eosw_txq->ndesc); in ethofld_xmit()
2432 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; in cxgb4_ethofld_xmit()
2433 qid = skb_get_queue_mapping(skb) - pi->nqsets; in cxgb4_ethofld_xmit()
2434 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; in cxgb4_ethofld_xmit()
2435 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2436 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_xmit()
2451 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2455 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2466 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) in t4_start_xmit()
2469 if (unlikely(qid >= pi->nqsets)) in t4_start_xmit()
2476 spin_lock(&adap->ptp_lock); in t4_start_xmit()
2478 spin_unlock(&adap->ptp_lock); in t4_start_xmit()
2487 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in eosw_txq_flush_pending_skbs()
2488 int pidx = eosw_txq->pidx; in eosw_txq_flush_pending_skbs()
2495 pktcount += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2497 while (pktcount--) { in eosw_txq_flush_pending_skbs()
2498 pidx--; in eosw_txq_flush_pending_skbs()
2500 pidx += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2502 skb = eosw_txq->desc[pidx].skb; in eosw_txq_flush_pending_skbs()
2505 eosw_txq->desc[pidx].skb = NULL; in eosw_txq_flush_pending_skbs()
2506 eosw_txq->inuse--; in eosw_txq_flush_pending_skbs()
2510 eosw_txq->pidx = eosw_txq->last_pidx + 1; in eosw_txq_flush_pending_skbs()
2514 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2539 entry = cxgb4_lookup_eotid(&adap->tids, eotid); in cxgb4_ethofld_send_flowc()
2541 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2543 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_send_flowc()
2545 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2547 if (!(adap->flags & CXGB4_FW_OK)) { in cxgb4_ethofld_send_flowc()
2549 complete(&eosw_txq->completion); in cxgb4_ethofld_send_flowc()
2550 return -EIO; in cxgb4_ethofld_send_flowc()
2555 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2557 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2559 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) in cxgb4_ethofld_send_flowc()
2564 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_send_flowc()
2573 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; in cxgb4_ethofld_send_flowc()
2574 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | in cxgb4_ethofld_send_flowc()
2575 FW_WR_FLOWID_V(eosw_txq->hwtid)); in cxgb4_ethofld_send_flowc()
2576 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | in cxgb4_ethofld_send_flowc()
2579 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; in cxgb4_ethofld_send_flowc()
2580 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); in cxgb4_ethofld_send_flowc()
2581 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; in cxgb4_ethofld_send_flowc()
2582 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2583 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; in cxgb4_ethofld_send_flowc()
2584 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2585 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; in cxgb4_ethofld_send_flowc()
2586 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); in cxgb4_ethofld_send_flowc()
2587 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in cxgb4_ethofld_send_flowc()
2588 flowc->mnemval[4].val = cpu_to_be32(tc); in cxgb4_ethofld_send_flowc()
2589 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; in cxgb4_ethofld_send_flowc()
2590 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? in cxgb4_ethofld_send_flowc()
2595 * termination FLOWC. in cxgb4_ethofld_send_flowc()
2604 eosw_txq->state = next_state; in cxgb4_ethofld_send_flowc()
2605 eosw_txq->flowc_idx = eosw_txq->pidx; in cxgb4_ethofld_send_flowc()
2609 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2614 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2619 * is_imm - check whether a packet can be sent as immediate data
2626 return skb->len <= MAX_CTRL_WR_LEN; in is_imm()
2630 * ctrlq_check_stop - check if a control queue is full and should stop
2641 reclaim_completed_tx_imm(&q->q); in ctrlq_check_stop()
2642 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in ctrlq_check_stop()
2643 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ctrlq_check_stop()
2644 q->q.stops++; in ctrlq_check_stop()
2645 q->full = 1; in ctrlq_check_stop()
2654 struct adapter *adap = pi->adapter; in cxgb4_selftest_lb_pkt()
2669 lb = &pi->ethtool_lb; in cxgb4_selftest_lb_pkt()
2670 lb->loopback = 1; in cxgb4_selftest_lb_pkt()
2672 q = &adap->sge.ethtxq[pi->first_qset]; in cxgb4_selftest_lb_pkt()
2673 __netif_tx_lock_bh(q->txq); in cxgb4_selftest_lb_pkt()
2675 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_selftest_lb_pkt()
2676 credits = txq_avail(&q->q) - ndesc; in cxgb4_selftest_lb_pkt()
2678 __netif_tx_unlock_bh(q->txq); in cxgb4_selftest_lb_pkt()
2679 return -ENOMEM; in cxgb4_selftest_lb_pkt()
2682 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_selftest_lb_pkt()
2685 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_selftest_lb_pkt()
2688 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); in cxgb4_selftest_lb_pkt()
2689 wr->r3 = cpu_to_be64(0); in cxgb4_selftest_lb_pkt()
2694 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | in cxgb4_selftest_lb_pkt()
2695 TXPKT_INTF_V(pi->tx_chan + 4); in cxgb4_selftest_lb_pkt()
2697 cpl->ctrl0 = htonl(ctrl0); in cxgb4_selftest_lb_pkt()
2698 cpl->pack = htons(0); in cxgb4_selftest_lb_pkt()
2699 cpl->len = htons(pkt_len); in cxgb4_selftest_lb_pkt()
2700 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); in cxgb4_selftest_lb_pkt()
2704 ether_addr_copy(&sgl[i], netdev->dev_addr); in cxgb4_selftest_lb_pkt()
2710 init_completion(&lb->completion); in cxgb4_selftest_lb_pkt()
2711 txq_advance(&q->q, ndesc); in cxgb4_selftest_lb_pkt()
2712 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_selftest_lb_pkt()
2713 __netif_tx_unlock_bh(q->txq); in cxgb4_selftest_lb_pkt()
2716 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); in cxgb4_selftest_lb_pkt()
2718 ret = -ETIMEDOUT; in cxgb4_selftest_lb_pkt()
2720 ret = lb->result; in cxgb4_selftest_lb_pkt()
2722 lb->loopback = 0; in cxgb4_selftest_lb_pkt()
2728 * ctrl_xmit - send a packet through an SGE control Tx queue
2732 * Send a packet through an SGE control Tx queue. Packets sent through
2746 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); in ctrl_xmit()
2747 spin_lock(&q->sendq.lock); in ctrl_xmit()
2749 if (unlikely(q->full)) { in ctrl_xmit()
2750 skb->priority = ndesc; /* save for restart */ in ctrl_xmit()
2751 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
2752 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2756 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in ctrl_xmit()
2757 cxgb4_inline_tx_skb(skb, &q->q, wr); in ctrl_xmit()
2759 txq_advance(&q->q, ndesc); in ctrl_xmit()
2760 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) in ctrl_xmit()
2763 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
2764 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2771 * restart_ctrlq - restart a suspended control queue
2774 * Resumes transmission on a suspended Tx control queue.
2782 spin_lock(&q->sendq.lock); in restart_ctrlq()
2783 reclaim_completed_tx_imm(&q->q); in restart_ctrlq()
2784 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ in restart_ctrlq()
2786 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
2788 unsigned int ndesc = skb->priority; /* previously saved */ in restart_ctrlq()
2792 * wait times. q->full is still set so new skbs will be queued. in restart_ctrlq()
2794 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in restart_ctrlq()
2795 txq_advance(&q->q, ndesc); in restart_ctrlq()
2796 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2798 cxgb4_inline_tx_skb(skb, &q->q, wr); in restart_ctrlq()
2801 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in restart_ctrlq()
2802 unsigned long old = q->q.stops; in restart_ctrlq()
2805 if (q->q.stops != old) { /* suspended anew */ in restart_ctrlq()
2806 spin_lock(&q->sendq.lock); in restart_ctrlq()
2811 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2814 spin_lock(&q->sendq.lock); in restart_ctrlq()
2816 q->full = 0; in restart_ctrlq()
2819 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2820 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2824 * t4_mgmt_tx - send a management message
2835 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); in t4_mgmt_tx()
2841 * is_ofld_imm - check whether a packet can be sent as immediate data
2846 * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
2852 struct work_request_hdr *req = (struct work_request_hdr *)skb->data; in is_ofld_imm()
2853 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); in is_ofld_imm()
2856 return skb->len <= MAX_IMM_ULPTX_WR_LEN; in is_ofld_imm()
2858 return skb->len <= SGE_MAX_WR_LEN; in is_ofld_imm()
2860 return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; in is_ofld_imm()
2864 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2876 return DIV_ROUND_UP(skb->len, 8); in calc_tx_flits_ofld()
2879 cnt = skb_shinfo(skb)->nr_frags; in calc_tx_flits_ofld()
2886 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2889 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2895 q->mapping_err++; in txq_stop_maperr()
2896 q->q.stops++; in txq_stop_maperr()
2897 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
2898 q->adap->sge.txq_maperr); in txq_stop_maperr()
2902 * ofldtxq_stop - stop an offload Tx queue that has become full
2906 * Stops an offload Tx queue that has become full and modifies the packet
2911 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ofldtxq_stop()
2912 q->q.stops++; in ofldtxq_stop()
2913 q->full = 1; in ofldtxq_stop()
2917 * service_ofldq - service/restart a suspended offload queue
2920 * Services an offload Tx queue by moving packets from its Pending Send
2921 * Queue to the Hardware TX ring. The function starts and ends with the
2923 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
2935 __must_hold(&q->sendq.lock) in service_ofldq()
2951 if (q->service_ofldq_running) in service_ofldq()
2953 q->service_ofldq_running = true; in service_ofldq()
2955 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { in service_ofldq()
2959 * this one. We don't need to lock to guard the TX Ring in service_ofldq()
2963 spin_unlock(&q->sendq.lock); in service_ofldq()
2965 cxgb4_reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
2967 flits = skb->priority; /* previously saved */ in service_ofldq()
2969 credits = txq_avail(&q->q) - ndesc; in service_ofldq()
2972 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); in service_ofldq()
2974 pos = (u64 *)&q->q.desc[q->q.pidx]; in service_ofldq()
2976 cxgb4_inline_tx_skb(skb, &q->q, pos); in service_ofldq()
2977 else if (cxgb4_map_skb(q->adap->pdev_dev, skb, in service_ofldq()
2978 (dma_addr_t *)skb->head)) { in service_ofldq()
2980 spin_lock(&q->sendq.lock); in service_ofldq()
2986 * So we need to deal with wrap-around here. in service_ofldq()
2990 txq = &q->q; in service_ofldq()
2991 pos = (void *)inline_tx_skb_header(skb, &q->q, in service_ofldq()
2995 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
2996 end = (void *)txq->desc + left; in service_ofldq()
3003 if (pos == (u64 *)txq->stat) { in service_ofldq()
3004 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3005 end = (void *)txq->desc + left; in service_ofldq()
3006 pos = (void *)txq->desc; in service_ofldq()
3009 cxgb4_write_sgl(skb, &q->q, (void *)pos, in service_ofldq()
3011 (dma_addr_t *)skb->head); in service_ofldq()
3013 skb->dev = q->adap->port[0]; in service_ofldq()
3014 skb->destructor = deferred_unmap_destructor; in service_ofldq()
3016 last_desc = q->q.pidx + ndesc - 1; in service_ofldq()
3017 if (last_desc >= q->q.size) in service_ofldq()
3018 last_desc -= q->q.size; in service_ofldq()
3019 q->q.sdesc[last_desc].skb = skb; in service_ofldq()
3022 txq_advance(&q->q, ndesc); in service_ofldq()
3025 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3030 * skb we've just successfully transferred to the TX Ring and in service_ofldq()
3034 spin_lock(&q->sendq.lock); in service_ofldq()
3035 __skb_unlink(skb, &q->sendq); in service_ofldq()
3040 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3045 q->service_ofldq_running = false; in service_ofldq()
3049 * ofld_xmit - send a packet through an offload queue
3050 * @q: the Tx offload queue
3057 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ in ofld_xmit()
3058 spin_lock(&q->sendq.lock); in ofld_xmit()
3066 * or filling the Hardware TX Ring. in ofld_xmit()
3068 __skb_queue_tail(&q->sendq, skb); in ofld_xmit()
3069 if (q->sendq.qlen == 1) in ofld_xmit()
3072 spin_unlock(&q->sendq.lock); in ofld_xmit()
3077 * restart_ofldq - restart a suspended offload queue
3080 * Resumes transmission on a suspended Tx offload queue.
3086 spin_lock(&q->sendq.lock); in restart_ofldq()
3087 q->full = 0; /* the queue actually is completely empty now */ in restart_ofldq()
3089 spin_unlock(&q->sendq.lock); in restart_ofldq()
3093 * skb_txq - return the Tx queue an offload packet should use
3096 * Returns the Tx queue an offload packet should use as indicated by bits
3097 * 1-15 in the packet's queue_mapping.
3101 return skb->queue_mapping >> 1; in skb_txq()
3105 * is_ctrl_pkt - return whether an offload packet is a control packet
3109 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
3113 return skb->queue_mapping & 1; in is_ctrl_pkt()
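
An illustrative encoding of the queue_mapping convention used by skb_txq() and is_ctrl_pkt() above; the helper is hypothetical.

static unsigned int ofld_queue_mapping(unsigned int txq_idx, int is_ctrl)
{
	return (txq_idx << 1) | (is_ctrl ? 1 : 0);	/* bit 0: ctrl, bits 1-15: queue */
}

/* e.g. ofld_queue_mapping(5, 0) == 10: a data packet for offload queue 5;
 * skb_txq() recovers 10 >> 1 == 5 and is_ctrl_pkt() sees bit 0 == 0.
 */
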
3125 if (adap->tids.nsftids) in uld_send()
3127 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); in uld_send()
3130 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in uld_send()
3137 txq = &txq_info->uldtxq[idx]; in uld_send()
3142 * t4_ofld_send - send an offload packet
3147 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3148 * should be sent as regular or control, bits 1-15 select the queue.
3161 * cxgb4_ofld_send - send an offload packet
3178 int left = (void *)q->stat - pos; in inline_tx_header()
3186 memcpy(q->desc, src + left, length - left); in inline_tx_header()
3187 pos = (void *)q->desc + (length - left); in inline_tx_header()
3189 /* 0-pad to multiple of 16 */ in inline_tx_header()
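
/*
 * Illustrative sketch (not part of the driver): the fragment above copies an
 * immediate work request into a descriptor ring, wrapping at the end of the
 * ring and zero-padding up to a 16-byte boundary.  A standalone version with
 * hypothetical names, assuming the ring size is a multiple of 16 and
 * len < size, might look like this:
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t ring_copy_padded(uint8_t *ring, size_t size, size_t pos,
			       const uint8_t *src, size_t len)
{
	size_t left = size - pos;          /* bytes until the end of the ring */

	if (len <= left) {
		memcpy(ring + pos, src, len);
		pos += len;
	} else {
		memcpy(ring + pos, src, left);          /* first part up to the end */
		memcpy(ring, src + left, len - left);   /* remainder from the start */
		pos = len - left;
	}
	while (pos & 15)                   /* 0-pad to a multiple of 16 */
		ring[pos++] = 0;
	return pos == size ? 0 : pos;      /* new write offset */
}
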
3199 * ofld_xmit_direct - copy a WR into offload queue
3200 * @q: the Tx offload queue
3213 /* Use the lower limit as the cut-off */ in ofld_xmit_direct()
3223 if (!spin_trylock(&q->sendq.lock)) in ofld_xmit_direct()
3226 if (q->full || !skb_queue_empty(&q->sendq) || in ofld_xmit_direct()
3227 q->service_ofldq_running) { in ofld_xmit_direct()
3228 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
3232 credits = txq_avail(&q->q) - ndesc; in ofld_xmit_direct()
3233 pos = (u64 *)&q->q.desc[q->q.pidx]; in ofld_xmit_direct()
3235 /* ofldtxq_stop modifies WR header in-situ */ in ofld_xmit_direct()
3236 inline_tx_header(src, &q->q, pos, len); in ofld_xmit_direct()
3239 txq_advance(&q->q, ndesc); in ofld_xmit_direct()
3240 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ofld_xmit_direct()
3242 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
3257 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in cxgb4_immdata_send()
3263 txq = &txq_info->uldtxq[idx]; in cxgb4_immdata_send()
3272 * t4_crypto_send - send crypto packet
3277 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3278 * should be sent as regular or control, bits 1-15 select the queue.
3291 * cxgb4_crypto_send - send crypto packet
3310 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
3311 gl->frags[0].offset + offset, in copy_frags()
3312 gl->frags[0].size - offset); in copy_frags()
3313 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
3314 for (i = 1; i < gl->nfrags; i++) in copy_frags()
3315 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
3316 gl->frags[i].offset, in copy_frags()
3317 gl->frags[i].size); in copy_frags()
3320 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
3324 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3342 if (gl->tot_len <= RX_COPY_THRES) { in cxgb4_pktgl_to_skb()
3343 skb = dev_alloc_skb(gl->tot_len); in cxgb4_pktgl_to_skb()
3346 __skb_put(skb, gl->tot_len); in cxgb4_pktgl_to_skb()
3347 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); in cxgb4_pktgl_to_skb()
3353 skb_copy_to_linear_data(skb, gl->va, pull_len); in cxgb4_pktgl_to_skb()
3356 skb->len = gl->tot_len; in cxgb4_pktgl_to_skb()
3357 skb->data_len = skb->len - pull_len; in cxgb4_pktgl_to_skb()
3358 skb->truesize += skb->data_len; in cxgb4_pktgl_to_skb()
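
/*
 * Illustrative sketch (not part of the driver): cxgb4_pktgl_to_skb() above
 * copies small packets entirely into the skb's linear area and, for larger
 * ones, copies only a fixed header pull while leaving the rest in page
 * fragments.  The size decision alone, with hypothetical tunables standing
 * in for the driver's thresholds, reduces to:
 */
static unsigned int linear_copy_len(unsigned int tot_len,
				    unsigned int copy_thres,
				    unsigned int pull_len)
{
	/* whole packet if it is small enough, otherwise just the header pull */
	return tot_len <= copy_thres ? tot_len : pull_len;
}
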
3365 * t4_pktgl_free - free a packet gather list
3376 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) in t4_pktgl_free()
3377 put_page(p->page); in t4_pktgl_free()
3395 if (is_t4(adap->params.chip)) in handle_trace_pkt()
3401 skb->protocol = htons(0xffff); in handle_trace_pkt()
3402 skb->dev = adap->port[0]; in handle_trace_pkt()
3408 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3413 * Every ingress queue entry has a 60-bit timestamp; convert that timestamp
3421 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); in cxgb4_sgetim_to_hwtstamp()
3423 ns = div_u64(tmp, adap->params.vpd.cclk); in cxgb4_sgetim_to_hwtstamp()
3426 hwtstamps->hwtstamp = ns_to_ktime(ns); in cxgb4_sgetim_to_hwtstamp()
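
/*
 * Illustrative sketch (not part of the driver): the conversion above turns an
 * SGE timestamp in core-clock ticks into nanoseconds with a rounded division.
 * Assuming the clock value is expressed in kHz (which the *1000*1000 scaling
 * suggests), a standalone helper with a hypothetical name would be:
 */
#include <stdint.h>

static uint64_t ticks_to_ns(uint64_t ticks, uint32_t cclk_khz)
{
	/* ns = ticks * 10^6 / cclk_khz, rounded to the nearest integer
	 * (overflow is possible for very large tick counts) */
	return (ticks * 1000000ULL + cclk_khz / 2) / cclk_khz;
}
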
3432 struct adapter *adapter = rxq->rspq.adap; in do_gro()
3433 struct sge *s = &adapter->sge; in do_gro()
3438 skb = napi_get_frags(&rxq->rspq.napi); in do_gro()
3441 rxq->stats.rx_drops++; in do_gro()
3445 copy_frags(skb, gl, s->pktshift); in do_gro()
3447 skb->csum_level = 1; in do_gro()
3448 skb->len = gl->tot_len - s->pktshift; in do_gro()
3449 skb->data_len = skb->len; in do_gro()
3450 skb->truesize += skb->data_len; in do_gro()
3451 skb->ip_summed = CHECKSUM_UNNECESSARY; in do_gro()
3452 skb_record_rx_queue(skb, rxq->rspq.idx); in do_gro()
3453 pi = netdev_priv(skb->dev); in do_gro()
3454 if (pi->rxtstamp) in do_gro()
3456 gl->sgetstamp); in do_gro()
3457 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) in do_gro()
3458 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in do_gro()
3461 if (unlikely(pkt->vlan_ex)) { in do_gro()
3462 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in do_gro()
3463 rxq->stats.vlan_ex++; in do_gro()
3465 ret = napi_gro_frags(&rxq->rspq.napi); in do_gro()
3467 rxq->stats.lro_pkts++; in do_gro()
3469 rxq->stats.lro_merged++; in do_gro()
3470 rxq->stats.pkts++; in do_gro()
3471 rxq->stats.rx_cso++; in do_gro()
3481 * t4_systim_to_hwstamp - read hardware time stamp
3496 cpl = (struct cpl_rx_mps_pkt *)skb->data; in t4_systim_to_hwstamp()
3497 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & in t4_systim_to_hwstamp()
3501 data = skb->data + sizeof(*cpl); in t4_systim_to_hwstamp()
3503 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; in t4_systim_to_hwstamp()
3504 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) in t4_systim_to_hwstamp()
3509 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); in t4_systim_to_hwstamp()
3515 * t4_rx_hststamp - Recv PTP Event Message
3529 !is_t4(adapter->params.chip))) { in t4_rx_hststamp()
3533 rxq->stats.rx_drops++; in t4_rx_hststamp()
3541 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3546 * Read hardware timestamp for the loopback PTP Tx event message
3553 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { in t4_tx_hststamp()
3562 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3563 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3568 * we configure the Ethernet TX Queues to send CIDX Updates to the
3574 * of TX Data outstanding before receiving DMA Completions.
3580 u8 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3581 struct port_info *pi = netdev_priv(rspq->netdev); in t4_tx_completion_handler()
3582 struct adapter *adapter = rspq->adap; in t4_tx_completion_handler()
3583 struct sge *s = &adapter->sge; in t4_tx_completion_handler()
3592 ((const struct cpl_fw4_msg *)rsp)->type == in t4_tx_completion_handler()
3595 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3605 txq = &s->ethtxq[pi->first_qset + rspq->idx]; in t4_tx_completion_handler()
3611 * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value in t4_tx_completion_handler()
3612 * stored in the Status Page at the end of the TX Queue. It's easiest in t4_tx_completion_handler()
3618 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { in t4_tx_completion_handler()
3622 WRITE_ONCE(txq->q.stat->cidx, egr->cidx); in t4_tx_completion_handler()
3625 t4_sge_eth_txq_egress_update(adapter, txq, -1); in t4_tx_completion_handler()
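
/*
 * Illustrative sketch (not part of the driver): reclaiming against the CIDX
 * value in the status page boils down to a modular distance between the
 * hardware consumer index and the software consumer index.  A standalone
 * helper with hypothetical names:
 */
static unsigned int ring_dist(unsigned int hw_cidx, unsigned int sw_cidx,
			      unsigned int size)
{
	int delta = (int)hw_cidx - (int)sw_cidx;

	/* negative means the hardware index has wrapped past the end */
	return delta < 0 ? (unsigned int)(delta + (int)size) : (unsigned int)delta;
}
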
3630 struct adapter *adap = pi->adapter; in cxgb4_validate_lb_pkt()
3632 struct sge *s = &adap->sge; in cxgb4_validate_lb_pkt()
3637 netdev = adap->port[pi->port_id]; in cxgb4_validate_lb_pkt()
3638 lb = &pi->ethtool_lb; in cxgb4_validate_lb_pkt()
3639 data = si->va + s->pktshift; in cxgb4_validate_lb_pkt()
3642 if (!ether_addr_equal(data + i, netdev->dev_addr)) in cxgb4_validate_lb_pkt()
3643 return -1; in cxgb4_validate_lb_pkt()
3647 lb->result = -EIO; in cxgb4_validate_lb_pkt()
3649 complete(&lb->completion); in cxgb4_validate_lb_pkt()
3654 * t4_ethrx_handler - process an ingress ethernet packet
3668 struct adapter *adapter = q->adap; in t4_ethrx_handler()
3669 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
3670 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
3676 pi = netdev_priv(q->netdev); in t4_ethrx_handler()
3677 /* If we're looking at TX Queue CIDX Update, handle that separately in t4_ethrx_handler()
3687 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
3691 if (q->adap->params.tp.rx_pkt_encap) { in t4_ethrx_handler()
3692 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); in t4_ethrx_handler()
3693 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); in t4_ethrx_handler()
3695 err_vec = be16_to_cpu(pkt->err_vec); in t4_ethrx_handler()
3698 csum_ok = pkt->csum_calc && !err_vec && in t4_ethrx_handler()
3699 (q->netdev->features & NETIF_F_RXCSUM); in t4_ethrx_handler()
3702 rxq->stats.bad_rx_pkts++; in t4_ethrx_handler()
3704 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { in t4_ethrx_handler()
3710 if (((pkt->l2info & htonl(RXF_TCP_F)) || in t4_ethrx_handler()
3712 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { in t4_ethrx_handler()
3720 rxq->stats.rx_drops++; in t4_ethrx_handler()
3725 if (unlikely(pi->ptp_enable)) { in t4_ethrx_handler()
3731 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ in t4_ethrx_handler()
3733 /* Handle the PTP Event Tx Loopback packet */ in t4_ethrx_handler()
3734 if (unlikely(pi->ptp_enable && !ret && in t4_ethrx_handler()
3735 (pkt->l2info & htonl(RXF_UDP_F)) && in t4_ethrx_handler()
3737 if (!t4_tx_hststamp(adapter, skb, q->netdev)) in t4_ethrx_handler()
3741 skb->protocol = eth_type_trans(skb, q->netdev); in t4_ethrx_handler()
3742 skb_record_rx_queue(skb, q->idx); in t4_ethrx_handler()
3743 if (skb->dev->features & NETIF_F_RXHASH) in t4_ethrx_handler()
3744 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in t4_ethrx_handler()
3747 rxq->stats.pkts++; in t4_ethrx_handler()
3749 if (pi->rxtstamp) in t4_ethrx_handler()
3750 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), in t4_ethrx_handler()
3751 si->sgetstamp); in t4_ethrx_handler()
3752 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { in t4_ethrx_handler()
3753 if (!pkt->ip_frag) { in t4_ethrx_handler()
3754 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3755 rxq->stats.rx_cso++; in t4_ethrx_handler()
3756 } else if (pkt->l2info & htonl(RXF_IP_F)) { in t4_ethrx_handler()
3757 __sum16 c = (__force __sum16)pkt->csum; in t4_ethrx_handler()
3758 skb->csum = csum_unfold(c); in t4_ethrx_handler()
3761 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3762 skb->csum_level = 1; in t4_ethrx_handler()
3764 skb->ip_summed = CHECKSUM_COMPLETE; in t4_ethrx_handler()
3766 rxq->stats.rx_cso++; in t4_ethrx_handler()
3774 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { in t4_ethrx_handler()
3775 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && in t4_ethrx_handler()
3776 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { in t4_ethrx_handler()
3777 if (q->adap->params.tp.rx_pkt_encap) in t4_ethrx_handler()
3783 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3791 if (unlikely(pkt->vlan_ex)) { in t4_ethrx_handler()
3792 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in t4_ethrx_handler()
3793 rxq->stats.vlan_ex++; in t4_ethrx_handler()
3795 skb_mark_napi_id(skb, &q->napi); in t4_ethrx_handler()
3801 * restore_rx_bufs - put back a packet's Rx buffers
3820 while (frags--) { in restore_rx_bufs()
3821 if (q->cidx == 0) in restore_rx_bufs()
3822 q->cidx = q->size - 1; in restore_rx_bufs()
3824 q->cidx--; in restore_rx_bufs()
3825 d = &q->sdesc[q->cidx]; in restore_rx_bufs()
3826 d->page = si->frags[frags].page; in restore_rx_bufs()
3827 d->dma_addr |= RX_UNMAPPED_BUF; in restore_rx_bufs()
3828 q->avail++; in restore_rx_bufs()
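
/*
 * Illustrative sketch (not part of the driver): restore_rx_bufs() above walks
 * the free-list consumer index backwards, one slot per restored buffer,
 * wrapping from 0 back to the last entry.  The index step in isolation, with
 * a hypothetical name:
 */
static unsigned int cidx_dec(unsigned int cidx, unsigned int size)
{
	return cidx ? cidx - 1 : size - 1;   /* wrap 0 -> size - 1 */
}
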
3833 * is_new_response - check if a response is newly written
3843 return (r->type_gen >> RSPD_GEN_S) == q->gen; in is_new_response()
3847 * rspq_next - advance to the next entry in a response queue
3854 q->cur_desc = (void *)q->cur_desc + q->iqe_len; in rspq_next()
3855 if (unlikely(++q->cidx == q->size)) { in rspq_next()
3856 q->cidx = 0; in rspq_next()
3857 q->gen ^= 1; in rspq_next()
3858 q->cur_desc = q->desc; in rspq_next()
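
/*
 * Illustrative sketch (not part of the driver): is_new_response() and
 * rspq_next() above implement the usual generation-bit scheme for a response
 * ring: an entry is valid when its gen bit matches the gen the consumer
 * expects, and the expected gen flips every time the consumer index wraps.
 * A miniature standalone version with hypothetical names:
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_rspq {
	unsigned int cidx;   /* consumer index */
	unsigned int size;   /* number of entries in the ring */
	uint8_t gen;         /* generation bit expected in the next entry */
};

static bool demo_is_new(const struct demo_rspq *q, uint8_t entry_gen)
{
	return entry_gen == q->gen;
}

static void demo_next(struct demo_rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;   /* wrapped: entries written next use the other gen */
	}
}
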
3863 * process_responses - process responses from an SGE response queue
3881 struct adapter *adapter = q->adap; in process_responses()
3882 struct sge *s = &adapter->sge; in process_responses()
3885 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_responses()
3887 if (q->flush_handler) in process_responses()
3888 q->flush_handler(q); in process_responses()
3893 rsp_type = RSPD_TYPE_G(rc->type_gen); in process_responses()
3898 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; in process_responses()
3901 if (likely(q->offset > 0)) { in process_responses()
3902 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
3903 q->offset = 0; in process_responses()
3911 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; in process_responses()
3913 fp->page = rsd->page; in process_responses()
3914 fp->offset = q->offset; in process_responses()
3915 fp->size = min(bufsz, len); in process_responses()
3916 len -= fp->size; in process_responses()
3919 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
3923 be64_to_cpu(rc->last_flit)); in process_responses()
3928 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
3930 fp->size, DMA_FROM_DEVICE); in process_responses()
3937 ret = q->handler(q, q->cur_desc, &si); in process_responses()
3939 q->offset += ALIGN(fp->size, s->fl_align); in process_responses()
3941 restore_rx_bufs(&si, &rxq->fl, frags); in process_responses()
3943 ret = q->handler(q, q->cur_desc, NULL); in process_responses()
3945 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); in process_responses()
3950 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); in process_responses()
3955 budget_left--; in process_responses()
3958 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) in process_responses()
3959 __refill_fl(q->adap, &rxq->fl); in process_responses()
3960 return budget - budget_left; in process_responses()
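
/*
 * Illustrative sketch (not part of the driver): the loop above splits one
 * received packet across consecutive free-list buffers, giving each fragment
 * at most one buffer's worth of data.  The length-splitting step alone, with
 * hypothetical names:
 */
static unsigned int split_into_frags(unsigned int len, unsigned int bufsz,
				     unsigned int *sizes, unsigned int max_frags)
{
	unsigned int n = 0;

	while (len && n < max_frags) {
		sizes[n] = len < bufsz ? len : bufsz;   /* fp->size = min(bufsz, len) */
		len -= sizes[n];
		n++;
	}
	return n;   /* number of fragments used */
}
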
3964 * napi_rx_handler - the NAPI handler for Rx processing
3971 * is not a concern at all with MSI-X as non-data interrupts then have
3986 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); in napi_rx_handler()
3988 if (q->adaptive_rx) { in napi_rx_handler()
3993 timer_index = timer_index - 1; in napi_rx_handler()
3995 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); in napi_rx_handler()
3996 q->next_intr_params = in napi_rx_handler()
3999 params = q->next_intr_params; in napi_rx_handler()
4001 params = q->next_intr_params; in napi_rx_handler()
4002 q->next_intr_params = q->intr_params; in napi_rx_handler()
4012 if (unlikely(q->bar2_addr == NULL)) { in napi_rx_handler()
4013 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
4014 val | INGRESSQID_V((u32)q->cntxt_id)); in napi_rx_handler()
4016 writel(val | INGRESSQID_V(q->bar2_qid), in napi_rx_handler()
4017 q->bar2_addr + SGE_UDB_GTS); in napi_rx_handler()
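
/*
 * Illustrative sketch (not part of the driver): with adaptive_rx enabled the
 * handler above nudges the interrupt holdoff timer index up or down by one
 * and clamps it to the valid register range.  A standalone step, where the
 * work threshold is a hypothetical stand-in for the driver's heuristic:
 */
static int adjust_timer_idx(int idx, int work_done, int work_thresh, int nregs)
{
	idx += (work_done > work_thresh) ? 1 : -1;   /* busier poll => longer holdoff */
	if (idx < 0)
		idx = 0;
	if (idx > nregs - 1)
		idx = nregs - 1;
	return idx;
}
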
4029 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4030 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; in cxgb4_ethofld_restart()
4032 pktcount += eosw_txq->ndesc; in cxgb4_ethofld_restart()
4035 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), in cxgb4_ethofld_restart()
4037 eosw_txq->inuse -= pktcount; in cxgb4_ethofld_restart()
4043 ethofld_xmit(eosw_txq->netdev, eosw_txq); in cxgb4_ethofld_restart()
4044 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4047 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
4052 * Process an ETHOFLD Tx completion. Increment the cidx here, but in cxgb4_ethofld_rx_handler()
4058 u8 opcode = ((const struct rss_header *)rsp)->opcode; in cxgb4_ethofld_rx_handler()
4073 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - in cxgb4_ethofld_rx_handler()
4074 q->adap->tids.eotid_base; in cxgb4_ethofld_rx_handler()
4075 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); in cxgb4_ethofld_rx_handler()
4079 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_rx_handler()
4083 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4084 credits = cpl->credits; in cxgb4_ethofld_rx_handler()
4086 skb = eosw_txq->desc[eosw_txq->cidx].skb; in cxgb4_ethofld_rx_handler()
4090 if (unlikely((eosw_txq->state == in cxgb4_ethofld_rx_handler()
4092 eosw_txq->state == in cxgb4_ethofld_rx_handler()
4094 eosw_txq->cidx == eosw_txq->flowc_idx)) { in cxgb4_ethofld_rx_handler()
4095 flits = DIV_ROUND_UP(skb->len, 8); in cxgb4_ethofld_rx_handler()
4096 if (eosw_txq->state == in cxgb4_ethofld_rx_handler()
4098 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; in cxgb4_ethofld_rx_handler()
4100 eosw_txq->state = CXGB4_EO_STATE_CLOSED; in cxgb4_ethofld_rx_handler()
4101 complete(&eosw_txq->completion); in cxgb4_ethofld_rx_handler()
4103 hdr_len = eth_get_headlen(eosw_txq->netdev, in cxgb4_ethofld_rx_handler()
4104 skb->data, in cxgb4_ethofld_rx_handler()
4106 flits = ethofld_calc_tx_flits(q->adap, skb, in cxgb4_ethofld_rx_handler()
4109 eosw_txq_advance_index(&eosw_txq->cidx, 1, in cxgb4_ethofld_rx_handler()
4110 eosw_txq->ndesc); in cxgb4_ethofld_rx_handler()
4112 credits -= wrlen16; in cxgb4_ethofld_rx_handler()
4115 eosw_txq->cred += cpl->credits; in cxgb4_ethofld_rx_handler()
4116 eosw_txq->ncompl--; in cxgb4_ethofld_rx_handler()
4118 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4120 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx, in cxgb4_ethofld_rx_handler()
4123 tasklet_schedule(&eosw_txq->qresume_tsk); in cxgb4_ethofld_rx_handler()
4131 * The MSI-X interrupt handler for an SGE response queue.
4137 napi_schedule(&q->napi); in t4_sge_intr_msix()
4149 struct sge_rspq *q = &adap->sge.intrq; in process_intrq()
4152 spin_lock(&adap->sge.intrq_lock); in process_intrq()
4154 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_intrq()
4159 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { in process_intrq()
4160 unsigned int qid = ntohl(rc->pldbuflen_qid); in process_intrq()
4162 qid -= adap->sge.ingr_start; in process_intrq()
4163 napi_schedule(&adap->sge.ingr_map[qid]->napi); in process_intrq()
4169 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); in process_intrq()
4174 if (unlikely(q->bar2_addr == NULL)) { in process_intrq()
4176 val | INGRESSQID_V(q->cntxt_id)); in process_intrq()
4178 writel(val | INGRESSQID_V(q->bar2_qid), in process_intrq()
4179 q->bar2_addr + SGE_UDB_GTS); in process_intrq()
4182 spin_unlock(&adap->sge.intrq_lock); in process_intrq()
4194 if (adap->flags & CXGB4_MASTER_PF) in t4_intr_msi()
4210 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | in t4_intr_intx()
4217 * t4_intr_handler - select the top-level interrupt handler
4220 * Selects the top-level interrupt handler based on the type of interrupts
4221 * (MSI-X, MSI, or INTx).
4225 if (adap->flags & CXGB4_USING_MSIX) in t4_intr_handler()
4227 if (adap->flags & CXGB4_USING_MSI) in t4_intr_handler()
4237 struct sge *s = &adap->sge; in sge_rx_timer_cb()
4239 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_rx_timer_cb()
4240 for (m = s->starving_fl[i]; m; m &= m - 1) { in sge_rx_timer_cb()
4243 struct sge_fl *fl = s->egr_map[id]; in sge_rx_timer_cb()
4245 clear_bit(id, s->starving_fl); in sge_rx_timer_cb()
4250 if (napi_schedule(&rxq->rspq.napi)) in sge_rx_timer_cb()
4251 fl->starving++; in sge_rx_timer_cb()
4253 set_bit(id, s->starving_fl); in sge_rx_timer_cb()
4260 if (!(adap->flags & CXGB4_MASTER_PF)) in sge_rx_timer_cb()
4263 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); in sge_rx_timer_cb()
4266 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); in sge_rx_timer_cb()
4272 struct sge *s = &adap->sge; in sge_tx_timer_cb()
4276 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_tx_timer_cb()
4277 for (m = s->txq_maperr[i]; m; m &= m - 1) { in sge_tx_timer_cb()
4279 struct sge_uld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb()
4281 clear_bit(id, s->txq_maperr); in sge_tx_timer_cb()
4282 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
4285 if (!is_t4(adap->params.chip)) { in sge_tx_timer_cb()
4286 struct sge_eth_txq *q = &s->ptptxq; in sge_tx_timer_cb()
4289 spin_lock(&adap->ptp_lock); in sge_tx_timer_cb()
4290 avail = reclaimable(&q->q); in sge_tx_timer_cb()
4293 free_tx_desc(adap, &q->q, avail, false); in sge_tx_timer_cb()
4294 q->q.in_use -= avail; in sge_tx_timer_cb()
4296 spin_unlock(&adap->ptp_lock); in sge_tx_timer_cb()
4300 i = s->ethtxq_rover; in sge_tx_timer_cb()
4302 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], in sge_tx_timer_cb()
4307 if (++i >= s->ethqsets) in sge_tx_timer_cb()
4309 } while (i != s->ethtxq_rover); in sge_tx_timer_cb()
4310 s->ethtxq_rover = i; in sge_tx_timer_cb()
4318 /* We reclaimed all reclaimable TX Descriptors, so reschedule in sge_tx_timer_cb()
4324 mod_timer(&s->tx_timer, jiffies + period); in sge_tx_timer_cb()
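
/*
 * Illustrative sketch (not part of the driver): the Tx timer above scans the
 * Ethernet Tx queues round-robin from a saved rover position, spending a
 * bounded budget and remembering where to resume next time.  A standalone
 * version with a hypothetical visit() callback returning the work it did:
 */
static unsigned int rr_scan(unsigned int rover, unsigned int nqueues,
			    int budget, int (*visit)(unsigned int qidx, int budget))
{
	unsigned int i = rover;

	do {
		budget -= visit(i, budget);
		if (budget <= 0)
			break;
		if (++i >= nqueues)
			i = 0;
	} while (i != rover);
	return i;   /* new rover position */
}
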
4328 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4353 return adapter->bar2 + bar2_qoffset; in bar2_address()
4356 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4357 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4366 struct sge *s = &adap->sge; in t4_sge_alloc_rxq()
4368 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); in t4_sge_alloc_rxq()
4371 iq->size = roundup(iq->size, 16); in t4_sge_alloc_rxq()
4373 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
4374 &iq->phys_addr, NULL, 0, in t4_sge_alloc_rxq()
4375 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4376 if (!iq->desc) in t4_sge_alloc_rxq()
4377 return -ENOMEM; in t4_sge_alloc_rxq()
4382 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); in t4_sge_alloc_rxq()
4386 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | in t4_sge_alloc_rxq()
4390 -intr_idx - 1)); in t4_sge_alloc_rxq()
4391 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | in t4_sge_alloc_rxq()
4393 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | in t4_sge_alloc_rxq()
4394 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); in t4_sge_alloc_rxq()
4395 c.iqsize = htons(iq->size); in t4_sge_alloc_rxq()
4396 c.iqaddr = cpu_to_be64(iq->phys_addr); in t4_sge_alloc_rxq()
4404 CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_rxq()
4411 * (fl_starve_thres - 1). in t4_sge_alloc_rxq()
4413 if (fl->size < s->fl_starve_thres - 1 + 2 * 8) in t4_sge_alloc_rxq()
4414 fl->size = s->fl_starve_thres - 1 + 2 * 8; in t4_sge_alloc_rxq()
4415 fl->size = roundup(fl->size, 8); in t4_sge_alloc_rxq()
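
/*
 * Illustrative sketch (not driver code): the two statements above enforce a
 * minimum free-list size relative to the starvation threshold plus two full
 * descriptors' worth of buffers (8 buffers per descriptor), then round up to
 * whole descriptors.  The same sizing rule as a standalone helper with a
 * hypothetical name:
 */
static unsigned int fl_size_fixup(unsigned int requested,
				  unsigned int starve_thres)
{
	unsigned int min_size = starve_thres - 1 + 2 * 8;

	if (requested < min_size)
		requested = min_size;
	return (requested + 7) & ~7u;   /* round up to a multiple of 8 */
}
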
4416 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), in t4_sge_alloc_rxq()
4417 sizeof(struct rx_sw_desc), &fl->addr, in t4_sge_alloc_rxq()
4418 &fl->sdesc, s->stat_len, in t4_sge_alloc_rxq()
4419 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4420 if (!fl->desc) in t4_sge_alloc_rxq()
4423 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_rxq()
4437 * Free List pointers are provided, so we use a 128-byte Fetch in t4_sge_alloc_rxq()
4439 * the smaller 64-byte value there). in t4_sge_alloc_rxq()
4449 c.fl0addr = cpu_to_be64(fl->addr); in t4_sge_alloc_rxq()
4452 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_rxq()
4456 netif_napi_add(dev, &iq->napi, napi_rx_handler); in t4_sge_alloc_rxq()
4457 iq->cur_desc = iq->desc; in t4_sge_alloc_rxq()
4458 iq->cidx = 0; in t4_sge_alloc_rxq()
4459 iq->gen = 1; in t4_sge_alloc_rxq()
4460 iq->next_intr_params = iq->intr_params; in t4_sge_alloc_rxq()
4461 iq->cntxt_id = ntohs(c.iqid); in t4_sge_alloc_rxq()
4462 iq->abs_id = ntohs(c.physiqid); in t4_sge_alloc_rxq()
4463 iq->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4464 iq->cntxt_id, in t4_sge_alloc_rxq()
4466 &iq->bar2_qid); in t4_sge_alloc_rxq()
4467 iq->size--; /* subtract status entry */ in t4_sge_alloc_rxq()
4468 iq->netdev = dev; in t4_sge_alloc_rxq()
4469 iq->handler = hnd; in t4_sge_alloc_rxq()
4470 iq->flush_handler = flush_hnd; in t4_sge_alloc_rxq()
4472 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); in t4_sge_alloc_rxq()
4473 skb_queue_head_init(&iq->lro_mgr.lroq); in t4_sge_alloc_rxq()
4475 /* set offset to -1 to distinguish ingress queues without FL */ in t4_sge_alloc_rxq()
4476 iq->offset = fl ? 0 : -1; in t4_sge_alloc_rxq()
4478 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; in t4_sge_alloc_rxq()
4481 fl->cntxt_id = ntohs(c.fl0id); in t4_sge_alloc_rxq()
4482 fl->avail = fl->pend_cred = 0; in t4_sge_alloc_rxq()
4483 fl->pidx = fl->cidx = 0; in t4_sge_alloc_rxq()
4484 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; in t4_sge_alloc_rxq()
4485 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; in t4_sge_alloc_rxq()
4490 fl->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4491 fl->cntxt_id, in t4_sge_alloc_rxq()
4493 &fl->bar2_qid); in t4_sge_alloc_rxq()
4502 * a lot easier to fix in one place ... For now we do something very in t4_sge_alloc_rxq()
4505 if (!is_t4(adap->params.chip) && cong >= 0) { in t4_sge_alloc_rxq()
4508 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; in t4_sge_alloc_rxq()
4512 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); in t4_sge_alloc_rxq()
4524 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in t4_sge_alloc_rxq()
4527 dev_warn(adap->pdev_dev, "Failed to set Congestion" in t4_sge_alloc_rxq()
4529 iq->cntxt_id, -ret); in t4_sge_alloc_rxq()
4535 ret = -ENOMEM; in t4_sge_alloc_rxq()
4537 if (iq->desc) { in t4_sge_alloc_rxq()
4538 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, in t4_sge_alloc_rxq()
4539 iq->desc, iq->phys_addr); in t4_sge_alloc_rxq()
4540 iq->desc = NULL; in t4_sge_alloc_rxq()
4542 if (fl && fl->desc) { in t4_sge_alloc_rxq()
4543 kfree(fl->sdesc); in t4_sge_alloc_rxq()
4544 fl->sdesc = NULL; in t4_sge_alloc_rxq()
4545 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), in t4_sge_alloc_rxq()
4546 fl->desc, fl->addr); in t4_sge_alloc_rxq()
4547 fl->desc = NULL; in t4_sge_alloc_rxq()
4554 q->cntxt_id = id; in init_txq()
4555 q->bar2_addr = bar2_address(adap, in init_txq()
4556 q->cntxt_id, in init_txq()
4558 &q->bar2_qid); in init_txq()
4559 q->in_use = 0; in init_txq()
4560 q->cidx = q->pidx = 0; in init_txq()
4561 q->stops = q->restarts = 0; in init_txq()
4562 q->stat = (void *)&q->desc[q->size]; in init_txq()
4563 spin_lock_init(&q->db_lock); in init_txq()
4564 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
4568 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4570 * @txq: the SGE Ethernet TX Queue to initialize
4572 * @netdevq: the corresponding Linux TX Queue
4574 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
4580 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_eth_txq()
4582 struct sge *s = &adap->sge; in t4_sge_alloc_eth_txq()
4587 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
4589 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
4591 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
4593 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
4594 return -ENOMEM; in t4_sge_alloc_eth_txq()
4599 FW_EQ_ETH_CMD_PFN_V(adap->pf) | in t4_sge_alloc_eth_txq()
4604 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer in t4_sge_alloc_eth_txq()
4606 * Index Updates on the TX Queue. Otherwise we have the Hardware in t4_sge_alloc_eth_txq()
4608 * TX Queue. in t4_sge_alloc_eth_txq()
4613 FW_EQ_ETH_CMD_VIID_V(pi->viid)); in t4_sge_alloc_eth_txq()
4619 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_eth_txq()
4632 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4636 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE in t4_sge_alloc_eth_txq()
4643 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); in t4_sge_alloc_eth_txq()
4645 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_eth_txq()
4647 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
4648 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
4649 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_eth_txq()
4651 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4652 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
4656 txq->q.q_type = CXGB4_TXQ_ETH; in t4_sge_alloc_eth_txq()
4657 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
4658 txq->txq = netdevq; in t4_sge_alloc_eth_txq()
4659 txq->tso = 0; in t4_sge_alloc_eth_txq()
4660 txq->uso = 0; in t4_sge_alloc_eth_txq()
4661 txq->tx_cso = 0; in t4_sge_alloc_eth_txq()
4662 txq->vlan_ins = 0; in t4_sge_alloc_eth_txq()
4663 txq->mapping_err = 0; in t4_sge_alloc_eth_txq()
4664 txq->dbqt = dbqt; in t4_sge_alloc_eth_txq()
4673 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ctrl_txq()
4675 struct sge *s = &adap->sge; in t4_sge_alloc_ctrl_txq()
4680 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
4682 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
4683 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
4684 NULL, 0, dev_to_node(adap->pdev_dev)); in t4_sge_alloc_ctrl_txq()
4685 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
4686 return -ENOMEM; in t4_sge_alloc_ctrl_txq()
4690 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ctrl_txq()
4698 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ctrl_txq()
4707 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4709 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ctrl_txq()
4711 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ctrl_txq()
4713 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4714 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
4718 txq->q.q_type = CXGB4_TXQ_CTRL; in t4_sge_alloc_ctrl_txq()
4719 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
4720 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
4721 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_ctrl_txq()
4722 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); in t4_sge_alloc_ctrl_txq()
4723 txq->full = 0; in t4_sge_alloc_ctrl_txq()
4736 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in t4_sge_mod_ctrl_txq()
4742 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ofld_txq()
4744 struct sge *s = &adap->sge; in t4_sge_alloc_ofld_txq()
4750 nentries = q->size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ofld_txq()
4751 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), in t4_sge_alloc_ofld_txq()
4752 sizeof(struct tx_sw_desc), &q->phys_addr, in t4_sge_alloc_ofld_txq()
4753 &q->sdesc, s->stat_len, NUMA_NO_NODE); in t4_sge_alloc_ofld_txq()
4754 if (!q->desc) in t4_sge_alloc_ofld_txq()
4755 return -ENOMEM; in t4_sge_alloc_ofld_txq()
4765 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ofld_txq()
4771 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ofld_txq()
4778 c.eqaddr = cpu_to_be64(q->phys_addr); in t4_sge_alloc_ofld_txq()
4780 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ofld_txq()
4782 kfree(q->sdesc); in t4_sge_alloc_ofld_txq()
4783 q->sdesc = NULL; in t4_sge_alloc_ofld_txq()
4784 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ofld_txq()
4786 q->desc, q->phys_addr); in t4_sge_alloc_ofld_txq()
4787 q->desc = NULL; in t4_sge_alloc_ofld_txq()
4805 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); in t4_sge_alloc_uld_txq()
4809 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_uld_txq()
4810 txq->adap = adap; in t4_sge_alloc_uld_txq()
4811 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_uld_txq()
4812 tasklet_setup(&txq->qresume_tsk, restart_ofldq); in t4_sge_alloc_uld_txq()
4813 txq->full = 0; in t4_sge_alloc_uld_txq()
4814 txq->mapping_err = 0; in t4_sge_alloc_uld_txq()
4823 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); in t4_sge_alloc_ethofld_txq()
4827 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_ethofld_txq()
4828 spin_lock_init(&txq->lock); in t4_sge_alloc_ethofld_txq()
4829 txq->adap = adap; in t4_sge_alloc_ethofld_txq()
4830 txq->tso = 0; in t4_sge_alloc_ethofld_txq()
4831 txq->uso = 0; in t4_sge_alloc_ethofld_txq()
4832 txq->tx_cso = 0; in t4_sge_alloc_ethofld_txq()
4833 txq->vlan_ins = 0; in t4_sge_alloc_ethofld_txq()
4834 txq->mapping_err = 0; in t4_sge_alloc_ethofld_txq()
4840 struct sge *s = &adap->sge; in free_txq()
4842 dma_free_coherent(adap->pdev_dev, in free_txq()
4843 q->size * sizeof(struct tx_desc) + s->stat_len, in free_txq()
4844 q->desc, q->phys_addr); in free_txq()
4845 q->cntxt_id = 0; in free_txq()
4846 q->sdesc = NULL; in free_txq()
4847 q->desc = NULL; in free_txq()
4853 struct sge *s = &adap->sge; in free_rspq_fl()
4854 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; in free_rspq_fl()
4856 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; in free_rspq_fl()
4857 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_rspq_fl()
4858 rq->cntxt_id, fl_id, 0xffff); in free_rspq_fl()
4859 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, in free_rspq_fl()
4860 rq->desc, rq->phys_addr); in free_rspq_fl()
4861 netif_napi_del(&rq->napi); in free_rspq_fl()
4862 rq->netdev = NULL; in free_rspq_fl()
4863 rq->cntxt_id = rq->abs_id = 0; in free_rspq_fl()
4864 rq->desc = NULL; in free_rspq_fl()
4867 free_rx_bufs(adap, fl, fl->avail); in free_rspq_fl()
4868 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, in free_rspq_fl()
4869 fl->desc, fl->addr); in free_rspq_fl()
4870 kfree(fl->sdesc); in free_rspq_fl()
4871 fl->sdesc = NULL; in free_rspq_fl()
4872 fl->cntxt_id = 0; in free_rspq_fl()
4873 fl->desc = NULL; in free_rspq_fl()
4878 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
4887 for ( ; n; n--, q++) in t4_free_ofld_rxqs()
4888 if (q->rspq.desc) in t4_free_ofld_rxqs()
4889 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
4890 q->fl.size ? &q->fl : NULL); in t4_free_ofld_rxqs()
4895 if (txq->q.desc) { in t4_sge_free_ethofld_txq()
4896 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, in t4_sge_free_ethofld_txq()
4897 txq->q.cntxt_id); in t4_sge_free_ethofld_txq()
4898 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in t4_sge_free_ethofld_txq()
4899 kfree(txq->q.sdesc); in t4_sge_free_ethofld_txq()
4900 free_txq(adap, &txq->q); in t4_sge_free_ethofld_txq()
4905 * t4_free_sge_resources - free SGE resources
4917 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4918 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4919 if (eq->rspq.desc) in t4_free_sge_resources()
4920 t4_iq_stop(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4922 eq->rspq.cntxt_id, in t4_free_sge_resources()
4923 eq->fl.size ? eq->fl.cntxt_id : 0xffff, in t4_free_sge_resources()
4927 /* clean up Ethernet Tx/Rx queues */ in t4_free_sge_resources()
4928 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4929 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4930 if (eq->rspq.desc) in t4_free_sge_resources()
4931 free_rspq_fl(adap, &eq->rspq, in t4_free_sge_resources()
4932 eq->fl.size ? &eq->fl : NULL); in t4_free_sge_resources()
4933 if (eq->msix) { in t4_free_sge_resources()
4934 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); in t4_free_sge_resources()
4935 eq->msix = NULL; in t4_free_sge_resources()
4938 etq = &adap->sge.ethtxq[i]; in t4_free_sge_resources()
4939 if (etq->q.desc) { in t4_free_sge_resources()
4940 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4941 etq->q.cntxt_id); in t4_free_sge_resources()
4942 __netif_tx_lock_bh(etq->txq); in t4_free_sge_resources()
4943 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4944 __netif_tx_unlock_bh(etq->txq); in t4_free_sge_resources()
4945 kfree(etq->q.sdesc); in t4_free_sge_resources()
4946 free_txq(adap, &etq->q); in t4_free_sge_resources()
4950 /* clean up control Tx queues */ in t4_free_sge_resources()
4951 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { in t4_free_sge_resources()
4952 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; in t4_free_sge_resources()
4954 if (cq->q.desc) { in t4_free_sge_resources()
4955 tasklet_kill(&cq->qresume_tsk); in t4_free_sge_resources()
4956 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4957 cq->q.cntxt_id); in t4_free_sge_resources()
4958 __skb_queue_purge(&cq->sendq); in t4_free_sge_resources()
4959 free_txq(adap, &cq->q); in t4_free_sge_resources()
4963 if (adap->sge.fw_evtq.desc) { in t4_free_sge_resources()
4964 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); in t4_free_sge_resources()
4965 if (adap->sge.fwevtq_msix_idx >= 0) in t4_free_sge_resources()
4967 adap->sge.fwevtq_msix_idx); in t4_free_sge_resources()
4970 if (adap->sge.nd_msix_idx >= 0) in t4_free_sge_resources()
4971 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); in t4_free_sge_resources()
4973 if (adap->sge.intrq.desc) in t4_free_sge_resources()
4974 free_rspq_fl(adap, &adap->sge.intrq, NULL); in t4_free_sge_resources()
4976 if (!is_t4(adap->params.chip)) { in t4_free_sge_resources()
4977 etq = &adap->sge.ptptxq; in t4_free_sge_resources()
4978 if (etq->q.desc) { in t4_free_sge_resources()
4979 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4980 etq->q.cntxt_id); in t4_free_sge_resources()
4981 spin_lock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4982 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4983 spin_unlock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4984 kfree(etq->q.sdesc); in t4_free_sge_resources()
4985 free_txq(adap, &etq->q); in t4_free_sge_resources()
4990 memset(adap->sge.egr_map, 0, in t4_free_sge_resources()
4991 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); in t4_free_sge_resources()
4996 adap->sge.ethtxq_rover = 0; in t4_sge_start()
4997 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); in t4_sge_start()
4998 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); in t4_sge_start()
5002 * t4_sge_stop - disable SGE operation
5012 struct sge *s = &adap->sge; in t4_sge_stop()
5014 if (s->rx_timer.function) in t4_sge_stop()
5015 del_timer_sync(&s->rx_timer); in t4_sge_stop()
5016 if (s->tx_timer.function) in t4_sge_stop()
5017 del_timer_sync(&s->tx_timer); in t4_sge_stop()
5022 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in t4_sge_stop()
5024 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
5026 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
5027 if (txq->q.desc) in t4_sge_stop()
5028 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5036 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; in t4_sge_stop()
5038 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
5040 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
5041 if (txq->q.desc) in t4_sge_stop()
5042 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5047 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { in t4_sge_stop()
5048 struct sge_ctrl_txq *cq = &s->ctrlq[i]; in t4_sge_stop()
5050 if (cq->q.desc) in t4_sge_stop()
5051 tasklet_kill(&cq->qresume_tsk); in t4_sge_stop()
5056 * t4_sge_init_soft - grab core SGE values needed by SGE code
5065 struct sge *s = &adap->sge; in t4_sge_init_soft()
5077 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); in t4_sge_init_soft()
5078 return -EINVAL; in t4_sge_init_soft()
5109 (fl_large_pg & (fl_large_pg-1)) != 0) { in t4_sge_init_soft()
5110 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", in t4_sge_init_soft()
5112 return -EINVAL; in t4_sge_init_soft()
5115 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; in t4_sge_init_soft()
5119 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", in t4_sge_init_soft()
5121 return -EINVAL; in t4_sge_init_soft()
5131 s->timer_val[0] = core_ticks_to_us(adap, in t4_sge_init_soft()
5133 s->timer_val[1] = core_ticks_to_us(adap, in t4_sge_init_soft()
5135 s->timer_val[2] = core_ticks_to_us(adap, in t4_sge_init_soft()
5137 s->timer_val[3] = core_ticks_to_us(adap, in t4_sge_init_soft()
5139 s->timer_val[4] = core_ticks_to_us(adap, in t4_sge_init_soft()
5141 s->timer_val[5] = core_ticks_to_us(adap, in t4_sge_init_soft()
5145 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); in t4_sge_init_soft()
5146 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); in t4_sge_init_soft()
5147 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); in t4_sge_init_soft()
5148 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); in t4_sge_init_soft()
5154 * t4_sge_init - initialize SGE
5157 * Perform low-level SGE code initialization needed every time after a
5162 struct sge *s = &adap->sge; in t4_sge_init()
5171 s->pktshift = PKTSHIFT_G(sge_control); in t4_sge_init()
5172 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; in t4_sge_init()
5174 s->fl_align = t4_fl_pkt_align(adap); in t4_sge_init()
5192 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { in t4_sge_init()
5203 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", in t4_sge_init()
5204 CHELSIO_CHIP_VERSION(adap->params.chip)); in t4_sge_init()
5205 return -EINVAL; in t4_sge_init()
5207 s->fl_starve_thres = 2*egress_threshold + 1; in t4_sge_init()
5209 t4_idma_monitor_init(adap, &s->idma_monitor); in t4_sge_init()
5211 /* Set up timers used for recurring callbacks to process RX and TX in t4_sge_init()
5214 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); in t4_sge_init()
5215 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); in t4_sge_init()
5217 spin_lock_init(&s->intrq_lock); in t4_sge_init()