// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2024 Cisco Systems, Inc. All rights reserved.

#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include "enic.h"
#include "enic_res.h"
#include "enic_rq.h"
#include "vnic_rq.h"
#include "cq_enet_desc.h"

#define ENIC_LARGE_PKT_THRESHOLD	1000

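/* Sort received bytes into small/large buckets; the split is used by the
 * adaptive interrupt coalescing logic.
 */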
static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

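/* Decode type, color, queue number and completed index from an RQ
 * completion descriptor.  The layout depends on the CQ entry size
 * (16, 32 or 64 bytes), but type_color is always the last byte.
 */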
static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
				u8 *color, u16 *q_number, u16 *completed_index)
{
	/* type_color is the last field for all cq structs */
	u8 type_color;

	switch (cq_desc_size) {
	case VNIC_RQ_CQ_ENTRY_SIZE_16: {
		struct cq_enet_rq_desc *desc =
			(struct cq_enet_rq_desc *)cq_desc;
		type_color = desc->type_color;

		/* Make sure color bit is read from desc *before* other fields
		 * are read from desc. Hardware guarantees color bit is last
		 * bit (byte) written. Adding the rmb() prevents the compiler
		 * and/or CPU from reordering the reads which would potentially
		 * result in reading stale values.
		 */
		rmb();

		*q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
			    CQ_DESC_Q_NUM_MASK;
		*completed_index = le16_to_cpu(desc->completed_index_flags) &
				   CQ_DESC_COMP_NDX_MASK;
		break;
	}
	case VNIC_RQ_CQ_ENTRY_SIZE_32: {
		struct cq_enet_rq_desc_32 *desc =
			(struct cq_enet_rq_desc_32 *)cq_desc;
		type_color = desc->type_color;

		/* Make sure color bit is read from desc *before* other fields
		 * are read from desc. Hardware guarantees color bit is last
		 * bit (byte) written. Adding the rmb() prevents the compiler
		 * and/or CPU from reordering the reads which would potentially
		 * result in reading stale values.
		 */
		rmb();

		*q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
			    CQ_DESC_Q_NUM_MASK;
		*completed_index = le16_to_cpu(desc->completed_index_flags) &
				   CQ_DESC_COMP_NDX_MASK;
		*completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
				    CQ_DESC_COMP_NDX_BITS;
		break;
	}
	case VNIC_RQ_CQ_ENTRY_SIZE_64: {
		struct cq_enet_rq_desc_64 *desc =
			(struct cq_enet_rq_desc_64 *)cq_desc;
		type_color = desc->type_color;

		/* Make sure color bit is read from desc *before* other fields
		 * are read from desc. Hardware guarantees color bit is last
		 * bit (byte) written. Adding the rmb() prevents the compiler
		 * and/or CPU from reordering the reads which would potentially
		 * result in reading stale values.
		 */
		rmb();

		*q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
			    CQ_DESC_Q_NUM_MASK;
		*completed_index = le16_to_cpu(desc->completed_index_flags) &
				   CQ_DESC_COMP_NDX_MASK;
		*completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
				    CQ_DESC_COMP_NDX_BITS;
		break;
	}
	}

	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
	*type = type_color & CQ_DESC_TYPE_MASK;
}

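/* Set RX hash, checksum status and any stripped VLAN tag on the skb from
 * the fields decoded out of the completion descriptor.
 */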
static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
				  u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
				  u8 vlan_stripped, u8 csum_not_calc,
				  u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
				  u16 vlan_tci, struct sk_buff *skb)
{
	struct enic *enic = vnic_dev_priv(vrq->vdev);
	struct net_device *netdev = enic->netdev;
	struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
	bool outer_csum_ok = true, encap = false;

	if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
		switch (rss_type) {
		case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
		case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
		case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
			skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
			rqstats->l4_rss_hash++;
			break;
		case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
		case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
		case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
			skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
			rqstats->l3_rss_hash++;
			break;
		}
	}
	if (enic->vxlan.vxlan_udp_port_number) {
		switch (enic->vxlan.patch_level) {
		case 0:
			if (fcoe) {
				encap = true;
				outer_csum_ok = fcoe_fc_crc_ok;
			}
			break;
		case 2:
			if (type == 7 && (rss_hash & BIT(0))) {
				encap = true;
				outer_csum_ok = (rss_hash & BIT(1)) &&
						(rss_hash & BIT(2));
			}
			break;
		}
	}

	/* Hardware does not provide the whole-packet checksum, only a
	 * pseudo checksum: hw validates the packet checksum but does not
	 * give us the checksum value, so use CHECKSUM_UNNECESSARY.
	 *
	 * For an encapsulated packet, tcp_udp_csum_ok is the inner csum_ok.
	 * outer_csum_ok is set by hw when the outer udp csum is correct or
	 * is zero.
	 */
	if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
	    tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = encap;
		if (encap)
			rqstats->csum_unnecessary_encap++;
		else
			rqstats->csum_unnecessary++;
	}

	if (vlan_stripped) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
		rqstats->vlan_stripped++;
	}
}

/*
 * cq_enet_rq_desc_dec accesses only the first 15 bytes of the cq
 * descriptor, which are identical for all cq entry sizes (16, 32 and
 * 64 bytes).
 */
static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
				u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
				u8 *csum_not_calc, u32 *rss_hash,
				u16 *bytes_written, u8 *packet_error,
				u8 *vlan_stripped, u16 *vlan_tci,
				u16 *checksum, u8 *fcoe_sof,
				u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
				u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
				u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
				u8 *ipv4_fragment, u8 *fcs_ok)
{
	u16 completed_index_flags;
	u16 q_number_rss_type_flags;
	u16 bytes_written_flags;

	completed_index_flags = le16_to_cpu(desc->completed_index_flags);
	q_number_rss_type_flags =
		le16_to_cpu(desc->q_number_rss_type_flags);
	bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);

	*ingress_port = (completed_index_flags &
			 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
	*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
		1 : 0;
	*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
	       1 : 0;
	*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
	       1 : 0;

	*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
			 CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
	*csum_not_calc = (q_number_rss_type_flags &
			  CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;

	*rss_hash = le32_to_cpu(desc->rss_hash);

	*bytes_written = bytes_written_flags &
			 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
	*packet_error = (bytes_written_flags &
			 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
	*vlan_stripped = (bytes_written_flags &
			  CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;

	/*
	 * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
	 */
	*vlan_tci = le16_to_cpu(desc->vlan);

	if (*fcoe) {
		*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
				 CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
		*fcoe_fc_crc_ok = (desc->flags &
				   CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
		*fcoe_enc_error = (desc->flags &
				   CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
		*fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
				  CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
				 CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
		*checksum = 0;
	} else {
		*fcoe_sof = 0;
		*fcoe_fc_crc_ok = 0;
		*fcoe_enc_error = 0;
		*fcoe_eof = 0;
		*checksum = le16_to_cpu(desc->checksum_fcoe);
	}

	*tcp_udp_csum_ok =
		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
	*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
	*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
	*ipv4_csum_ok =
		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
	*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
	*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
	*ipv4_fragment =
		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
	*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
}

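/* Check the completion descriptor for errors and update the per-RQ error
 * stats.  Returns true if the frame must be dropped.
 */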
static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
			      u16 bytes_written)
{
	struct enic *enic = vnic_dev_priv(vrq->vdev);
	struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;

	if (packet_error) {
		if (!fcs_ok) {
			if (bytes_written > 0)
				rqstats->bad_fcs++;
			else if (bytes_written == 0)
				rqstats->pkt_truncated++;
		}
		return true;
	}
	return false;
}

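/* Post a receive descriptor.  Reuse the buffer's existing page if there is
 * one, otherwise allocate a fresh page from the page pool.
 */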
int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct enic_rq *erq = &enic->rq[rq->index];
	struct enic_rq_stats *rqstats = &erq->stats;
	unsigned int offset = 0;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;
	struct page *page;
	unsigned int truesize = len;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}

	page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
	if (unlikely(!page)) {
		rqstats->pp_alloc_fail++;
		return -ENOMEM;
	}
	buf->offset = offset;
	buf->truesize = truesize;
	dma_addr = page_pool_get_dma_addr(page) + offset;
	enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);

	return 0;
}

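/* Release a receive buffer's page back to the page pool. */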
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct enic_rq *erq = &enic->rq[rq->index];

	if (!buf->os_buf)
		return;

	page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
	buf->os_buf = NULL;
}

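/* Handle one completed receive: decode the descriptor, attach the buffer's
 * page to an skb as a frag and pass it to GRO, or count a truncated frame.
 */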
static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
				 struct vnic_rq_buf *buf, void *cq_desc,
				 u8 type, u16 q_number, u16 completed_index)
{
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
	struct napi_struct *napi;

	u8 eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	rqstats->packets++;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
			    &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
			    &rss_hash, &bytes_written, &packet_error,
			    &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
			    &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
			    &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
			    &ipv4, &ipv4_fragment, &fcs_ok);

	if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
		return;

	if (eop && bytes_written > 0) {
		/* Good receive */
		rqstats->bytes += bytes_written;
		napi = &enic->napi[rq->index];
		skb = napi_get_frags(napi);
		if (unlikely(!skb)) {
			net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
					     enic->netdev->name, rq->index,
					     completed_index);
			rqstats->no_skb++;
			return;
		}

		prefetch(skb->data - NET_IP_ALIGN);

		dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
					bytes_written, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				(struct page *)buf->os_buf, buf->offset,
				bytes_written, buf->truesize);
		skb_record_rx_queue(skb, q_number);
		enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
				      fcoe_fc_crc_ok, vlan_stripped,
				      csum_not_calc, tcp_udp_csum_ok, ipv6,
				      ipv4_csum_ok, vlan_tci, skb);
		skb_mark_for_recycle(skb);
		napi_gro_frags(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
		buf->os_buf = NULL;
		buf->dma_addr = 0;
		buf = buf->next;
	} else {
		/* Buffer overflow */
		rqstats->pkt_truncated++;
	}
}

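/* Advance the RQ from to_clean up to and including completed_index,
 * indicating the completed buffer and counting any skipped descriptors.
 */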
static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
			    u16 q_number, u16 completed_index)
{
	struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
	struct vnic_rq *vrq = &enic->rq[q_number].vrq;
	struct vnic_rq_buf *vrq_buf = vrq->to_clean;
	int skipped;

	while (1) {
		skipped = (vrq_buf->index != completed_index);
		if (!skipped)
			enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
					     q_number, completed_index);
		else
			rqstats->desc_skip++;

		vrq->ring.desc_avail++;
		vrq->to_clean = vrq_buf->next;
		vrq_buf = vrq_buf->next;
		if (!skipped)
			break;
	}
}

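/* Service up to work_to_do RQ completions on one CQ.  The color bit of a
 * decoded descriptor flips when hardware has written a new entry; stop once
 * it matches cq->last_color.  Returns the number of descriptors processed.
 */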
unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
				unsigned int work_to_do)
{
	struct vnic_cq *cq = &enic->cq[cq_index];
	void *cq_desc = vnic_cq_to_clean(cq);
	u16 q_number, completed_index;
	unsigned int work_done = 0;
	u8 type, color;

	enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
			    &completed_index);

	while (color != cq->last_color) {
		enic_rq_service(enic, cq_desc, type, q_number, completed_index);
		vnic_cq_inc_to_clean(cq);

		if (++work_done >= work_to_do)
			break;

		cq_desc = vnic_cq_to_clean(cq);
		enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
				    &q_number, &completed_index);
	}

	return work_done;
}