xref: /linux/drivers/net/ethernet/microsoft/mana/mana_en.c (revision f73896b4197ed53cf0894657c899265ef7c86b7a)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <uapi/linux/bpf.h>
5 
6 #include <linux/debugfs.h>
7 #include <linux/inetdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/filter.h>
11 #include <linux/mm.h>
12 #include <linux/pci.h>
13 #include <linux/export.h>
14 #include <linux/skbuff.h>
15 
16 #include <net/checksum.h>
17 #include <net/ip6_checksum.h>
18 #include <net/netdev_lock.h>
19 #include <net/page_pool/helpers.h>
20 #include <net/xdp.h>
21 
22 #include <net/mana/mana.h>
23 #include <net/mana/mana_auxiliary.h>
24 #include <net/mana/hw_channel.h>
25 
26 static DEFINE_IDA(mana_adev_ida);
27 
28 static int mana_adev_idx_alloc(void)
29 {
30 	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
31 }
32 
33 static void mana_adev_idx_free(int idx)
34 {
35 	ida_free(&mana_adev_ida, idx);
36 }
37 
38 static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
39 			       loff_t *pos)
40 {
41 	struct gdma_queue *gdma_q = filp->private_data;
42 
43 	return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
44 				       gdma_q->queue_size);
45 }
46 
47 static const struct file_operations mana_dbg_q_fops = {
48 	.owner  = THIS_MODULE,
49 	.open   = simple_open,
50 	.read   = mana_dbg_q_read,
51 };
52 
53 static bool mana_en_need_log(struct mana_port_context *apc, int err)
54 {
55 	if (apc && apc->ac && apc->ac->gdma_dev &&
56 	    apc->ac->gdma_dev->gdma_context)
57 		return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
58 	else
59 		return true;
60 }
61 
62 static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
63 			     bool from_pool)
64 {
65 	if (from_pool)
66 		page_pool_put_full_page(rxq->page_pool, page, false);
67 	else
68 		put_page(page);
69 }
70 
71 /* Microsoft Azure Network Adapter (MANA) functions */
72 
73 static int mana_open(struct net_device *ndev)
74 {
75 	struct mana_port_context *apc = netdev_priv(ndev);
76 	int err;
77 
78 	err = mana_alloc_queues(ndev);
79 	if (err) {
80 		netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
81 		return err;
82 	}
83 
84 	apc->port_is_up = true;
85 
86 	/* Ensure port state updated before txq state */
87 	smp_wmb();
88 
89 	netif_tx_wake_all_queues(ndev);
90 	netdev_dbg(ndev, "%s successful\n", __func__);
91 	return 0;
92 }
93 
94 static int mana_close(struct net_device *ndev)
95 {
96 	struct mana_port_context *apc = netdev_priv(ndev);
97 
98 	if (!apc->port_is_up)
99 		return 0;
100 
101 	return mana_detach(ndev, true);
102 }
103 
104 static void mana_link_state_handle(struct work_struct *w)
105 {
106 	struct mana_context *ac;
107 	struct net_device *ndev;
108 	u32 link_event;
109 	bool link_up;
110 	int i;
111 
112 	ac = container_of(w, struct mana_context, link_change_work);
113 
114 	rtnl_lock();
115 
116 	link_event = READ_ONCE(ac->link_event);
117 
118 	if (link_event == HWC_DATA_HW_LINK_CONNECT)
119 		link_up = true;
120 	else if (link_event == HWC_DATA_HW_LINK_DISCONNECT)
121 		link_up = false;
122 	else
123 		goto out;
124 
125 	/* Process all ports */
126 	for (i = 0; i < ac->num_ports; i++) {
127 		ndev = ac->ports[i];
128 		if (!ndev)
129 			continue;
130 
131 		if (link_up) {
132 			netif_carrier_on(ndev);
133 
134 			__netdev_notify_peers(ndev);
135 		} else {
136 			netif_carrier_off(ndev);
137 		}
138 	}
139 
140 out:
141 	rtnl_unlock();
142 }
143 
144 static bool mana_can_tx(struct gdma_queue *wq)
145 {
146 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
147 }
148 
149 static unsigned int mana_checksum_info(struct sk_buff *skb)
150 {
151 	if (skb->protocol == htons(ETH_P_IP)) {
152 		struct iphdr *ip = ip_hdr(skb);
153 
154 		if (ip->protocol == IPPROTO_TCP)
155 			return IPPROTO_TCP;
156 
157 		if (ip->protocol == IPPROTO_UDP)
158 			return IPPROTO_UDP;
159 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
160 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
161 
162 		if (ip6->nexthdr == IPPROTO_TCP)
163 			return IPPROTO_TCP;
164 
165 		if (ip6->nexthdr == IPPROTO_UDP)
166 			return IPPROTO_UDP;
167 	}
168 
169 	/* No csum offloading */
170 	return 0;
171 }
172 
173 static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
174 			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
175 {
176 	ash->dma_handle[sg_i] = da;
177 	ash->size[sg_i] = sge_len;
178 
179 	tp->wqe_req.sgl[sg_i].address = da;
180 	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
181 	tp->wqe_req.sgl[sg_i].size = sge_len;
182 }
183 
184 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
185 			struct mana_tx_package *tp, int gso_hs)
186 {
187 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
188 	int hsg = 1; /* num of SGEs of linear part */
189 	struct gdma_dev *gd = apc->ac->gdma_dev;
190 	int skb_hlen = skb_headlen(skb);
191 	int sge0_len, sge1_len = 0;
192 	struct gdma_context *gc;
193 	struct device *dev;
194 	skb_frag_t *frag;
195 	dma_addr_t da;
196 	int sg_i;
197 	int i;
198 
199 	gc = gd->gdma_context;
200 	dev = gc->dev;
201 
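	/* For GSO, the HW expects SGE0 to hold only the packet headers, so
	 * split the linear part: headers in SGE0, any remaining linear data
	 * in SGE1 (see mana_fix_skb_head()).
	 */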
202 	if (gso_hs && gso_hs < skb_hlen) {
203 		sge0_len = gso_hs;
204 		sge1_len = skb_hlen - gso_hs;
205 	} else {
206 		sge0_len = skb_hlen;
207 	}
208 
209 	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
210 	if (dma_mapping_error(dev, da))
211 		return -ENOMEM;
212 
213 	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
214 
215 	if (sge1_len) {
216 		sg_i = 1;
217 		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
218 				    DMA_TO_DEVICE);
219 		if (dma_mapping_error(dev, da))
220 			goto frag_err;
221 
222 		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
223 		hsg = 2;
224 	}
225 
226 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
227 		sg_i = hsg + i;
228 
229 		frag = &skb_shinfo(skb)->frags[i];
230 		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
231 				      DMA_TO_DEVICE);
232 		if (dma_mapping_error(dev, da))
233 			goto frag_err;
234 
235 		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
236 			     gd->gpa_mkey);
237 	}
238 
239 	return 0;
240 
241 frag_err:
242 	if (net_ratelimit())
243 		netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
244 			   skb->len);
245 	for (i = sg_i - 1; i >= hsg; i--)
246 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
247 			       DMA_TO_DEVICE);
248 
249 	for (i = hsg - 1; i >= 0; i--)
250 		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
251 				 DMA_TO_DEVICE);
252 
253 	return -ENOMEM;
254 }
255 
256 /* Handle the case when GSO SKB linear length is too large.
257  * MANA NIC requires GSO packets to carry only the packet header in SGE0.
258  * So, we need 2 SGEs when the skb linear part contains more than the
259  * header.
260  * Return a positive value for the number of SGEs, or a negative value
261  * for an error.
262  */
263 static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
264 			     int gso_hs)
265 {
266 	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
267 	int skb_hlen = skb_headlen(skb);
268 
269 	if (gso_hs < skb_hlen) {
270 		num_sge++;
271 	} else if (gso_hs > skb_hlen) {
272 		if (net_ratelimit())
273 			netdev_err(ndev,
274 				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
275 				   gso_hs, skb_hlen);
276 
277 		return -EINVAL;
278 	}
279 
280 	return num_sge;
281 }
282 
283 /* Get the GSO packet's header size */
284 static int mana_get_gso_hs(struct sk_buff *skb)
285 {
286 	int gso_hs;
287 
288 	if (skb->encapsulation) {
289 		gso_hs = skb_inner_tcp_all_headers(skb);
290 	} else {
291 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
292 			gso_hs = skb_transport_offset(skb) +
293 				 sizeof(struct udphdr);
294 		} else {
295 			gso_hs = skb_tcp_all_headers(skb);
296 		}
297 	}
298 
299 	return gso_hs;
300 }
301 
302 static void mana_per_port_queue_reset_work_handler(struct work_struct *work)
303 {
304 	struct mana_port_context *apc = container_of(work,
305 						     struct mana_port_context,
306 						     queue_reset_work);
307 	struct net_device *ndev = apc->ndev;
308 	int err;
309 
310 	rtnl_lock();
311 
312 	/* Pre-allocate buffers to prevent failure in mana_attach later */
313 	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
314 	if (err) {
315 		netdev_err(ndev, "Insufficient memory for reset post tx stall detection\n");
316 		goto out;
317 	}
318 
319 	err = mana_detach(ndev, false);
320 	if (err) {
321 		netdev_err(ndev, "mana_detach failed: %d\n", err);
322 		goto dealloc_pre_rxbufs;
323 	}
324 
325 	err = mana_attach(ndev);
326 	if (err)
327 		netdev_err(ndev, "mana_attach failed: %d\n", err);
328 
329 dealloc_pre_rxbufs:
330 	mana_pre_dealloc_rxbufs(apc);
331 out:
332 	rtnl_unlock();
333 }
334 
335 netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
336 {
337 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
338 	struct mana_port_context *apc = netdev_priv(ndev);
339 	int gso_hs = 0; /* zero for non-GSO pkts */
340 	u16 txq_idx = skb_get_queue_mapping(skb);
341 	struct gdma_dev *gd = apc->ac->gdma_dev;
342 	bool ipv4 = false, ipv6 = false;
343 	struct mana_tx_package pkg = {};
344 	struct netdev_queue *net_txq;
345 	struct mana_stats_tx *tx_stats;
346 	struct gdma_queue *gdma_sq;
347 	int err, len, num_gso_seg;
348 	unsigned int csum_type;
349 	struct mana_txq *txq;
350 	struct mana_cq *cq;
351 
352 	if (unlikely(!apc->port_is_up))
353 		goto tx_drop;
354 
355 	if (skb_cow_head(skb, MANA_HEADROOM))
356 		goto tx_drop_count;
357 
358 	txq = &apc->tx_qp[txq_idx].txq;
359 	gdma_sq = txq->gdma_sq;
360 	cq = &apc->tx_qp[txq_idx].tx_cq;
361 	tx_stats = &txq->stats;
362 
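	/* If the skb needs more SGEs than one TX WQE supports, fall back to
	 * linearizing it into a single buffer. GSO skbs over the limit never
	 * reach here; mana_features_check() makes the stack segment them.
	 */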
363 	BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
364 	if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
365 	    skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
366 		/* GSO skbs exceeding the hardware SGE limit are not expected
367 		 * here, as they are handled in the mana_features_check() callback
368 		 */
369 		if (skb_linearize(skb)) {
370 			netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
371 					 skb_shinfo(skb)->nr_frags,
372 					 skb_is_gso(skb));
373 			goto tx_drop_count;
374 		}
375 		apc->eth_stats.tx_linear_pkt_cnt++;
376 	}
377 
378 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
379 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
380 
381 	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
382 		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
383 		pkt_fmt = MANA_LONG_PKT_FMT;
384 	} else {
385 		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
386 	}
387 
388 	if (skb_vlan_tag_present(skb)) {
389 		pkt_fmt = MANA_LONG_PKT_FMT;
390 		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
391 		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
392 		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
393 		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
394 	}
395 
396 	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
397 
398 	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
399 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
400 		u64_stats_update_begin(&tx_stats->syncp);
401 		tx_stats->short_pkt_fmt++;
402 		u64_stats_update_end(&tx_stats->syncp);
403 	} else {
404 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
405 		u64_stats_update_begin(&tx_stats->syncp);
406 		tx_stats->long_pkt_fmt++;
407 		u64_stats_update_end(&tx_stats->syncp);
408 	}
409 
410 	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
411 	pkg.wqe_req.flags = 0;
412 	pkg.wqe_req.client_data_unit = 0;
413 
414 	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
415 
416 	if (skb->protocol == htons(ETH_P_IP))
417 		ipv4 = true;
418 	else if (skb->protocol == htons(ETH_P_IPV6))
419 		ipv6 = true;
420 
421 	if (skb_is_gso(skb)) {
422 		int num_sge;
423 
424 		gso_hs = mana_get_gso_hs(skb);
425 
426 		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
427 		if (num_sge > 0)
428 			pkg.wqe_req.num_sge = num_sge;
429 		else
430 			goto tx_drop_count;
431 
432 		u64_stats_update_begin(&tx_stats->syncp);
433 		if (skb->encapsulation) {
434 			tx_stats->tso_inner_packets++;
435 			tx_stats->tso_inner_bytes += skb->len - gso_hs;
436 		} else {
437 			tx_stats->tso_packets++;
438 			tx_stats->tso_bytes += skb->len - gso_hs;
439 		}
440 		u64_stats_update_end(&tx_stats->syncp);
441 
442 		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
443 		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
444 
445 		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
446 		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
447 		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
448 
449 		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
450 		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
451 		if (ipv4) {
452 			ip_hdr(skb)->tot_len = 0;
453 			ip_hdr(skb)->check = 0;
454 			tcp_hdr(skb)->check =
455 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
456 						   ip_hdr(skb)->daddr, 0,
457 						   IPPROTO_TCP, 0);
458 		} else {
459 			ipv6_hdr(skb)->payload_len = 0;
460 			tcp_hdr(skb)->check =
461 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
462 						 &ipv6_hdr(skb)->daddr, 0,
463 						 IPPROTO_TCP, 0);
464 		}
465 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466 		csum_type = mana_checksum_info(skb);
467 
468 		u64_stats_update_begin(&tx_stats->syncp);
469 		tx_stats->csum_partial++;
470 		u64_stats_update_end(&tx_stats->syncp);
471 
472 		if (csum_type == IPPROTO_TCP) {
473 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
474 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
475 
476 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
477 			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
478 
479 		} else if (csum_type == IPPROTO_UDP) {
480 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
481 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
482 
483 			pkg.tx_oob.s_oob.comp_udp_csum = 1;
484 		} else {
485 			/* Can't do offload of this type of checksum */
486 			if (skb_checksum_help(skb))
487 				goto tx_drop_count;
488 		}
489 	}
490 
491 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
492 		pkg.wqe_req.sgl = pkg.sgl_array;
493 	} else {
494 		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
495 					    sizeof(struct gdma_sge), GFP_ATOMIC);
496 		if (!pkg.sgl_ptr)
497 			goto tx_drop_count;
498 
499 		pkg.wqe_req.sgl = pkg.sgl_ptr;
500 	}
501 
502 	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
503 		u64_stats_update_begin(&tx_stats->syncp);
504 		tx_stats->mana_map_err++;
505 		u64_stats_update_end(&tx_stats->syncp);
506 		goto free_sgl_ptr;
507 	}
508 
509 	skb_queue_tail(&txq->pending_skbs, skb);
510 
511 	len = skb->len;
512 	num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
513 	net_txq = netdev_get_tx_queue(ndev, txq_idx);
514 
515 	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
516 					(struct gdma_posted_wqe_info *)skb->cb);
517 	if (!mana_can_tx(gdma_sq)) {
518 		netif_tx_stop_queue(net_txq);
519 		apc->eth_stats.stop_queue++;
520 	}
521 
522 	if (err) {
523 		(void)skb_dequeue_tail(&txq->pending_skbs);
524 		mana_unmap_skb(skb, apc);
525 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
526 		goto free_sgl_ptr;
527 	}
528 
529 	err = NETDEV_TX_OK;
530 	atomic_inc(&txq->pending_sends);
531 
532 	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
533 
534 	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
535 	skb = NULL;
536 
537 	/* Populate the packet and bytes counters based on post-GSO packet
538 	 * calculations
539 	 */
540 	tx_stats = &txq->stats;
541 	u64_stats_update_begin(&tx_stats->syncp);
542 	tx_stats->packets += num_gso_seg;
543 	tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
544 	u64_stats_update_end(&tx_stats->syncp);
545 
546 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
547 		netif_tx_wake_queue(net_txq);
548 		apc->eth_stats.wake_queue++;
549 	}
550 
551 	kfree(pkg.sgl_ptr);
552 	return err;
553 
554 free_sgl_ptr:
555 	kfree(pkg.sgl_ptr);
556 tx_drop_count:
557 	ndev->stats.tx_dropped++;
558 tx_drop:
559 	dev_kfree_skb_any(skb);
560 	return NETDEV_TX_OK;
561 }
562 
563 #if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
564 static netdev_features_t mana_features_check(struct sk_buff *skb,
565 					     struct net_device *ndev,
566 					     netdev_features_t features)
567 {
568 	if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
569 		/* Exceeds HW SGE limit.
570 		 * GSO case:
571 		 *   Disable GSO so the stack will software-segment the skb
572 		 *   into smaller skbs that fit the SGE budget.
573 		 * Non-GSO case:
574 		 *   The xmit path will attempt skb_linearize() as a fallback.
575 		 */
576 		features &= ~NETIF_F_GSO_MASK;
577 	}
578 	return features;
579 }
580 #endif
581 
582 static void mana_get_stats64(struct net_device *ndev,
583 			     struct rtnl_link_stats64 *st)
584 {
585 	struct mana_port_context *apc = netdev_priv(ndev);
586 	unsigned int num_queues = apc->num_queues;
587 	struct mana_stats_rx *rx_stats;
588 	struct mana_stats_tx *tx_stats;
589 	unsigned int start;
590 	u64 packets, bytes;
591 	int q;
592 
593 	if (!apc->port_is_up)
594 		return;
595 
596 	netdev_stats_to_stats64(st, &ndev->stats);
597 
598 	if (apc->ac->hwc_timeout_occurred)
599 		netdev_warn_once(ndev, "HWC timeout occurred\n");
600 
601 	st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
602 
603 	for (q = 0; q < num_queues; q++) {
604 		rx_stats = &apc->rxqs[q]->stats;
605 
606 		do {
607 			start = u64_stats_fetch_begin(&rx_stats->syncp);
608 			packets = rx_stats->packets;
609 			bytes = rx_stats->bytes;
610 		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
611 
612 		st->rx_packets += packets;
613 		st->rx_bytes += bytes;
614 	}
615 
616 	for (q = 0; q < num_queues; q++) {
617 		tx_stats = &apc->tx_qp[q].txq.stats;
618 
619 		do {
620 			start = u64_stats_fetch_begin(&tx_stats->syncp);
621 			packets = tx_stats->packets;
622 			bytes = tx_stats->bytes;
623 		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
624 
625 		st->tx_packets += packets;
626 		st->tx_bytes += bytes;
627 	}
628 }
629 
630 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
631 			     int old_q)
632 {
633 	struct mana_port_context *apc = netdev_priv(ndev);
634 	u32 hash = skb_get_hash(skb);
635 	struct sock *sk = skb->sk;
636 	int txq;
637 
638 	txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
639 
640 	if (txq != old_q && sk && sk_fullsock(sk) &&
641 	    rcu_access_pointer(sk->sk_dst_cache))
642 		sk_tx_queue_set(sk, txq);
643 
644 	return txq;
645 }
646 
647 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
648 			     struct net_device *sb_dev)
649 {
650 	int txq;
651 
652 	if (ndev->real_num_tx_queues == 1)
653 		return 0;
654 
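	/* Prefer the queue cached on the socket; if it is invalid, fall back
	 * to the recorded RX queue or hash into the RSS indirection table.
	 */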
655 	txq = sk_tx_queue_get(skb->sk);
656 
657 	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
658 		if (skb_rx_queue_recorded(skb))
659 			txq = skb_get_rx_queue(skb);
660 		else
661 			txq = mana_get_tx_queue(ndev, skb, txq);
662 	}
663 
664 	return txq;
665 }
666 
667 /* Release pre-allocated RX buffers */
668 void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
669 {
670 	struct device *dev;
671 	int i;
672 
673 	dev = mpc->ac->gdma_dev->gdma_context->dev;
674 
675 	if (!mpc->rxbufs_pre)
676 		goto out1;
677 
678 	if (!mpc->das_pre)
679 		goto out2;
680 
681 	while (mpc->rxbpre_total) {
682 		i = --mpc->rxbpre_total;
683 		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
684 				 DMA_FROM_DEVICE);
685 		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
686 	}
687 
688 	kfree(mpc->das_pre);
689 	mpc->das_pre = NULL;
690 
691 out2:
692 	kfree(mpc->rxbufs_pre);
693 	mpc->rxbufs_pre = NULL;
694 
695 out1:
696 	mpc->rxbpre_datasize = 0;
697 	mpc->rxbpre_alloc_size = 0;
698 	mpc->rxbpre_headroom = 0;
699 }
700 
701 /* Get a buffer from the pre-allocated RX buffers */
702 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
703 {
704 	struct net_device *ndev = rxq->ndev;
705 	struct mana_port_context *mpc;
706 	void *va;
707 
708 	mpc = netdev_priv(ndev);
709 
710 	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
711 		netdev_err(ndev, "No RX pre-allocated bufs\n");
712 		return NULL;
713 	}
714 
715 	/* Check sizes to catch unexpected coding error */
716 	if (mpc->rxbpre_datasize != rxq->datasize) {
717 		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
718 			   mpc->rxbpre_datasize, rxq->datasize);
719 		return NULL;
720 	}
721 
722 	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
723 		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
724 			   mpc->rxbpre_alloc_size, rxq->alloc_size);
725 		return NULL;
726 	}
727 
728 	if (mpc->rxbpre_headroom != rxq->headroom) {
729 		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
730 			   mpc->rxbpre_headroom, rxq->headroom);
731 		return NULL;
732 	}
733 
734 	mpc->rxbpre_total--;
735 
736 	*da = mpc->das_pre[mpc->rxbpre_total];
737 	va = mpc->rxbufs_pre[mpc->rxbpre_total];
738 	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
739 
740 	/* Deallocate the array after all buffers are gone */
741 	if (!mpc->rxbpre_total)
742 		mana_pre_dealloc_rxbufs(mpc);
743 
744 	return va;
745 }
746 
747 /* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
748 static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
749 			       int mtu, u32 *datasize, u32 *alloc_size,
750 			       u32 *headroom, u32 *frag_count)
751 {
752 	u32 len, buf_size;
753 
754 	/* Calculate datasize first (consistent across all cases) */
755 	*datasize = mtu + ETH_HLEN;
756 
757 	/* For xdp and jumbo frames make sure only one packet fits per page */
758 	if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
759 		if (mana_xdp_get(apc)) {
760 			*headroom = XDP_PACKET_HEADROOM;
761 			*alloc_size = PAGE_SIZE;
762 		} else {
763 			*headroom = 0; /* no support for XDP */
764 			*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
765 						     *headroom);
766 		}
767 
768 		*frag_count = 1;
769 
770 		/* In the single-buffer path, napi_build_skb() must see the
771 		 * actual backing allocation size so skb->truesize reflects
772 		 * the full page (or higher-order page), not just the usable
773 		 * packet area.
774 		 */
775 		*alloc_size = PAGE_SIZE << get_order(*alloc_size);
776 		return;
777 	}
778 
779 	/* Standard MTU case - optimize for multiple packets per page */
780 	*headroom = 0;
781 
782 	/* Calculate base buffer size needed */
783 	len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
784 	buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);
785 
786 	/* Calculate how many packets can fit in a page */
787 	*frag_count = PAGE_SIZE / buf_size;
788 	*alloc_size = buf_size;
789 }
790 
791 int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
792 {
793 	struct device *dev;
794 	struct page *page;
795 	dma_addr_t da;
796 	int num_rxb;
797 	void *va;
798 	int i;
799 
800 	mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
801 			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
802 			   &mpc->rxbpre_frag_count);
803 
804 	dev = mpc->ac->gdma_dev->gdma_context->dev;
805 
806 	num_rxb = num_queues * mpc->rx_queue_size;
807 
808 	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
809 	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
810 	if (!mpc->rxbufs_pre)
811 		goto error;
812 
813 	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
814 	if (!mpc->das_pre)
815 		goto error;
816 
817 	mpc->rxbpre_total = 0;
818 
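	/* Allocate and DMA-map one buffer per RX WQE across all queues so
	 * that mana_attach() cannot fail on memory later.
	 */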
819 	for (i = 0; i < num_rxb; i++) {
820 		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
821 		if (!page)
822 			goto error;
823 
824 		va = page_to_virt(page);
825 
826 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
827 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
828 		if (dma_mapping_error(dev, da)) {
829 			put_page(page);
830 			goto error;
831 		}
832 
833 		mpc->rxbufs_pre[i] = va;
834 		mpc->das_pre[i] = da;
835 		mpc->rxbpre_total = i + 1;
836 	}
837 
838 	return 0;
839 
840 error:
841 	netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
842 	mana_pre_dealloc_rxbufs(mpc);
843 	return -ENOMEM;
844 }
845 
846 static int mana_change_mtu(struct net_device *ndev, int new_mtu)
847 {
848 	struct mana_port_context *mpc = netdev_priv(ndev);
849 	unsigned int old_mtu = ndev->mtu;
850 	int err;
851 
852 	/* Pre-allocate buffers to prevent failure in mana_attach later */
853 	err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
854 	if (err) {
855 		netdev_err(ndev, "Insufficient memory for new MTU\n");
856 		return err;
857 	}
858 
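	/* The queues must be torn down and re-created to switch to the new
	 * buffer sizes.
	 */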
859 	err = mana_detach(ndev, false);
860 	if (err) {
861 		netdev_err(ndev, "mana_detach failed: %d\n", err);
862 		goto out;
863 	}
864 
865 	WRITE_ONCE(ndev->mtu, new_mtu);
866 
867 	err = mana_attach(ndev);
868 	if (err) {
869 		netdev_err(ndev, "mana_attach failed: %d\n", err);
870 		WRITE_ONCE(ndev->mtu, old_mtu);
871 	}
872 
873 out:
874 	mana_pre_dealloc_rxbufs(mpc);
875 	return err;
876 }
877 
878 static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue)
879 {
880 	struct mana_port_context *apc = netdev_priv(netdev);
881 	struct mana_context *ac = apc->ac;
882 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
883 
884 	/* Already in service, hence tx queue reset is not required. */
885 	if (gc->in_service)
886 		return;
887 
888 	/* Note: If there is pending queue reset work for this port (apc),
889 	 * subsequent requests queued from here are ignored. This is because
890 	 * we use the same work instance per port (apc).
891 	 */
892 	queue_work(ac->per_port_queue_reset_wq, &apc->queue_reset_work);
893 }
894 
895 static int mana_shaper_set(struct net_shaper_binding *binding,
896 			   const struct net_shaper *shaper,
897 			   struct netlink_ext_ack *extack)
898 {
899 	struct mana_port_context *apc = netdev_priv(binding->netdev);
900 	u32 old_speed, rate;
901 	int err;
902 
903 	if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
904 		NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
905 		return -EINVAL;
906 	}
907 
908 	if (apc->handle.id && shaper->handle.id != apc->handle.id) {
909 		NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
910 		return -EOPNOTSUPP;
911 	}
912 
913 	if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
914 		NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
915 		return -EINVAL;
916 	}
917 
918 	rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
919 	rate = div_u64(rate, 1000);	      /* Convert Kbps to Mbps */
920 
921 	/* Get current speed */
922 	err = mana_query_link_cfg(apc);
923 	old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
924 
925 	if (!err) {
926 		err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
927 		apc->speed = (err) ? old_speed : rate;
928 		apc->handle = (err) ? apc->handle : shaper->handle;
929 	}
930 
931 	return err;
932 }
933 
934 static int mana_shaper_del(struct net_shaper_binding *binding,
935 			   const struct net_shaper_handle *handle,
936 			   struct netlink_ext_ack *extack)
937 {
938 	struct mana_port_context *apc = netdev_priv(binding->netdev);
939 	int err;
940 
941 	err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
942 
943 	if (!err) {
944 		/* Reset mana port context parameters */
945 		apc->handle.id = 0;
946 		apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
947 		apc->speed = apc->max_speed;
948 	}
949 
950 	return err;
951 }
952 
953 static void mana_shaper_cap(struct net_shaper_binding *binding,
954 			    enum net_shaper_scope scope,
955 			    unsigned long *flags)
956 {
957 	*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
958 		 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
959 }
960 
961 static const struct net_shaper_ops mana_shaper_ops = {
962 	.set = mana_shaper_set,
963 	.delete = mana_shaper_del,
964 	.capabilities = mana_shaper_cap,
965 };
966 
967 static const struct net_device_ops mana_devops = {
968 	.ndo_open		= mana_open,
969 	.ndo_stop		= mana_close,
970 	.ndo_select_queue	= mana_select_queue,
971 #if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
972 	.ndo_features_check	= mana_features_check,
973 #endif
974 	.ndo_start_xmit		= mana_start_xmit,
975 	.ndo_validate_addr	= eth_validate_addr,
976 	.ndo_get_stats64	= mana_get_stats64,
977 	.ndo_bpf		= mana_bpf,
978 	.ndo_xdp_xmit		= mana_xdp_xmit,
979 	.ndo_change_mtu		= mana_change_mtu,
980 	.ndo_tx_timeout		= mana_tx_timeout,
981 	.net_shaper_ops         = &mana_shaper_ops,
982 };
983 
984 static void mana_cleanup_port_context(struct mana_port_context *apc)
985 {
986 	/* Make sure subsequent cleanup attempts don't end up removing an
987 	 * already-freed dentry: debugfs_remove() safely ignores the NULL
988 	 * pointer left behind here.
989 	 */
990 	debugfs_remove(apc->mana_port_debugfs);
991 	apc->mana_port_debugfs = NULL;
992 	kfree(apc->rxqs);
993 	apc->rxqs = NULL;
994 }
995 
996 static void mana_cleanup_indir_table(struct mana_port_context *apc)
997 {
998 	apc->indir_table_sz = 0;
999 	kfree(apc->indir_table);
1000 	kfree(apc->rxobj_table);
1001 }
1002 
1003 static int mana_init_port_context(struct mana_port_context *apc)
1004 {
1005 	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), GFP_KERNEL);
1006 
1007 	return !apc->rxqs ? -ENOMEM : 0;
1008 }
1009 
1010 static int mana_send_request(struct mana_context *ac, void *in_buf,
1011 			     u32 in_len, void *out_buf, u32 out_len)
1012 {
1013 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1014 	struct gdma_resp_hdr *resp = out_buf;
1015 	struct gdma_req_hdr *req = in_buf;
1016 	struct device *dev = gc->dev;
1017 	static atomic_t activity_id;
1018 	int err;
1019 
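	/* Tag the request with a unique activity ID so the response can be
	 * matched against it below.
	 */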
1020 	req->dev_id = gc->mana.dev_id;
1021 	req->activity_id = atomic_inc_return(&activity_id);
1022 
1023 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
1024 				   out_buf);
1025 	if (err || resp->status) {
1026 		if (err == -EOPNOTSUPP)
1027 			return err;
1028 
1029 		if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
1030 		    mana_need_log(gc, err))
1031 			dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
1032 				err, resp->status);
1033 		return err ? err : -EPROTO;
1034 	}
1035 
1036 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
1037 	    req->activity_id != resp->activity_id) {
1038 		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
1039 			req->dev_id.as_uint32, resp->dev_id.as_uint32,
1040 			req->activity_id, resp->activity_id);
1041 		return -EPROTO;
1042 	}
1043 
1044 	return 0;
1045 }
1046 
1047 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
1048 				const enum mana_command_code expected_code,
1049 				const u32 min_size)
1050 {
1051 	if (resp_hdr->response.msg_type != expected_code)
1052 		return -EPROTO;
1053 
1054 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
1055 		return -EPROTO;
1056 
1057 	if (resp_hdr->response.msg_size < min_size)
1058 		return -EPROTO;
1059 
1060 	return 0;
1061 }
1062 
1063 static int mana_pf_register_hw_vport(struct mana_port_context *apc)
1064 {
1065 	struct mana_register_hw_vport_resp resp = {};
1066 	struct mana_register_hw_vport_req req = {};
1067 	int err;
1068 
1069 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
1070 			     sizeof(req), sizeof(resp));
1071 	req.attached_gfid = 1;
1072 	req.is_pf_default_vport = 1;
1073 	req.allow_all_ether_types = 1;
1074 
1075 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1076 				sizeof(resp));
1077 	if (err) {
1078 		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
1079 		return err;
1080 	}
1081 
1082 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
1083 				   sizeof(resp));
1084 	if (err || resp.hdr.status) {
1085 		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
1086 			   err, resp.hdr.status);
1087 		return err ? err : -EPROTO;
1088 	}
1089 
1090 	apc->port_handle = resp.hw_vport_handle;
1091 	return 0;
1092 }
1093 
1094 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
1095 {
1096 	struct mana_deregister_hw_vport_resp resp = {};
1097 	struct mana_deregister_hw_vport_req req = {};
1098 	int err;
1099 
1100 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
1101 			     sizeof(req), sizeof(resp));
1102 	req.hw_vport_handle = apc->port_handle;
1103 
1104 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1105 				sizeof(resp));
1106 	if (err) {
1107 		if (mana_en_need_log(apc, err))
1108 			netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
1109 				   err);
1110 
1111 		return;
1112 	}
1113 
1114 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
1115 				   sizeof(resp));
1116 	if (err || resp.hdr.status)
1117 		netdev_err(apc->ndev,
1118 			   "Failed to deregister hw vPort: %d, 0x%x\n",
1119 			   err, resp.hdr.status);
1120 }
1121 
1122 static int mana_pf_register_filter(struct mana_port_context *apc)
1123 {
1124 	struct mana_register_filter_resp resp = {};
1125 	struct mana_register_filter_req req = {};
1126 	int err;
1127 
1128 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
1129 			     sizeof(req), sizeof(resp));
1130 	req.vport = apc->port_handle;
1131 	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
1132 
1133 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1134 				sizeof(resp));
1135 	if (err) {
1136 		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
1137 		return err;
1138 	}
1139 
1140 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
1141 				   sizeof(resp));
1142 	if (err || resp.hdr.status) {
1143 		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
1144 			   err, resp.hdr.status);
1145 		return err ? err : -EPROTO;
1146 	}
1147 
1148 	apc->pf_filter_handle = resp.filter_handle;
1149 	return 0;
1150 }
1151 
1152 static void mana_pf_deregister_filter(struct mana_port_context *apc)
1153 {
1154 	struct mana_deregister_filter_resp resp = {};
1155 	struct mana_deregister_filter_req req = {};
1156 	int err;
1157 
1158 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
1159 			     sizeof(req), sizeof(resp));
1160 	req.filter_handle = apc->pf_filter_handle;
1161 
1162 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1163 				sizeof(resp));
1164 	if (err) {
1165 		if (mana_en_need_log(apc, err))
1166 			netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
1167 				   err);
1168 
1169 		return;
1170 	}
1171 
1172 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
1173 				   sizeof(resp));
1174 	if (err || resp.hdr.status)
1175 		netdev_err(apc->ndev,
1176 			   "Failed to deregister filter: %d, 0x%x\n",
1177 			   err, resp.hdr.status);
1178 }
1179 
1180 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
1181 				 u32 proto_minor_ver, u32 proto_micro_ver,
1182 				 u16 *max_num_vports, u8 *bm_hostmode)
1183 {
1184 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1185 	struct mana_query_device_cfg_resp resp = {};
1186 	struct mana_query_device_cfg_req req = {};
1187 	struct device *dev = gc->dev;
1188 	int err = 0;
1189 
1190 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
1191 			     sizeof(req), sizeof(resp));
1192 
1193 	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
1194 
1195 	req.proto_major_ver = proto_major_ver;
1196 	req.proto_minor_ver = proto_minor_ver;
1197 	req.proto_micro_ver = proto_micro_ver;
1198 
1199 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
1200 	if (err) {
1201 		dev_err(dev, "Failed to query config: %d\n", err);
1202 		return err;
1203 	}
1204 
1205 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
1206 				   sizeof(resp));
1207 	if (err || resp.hdr.status) {
1208 		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
1209 			resp.hdr.status);
1210 		if (!err)
1211 			err = -EPROTO;
1212 		return err;
1213 	}
1214 
1215 	*max_num_vports = resp.max_num_vports;
1216 
1217 	if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
1218 		gc->adapter_mtu = resp.adapter_mtu;
1219 	else
1220 		gc->adapter_mtu = ETH_FRAME_LEN;
1221 
1222 	if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
1223 		*bm_hostmode = resp.bm_hostmode;
1224 	else
1225 		*bm_hostmode = 0;
1226 
1227 	debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
1228 
1229 	return 0;
1230 }
1231 
1232 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
1233 				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
1234 {
1235 	struct mana_query_vport_cfg_resp resp = {};
1236 	struct mana_query_vport_cfg_req req = {};
1237 	int err;
1238 
1239 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
1240 			     sizeof(req), sizeof(resp));
1241 
1242 	req.vport_index = vport_index;
1243 
1244 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1245 				sizeof(resp));
1246 	if (err)
1247 		return err;
1248 
1249 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
1250 				   sizeof(resp));
1251 	if (err)
1252 		return err;
1253 
1254 	if (resp.hdr.status)
1255 		return -EPROTO;
1256 
1257 	*max_sq = resp.max_num_sq;
1258 	*max_rq = resp.max_num_rq;
1259 	if (resp.num_indirection_ent > 0 &&
1260 	    resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
1261 	    is_power_of_2(resp.num_indirection_ent)) {
1262 		*num_indir_entry = resp.num_indirection_ent;
1263 	} else {
1264 		netdev_warn(apc->ndev,
1265 			    "Setting indirection table size to default %d for vPort %d\n",
1266 			    MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
1267 		*num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
1268 	}
1269 
1270 	apc->port_handle = resp.vport;
1271 	ether_addr_copy(apc->mac_addr, resp.mac_addr);
1272 
1273 	return 0;
1274 }
1275 
1276 void mana_uncfg_vport(struct mana_port_context *apc)
1277 {
1278 	mutex_lock(&apc->vport_mutex);
1279 	apc->vport_use_count--;
1280 	WARN_ON(apc->vport_use_count < 0);
1281 	mutex_unlock(&apc->vport_mutex);
1282 }
1283 EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");
1284 
1285 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
1286 		   u32 doorbell_pg_id)
1287 {
1288 	struct mana_config_vport_resp resp = {};
1289 	struct mana_config_vport_req req = {};
1290 	int err;
1291 
1292 	/* This function is used to program the Ethernet port in the hardware
1293 	 * table. It can be called from the Ethernet driver or the RDMA driver.
1294 	 *
1295 	 * For Ethernet usage, the hardware supports only one active user on a
1296 	 * physical port. The driver checks on the port usage before programming
1297 	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1298 	 * device to kernel NET layer (Ethernet driver).
1299 	 *
1300 	 * Because the RDMA driver doesn't know in advance which QP type the
1301 	 * user will create, it exposes the device with all its ports. The user
1302 	 * may not be able to create RAW QP on a port if this port is already
1303 	 * in use by the Ethernet driver in the kernel.
1304 	 *
1305 	 * This physical port limitation only applies to the RAW QP. For RC QP,
1306 	 * the hardware doesn't have this limitation. The user can create RC
1307 	 * QPs on a physical port up to the hardware limits independent of the
1308 	 * Ethernet usage on the same port.
1309 	 */
1310 	mutex_lock(&apc->vport_mutex);
1311 	if (apc->vport_use_count > 0) {
1312 		mutex_unlock(&apc->vport_mutex);
1313 		return -EBUSY;
1314 	}
1315 	apc->vport_use_count++;
1316 	mutex_unlock(&apc->vport_mutex);
1317 
1318 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1319 			     sizeof(req), sizeof(resp));
1320 	req.vport = apc->port_handle;
1321 	req.pdid = protection_dom_id;
1322 	req.doorbell_pageid = doorbell_pg_id;
1323 
1324 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1325 				sizeof(resp));
1326 	if (err) {
1327 		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1328 		goto out;
1329 	}
1330 
1331 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1332 				   sizeof(resp));
1333 	if (err || resp.hdr.status) {
1334 		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1335 			   err, resp.hdr.status);
1336 		if (!err)
1337 			err = -EPROTO;
1338 
1339 		goto out;
1340 	}
1341 
1342 	apc->tx_shortform_allowed = resp.short_form_allowed;
1343 	apc->tx_vp_offset = resp.tx_vport_offset;
1344 
1345 	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
1346 		    apc->port_handle, protection_dom_id, doorbell_pg_id);
1347 out:
1348 	if (err)
1349 		mana_uncfg_vport(apc);
1350 
1351 	return err;
1352 }
1353 EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");
1354 
1355 static int mana_cfg_vport_steering(struct mana_port_context *apc,
1356 				   enum TRI_STATE rx,
1357 				   bool update_default_rxobj, bool update_key,
1358 				   bool update_tab)
1359 {
1360 	struct mana_cfg_rx_steer_req_v2 *req;
1361 	struct mana_cfg_rx_steer_resp resp = {};
1362 	struct net_device *ndev = apc->ndev;
1363 	u32 req_buf_size;
1364 	int err;
1365 
1366 	req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
1367 	req = kzalloc(req_buf_size, GFP_KERNEL);
1368 	if (!req)
1369 		return -ENOMEM;
1370 
1371 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1372 			     sizeof(resp));
1373 
1374 	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
1375 
1376 	req->vport = apc->port_handle;
1377 	req->num_indir_entries = apc->indir_table_sz;
1378 	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
1379 					 indir_tab);
1380 	req->rx_enable = rx;
1381 	req->rss_enable = apc->rss_state;
1382 	req->update_default_rxobj = update_default_rxobj;
1383 	req->update_hashkey = update_key;
1384 	req->update_indir_tab = update_tab;
1385 	req->default_rxobj = apc->default_rxobj;
1386 	req->cqe_coalescing_enable = 0;
1387 
1388 	if (update_key)
1389 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1390 
1391 	if (update_tab)
1392 		memcpy(req->indir_tab, apc->rxobj_table,
1393 		       flex_array_size(req, indir_tab, req->num_indir_entries));
1394 
1395 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1396 				sizeof(resp));
1397 	if (err) {
1398 		if (mana_en_need_log(apc, err))
1399 			netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1400 
1401 		goto out;
1402 	}
1403 
1404 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1405 				   sizeof(resp));
1406 	if (err) {
1407 		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1408 		goto out;
1409 	}
1410 
1411 	if (resp.hdr.status) {
1412 		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1413 			   resp.hdr.status);
1414 		err = -EPROTO;
		goto out;
1415 	}
1416 
1417 	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1418 		    apc->port_handle, apc->indir_table_sz);
1419 out:
1420 	kfree(req);
1421 	return err;
1422 }
1423 
1424 int mana_query_link_cfg(struct mana_port_context *apc)
1425 {
1426 	struct net_device *ndev = apc->ndev;
1427 	struct mana_query_link_config_resp resp = {};
1428 	struct mana_query_link_config_req req = {};
1429 	int err;
1430 
1431 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
1432 			     sizeof(req), sizeof(resp));
1433 
1434 	req.vport = apc->port_handle;
1435 	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
1436 
1437 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1438 				sizeof(resp));
1439 
1440 	if (err) {
1441 		if (err == -EOPNOTSUPP) {
1442 			netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
1443 			return err;
1444 		}
1445 		netdev_err(ndev, "Failed to query link config: %d\n", err);
1446 		return err;
1447 	}
1448 
1449 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
1450 				   sizeof(resp));
1451 
1452 	if (err || resp.hdr.status) {
1453 		netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
1454 			   resp.hdr.status);
1455 		if (!err)
1456 			err = -EOPNOTSUPP;
1457 		return err;
1458 	}
1459 
1460 	if (resp.qos_unconfigured) {
1461 		err = -EINVAL;
1462 		return err;
1463 	}
1464 	apc->speed = resp.link_speed_mbps;
1465 	apc->max_speed = resp.qos_speed_mbps;
1466 	return 0;
1467 }
1468 
1469 int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
1470 		      int enable_clamping)
1471 {
1472 	struct mana_set_bw_clamp_resp resp = {};
1473 	struct mana_set_bw_clamp_req req = {};
1474 	struct net_device *ndev = apc->ndev;
1475 	int err;
1476 
1477 	mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
1478 			     sizeof(req), sizeof(resp));
1479 	req.vport = apc->port_handle;
1480 	req.link_speed_mbps = speed;
1481 	req.enable_clamping = enable_clamping;
1482 
1483 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1484 				sizeof(resp));
1485 
1486 	if (err) {
1487 		if (err == -EOPNOTSUPP) {
1488 			netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
1489 			return err;
1490 		}
1491 		netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d\n",
1492 			   speed, err);
1493 		return err;
1494 	}
1495 
1496 	err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
1497 				   sizeof(resp));
1498 
1499 	if (err || resp.hdr.status) {
1500 		netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
1501 			   resp.hdr.status);
1502 		if (!err)
1503 			err = -EOPNOTSUPP;
1504 		return err;
1505 	}
1506 
1507 	if (resp.qos_unconfigured)
1508 		netdev_info(ndev, "QoS is unconfigured\n");
1509 
1510 	return 0;
1511 }
1512 
1513 int mana_create_wq_obj(struct mana_port_context *apc,
1514 		       mana_handle_t vport,
1515 		       u32 wq_type, struct mana_obj_spec *wq_spec,
1516 		       struct mana_obj_spec *cq_spec,
1517 		       mana_handle_t *wq_obj)
1518 {
1519 	struct mana_create_wqobj_resp resp = {};
1520 	struct mana_create_wqobj_req req = {};
1521 	struct net_device *ndev = apc->ndev;
1522 	int err;
1523 
1524 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1525 			     sizeof(req), sizeof(resp));
1526 	req.vport = vport;
1527 	req.wq_type = wq_type;
1528 	req.wq_gdma_region = wq_spec->gdma_region;
1529 	req.cq_gdma_region = cq_spec->gdma_region;
1530 	req.wq_size = wq_spec->queue_size;
1531 	req.cq_size = cq_spec->queue_size;
1532 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1533 	req.cq_parent_qid = cq_spec->attached_eq;
1534 
1535 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1536 				sizeof(resp));
1537 	if (err) {
1538 		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
1539 		goto out;
1540 	}
1541 
1542 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1543 				   sizeof(resp));
1544 	if (err || resp.hdr.status) {
1545 		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1546 			   resp.hdr.status);
1547 		if (!err)
1548 			err = -EPROTO;
1549 		goto out;
1550 	}
1551 
1552 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1553 		netdev_err(ndev, "Got an invalid WQ object handle\n");
1554 		err = -EPROTO;
1555 		goto out;
1556 	}
1557 
1558 	*wq_obj = resp.wq_obj;
1559 	wq_spec->queue_index = resp.wq_id;
1560 	cq_spec->queue_index = resp.cq_id;
1561 
1562 	return 0;
1563 out:
1564 	return err;
1565 }
1566 EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");
1567 
1568 void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1569 			 mana_handle_t wq_obj)
1570 {
1571 	struct mana_destroy_wqobj_resp resp = {};
1572 	struct mana_destroy_wqobj_req req = {};
1573 	struct net_device *ndev = apc->ndev;
1574 	int err;
1575 
1576 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1577 			     sizeof(req), sizeof(resp));
1578 	req.wq_type = wq_type;
1579 	req.wq_obj_handle = wq_obj;
1580 
1581 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1582 				sizeof(resp));
1583 	if (err) {
1584 		if (mana_en_need_log(apc, err))
1585 			netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
1586 
1587 		return;
1588 	}
1589 
1590 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1591 				   sizeof(resp));
1592 	if (err || resp.hdr.status)
1593 		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
1594 			   resp.hdr.status);
1595 }
1596 EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");
1597 
1598 static void mana_destroy_eq(struct mana_context *ac)
1599 {
1600 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1601 	struct gdma_queue *eq;
1602 	int i;
1603 
1604 	if (!ac->eqs)
1605 		return;
1606 
1607 	debugfs_remove_recursive(ac->mana_eqs_debugfs);
1608 	ac->mana_eqs_debugfs = NULL;
1609 
1610 	for (i = 0; i < gc->max_num_queues; i++) {
1611 		eq = ac->eqs[i].eq;
1612 		if (!eq)
1613 			continue;
1614 
1615 		mana_gd_destroy_queue(gc, eq);
1616 	}
1617 
1618 	kfree(ac->eqs);
1619 	ac->eqs = NULL;
1620 }
1621 
1622 static void mana_create_eq_debugfs(struct mana_context *ac, int i)
1623 {
1624 	struct mana_eq eq = ac->eqs[i];
1625 	char eqnum[32];
1626 
1627 	sprintf(eqnum, "eq%d", i);
1628 	eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
1629 	debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head);
1630 	debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail);
1631 	debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops);
1632 }
1633 
1634 static int mana_create_eq(struct mana_context *ac)
1635 {
1636 	struct gdma_dev *gd = ac->gdma_dev;
1637 	struct gdma_context *gc = gd->gdma_context;
1638 	struct gdma_queue_spec spec = {};
1639 	int err;
1640 	int i;
1641 
1642 	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), GFP_KERNEL);
1643 	if (!ac->eqs)
1644 		return -ENOMEM;
1645 
1646 	spec.type = GDMA_EQ;
1647 	spec.monitor_avl_buf = false;
1648 	spec.queue_size = EQ_SIZE;
1649 	spec.eq.callback = NULL;
1650 	spec.eq.context = ac->eqs;
1651 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1652 
1653 	ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);
1654 
1655 	for (i = 0; i < gc->max_num_queues; i++) {
1656 		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
1657 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1658 		if (err) {
1659 			dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
1660 			goto out;
1661 		}
1662 		mana_create_eq_debugfs(ac, i);
1663 	}
1664 
1665 	return 0;
1666 out:
1667 	mana_destroy_eq(ac);
1668 	return err;
1669 }
1670 
1671 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1672 {
1673 	struct mana_fence_rq_resp resp = {};
1674 	struct mana_fence_rq_req req = {};
1675 	int err;
1676 
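	/* Ask the HW to fence the RQ, then wait for the fence completion to
	 * be signaled on the completion path.
	 */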
1677 	init_completion(&rxq->fence_event);
1678 
1679 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1680 			     sizeof(req), sizeof(resp));
1681 	req.wq_obj_handle =  rxq->rxobj;
1682 
1683 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1684 				sizeof(resp));
1685 	if (err) {
1686 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1687 			   rxq->rxq_idx, err);
1688 		return err;
1689 	}
1690 
1691 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1692 	if (err || resp.hdr.status) {
1693 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1694 			   rxq->rxq_idx, err, resp.hdr.status);
1695 		if (!err)
1696 			err = -EPROTO;
1697 
1698 		return err;
1699 	}
1700 
1701 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1702 		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1703 			   rxq->rxq_idx);
1704 		return -ETIMEDOUT;
1705 	}
1706 
1707 	return 0;
1708 }
1709 
1710 static void mana_fence_rqs(struct mana_port_context *apc)
1711 {
1712 	unsigned int rxq_idx;
1713 	struct mana_rxq *rxq;
1714 	int err;
1715 
1716 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1717 		rxq = apc->rxqs[rxq_idx];
1718 		err = mana_fence_rq(apc, rxq);
1719 
1720 		/* In case of any error, use sleep instead. */
1721 		if (err)
1722 			msleep(100);
1723 	}
1724 }
1725 
1726 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1727 {
1728 	u32 used_space_old;
1729 	u32 used_space_new;
1730 
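	/* The tail can only advance toward the head; moving past it would
	 * corrupt the used-space accounting.
	 */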
1731 	used_space_old = wq->head - wq->tail;
1732 	used_space_new = wq->head - (wq->tail + num_units);
1733 
1734 	if (WARN_ON_ONCE(used_space_new > used_space_old))
1735 		return -ERANGE;
1736 
1737 	wq->tail += num_units;
1738 	return 0;
1739 }
1740 
1741 void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1742 {
1743 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1744 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1745 	struct device *dev = gc->dev;
1746 	int hsg, i;
1747 
1748 	/* Number of SGEs of linear part */
1749 	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1750 
1751 	for (i = 0; i < hsg; i++)
1752 		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1753 				 DMA_TO_DEVICE);
1754 
1755 	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1756 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1757 			       DMA_TO_DEVICE);
1758 }
1759 
1760 static void mana_poll_tx_cq(struct mana_cq *cq)
1761 {
1762 	struct gdma_comp *completions = cq->gdma_comp_buf;
1763 	struct gdma_posted_wqe_info *wqe_info;
1764 	unsigned int pkt_transmitted = 0;
1765 	unsigned int wqe_unit_cnt = 0;
1766 	struct mana_txq *txq = cq->txq;
1767 	struct mana_port_context *apc;
1768 	struct netdev_queue *net_txq;
1769 	struct gdma_queue *gdma_wq;
1770 	unsigned int avail_space;
1771 	struct net_device *ndev;
1772 	struct sk_buff *skb;
1773 	bool txq_stopped;
1774 	int comp_read;
1775 	int i;
1776 
1777 	ndev = txq->ndev;
1778 	apc = netdev_priv(ndev);
1779 
1780 	/* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
1781 	 * doorbell can be rung in time for the hardware's requirement
1782 	 * of at least one doorbell ring every 8 wraparounds.
1783 	 */
1784 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1785 				    min((cq->gdma_cq->queue_size /
1786 					  COMP_ENTRY_SIZE) * 4,
1787 					 CQE_POLLING_BUFFER));
1788 
1789 	if (comp_read < 1)
1790 		return;
1791 
1792 	for (i = 0; i < comp_read; i++) {
1793 		struct mana_tx_comp_oob *cqe_oob;
1794 
1795 		if (WARN_ON_ONCE(!completions[i].is_sq))
1796 			return;
1797 
1798 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1799 		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1800 				 MANA_CQE_COMPLETION))
1801 			return;
1802 
1803 		switch (cqe_oob->cqe_hdr.cqe_type) {
1804 		case CQE_TX_OKAY:
1805 			break;
1806 
1807 		case CQE_TX_SA_DROP:
1808 		case CQE_TX_MTU_DROP:
1809 		case CQE_TX_INVALID_OOB:
1810 		case CQE_TX_INVALID_ETH_TYPE:
1811 		case CQE_TX_HDR_PROCESSING_ERROR:
1812 		case CQE_TX_VF_DISABLED:
1813 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1814 		case CQE_TX_VPORT_DISABLED:
1815 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1816 			if (net_ratelimit())
1817 				netdev_err(ndev, "TX: CQE error %d\n",
1818 					   cqe_oob->cqe_hdr.cqe_type);
1819 
1820 			apc->eth_stats.tx_cqe_err++;
1821 			break;
1822 
1823 		default:
1824 			/* If the CQE type is unknown, log an error,
1825 			 * and still free the SKB, update tail, etc.
1826 			 */
1827 			if (net_ratelimit())
1828 				netdev_err(ndev, "TX: unknown CQE type %d\n",
1829 					   cqe_oob->cqe_hdr.cqe_type);
1830 
1831 			apc->eth_stats.tx_cqe_unknown_type++;
1832 			break;
1833 		}
1834 
1835 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1836 			return;
1837 
1838 		skb = skb_dequeue(&txq->pending_skbs);
1839 		if (WARN_ON_ONCE(!skb))
1840 			return;
1841 
1842 		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1843 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1844 
1845 		mana_unmap_skb(skb, apc);
1846 
1847 		napi_consume_skb(skb, cq->budget);
1848 
1849 		pkt_transmitted++;
1850 	}
1851 
1852 	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1853 		return;
1854 
1855 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1856 
1857 	gdma_wq = txq->gdma_sq;
1858 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1859 
1860 	/* Ensure tail updated before checking q stop */
1861 	smp_mb();
1862 
1863 	net_txq = txq->net_txq;
1864 	txq_stopped = netif_tx_queue_stopped(net_txq);
1865 
1866 	/* Ensure checking txq_stopped before apc->port_is_up. */
1867 	smp_rmb();
1868 
1869 	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1870 		netif_tx_wake_queue(net_txq);
1871 		apc->eth_stats.wake_queue++;
1872 	}
1873 
1874 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1875 		WARN_ON_ONCE(1);
1876 
1877 	cq->work_done = pkt_transmitted;
1878 }
1879 
1880 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1881 {
1882 	struct mana_recv_buf_oob *recv_buf_oob;
1883 	u32 curr_index;
1884 	int err;
1885 
1886 	curr_index = rxq->buf_index++;
1887 	if (rxq->buf_index == rxq->num_rx_buf)
1888 		rxq->buf_index = 0;
1889 
1890 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1891 
1892 	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1893 					&recv_buf_oob->wqe_inf);
1894 	if (WARN_ON_ONCE(err))
1895 		return;
1896 
1897 	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1898 }
1899 
1900 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1901 				      uint pkt_len, struct xdp_buff *xdp)
1902 {
1903 	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1904 
1905 	if (!skb)
1906 		return NULL;
1907 
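	/* If an XDP program ran on this buffer, honor the (possibly adjusted)
	 * packet boundaries and metadata recorded in the xdp_buff.
	 */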
1908 	if (xdp->data_hard_start) {
1909 		u32 metasize = xdp->data - xdp->data_meta;
1910 
1911 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1912 		skb_put(skb, xdp->data_end - xdp->data);
1913 		if (metasize)
1914 			skb_metadata_set(skb, metasize);
1915 		return skb;
1916 	}
1917 
1918 	skb_reserve(skb, rxq->headroom);
1919 	skb_put(skb, pkt_len);
1920 
1921 	return skb;
1922 }
1923 
1924 static void mana_rx_skb(void *buf_va, bool from_pool,
1925 			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1926 {
1927 	struct mana_stats_rx *rx_stats = &rxq->stats;
1928 	struct net_device *ndev = rxq->ndev;
1929 	uint pkt_len = cqe->ppi[0].pkt_len;
1930 	u16 rxq_idx = rxq->rxq_idx;
1931 	struct napi_struct *napi;
1932 	struct xdp_buff xdp = {};
1933 	struct sk_buff *skb;
1934 	u32 hash_value;
1935 	u32 act;
1936 
1937 	rxq->rx_cq.work_done++;
1938 	napi = &rxq->rx_cq.napi;
1939 
1940 	if (!buf_va) {
1941 		++ndev->stats.rx_dropped;
1942 		return;
1943 	}
1944 
1945 	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1946 
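	/* If the XDP program redirected the frame and the redirect succeeded
	 * (xdp_rc == 0), the buffer now belongs to the XDP stack; a failed
	 * redirect falls through to the drop path below.
	 */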
1947 	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1948 		return;
1949 
1950 	if (act != XDP_PASS && act != XDP_TX)
1951 		goto drop_xdp;
1952 
1953 	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1954 
1955 	if (!skb)
1956 		goto drop;
1957 
1958 	if (from_pool)
1959 		skb_mark_for_recycle(skb);
1960 
1961 	skb->dev = napi->dev;
1962 
1963 	skb->protocol = eth_type_trans(skb, ndev);
1964 	skb_checksum_none_assert(skb);
1965 	skb_record_rx_queue(skb, rxq_idx);
1966 
1967 	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1968 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1969 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1970 	}
1971 
1972 	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1973 		hash_value = cqe->ppi[0].pkt_hash;
1974 
1975 		if (cqe->rx_hashtype & MANA_HASH_L4)
1976 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1977 		else
1978 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1979 	}
1980 
1981 	if (cqe->rx_vlantag_present) {
1982 		u16 vlan_tci = cqe->rx_vlan_id;
1983 
1984 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1985 	}
1986 
1987 	u64_stats_update_begin(&rx_stats->syncp);
1988 	rx_stats->packets++;
1989 	rx_stats->bytes += pkt_len;
1990 
1991 	if (act == XDP_TX)
1992 		rx_stats->xdp_tx++;
1993 	u64_stats_update_end(&rx_stats->syncp);
1994 
1995 	if (act == XDP_TX) {
1996 		skb_set_queue_mapping(skb, rxq_idx);
1997 		mana_xdp_tx(skb, ndev);
1998 		return;
1999 	}
2000 
2001 	napi_gro_receive(napi, skb);
2002 
2003 	return;
2004 
2005 drop_xdp:
2006 	u64_stats_update_begin(&rx_stats->syncp);
2007 	rx_stats->xdp_drop++;
2008 	u64_stats_update_end(&rx_stats->syncp);
2009 
2010 drop:
2011 	if (from_pool) {
2012 		if (rxq->frag_count == 1)
2013 			page_pool_recycle_direct(rxq->page_pool,
2014 						 virt_to_head_page(buf_va));
2015 		else
2016 			page_pool_free_va(rxq->page_pool, buf_va, true);
2017 	} else {
2018 		WARN_ON_ONCE(rxq->xdp_save_va);
2019 		/* Save for reuse */
2020 		rxq->xdp_save_va = buf_va;
2021 	}
2022 
2023 	++ndev->stats.rx_dropped;
2024 
2025 	return;
2026 }
2027 
2028 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
2029 			     dma_addr_t *da, bool *from_pool)
2030 {
2031 	struct page *page;
2032 	u32 offset;
2033 	void *va;

2034 	*from_pool = false;
2035 
2036 	/* Jumbo frames and XDP use a single buffer per page (frag_count == 1),
2037 	 * so don't allocate page fragments for them.
2038 	 */
2039 	if (rxq->frag_count == 1) {
2040 		/* Reuse XDP dropped page if available */
2041 		if (rxq->xdp_save_va) {
2042 			va = rxq->xdp_save_va;
2043 			page = virt_to_head_page(va);
2044 			rxq->xdp_save_va = NULL;
2045 		} else {
2046 			page = page_pool_dev_alloc_pages(rxq->page_pool);
2047 			if (!page)
2048 				return NULL;
2049 
2050 			*from_pool = true;
2051 			va = page_to_virt(page);
2052 		}
2053 
2054 		*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
2055 				     DMA_FROM_DEVICE);
2056 		if (dma_mapping_error(dev, *da)) {
2057 			mana_put_rx_page(rxq, page, *from_pool);
2058 			return NULL;
2059 		}
2060 
2061 		return va;
2062 	}
2063 
2064 	page =  page_pool_dev_alloc_frag(rxq->page_pool, &offset,
2065 					 rxq->alloc_size);
2066 	if (!page)
2067 		return NULL;
2068 
2069 	va  = page_to_virt(page) + offset;
2070 	*da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
2071 	*from_pool = true;
2072 
2073 	return va;
2074 }
2075 
2076 /* Allocate frag for rx buffer, and save the old buf */
2077 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
2078 			       struct mana_recv_buf_oob *rxoob, void **old_buf,
2079 			       bool *old_fp)
2080 {
2081 	bool from_pool;
2082 	dma_addr_t da;
2083 	void *va;
2084 
2085 	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
2086 	if (!va)
2087 		return;
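	/* Unmap only buffers the driver DMA-mapped itself; in frag mode
	 * (frag_count > 1) pages from the pool are DMA-mapped by the page pool.
	 */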
2088 	if (!rxoob->from_pool || rxq->frag_count == 1)
2089 		dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
2090 				 DMA_FROM_DEVICE);
2091 	*old_buf = rxoob->buf_va;
2092 	*old_fp = rxoob->from_pool;
2093 
2094 	rxoob->buf_va = va;
2095 	rxoob->sgl[0].address = da;
2096 	rxoob->from_pool = from_pool;
2097 }
2098 
2099 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
2100 				struct gdma_comp *cqe)
2101 {
2102 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
2103 	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
2104 	struct net_device *ndev = rxq->ndev;
2105 	struct mana_recv_buf_oob *rxbuf_oob;
2106 	struct mana_port_context *apc;
2107 	struct device *dev = gc->dev;
2108 	void *old_buf = NULL;
2109 	u32 curr, pktlen;
2110 	bool old_fp;
2111 
2112 	apc = netdev_priv(ndev);
2113 
2114 	switch (oob->cqe_hdr.cqe_type) {
2115 	case CQE_RX_OKAY:
2116 		break;
2117 
2118 	case CQE_RX_TRUNCATED:
2119 		++ndev->stats.rx_dropped;
2120 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
2121 		netdev_warn_once(ndev, "Dropped a truncated packet\n");
2122 		goto drop;
2123 
2124 	case CQE_RX_COALESCED_4:
2125 		netdev_err(ndev, "RX coalescing is unsupported\n");
2126 		apc->eth_stats.rx_coalesced_err++;
2127 		return;
2128 
2129 	case CQE_RX_OBJECT_FENCE:
2130 		complete(&rxq->fence_event);
2131 		return;
2132 
2133 	default:
2134 		netdev_err(ndev, "Unknown RX CQE type = %d\n",
2135 			   oob->cqe_hdr.cqe_type);
2136 		apc->eth_stats.rx_cqe_unknown_type++;
2137 		return;
2138 	}
2139 
2140 	pktlen = oob->ppi[0].pkt_len;
2141 
2142 	if (pktlen == 0) {
2143 		/* Data packets should never have a packet length of zero */
2144 		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
2145 			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
2146 		return;
2147 	}
2148 
2149 	curr = rxq->buf_index;
2150 	rxbuf_oob = &rxq->rx_oobs[curr];
2151 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
2152 
2153 	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
2154 
2155 	/* Unsuccessful refill will have old_buf == NULL.
2156 	 * In this case, mana_rx_skb() will drop the packet.
2157 	 */
2158 	mana_rx_skb(old_buf, old_fp, oob, rxq);
2159 
2160 drop:
2161 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
2162 
2163 	mana_post_pkt_rxq(rxq);
2164 }
2165 
2166 static void mana_poll_rx_cq(struct mana_cq *cq)
2167 {
2168 	struct gdma_comp *comp = cq->gdma_comp_buf;
2169 	struct mana_rxq *rxq = cq->rxq;
2170 	int comp_read, i;
2171 
2172 	/* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
2173 	 * doorbell can be rung in time for the hardware's requirement
2174 	 * of at least one doorbell ring every 8 wraparounds.
2175 	 */
2176 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp,
2177 				    min((cq->gdma_cq->queue_size /
2178 					  COMP_ENTRY_SIZE) * 4,
2179 					 CQE_POLLING_BUFFER));
2180 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
2181 
2182 	rxq->xdp_flush = false;
2183 
2184 	for (i = 0; i < comp_read; i++) {
2185 		if (WARN_ON_ONCE(comp[i].is_sq))
2186 			return;
2187 
2188 		/* verify recv cqe references the right rxq */
2189 		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
2190 			return;
2191 
2192 		mana_process_rx_cqe(rxq, cq, &comp[i]);
2193 	}
2194 
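	/* Ring the RQ doorbell once for all the RX buffers reposted above */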
2195 	if (comp_read > 0) {
2196 		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
2197 
2198 		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
2199 	}
2200 
2201 	if (rxq->xdp_flush)
2202 		xdp_do_flush();
2203 }
2204 
2205 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
2206 {
2207 	struct mana_cq *cq = context;
2208 	int w;
2209 
2210 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
2211 
2212 	if (cq->type == MANA_CQ_TYPE_RX)
2213 		mana_poll_rx_cq(cq);
2214 	else
2215 		mana_poll_tx_cq(cq);
2216 
2217 	w = cq->work_done;
2218 	cq->work_done_since_doorbell += w;
2219 
2220 	if (w < cq->budget) {
2221 		mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
2222 		cq->work_done_since_doorbell = 0;
2223 		napi_complete_done(&cq->napi, w);
2224 	} else if (cq->work_done_since_doorbell >=
2225 		   (cq->gdma_cq->queue_size / COMP_ENTRY_SIZE) * 4) {
2226 		/* MANA hardware requires at least one doorbell ring every 8
2227 		 * wraparounds of CQ even if there is no need to arm the CQ.
2228 		 * This driver rings the doorbell as soon as it has processed
2229 		 * 4 wraparounds.
2230 		 */
2231 		mana_gd_ring_cq(gdma_queue, 0);
2232 		cq->work_done_since_doorbell = 0;
2233 	}
2234 
2235 	return w;
2236 }
2237 
2238 static int mana_poll(struct napi_struct *napi, int budget)
2239 {
2240 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
2241 	int w;
2242 
2243 	cq->work_done = 0;
2244 	cq->budget = budget;
2245 
2246 	w = mana_cq_handler(cq, cq->gdma_cq);
2247 
2248 	return min(w, budget);
2249 }
2250 
2251 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
2252 {
2253 	struct mana_cq *cq = context;
2254 
2255 	napi_schedule_irqoff(&cq->napi);
2256 }
2257 
2258 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
2259 {
2260 	struct gdma_dev *gd = apc->ac->gdma_dev;
2261 
2262 	if (!cq->gdma_cq)
2263 		return;
2264 
2265 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
2266 }
2267 
2268 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
2269 {
2270 	struct gdma_dev *gd = apc->ac->gdma_dev;
2271 
2272 	if (!txq->gdma_sq)
2273 		return;
2274 
2275 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
2276 }
2277 
2278 static void mana_destroy_txq(struct mana_port_context *apc)
2279 {
2280 	struct napi_struct *napi;
2281 	int i;
2282 
2283 	if (!apc->tx_qp)
2284 		return;
2285 
2286 	for (i = 0; i < apc->num_queues; i++) {
2287 		debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
2288 		apc->tx_qp[i].mana_tx_debugfs = NULL;
2289 
2290 		napi = &apc->tx_qp[i].tx_cq.napi;
2291 		if (apc->tx_qp[i].txq.napi_initialized) {
2292 			napi_synchronize(napi);
2293 			napi_disable_locked(napi);
2294 			netif_napi_del_locked(napi);
2295 			apc->tx_qp[i].txq.napi_initialized = false;
2296 		}
2297 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2298 
2299 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2300 
2301 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2302 	}
2303 
2304 	kfree(apc->tx_qp);
2305 	apc->tx_qp = NULL;
2306 }
2307 
2308 static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
2309 {
2310 	struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
2311 	char qnum[32];
2312 
2313 	sprintf(qnum, "TX-%d", idx);
2314 	tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2315 	debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
2316 			   &tx_qp->txq.gdma_sq->head);
2317 	debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
2318 			   &tx_qp->txq.gdma_sq->tail);
2319 	debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
2320 			   &tx_qp->txq.pending_skbs.qlen);
2321 	debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
2322 			   &tx_qp->tx_cq.gdma_cq->head);
2323 	debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
2324 			   &tx_qp->tx_cq.gdma_cq->tail);
2325 	debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
2326 			   &tx_qp->tx_cq.budget);
2327 	debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
2328 			    tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
2329 	debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
2330 			    tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
2331 }
2332 
2333 static int mana_create_txq(struct mana_port_context *apc,
2334 			   struct net_device *net)
2335 {
2336 	struct mana_context *ac = apc->ac;
2337 	struct gdma_dev *gd = ac->gdma_dev;
2338 	struct mana_obj_spec wq_spec;
2339 	struct mana_obj_spec cq_spec;
2340 	struct gdma_queue_spec spec;
2341 	struct gdma_context *gc;
2342 	struct mana_txq *txq;
2343 	struct mana_cq *cq;
2344 	u32 txq_size;
2345 	u32 cq_size;
2346 	int err;
2347 	int i;
2348 
2349 	apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues);
2350 	if (!apc->tx_qp)
2351 		return -ENOMEM;
2352 
2353 	/*  The minimum size of a WQE is 32 bytes, hence
2354 	 *  apc->tx_queue_size represents the maximum number of WQEs
2355 	 *  the SQ can store. This value is then used to size other queues
2356 	 *  to prevent overflow.
2357 	 *  Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
2358 	 *  value of apc->tx_queue_size is 128, which makes txq_size
2359 	 *  128 * 32 = 4096, and all higher values of apc->tx_queue_size
2360 	 *  are powers of two.
2361 	 */
2362 	txq_size = apc->tx_queue_size * 32;
2363 
2364 	cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
2365 
2366 	gc = gd->gdma_context;
2367 
2368 	for (i = 0; i < apc->num_queues; i++) {
2369 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2370 
2371 		/* Create SQ */
2372 		txq = &apc->tx_qp[i].txq;
2373 
2374 		u64_stats_init(&txq->stats.syncp);
2375 		txq->ndev = net;
2376 		txq->net_txq = netdev_get_tx_queue(net, i);
2377 		txq->vp_offset = apc->tx_vp_offset;
2378 		txq->napi_initialized = false;
2379 		skb_queue_head_init(&txq->pending_skbs);
2380 
2381 		memset(&spec, 0, sizeof(spec));
2382 		spec.type = GDMA_SQ;
2383 		spec.monitor_avl_buf = true;
2384 		spec.queue_size = txq_size;
2385 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2386 		if (err)
2387 			goto out;
2388 
2389 		/* Create SQ's CQ */
2390 		cq = &apc->tx_qp[i].tx_cq;
2391 		cq->type = MANA_CQ_TYPE_TX;
2392 
2393 		cq->txq = txq;
2394 
2395 		memset(&spec, 0, sizeof(spec));
2396 		spec.type = GDMA_CQ;
2397 		spec.monitor_avl_buf = false;
2398 		spec.queue_size = cq_size;
2399 		spec.cq.callback = mana_schedule_napi;
2400 		spec.cq.parent_eq = ac->eqs[i].eq;
2401 		spec.cq.context = cq;
2402 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2403 		if (err)
2404 			goto out;
2405 
2406 		memset(&wq_spec, 0, sizeof(wq_spec));
2407 		memset(&cq_spec, 0, sizeof(cq_spec));
2408 
2409 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2410 		wq_spec.queue_size = txq->gdma_sq->queue_size;
2411 
2412 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2413 		cq_spec.queue_size = cq->gdma_cq->queue_size;
2414 		cq_spec.modr_ctx_id = 0;
2415 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2416 
2417 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2418 					 &wq_spec, &cq_spec,
2419 					 &apc->tx_qp[i].tx_object);
2420 
2421 		if (err)
2422 			goto out;
2423 
2424 		txq->gdma_sq->id = wq_spec.queue_index;
2425 		cq->gdma_cq->id = cq_spec.queue_index;
2426 
2427 		txq->gdma_sq->mem_info.dma_region_handle =
2428 			GDMA_INVALID_DMA_REGION;
2429 		cq->gdma_cq->mem_info.dma_region_handle =
2430 			GDMA_INVALID_DMA_REGION;
2431 
2432 		txq->gdma_txq_id = txq->gdma_sq->id;
2433 
2434 		cq->gdma_id = cq->gdma_cq->id;
2435 
2436 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2437 			err = -EINVAL;
2438 			goto out;
2439 		}
2440 
2441 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2442 
2443 		mana_create_txq_debugfs(apc, i);
2444 
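		/* Exclude the TX completion NAPI from busy polling */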
2445 		set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
2446 		netif_napi_add_locked(net, &cq->napi, mana_poll);
2447 		napi_enable_locked(&cq->napi);
2448 		txq->napi_initialized = true;
2449 
2450 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2451 	}
2452 
2453 	return 0;
2454 out:
2455 	netdev_err(net, "Failed to create %d TX queues, %d\n",
2456 		   apc->num_queues, err);
2457 	mana_destroy_txq(apc);
2458 	return err;
2459 }
2460 
2461 static void mana_destroy_rxq(struct mana_port_context *apc,
2462 			     struct mana_rxq *rxq, bool napi_initialized)
2464 {
2465 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2466 	struct mana_recv_buf_oob *rx_oob;
2467 	struct device *dev = gc->dev;
2468 	struct napi_struct *napi;
2469 	struct page *page;
2470 	int i;
2471 
2472 	if (!rxq)
2473 		return;
2474 
2475 	debugfs_remove_recursive(rxq->mana_rx_debugfs);
2476 	rxq->mana_rx_debugfs = NULL;
2477 
2478 	napi = &rxq->rx_cq.napi;
2479 
2480 	if (napi_initialized) {
2481 		napi_synchronize(napi);
2482 
2483 		napi_disable_locked(napi);
2484 		netif_napi_del_locked(napi);
2485 	}
2486 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
2487 
2488 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2489 
2490 	mana_deinit_cq(apc, &rxq->rx_cq);
2491 
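	/* Release the page saved for reuse from an earlier XDP drop, if any */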
2492 	if (rxq->xdp_save_va)
2493 		put_page(virt_to_head_page(rxq->xdp_save_va));
2494 
2495 	for (i = 0; i < rxq->num_rx_buf; i++) {
2496 		rx_oob = &rxq->rx_oobs[i];
2497 
2498 		if (!rx_oob->buf_va)
2499 			continue;
2500 
2501 		page = virt_to_head_page(rx_oob->buf_va);
2502 
2503 		if (rxq->frag_count == 1 || !rx_oob->from_pool) {
2504 			dma_unmap_single(dev, rx_oob->sgl[0].address,
2505 					 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2506 			mana_put_rx_page(rxq, page, rx_oob->from_pool);
2507 		} else {
2508 			page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
2509 		}
2510 
2511 		rx_oob->buf_va = NULL;
2512 	}
2513 
2514 	page_pool_destroy(rxq->page_pool);
2515 
2516 	if (rxq->gdma_rq)
2517 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2518 
2519 	kfree(rxq);
2520 }
2521 
2522 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2523 			    struct mana_rxq *rxq, struct device *dev)
2524 {
2525 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2526 	bool from_pool = false;
2527 	dma_addr_t da;
2528 	void *va;
2529 
2530 	if (mpc->rxbufs_pre)
2531 		va = mana_get_rxbuf_pre(rxq, &da);
2532 	else
2533 		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
2534 
2535 	if (!va)
2536 		return -ENOMEM;
2537 
2538 	rx_oob->buf_va = va;
2539 	rx_oob->from_pool = from_pool;
2540 
2541 	rx_oob->sgl[0].address = da;
2542 	rx_oob->sgl[0].size = rxq->datasize;
2543 	rx_oob->sgl[0].mem_key = mem_key;
2544 
2545 	return 0;
2546 }
2547 
2548 #define MANA_WQE_HEADER_SIZE 16
2549 #define MANA_WQE_SGE_SIZE 16
2550 
2551 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2552 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2553 {
2554 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2555 	struct mana_recv_buf_oob *rx_oob;
2556 	struct device *dev = gc->dev;
2557 	u32 buf_idx;
2558 	int ret;
2559 
2560 	WARN_ON(rxq->datasize == 0);
2561 
2562 	*rxq_size = 0;
2563 	*cq_size = 0;
2564 
2565 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2566 		rx_oob = &rxq->rx_oobs[buf_idx];
2567 		memset(rx_oob, 0, sizeof(*rx_oob));
2568 
2569 		rx_oob->num_sge = 1;
2570 
2571 		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2572 				       dev);
2573 		if (ret)
2574 			return ret;
2575 
2576 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2577 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2578 		rx_oob->wqe_req.inline_oob_size = 0;
2579 		rx_oob->wqe_req.inline_oob_data = NULL;
2580 		rx_oob->wqe_req.flags = 0;
2581 		rx_oob->wqe_req.client_data_unit = 0;
2582 
2583 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2584 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2585 		*cq_size += COMP_ENTRY_SIZE;
2586 	}
2587 
2588 	return 0;
2589 }
2590 
2591 static int mana_push_wqe(struct mana_rxq *rxq)
2592 {
2593 	struct mana_recv_buf_oob *rx_oob;
2594 	u32 buf_idx;
2595 	int err;
2596 
2597 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2598 		rx_oob = &rxq->rx_oobs[buf_idx];
2599 
2600 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2601 					    &rx_oob->wqe_inf);
2602 		if (err)
2603 			return -ENOSPC;
2604 	}
2605 
2606 	return 0;
2607 }
2608 
2609 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2610 {
2611 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2612 	struct page_pool_params pprm = {};
2613 	int ret;
2614 
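	/* Each page in the pool backs frag_count RX buffers; the extra page
	 * covers the rounding of the integer division.
	 */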
2615 	pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
2616 	pprm.nid = gc->numa_node;
2617 	pprm.napi = &rxq->rx_cq.napi;
2618 	pprm.netdev = rxq->ndev;
2619 	pprm.order = get_order(rxq->alloc_size);
2620 	pprm.queue_idx = rxq->rxq_idx;
2621 	pprm.dev = gc->dev;
2622 
2623 	/* Let the page pool do the DMA mapping when a page is shared as
2624 	 * multiple fragments among RX buffers.
2625 	 */
2626 	if (rxq->frag_count > 1) {
2627 		pprm.flags =  PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2628 		pprm.max_len = PAGE_SIZE;
2629 		pprm.dma_dir = DMA_FROM_DEVICE;
2630 	}
2631 
2632 	rxq->page_pool = page_pool_create(&pprm);
2633 
2634 	if (IS_ERR(rxq->page_pool)) {
2635 		ret = PTR_ERR(rxq->page_pool);
2636 		rxq->page_pool = NULL;
2637 		return ret;
2638 	}
2639 
2640 	return 0;
2641 }
2642 
2643 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2644 					u32 rxq_idx, struct mana_eq *eq,
2645 					struct net_device *ndev)
2646 {
2647 	struct gdma_dev *gd = apc->ac->gdma_dev;
2648 	struct mana_obj_spec wq_spec;
2649 	struct mana_obj_spec cq_spec;
2650 	struct gdma_queue_spec spec;
2651 	struct mana_cq *cq = NULL;
2652 	struct gdma_context *gc;
2653 	u32 cq_size, rq_size;
2654 	struct mana_rxq *rxq;
2655 	int err;
2656 
2657 	gc = gd->gdma_context;
2658 
2659 	rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size);
2660 	if (!rxq)
2661 		return NULL;
2662 
2663 	rxq->ndev = ndev;
2664 	rxq->num_rx_buf = apc->rx_queue_size;
2665 	rxq->rxq_idx = rxq_idx;
2666 	rxq->rxobj = INVALID_MANA_HANDLE;
2667 
2668 	mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2669 			   &rxq->headroom, &rxq->frag_count);
2670 	/* Create page pool for RX queue */
2671 	err = mana_create_page_pool(rxq, gc);
2672 	if (err) {
2673 		netdev_err(ndev, "Create page pool err:%d\n", err);
2674 		goto out;
2675 	}
2676 
2677 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2678 	if (err)
2679 		goto out;
2680 
2681 	rq_size = MANA_PAGE_ALIGN(rq_size);
2682 	cq_size = MANA_PAGE_ALIGN(cq_size);
2683 
2684 	/* Create RQ */
2685 	memset(&spec, 0, sizeof(spec));
2686 	spec.type = GDMA_RQ;
2687 	spec.monitor_avl_buf = true;
2688 	spec.queue_size = rq_size;
2689 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2690 	if (err)
2691 		goto out;
2692 
2693 	/* Create RQ's CQ */
2694 	cq = &rxq->rx_cq;
2695 	cq->type = MANA_CQ_TYPE_RX;
2696 	cq->rxq = rxq;
2697 
2698 	memset(&spec, 0, sizeof(spec));
2699 	spec.type = GDMA_CQ;
2700 	spec.monitor_avl_buf = false;
2701 	spec.queue_size = cq_size;
2702 	spec.cq.callback = mana_schedule_napi;
2703 	spec.cq.parent_eq = eq->eq;
2704 	spec.cq.context = cq;
2705 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2706 	if (err)
2707 		goto out;
2708 
2709 	memset(&wq_spec, 0, sizeof(wq_spec));
2710 	memset(&cq_spec, 0, sizeof(cq_spec));
2711 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2712 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2713 
2714 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2715 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2716 	cq_spec.modr_ctx_id = 0;
2717 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2718 
2719 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2720 				 &wq_spec, &cq_spec, &rxq->rxobj);
2721 	if (err)
2722 		goto out;
2723 
2724 	rxq->gdma_rq->id = wq_spec.queue_index;
2725 	cq->gdma_cq->id = cq_spec.queue_index;
2726 
2727 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2728 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2729 
2730 	rxq->gdma_id = rxq->gdma_rq->id;
2731 	cq->gdma_id = cq->gdma_cq->id;
2732 
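	/* Post the initial RX WQEs set up in mana_alloc_rx_wqe() */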
2733 	err = mana_push_wqe(rxq);
2734 	if (err)
2735 		goto out;
2736 
2737 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2738 		err = -EINVAL;
2739 		goto out;
2740 	}
2741 
2742 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2743 
2744 	netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
2745 
2746 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2747 				 cq->napi.napi_id));
2748 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2749 					   rxq->page_pool));
2750 
2751 	napi_enable_locked(&cq->napi);
2752 
2753 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2754 out:
2755 	if (!err)
2756 		return rxq;
2757 
2758 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2759 
2760 	mana_destroy_rxq(apc, rxq, false);
2761 
2762 	if (cq)
2763 		mana_deinit_cq(apc, cq);
2764 
2765 	return NULL;
2766 }
2767 
2768 static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
2769 {
2770 	struct mana_rxq *rxq;
2771 	char qnum[32];
2772 
2773 	rxq = apc->rxqs[idx];
2774 
2775 	sprintf(qnum, "RX-%d", idx);
2776 	rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2777 	debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
2778 	debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
2779 	debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
2780 	debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
2781 			   &rxq->rx_cq.gdma_cq->head);
2782 	debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
2783 			   &rxq->rx_cq.gdma_cq->tail);
2784 	debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
2785 	debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
2786 	debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
2787 			    &mana_dbg_q_fops);
2788 }
2789 
2790 static int mana_add_rx_queues(struct mana_port_context *apc,
2791 			      struct net_device *ndev)
2792 {
2793 	struct mana_context *ac = apc->ac;
2794 	struct mana_rxq *rxq;
2795 	int err = 0;
2796 	int i;
2797 
2798 	for (i = 0; i < apc->num_queues; i++) {
2799 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2800 		if (!rxq) {
2801 			err = -ENOMEM;
2802 			netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
2803 			goto out;
2804 		}
2805 
2806 		u64_stats_init(&rxq->stats.syncp);
2807 
2808 		apc->rxqs[i] = rxq;
2809 
2810 		mana_create_rxq_debugfs(apc, i);
2811 	}
2812 
2813 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2814 out:
2815 	return err;
2816 }
2817 
2818 static void mana_destroy_vport(struct mana_port_context *apc)
2819 {
2820 	struct gdma_dev *gd = apc->ac->gdma_dev;
2821 	struct mana_rxq *rxq;
2822 	u32 rxq_idx;
2823 
2824 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2825 		rxq = apc->rxqs[rxq_idx];
2826 		if (!rxq)
2827 			continue;
2828 
2829 		mana_destroy_rxq(apc, rxq, true);
2830 		apc->rxqs[rxq_idx] = NULL;
2831 	}
2832 
2833 	mana_destroy_txq(apc);
2834 	mana_uncfg_vport(apc);
2835 
2836 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
2837 		mana_pf_deregister_hw_vport(apc);
2838 }
2839 
2840 static int mana_create_vport(struct mana_port_context *apc,
2841 			     struct net_device *net)
2842 {
2843 	struct gdma_dev *gd = apc->ac->gdma_dev;
2844 	int err;
2845 
2846 	apc->default_rxobj = INVALID_MANA_HANDLE;
2847 
2848 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
2849 		err = mana_pf_register_hw_vport(apc);
2850 		if (err)
2851 			return err;
2852 	}
2853 
2854 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2855 	if (err)
2856 		return err;
2857 
2858 	return mana_create_txq(apc, net);
2859 }
2860 
2861 static int mana_rss_table_alloc(struct mana_port_context *apc)
2862 {
2863 	if (!apc->indir_table_sz) {
2864 		netdev_err(apc->ndev,
2865 			   "Indirection table size not set for vPort %d\n",
2866 			   apc->port_idx);
2867 		return -EINVAL;
2868 	}
2869 
2870 	apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
2871 	if (!apc->indir_table)
2872 		return -ENOMEM;
2873 
2874 	apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz);
2875 	if (!apc->rxobj_table) {
2876 		kfree(apc->indir_table);
2877 		return -ENOMEM;
2878 	}
2879 
2880 	return 0;
2881 }
2882 
2883 static void mana_rss_table_init(struct mana_port_context *apc)
2884 {
2885 	int i;
2886 
2887 	for (i = 0; i < apc->indir_table_sz; i++)
2888 		apc->indir_table[i] =
2889 			ethtool_rxfh_indir_default(i, apc->num_queues);
2890 }
2891 
2892 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2893 		    bool update_hash, bool update_tab)
2894 {
2895 	u32 queue_idx;
2896 	int err;
2897 	int i;
2898 
2899 	if (update_tab) {
2900 		for (i = 0; i < apc->indir_table_sz; i++) {
2901 			queue_idx = apc->indir_table[i];
2902 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2903 		}
2904 	}
2905 
2906 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2907 	if (err)
2908 		return err;
2909 
2910 	mana_fence_rqs(apc);
2911 
2912 	return 0;
2913 }
2914 
2915 int mana_query_gf_stats(struct mana_context *ac)
2916 {
2917 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2918 	struct mana_query_gf_stat_resp resp = {};
2919 	struct mana_query_gf_stat_req req = {};
2920 	struct device *dev = gc->dev;
2921 	int err;
2922 
2923 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2924 			     sizeof(req), sizeof(resp));
2925 	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
2926 	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
2927 			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
2928 			STATISTICS_FLAGS_HC_RX_BYTES |
2929 			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
2930 			STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
2931 			STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
2932 			STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
2933 			STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
2934 			STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
2935 			STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
2936 			STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
2937 			STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
2938 			STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
2939 			STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
2940 			STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
2941 			STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
2942 			STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
2943 			STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
2944 			STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
2945 			STATISTICS_FLAGS_HC_TX_BYTES |
2946 			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2947 			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2948 			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2949 			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2950 			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2951 			STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
2952 			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
2953 
2954 	err = mana_send_request(ac, &req, sizeof(req), &resp,
2955 				sizeof(resp));
2956 	if (err) {
2957 		dev_err(dev, "Failed to query GF stats: %d\n", err);
2958 		return err;
2959 	}
2960 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2961 				   sizeof(resp));
2962 	if (err || resp.hdr.status) {
2963 		dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
2964 			resp.hdr.status);
2965 		return err;
2966 	}
2967 
2968 	ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
2969 	ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
2970 	ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
2971 	ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
2972 	ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
2973 	ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
2974 	ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
2975 	ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
2976 	ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
2977 	ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
2978 	ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
2979 	ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
2980 					     resp.tx_err_inval_vport_offset_pkt;
2981 	ac->hc_stats.hc_tx_err_vlan_enforcement =
2982 					     resp.tx_err_vlan_enforcement;
2983 	ac->hc_stats.hc_tx_err_eth_type_enforcement =
2984 					     resp.tx_err_ethtype_enforcement;
2985 	ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
2986 	ac->hc_stats.hc_tx_err_sqpdid_enforcement =
2987 					     resp.tx_err_SQPDID_enforcement;
2988 	ac->hc_stats.hc_tx_err_cqpdid_enforcement =
2989 					     resp.tx_err_CQPDID_enforcement;
2990 	ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
2991 	ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
2992 	ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
2993 	ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2994 	ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2995 	ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2996 	ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2997 	ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2998 	ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2999 	ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
3000 
3001 	return 0;
3002 }
3003 
3004 void mana_query_phy_stats(struct mana_port_context *apc)
3005 {
3006 	struct mana_query_phy_stat_resp resp = {};
3007 	struct mana_query_phy_stat_req req = {};
3008 	struct net_device *ndev = apc->ndev;
3009 	int err;
3010 
3011 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
3012 			     sizeof(req), sizeof(resp));
3013 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
3014 				sizeof(resp));
3015 	if (err)
3016 		return;
3017 
3018 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
3019 				   sizeof(resp));
3020 	if (err || resp.hdr.status) {
3021 		netdev_err(ndev,
3022 			   "Failed to query PHY stats: %d, resp:0x%x\n",
3023 				err, resp.hdr.status);
3024 		return;
3025 	}
3026 
3027 	/* Aggregate drop counters */
3028 	apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
3029 	apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
3030 
3031 	/* Per TC traffic Counters */
3032 	apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
3033 	apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
3034 	apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
3035 	apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
3036 	apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
3037 	apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
3038 	apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
3039 	apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
3040 	apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
3041 	apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
3042 	apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
3043 	apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
3044 	apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
3045 	apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
3046 	apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
3047 	apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
3048 
3049 	/* Per TC byte Counters */
3050 	apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
3051 	apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
3052 	apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
3053 	apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
3054 	apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
3055 	apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
3056 	apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
3057 	apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
3058 	apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
3059 	apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
3060 	apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
3061 	apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
3062 	apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
3063 	apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
3064 	apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
3065 	apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
3066 
3067 	/* Per TC pause Counters */
3068 	apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
3069 	apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
3070 	apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
3071 	apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
3072 	apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
3073 	apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
3074 	apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
3075 	apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
3076 	apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
3077 	apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
3078 	apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
3079 	apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
3080 	apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
3081 	apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
3082 	apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
3083 	apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
3084 }
3085 
3086 static int mana_init_port(struct net_device *ndev)
3087 {
3088 	struct mana_port_context *apc = netdev_priv(ndev);
3089 	struct gdma_dev *gd = apc->ac->gdma_dev;
3090 	u32 max_txq, max_rxq, max_queues;
3091 	int port_idx = apc->port_idx;
3092 	struct gdma_context *gc;
3093 	char vport[32];
3094 	int err;
3095 
3096 	err = mana_init_port_context(apc);
3097 	if (err)
3098 		return err;
3099 
3100 	gc = gd->gdma_context;
3101 
3102 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
3103 				   &apc->indir_table_sz);
3104 	if (err) {
3105 		netdev_err(ndev, "Failed to query info for vPort %d\n",
3106 			   port_idx);
3107 		goto reset_apc;
3108 	}
3109 
3110 	max_queues = min_t(u32, max_txq, max_rxq);
3111 	if (apc->max_queues > max_queues)
3112 		apc->max_queues = max_queues;
3113 
3114 	if (apc->num_queues > apc->max_queues)
3115 		apc->num_queues = apc->max_queues;
3116 
3117 	eth_hw_addr_set(ndev, apc->mac_addr);
3118 	sprintf(vport, "vport%d", port_idx);
3119 	apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
3120 	return 0;
3121 
3122 reset_apc:
3123 	mana_cleanup_port_context(apc);
3124 	return err;
3125 }
3126 
3127 int mana_alloc_queues(struct net_device *ndev)
3128 {
3129 	struct mana_port_context *apc = netdev_priv(ndev);
3130 	struct gdma_dev *gd = apc->ac->gdma_dev;
3131 	int err;
3132 
3133 	err = mana_create_vport(apc, ndev);
3134 	if (err) {
3135 		netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
3136 		return err;
3137 	}
3138 
3139 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
3140 	if (err) {
3141 		netdev_err(ndev,
3142 			   "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
3143 			   apc->num_queues, err);
3144 		goto destroy_vport;
3145 	}
3146 
3147 	err = mana_add_rx_queues(apc, ndev);
3148 	if (err)
3149 		goto destroy_vport;
3150 
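	/* RSS is only meaningful with more than one RX queue */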
3151 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
3152 
3153 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
3154 	if (err) {
3155 		netdev_err(ndev,
3156 			   "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
3157 			   apc->num_queues, err);
3158 		goto destroy_vport;
3159 	}
3160 
3161 	mana_rss_table_init(apc);
3162 
3163 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
3164 	if (err) {
3165 		netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
3166 		goto destroy_vport;
3167 	}
3168 
3169 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
3170 		err = mana_pf_register_filter(apc);
3171 		if (err)
3172 			goto destroy_vport;
3173 	}
3174 
3175 	mana_chn_setxdp(apc, mana_xdp_get(apc));
3176 
3177 	return 0;
3178 
3179 destroy_vport:
3180 	mana_destroy_vport(apc);
3181 	return err;
3182 }
3183 
3184 int mana_attach(struct net_device *ndev)
3185 {
3186 	struct mana_port_context *apc = netdev_priv(ndev);
3187 	int err;
3188 
3189 	ASSERT_RTNL();
3190 
3191 	err = mana_init_port(ndev);
3192 	if (err)
3193 		return err;
3194 
3195 	if (apc->port_st_save) {
3196 		err = mana_alloc_queues(ndev);
3197 		if (err) {
3198 			mana_cleanup_port_context(apc);
3199 			return err;
3200 		}
3201 	}
3202 
3203 	apc->port_is_up = apc->port_st_save;
3204 
3205 	/* Ensure port state updated before txq state */
3206 	smp_wmb();
3207 
3208 	netif_device_attach(ndev);
3209 
3210 	return 0;
3211 }
3212 
3213 static int mana_dealloc_queues(struct net_device *ndev)
3214 {
3215 	struct mana_port_context *apc = netdev_priv(ndev);
3216 	unsigned long timeout = jiffies + 120 * HZ;
3217 	struct gdma_dev *gd = apc->ac->gdma_dev;
3218 	struct mana_txq *txq;
3219 	struct sk_buff *skb;
3220 	int i, err;
3221 	u32 tsleep;
3222 
3223 	if (apc->port_is_up)
3224 		return -EINVAL;
3225 
3226 	mana_chn_setxdp(apc, NULL);
3227 
3228 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
3229 		mana_pf_deregister_filter(apc);
3230 
3231 	/* No packet can be transmitted now since apc->port_is_up is false.
3232 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
3233 	 * a txq because it may not timely see apc->port_is_up being cleared
3234 	 * to false, but it doesn't matter since mana_start_xmit() drops any
3235 	 * new packets due to apc->port_is_up being false.
3236 	 *
3237 	 * Drain all the in-flight TX packets.
3238 	 * A timeout of 120 seconds is used for all the queues; it breaks
3239 	 * the while loop when the hardware is not responding.
3240 	 * The value of 120 was chosen considering the maximum number
3241 	 * of queues.
3242 	 */
3243 
3244 	for (i = 0; i < apc->num_queues; i++) {
3245 		txq = &apc->tx_qp[i].txq;
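		/* Wait with an exponentially increasing sleep, starting at ~1ms */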
3246 		tsleep = 1000;
3247 		while (atomic_read(&txq->pending_sends) > 0 &&
3248 		       time_before(jiffies, timeout)) {
3249 			usleep_range(tsleep, tsleep + 1000);
3250 			tsleep <<= 1;
3251 		}
3252 		if (atomic_read(&txq->pending_sends)) {
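			/* Packets are still pending after the timeout: reset
			 * the device with a PCIe Function Level Reset.
			 */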
3253 			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
3254 			if (err) {
3255 				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
3256 					   err, atomic_read(&txq->pending_sends),
3257 					   txq->gdma_txq_id);
3258 			}
3259 			break;
3260 		}
3261 	}
3262 
3263 	for (i = 0; i < apc->num_queues; i++) {
3264 		txq = &apc->tx_qp[i].txq;
3265 		while ((skb = skb_dequeue(&txq->pending_skbs))) {
3266 			mana_unmap_skb(skb, apc);
3267 			dev_kfree_skb_any(skb);
3268 		}
3269 		atomic_set(&txq->pending_sends, 0);
3270 	}
3271 	/* At this point the queues can no longer be woken up, because
3272 	 * mana_poll_tx_cq() is guaranteed not to be running anymore.
3273 	 */
3274 
3275 	apc->rss_state = TRI_STATE_FALSE;
3276 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
3277 	if (err && mana_en_need_log(apc, err))
3278 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
3279 
3280 	/* Even in err case, still need to cleanup the vPort */
3281 	mana_destroy_vport(apc);
3282 
3283 	return 0;
3284 }
3285 
3286 int mana_detach(struct net_device *ndev, bool from_close)
3287 {
3288 	struct mana_port_context *apc = netdev_priv(ndev);
3289 	int err;
3290 
3291 	ASSERT_RTNL();
3292 
3293 	apc->port_st_save = apc->port_is_up;
3294 	apc->port_is_up = false;
3295 
3296 	/* Ensure port state updated before txq state */
3297 	smp_wmb();
3298 
3299 	netif_tx_disable(ndev);
3300 
3301 	if (apc->port_st_save) {
3302 		err = mana_dealloc_queues(ndev);
3303 		if (err) {
3304 			netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
3305 			return err;
3306 		}
3307 	}
3308 
3309 	if (!from_close) {
3310 		netif_device_detach(ndev);
3311 		mana_cleanup_port_context(apc);
3312 	}
3313 
3314 	return 0;
3315 }
3316 
3317 static int mana_probe_port(struct mana_context *ac, int port_idx,
3318 			   struct net_device **ndev_storage)
3319 {
3320 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
3321 	struct mana_port_context *apc;
3322 	struct net_device *ndev;
3323 	int err;
3324 
3325 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
3326 				 gc->max_num_queues);
3327 	if (!ndev)
3328 		return -ENOMEM;
3329 
3330 	*ndev_storage = ndev;
3331 
3332 	apc = netdev_priv(ndev);
3333 	apc->ac = ac;
3334 	apc->ndev = ndev;
3335 	apc->max_queues = gc->max_num_queues;
3336 	apc->num_queues = gc->max_num_queues;
3337 	apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
3338 	apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
3339 	apc->port_handle = INVALID_MANA_HANDLE;
3340 	apc->pf_filter_handle = INVALID_MANA_HANDLE;
3341 	apc->port_idx = port_idx;
3342 
3343 	mutex_init(&apc->vport_mutex);
3344 	apc->vport_use_count = 0;
3345 
3346 	ndev->netdev_ops = &mana_devops;
3347 	ndev->ethtool_ops = &mana_ethtool_ops;
3348 	ndev->mtu = ETH_DATA_LEN;
3349 	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
3350 	ndev->min_mtu = ETH_MIN_MTU;
3351 	ndev->needed_headroom = MANA_HEADROOM;
3352 	ndev->dev_port = port_idx;
3353 	/* Recommended timeout based on HW FPGA re-config scenario. */
3354 	ndev->watchdog_timeo = 15 * HZ;
3355 	SET_NETDEV_DEV(ndev, gc->dev);
3356 
3357 	netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
3358 
3359 	netif_carrier_off(ndev);
3360 
3361 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
3362 
3363 	err = mana_init_port(ndev);
3364 	if (err)
3365 		goto free_net;
3366 
3367 	err = mana_rss_table_alloc(apc);
3368 	if (err)
3369 		goto reset_apc;
3370 
3371 	/* Initialize the per-port queue reset work. */
3372 	INIT_WORK(&apc->queue_reset_work,
3373 		  mana_per_port_queue_reset_work_handler);
3374 
3375 	netdev_lockdep_set_classes(ndev);
3376 
3377 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3378 	ndev->hw_features |= NETIF_F_RXCSUM;
3379 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3380 	ndev->hw_features |= NETIF_F_RXHASH;
3381 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
3382 			 NETIF_F_HW_VLAN_CTAG_RX;
3383 	ndev->vlan_features = ndev->features;
3384 	xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
3385 			      NETDEV_XDP_ACT_REDIRECT |
3386 			      NETDEV_XDP_ACT_NDO_XMIT);
3387 
3388 	err = register_netdev(ndev);
3389 	if (err) {
3390 		netdev_err(ndev, "Unable to register netdev.\n");
3391 		goto free_indir;
3392 	}
3393 
3394 	netif_carrier_on(ndev);
3395 
3396 	debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
3397 
3398 	return 0;
3399 
3400 free_indir:
3401 	mana_cleanup_indir_table(apc);
3402 reset_apc:
3403 	mana_cleanup_port_context(apc);
3404 free_net:
3405 	*ndev_storage = NULL;
3406 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
3407 	free_netdev(ndev);
3408 	return err;
3409 }
3410 
3411 static void adev_release(struct device *dev)
3412 {
3413 	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
3414 
3415 	kfree(madev);
3416 }
3417 
3418 static void remove_adev(struct gdma_dev *gd)
3419 {
3420 	struct auxiliary_device *adev = gd->adev;
3421 	int id = adev->id;
3422 
3423 	auxiliary_device_delete(adev);
3424 	auxiliary_device_uninit(adev);
3425 
3426 	mana_adev_idx_free(id);
3427 	gd->adev = NULL;
3428 }
3429 
3430 static int add_adev(struct gdma_dev *gd, const char *name)
3431 {
3432 	struct auxiliary_device *adev;
3433 	struct mana_adev *madev;
3434 	int ret;
3435 	int id;
3436 
3437 	madev = kzalloc_obj(*madev);
3438 	if (!madev)
3439 		return -ENOMEM;
3440 
3441 	adev = &madev->adev;
3442 	ret = mana_adev_idx_alloc();
3443 	if (ret < 0)
3444 		goto idx_fail;
3445 	id = ret;
3446 	adev->id = id;
3447 
3448 	adev->name = name;
3449 	adev->dev.parent = gd->gdma_context->dev;
3450 	adev->dev.release = adev_release;
3451 	madev->mdev = gd;
3452 
3453 	ret = auxiliary_device_init(adev);
3454 	if (ret)
3455 		goto init_fail;
3456 
3457 	/* madev is owned by the auxiliary device */
3458 	madev = NULL;
3459 	ret = auxiliary_device_add(adev);
3460 	if (ret)
3461 		goto add_fail;
3462 
3463 	gd->adev = adev;
3464 	dev_dbg(gd->gdma_context->dev,
3465 		"Auxiliary device added successfully\n");
3466 	return 0;
3467 
3468 add_fail:
3469 	auxiliary_device_uninit(adev);
3470 
3471 init_fail:
3472 	mana_adev_idx_free(id);
3473 
3474 idx_fail:
3475 	kfree(madev);
3476 
3477 	return ret;
3478 }
3479 
3480 static void mana_rdma_service_handle(struct work_struct *work)
3481 {
3482 	struct mana_service_work *serv_work =
3483 		container_of(work, struct mana_service_work, work);
3484 	struct gdma_dev *gd = serv_work->gdma_dev;
3485 	struct device *dev = gd->gdma_context->dev;
3486 	int ret;
3487 
3488 	if (READ_ONCE(gd->rdma_teardown))
3489 		goto out;
3490 
3491 	switch (serv_work->event) {
3492 	case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
3493 		if (!gd->adev || gd->is_suspended)
3494 			break;
3495 
3496 		remove_adev(gd);
3497 		gd->is_suspended = true;
3498 		break;
3499 
3500 	case GDMA_SERVICE_TYPE_RDMA_RESUME:
3501 		if (!gd->is_suspended)
3502 			break;
3503 
3504 		ret = add_adev(gd, "rdma");
3505 		if (ret)
3506 			dev_err(dev, "Failed to add adev on resume: %d\n", ret);
3507 		else
3508 			gd->is_suspended = false;
3509 		break;
3510 
3511 	default:
3512 		dev_warn(dev, "unknown adev service event %u\n",
3513 			 serv_work->event);
3514 		break;
3515 	}
3516 
3517 out:
3518 	kfree(serv_work);
3519 }
3520 
3521 int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
3522 {
3523 	struct gdma_dev *gd = &gc->mana_ib;
3524 	struct mana_service_work *serv_work;
3525 
3526 	if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3527 		/* RDMA device is not detected on pci */
3528 		return 0;
3529 	}
3530 
3531 	serv_work = kzalloc_obj(*serv_work, GFP_ATOMIC);
3532 	if (!serv_work)
3533 		return -ENOMEM;
3534 
3535 	serv_work->event = event;
3536 	serv_work->gdma_dev = gd;
3537 
3538 	INIT_WORK(&serv_work->work, mana_rdma_service_handle);
3539 	queue_work(gc->service_wq, &serv_work->work);
3540 
3541 	return 0;
3542 }
3543 
3544 #define MANA_GF_STATS_PERIOD (2 * HZ)
3545 
3546 static void mana_gf_stats_work_handler(struct work_struct *work)
3547 {
3548 	struct mana_context *ac =
3549 		container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
3550 	int err;
3551 
3552 	err = mana_query_gf_stats(ac);
3553 	if (err == -ETIMEDOUT) {
3554 		/* HWC timeout detected - reset stats and stop rescheduling */
3555 		ac->hwc_timeout_occurred = true;
3556 		memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
3557 		return;
3558 	}
3559 	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
3560 }
3561 
3562 int mana_probe(struct gdma_dev *gd, bool resuming)
3563 {
3564 	struct gdma_context *gc = gd->gdma_context;
3565 	struct mana_context *ac = gd->driver_data;
3566 	struct mana_port_context *apc = NULL;
3567 	struct device *dev = gc->dev;
3568 	u8 bm_hostmode = 0;
3569 	u16 num_ports = 0;
3570 	int err;
3571 	int i;
3572 
3573 	dev_info(dev,
3574 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
3575 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
3576 
3577 	err = mana_gd_register_device(gd);
3578 	if (err)
3579 		return err;
3580 
3581 	if (!resuming) {
3582 		ac = kzalloc_obj(*ac);
3583 		if (!ac)
3584 			return -ENOMEM;
3585 
3586 		ac->gdma_dev = gd;
3587 		gd->driver_data = ac;
3588 	}
3589 
3590 	err = mana_create_eq(ac);
3591 	if (err) {
3592 		dev_err(dev, "Failed to create EQs: %d\n", err);
3593 		goto out;
3594 	}
3595 
3596 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
3597 				    MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
3598 	if (err)
3599 		goto out;
3600 
3601 	ac->bm_hostmode = bm_hostmode;
3602 
3603 	if (!resuming) {
3604 		ac->num_ports = num_ports;
3605 
3606 		INIT_WORK(&ac->link_change_work, mana_link_state_handle);
3607 	} else {
3608 		if (ac->num_ports != num_ports) {
3609 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
3610 				ac->num_ports, num_ports);
3611 			err = -EPROTO;
3612 			goto out;
3613 		}
3614 
3615 		enable_work(&ac->link_change_work);
3616 	}
3617 
3618 	if (ac->num_ports == 0)
3619 		dev_err(dev, "Failed to detect any vPort\n");
3620 
3621 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
3622 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
3623 
3624 	ac->per_port_queue_reset_wq =
3625 		create_singlethread_workqueue("mana_per_port_queue_reset_wq");
3626 	if (!ac->per_port_queue_reset_wq) {
3627 		dev_err(dev, "Failed to allocate per port queue reset workqueue\n");
3628 		err = -ENOMEM;
3629 		goto out;
3630 	}
3631 
3632 	if (!resuming) {
3633 		for (i = 0; i < ac->num_ports; i++) {
3634 			err = mana_probe_port(ac, i, &ac->ports[i]);
3635 			/* Log the port for which the probe failed and stop
3636 			 * probing the subsequent ports.
3637 			 * Note that the ports probed successfully keep running,
3638 			 * unless add_adev() fails too.
3639 			 */
3640 			if (err) {
3641 				dev_err(dev, "Probe Failed for port %d\n", i);
3642 				break;
3643 			}
3644 		}
3645 	} else {
3646 		for (i = 0; i < ac->num_ports; i++) {
3647 			rtnl_lock();
3648 			apc = netdev_priv(ac->ports[i]);
3649 			enable_work(&apc->queue_reset_work);
3650 			err = mana_attach(ac->ports[i]);
3651 			rtnl_unlock();
3652 			/* Log the port for which the attach failed and stop
3653 			 * attaching the subsequent ports.
3654 			 * Note that the ports attached successfully keep running,
3655 			 * unless add_adev() fails too.
3656 			 */
3657 			if (err) {
3658 				dev_err(dev, "Attach Failed for port %d\n", i);
3659 				break;
3660 			}
3661 		}
3662 	}
3663 
3664 	err = add_adev(gd, "eth");
3665 
3666 	INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
3667 	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
3668 
3669 out:
3670 	if (err) {
3671 		mana_remove(gd, false);
3672 	} else {
3673 		dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
3674 			gd, gd->dev_id.as_uint32, ac->num_ports,
3675 			gd->dev_id.type, gd->dev_id.instance);
3676 		dev_dbg(dev, "%s succeeded\n", __func__);
3677 	}
3678 
3679 	return err;
3680 }
3681 
3682 void mana_remove(struct gdma_dev *gd, bool suspending)
3683 {
3684 	struct gdma_context *gc = gd->gdma_context;
3685 	struct mana_context *ac = gd->driver_data;
3686 	struct mana_port_context *apc;
3687 	struct device *dev = gc->dev;
3688 	struct net_device *ndev;
3689 	int err;
3690 	int i;
3691 
3692 	disable_work_sync(&ac->link_change_work);
3693 	cancel_delayed_work_sync(&ac->gf_stats_work);
3694 
3695 	/* adev currently doesn't support suspending, always remove it */
3696 	if (gd->adev)
3697 		remove_adev(gd);
3698 
3699 	for (i = 0; i < ac->num_ports; i++) {
3700 		ndev = ac->ports[i];
3701 		if (!ndev) {
3702 			if (i == 0)
3703 				dev_err(dev, "No net device to remove\n");
3704 			goto out;
3705 		}
3706 
3707 		apc = netdev_priv(ndev);
3708 		disable_work_sync(&apc->queue_reset_work);
3709 
3710 		/* All cleanup actions should stay after rtnl_lock(), otherwise
3711 		 * other functions may access partially cleaned up data.
3712 		 */
3713 		rtnl_lock();
3714 
3715 		err = mana_detach(ndev, false);
3716 		if (err)
3717 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
3718 				   i, err);
3719 
3720 		if (suspending) {
3721 			/* No need to unregister the ndev. */
3722 			rtnl_unlock();
3723 			continue;
3724 		}
3725 
3726 		unregister_netdevice(ndev);
3727 		mana_cleanup_indir_table(apc);
3728 
3729 		rtnl_unlock();
3730 
3731 		free_netdev(ndev);
3732 	}
3733 
3734 	mana_destroy_eq(ac);
3735 out:
3736 	if (ac->per_port_queue_reset_wq) {
3737 		destroy_workqueue(ac->per_port_queue_reset_wq);
3738 		ac->per_port_queue_reset_wq = NULL;
3739 	}
3740 
3741 	mana_gd_deregister_device(gd);
3742 
3743 	if (suspending)
3744 		return;
3745 
3746 	gd->driver_data = NULL;
3747 	gd->gdma_context = NULL;
3748 	kfree(ac);
3749 	dev_dbg(dev, "%s succeeded\n", __func__);
3750 }
3751 
3752 int mana_rdma_probe(struct gdma_dev *gd)
3753 {
3754 	int err = 0;
3755 
3756 	if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3757 		/* RDMA device is not detected on pci */
3758 		return err;
3759 	}
3760 
3761 	err = mana_gd_register_device(gd);
3762 	if (err)
3763 		return err;
3764 
3765 	err = add_adev(gd, "rdma");
3766 	if (err)
3767 		mana_gd_deregister_device(gd);
3768 
3769 	return err;
3770 }
3771 
3772 void mana_rdma_remove(struct gdma_dev *gd)
3773 {
3774 	struct gdma_context *gc = gd->gdma_context;
3775 
3776 	if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3777 		/* RDMA device is not detected on pci */
3778 		return;
3779 	}
3780 
3781 	WRITE_ONCE(gd->rdma_teardown, true);
3782 
3783 	if (gc->service_wq)
3784 		flush_workqueue(gc->service_wq);
3785 
3786 	if (gd->adev)
3787 		remove_adev(gd);
3788 
3789 	mana_gd_deregister_device(gd);
3790 }
3791 
3792 struct net_device *mana_get_primary_netdev(struct mana_context *ac,
3793 					   u32 port_index,
3794 					   netdevice_tracker *tracker)
3795 {
3796 	struct net_device *ndev;
3797 
3798 	if (port_index >= ac->num_ports)
3799 		return NULL;
3800 
3801 	rcu_read_lock();
3802 
3803 	/* If mana is used in netvsc, the upper netdevice should be returned. */
3804 	ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
3805 
3806 	/* If there is no upper device, use the parent Ethernet device */
3807 	if (!ndev)
3808 		ndev = ac->ports[port_index];
3809 
3810 	netdev_hold(ndev, tracker, GFP_ATOMIC);
3811 	rcu_read_unlock();
3812 
3813 	return ndev;
3814 }
3815 EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
3816