xref: /linux/drivers/net/ethernet/microsoft/mana/mana_en.c (revision ea8d7647f9ddf1f81e2027ed305299797299aa03)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <uapi/linux/bpf.h>
5 
6 #include <linux/debugfs.h>
7 #include <linux/inetdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/filter.h>
11 #include <linux/mm.h>
12 #include <linux/pci.h>
13 
14 #include <net/checksum.h>
15 #include <net/ip6_checksum.h>
16 #include <net/netdev_lock.h>
17 #include <net/page_pool/helpers.h>
18 #include <net/xdp.h>
19 
20 #include <net/mana/mana.h>
21 #include <net/mana/mana_auxiliary.h>
22 
23 static DEFINE_IDA(mana_adev_ida);
24 
25 static int mana_adev_idx_alloc(void)
26 {
27 	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
28 }
29 
30 static void mana_adev_idx_free(int idx)
31 {
32 	ida_free(&mana_adev_ida, idx);
33 }
34 
35 static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
36 			       loff_t *pos)
37 {
38 	struct gdma_queue *gdma_q = filp->private_data;
39 
40 	return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
41 				       gdma_q->queue_size);
42 }
43 
44 static const struct file_operations mana_dbg_q_fops = {
45 	.owner  = THIS_MODULE,
46 	.open   = simple_open,
47 	.read   = mana_dbg_q_read,
48 };
49 
50 /* Microsoft Azure Network Adapter (MANA) functions */
51 
52 static int mana_open(struct net_device *ndev)
53 {
54 	struct mana_port_context *apc = netdev_priv(ndev);
 55 	int err;
 56 
 57 	err = mana_alloc_queues(ndev);
58 	if (err) {
59 		netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
60 		return err;
61 	}
62 
63 	apc->port_is_up = true;
64 
65 	/* Ensure port state updated before txq state */
66 	smp_wmb();
67 
68 	netif_carrier_on(ndev);
69 	netif_tx_wake_all_queues(ndev);
70 	netdev_dbg(ndev, "%s successful\n", __func__);
71 	return 0;
72 }
73 
74 static int mana_close(struct net_device *ndev)
75 {
76 	struct mana_port_context *apc = netdev_priv(ndev);
77 
78 	if (!apc->port_is_up)
79 		return 0;
80 
81 	return mana_detach(ndev, true);
82 }
83 
84 static bool mana_can_tx(struct gdma_queue *wq)
85 {
86 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
87 }
88 
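/* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) when the skb's
 * transport checksum can be offloaded to the NIC, or 0 when it cannot.
 */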
89 static unsigned int mana_checksum_info(struct sk_buff *skb)
90 {
91 	if (skb->protocol == htons(ETH_P_IP)) {
92 		struct iphdr *ip = ip_hdr(skb);
93 
94 		if (ip->protocol == IPPROTO_TCP)
95 			return IPPROTO_TCP;
96 
97 		if (ip->protocol == IPPROTO_UDP)
98 			return IPPROTO_UDP;
99 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
100 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
101 
102 		if (ip6->nexthdr == IPPROTO_TCP)
103 			return IPPROTO_TCP;
104 
105 		if (ip6->nexthdr == IPPROTO_UDP)
106 			return IPPROTO_UDP;
107 	}
108 
109 	/* No csum offloading */
110 	return 0;
111 }
112 
113 static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
114 			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
115 {
116 	ash->dma_handle[sg_i] = da;
117 	ash->size[sg_i] = sge_len;
118 
119 	tp->wqe_req.sgl[sg_i].address = da;
120 	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
121 	tp->wqe_req.sgl[sg_i].size = sge_len;
122 }
123 
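/* DMA-map the skb linear part (split into two SGEs for GSO packets whose
 * linear part is larger than the headers) and all page frags, recording each
 * mapping both in the WQE SGL and in the mana_skb_head shadow arrays so it
 * can be unmapped on completion.
 */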
124 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
125 			struct mana_tx_package *tp, int gso_hs)
126 {
127 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
128 	int hsg = 1; /* num of SGEs of linear part */
129 	struct gdma_dev *gd = apc->ac->gdma_dev;
130 	int skb_hlen = skb_headlen(skb);
131 	int sge0_len, sge1_len = 0;
132 	struct gdma_context *gc;
133 	struct device *dev;
134 	skb_frag_t *frag;
135 	dma_addr_t da;
136 	int sg_i;
137 	int i;
138 
139 	gc = gd->gdma_context;
140 	dev = gc->dev;
141 
142 	if (gso_hs && gso_hs < skb_hlen) {
143 		sge0_len = gso_hs;
144 		sge1_len = skb_hlen - gso_hs;
145 	} else {
146 		sge0_len = skb_hlen;
147 	}
148 
149 	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
150 	if (dma_mapping_error(dev, da))
151 		return -ENOMEM;
152 
153 	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
154 
155 	if (sge1_len) {
156 		sg_i = 1;
157 		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
158 				    DMA_TO_DEVICE);
159 		if (dma_mapping_error(dev, da))
160 			goto frag_err;
161 
162 		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
163 		hsg = 2;
164 	}
165 
166 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
167 		sg_i = hsg + i;
168 
169 		frag = &skb_shinfo(skb)->frags[i];
170 		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
171 				      DMA_TO_DEVICE);
172 		if (dma_mapping_error(dev, da))
173 			goto frag_err;
174 
175 		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
176 			     gd->gpa_mkey);
177 	}
178 
179 	return 0;
180 
181 frag_err:
182 	if (net_ratelimit())
183 		netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
184 			   skb->len);
185 	for (i = sg_i - 1; i >= hsg; i--)
186 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
187 			       DMA_TO_DEVICE);
188 
189 	for (i = hsg - 1; i >= 0; i--)
190 		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
191 				 DMA_TO_DEVICE);
192 
193 	return -ENOMEM;
194 }
195 
196 /* Handle the case when GSO SKB linear length is too large.
 197  * The MANA NIC requires GSO packets to put only the packet header in SGE0.
 198  * So, we need 2 SGEs for the skb linear part when it contains more than
 199  * the header.
200  * Return a positive value for the number of SGEs, or a negative value
201  * for an error.
202  */
203 static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
204 			     int gso_hs)
205 {
206 	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
207 	int skb_hlen = skb_headlen(skb);
208 
209 	if (gso_hs < skb_hlen) {
210 		num_sge++;
211 	} else if (gso_hs > skb_hlen) {
212 		if (net_ratelimit())
213 			netdev_err(ndev,
214 				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
215 				   gso_hs, skb_hlen);
216 
217 		return -EINVAL;
218 	}
219 
220 	return num_sge;
221 }
222 
223 /* Get the GSO packet's header size */
224 static int mana_get_gso_hs(struct sk_buff *skb)
225 {
226 	int gso_hs;
227 
228 	if (skb->encapsulation) {
229 		gso_hs = skb_inner_tcp_all_headers(skb);
230 	} else {
231 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
232 			gso_hs = skb_transport_offset(skb) +
233 				 sizeof(struct udphdr);
234 		} else {
235 			gso_hs = skb_tcp_all_headers(skb);
236 		}
237 	}
238 
239 	return gso_hs;
240 }
241 
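/* Main TX entry point: build the per-packet TX OOB (VLAN, GSO and checksum
 * offload settings), DMA-map the skb into SGEs, post the work request to the
 * send queue and ring the doorbell. Returns NETDEV_TX_BUSY if the WQE could
 * not be posted, so the stack can retry the packet later.
 */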
242 netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
243 {
244 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
245 	struct mana_port_context *apc = netdev_priv(ndev);
246 	int gso_hs = 0; /* zero for non-GSO pkts */
247 	u16 txq_idx = skb_get_queue_mapping(skb);
248 	struct gdma_dev *gd = apc->ac->gdma_dev;
249 	bool ipv4 = false, ipv6 = false;
250 	struct mana_tx_package pkg = {};
251 	struct netdev_queue *net_txq;
252 	struct mana_stats_tx *tx_stats;
253 	struct gdma_queue *gdma_sq;
254 	unsigned int csum_type;
255 	struct mana_txq *txq;
256 	struct mana_cq *cq;
257 	int err, len;
258 
259 	if (unlikely(!apc->port_is_up))
260 		goto tx_drop;
261 
262 	if (skb_cow_head(skb, MANA_HEADROOM))
263 		goto tx_drop_count;
264 
265 	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
266 		goto tx_drop_count;
267 
268 	txq = &apc->tx_qp[txq_idx].txq;
269 	gdma_sq = txq->gdma_sq;
270 	cq = &apc->tx_qp[txq_idx].tx_cq;
271 	tx_stats = &txq->stats;
272 
273 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
274 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
275 
276 	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
277 		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
278 		pkt_fmt = MANA_LONG_PKT_FMT;
279 	} else {
280 		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
281 	}
282 
283 	if (skb_vlan_tag_present(skb)) {
284 		pkt_fmt = MANA_LONG_PKT_FMT;
285 		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
286 		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
287 		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
288 		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
289 	}
290 
291 	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
292 
293 	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
294 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
295 		u64_stats_update_begin(&tx_stats->syncp);
296 		tx_stats->short_pkt_fmt++;
297 		u64_stats_update_end(&tx_stats->syncp);
298 	} else {
299 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
300 		u64_stats_update_begin(&tx_stats->syncp);
301 		tx_stats->long_pkt_fmt++;
302 		u64_stats_update_end(&tx_stats->syncp);
303 	}
304 
305 	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
306 	pkg.wqe_req.flags = 0;
307 	pkg.wqe_req.client_data_unit = 0;
308 
309 	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
310 
311 	if (skb->protocol == htons(ETH_P_IP))
312 		ipv4 = true;
313 	else if (skb->protocol == htons(ETH_P_IPV6))
314 		ipv6 = true;
315 
316 	if (skb_is_gso(skb)) {
317 		int num_sge;
318 
319 		gso_hs = mana_get_gso_hs(skb);
320 
321 		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
322 		if (num_sge > 0)
323 			pkg.wqe_req.num_sge = num_sge;
324 		else
325 			goto tx_drop_count;
326 
327 		u64_stats_update_begin(&tx_stats->syncp);
328 		if (skb->encapsulation) {
329 			tx_stats->tso_inner_packets++;
330 			tx_stats->tso_inner_bytes += skb->len - gso_hs;
331 		} else {
332 			tx_stats->tso_packets++;
333 			tx_stats->tso_bytes += skb->len - gso_hs;
334 		}
335 		u64_stats_update_end(&tx_stats->syncp);
336 
337 		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
338 		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
339 
340 		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
341 		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
342 		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
343 
344 		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
345 		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
346 		if (ipv4) {
347 			ip_hdr(skb)->tot_len = 0;
348 			ip_hdr(skb)->check = 0;
349 			tcp_hdr(skb)->check =
350 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
351 						   ip_hdr(skb)->daddr, 0,
352 						   IPPROTO_TCP, 0);
353 		} else {
354 			ipv6_hdr(skb)->payload_len = 0;
355 			tcp_hdr(skb)->check =
356 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
357 						 &ipv6_hdr(skb)->daddr, 0,
358 						 IPPROTO_TCP, 0);
359 		}
360 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
361 		csum_type = mana_checksum_info(skb);
362 
363 		u64_stats_update_begin(&tx_stats->syncp);
364 		tx_stats->csum_partial++;
365 		u64_stats_update_end(&tx_stats->syncp);
366 
367 		if (csum_type == IPPROTO_TCP) {
368 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
369 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
370 
371 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
372 			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
373 
374 		} else if (csum_type == IPPROTO_UDP) {
375 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
376 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
377 
378 			pkg.tx_oob.s_oob.comp_udp_csum = 1;
379 		} else {
380 			/* Can't do offload of this type of checksum */
381 			if (skb_checksum_help(skb))
382 				goto tx_drop_count;
383 		}
384 	}
385 
386 	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
387 
388 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
389 		pkg.wqe_req.sgl = pkg.sgl_array;
390 	} else {
391 		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
392 					    sizeof(struct gdma_sge),
393 					    GFP_ATOMIC);
394 		if (!pkg.sgl_ptr)
395 			goto tx_drop_count;
396 
397 		pkg.wqe_req.sgl = pkg.sgl_ptr;
398 	}
399 
400 	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
401 		u64_stats_update_begin(&tx_stats->syncp);
402 		tx_stats->mana_map_err++;
403 		u64_stats_update_end(&tx_stats->syncp);
404 		goto free_sgl_ptr;
405 	}
406 
407 	skb_queue_tail(&txq->pending_skbs, skb);
408 
409 	len = skb->len;
410 	net_txq = netdev_get_tx_queue(ndev, txq_idx);
411 
412 	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
413 					(struct gdma_posted_wqe_info *)skb->cb);
414 	if (!mana_can_tx(gdma_sq)) {
415 		netif_tx_stop_queue(net_txq);
416 		apc->eth_stats.stop_queue++;
417 	}
418 
419 	if (err) {
420 		(void)skb_dequeue_tail(&txq->pending_skbs);
421 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
422 		err = NETDEV_TX_BUSY;
423 		goto tx_busy;
424 	}
425 
426 	err = NETDEV_TX_OK;
427 	atomic_inc(&txq->pending_sends);
428 
429 	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
430 
431 	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
432 	skb = NULL;
433 
434 	tx_stats = &txq->stats;
435 	u64_stats_update_begin(&tx_stats->syncp);
436 	tx_stats->packets++;
437 	tx_stats->bytes += len;
438 	u64_stats_update_end(&tx_stats->syncp);
439 
440 tx_busy:
441 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
442 		netif_tx_wake_queue(net_txq);
443 		apc->eth_stats.wake_queue++;
444 	}
445 
446 	kfree(pkg.sgl_ptr);
447 	return err;
448 
449 free_sgl_ptr:
450 	kfree(pkg.sgl_ptr);
451 tx_drop_count:
452 	ndev->stats.tx_dropped++;
453 tx_drop:
454 	dev_kfree_skb_any(skb);
455 	return NETDEV_TX_OK;
456 }
457 
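/* Aggregate the per-queue RX and TX packet/byte counters into the netdev
 * stats, using the u64_stats seqcount to read consistent snapshots.
 */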
458 static void mana_get_stats64(struct net_device *ndev,
459 			     struct rtnl_link_stats64 *st)
460 {
461 	struct mana_port_context *apc = netdev_priv(ndev);
462 	unsigned int num_queues = apc->num_queues;
463 	struct mana_stats_rx *rx_stats;
464 	struct mana_stats_tx *tx_stats;
465 	unsigned int start;
466 	u64 packets, bytes;
467 	int q;
468 
469 	if (!apc->port_is_up)
470 		return;
471 
472 	netdev_stats_to_stats64(st, &ndev->stats);
473 
474 	for (q = 0; q < num_queues; q++) {
475 		rx_stats = &apc->rxqs[q]->stats;
476 
477 		do {
478 			start = u64_stats_fetch_begin(&rx_stats->syncp);
479 			packets = rx_stats->packets;
480 			bytes = rx_stats->bytes;
481 		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
482 
483 		st->rx_packets += packets;
484 		st->rx_bytes += bytes;
485 	}
486 
487 	for (q = 0; q < num_queues; q++) {
488 		tx_stats = &apc->tx_qp[q].txq.stats;
489 
490 		do {
491 			start = u64_stats_fetch_begin(&tx_stats->syncp);
492 			packets = tx_stats->packets;
493 			bytes = tx_stats->bytes;
494 		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
495 
496 		st->tx_packets += packets;
497 		st->tx_bytes += bytes;
498 	}
499 }
500 
501 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
502 			     int old_q)
503 {
504 	struct mana_port_context *apc = netdev_priv(ndev);
505 	u32 hash = skb_get_hash(skb);
506 	struct sock *sk = skb->sk;
507 	int txq;
508 
509 	txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
510 
511 	if (txq != old_q && sk && sk_fullsock(sk) &&
512 	    rcu_access_pointer(sk->sk_dst_cache))
513 		sk_tx_queue_set(sk, txq);
514 
515 	return txq;
516 }
517 
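/* Pick a TX queue: prefer the queue cached in the socket, then the recorded
 * RX queue, and finally the RSS indirection table indexed by the skb hash.
 */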
518 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
519 			     struct net_device *sb_dev)
520 {
521 	int txq;
522 
523 	if (ndev->real_num_tx_queues == 1)
524 		return 0;
525 
526 	txq = sk_tx_queue_get(skb->sk);
527 
528 	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
529 		if (skb_rx_queue_recorded(skb))
530 			txq = skb_get_rx_queue(skb);
531 		else
532 			txq = mana_get_tx_queue(ndev, skb, txq);
533 	}
534 
535 	return txq;
536 }
537 
538 /* Release pre-allocated RX buffers */
539 void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
540 {
541 	struct device *dev;
542 	int i;
543 
544 	dev = mpc->ac->gdma_dev->gdma_context->dev;
545 
546 	if (!mpc->rxbufs_pre)
547 		goto out1;
548 
549 	if (!mpc->das_pre)
550 		goto out2;
551 
552 	while (mpc->rxbpre_total) {
553 		i = --mpc->rxbpre_total;
554 		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
555 				 DMA_FROM_DEVICE);
556 		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
557 	}
558 
559 	kfree(mpc->das_pre);
560 	mpc->das_pre = NULL;
561 
562 out2:
563 	kfree(mpc->rxbufs_pre);
564 	mpc->rxbufs_pre = NULL;
565 
566 out1:
567 	mpc->rxbpre_datasize = 0;
568 	mpc->rxbpre_alloc_size = 0;
569 	mpc->rxbpre_headroom = 0;
570 }
571 
572 /* Get a buffer from the pre-allocated RX buffers */
573 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
574 {
575 	struct net_device *ndev = rxq->ndev;
576 	struct mana_port_context *mpc;
577 	void *va;
578 
579 	mpc = netdev_priv(ndev);
580 
581 	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
582 		netdev_err(ndev, "No RX pre-allocated bufs\n");
583 		return NULL;
584 	}
585 
586 	/* Check sizes to catch unexpected coding error */
 587 	/* Check sizes to catch unexpected coding errors */
588 		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
589 			   mpc->rxbpre_datasize, rxq->datasize);
590 		return NULL;
591 	}
592 
593 	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
594 		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
595 			   mpc->rxbpre_alloc_size, rxq->alloc_size);
596 		return NULL;
597 	}
598 
599 	if (mpc->rxbpre_headroom != rxq->headroom) {
600 		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
601 			   mpc->rxbpre_headroom, rxq->headroom);
602 		return NULL;
603 	}
604 
605 	mpc->rxbpre_total--;
606 
607 	*da = mpc->das_pre[mpc->rxbpre_total];
608 	va = mpc->rxbufs_pre[mpc->rxbpre_total];
609 	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
610 
611 	/* Deallocate the array after all buffers are gone */
612 	if (!mpc->rxbpre_total)
613 		mana_pre_dealloc_rxbufs(mpc);
614 
615 	return va;
616 }
617 
618 /* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
619 static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
620 			       u32 *headroom)
621 {
622 	if (mtu > MANA_XDP_MTU_MAX)
623 		*headroom = 0; /* no support for XDP */
624 	else
625 		*headroom = XDP_PACKET_HEADROOM;
626 
627 	*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
628 
 629 	/* The page pool is used in this case, so round alloc_size up to PAGE_SIZE */
630 	if (*alloc_size < PAGE_SIZE)
631 		*alloc_size = PAGE_SIZE;
632 
633 	*datasize = mtu + ETH_HLEN;
634 }
635 
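/* Pre-allocate and DMA-map RX buffers for all queues at the given MTU, so a
 * later reconfiguration (e.g. an MTU change) does not fail for lack of
 * memory while the port is detached.
 */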
636 int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
637 {
638 	struct device *dev;
639 	struct page *page;
640 	dma_addr_t da;
641 	int num_rxb;
642 	void *va;
643 	int i;
644 
645 	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
646 			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
647 
648 	dev = mpc->ac->gdma_dev->gdma_context->dev;
649 
650 	num_rxb = num_queues * mpc->rx_queue_size;
651 
652 	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
653 	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
654 	if (!mpc->rxbufs_pre)
655 		goto error;
656 
657 	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
658 	if (!mpc->das_pre)
659 		goto error;
660 
661 	mpc->rxbpre_total = 0;
662 
663 	for (i = 0; i < num_rxb; i++) {
664 		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
665 			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
666 			if (!va)
667 				goto error;
668 
669 			page = virt_to_head_page(va);
670 			/* Check if the frag falls back to single page */
671 			if (compound_order(page) <
672 			    get_order(mpc->rxbpre_alloc_size)) {
673 				put_page(page);
674 				goto error;
675 			}
676 		} else {
677 			page = dev_alloc_page();
678 			if (!page)
679 				goto error;
680 
681 			va = page_to_virt(page);
682 		}
683 
684 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
685 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
686 		if (dma_mapping_error(dev, da)) {
687 			put_page(virt_to_head_page(va));
688 			goto error;
689 		}
690 
691 		mpc->rxbufs_pre[i] = va;
692 		mpc->das_pre[i] = da;
693 		mpc->rxbpre_total = i + 1;
694 	}
695 
696 	return 0;
697 
698 error:
699 	netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
700 	mana_pre_dealloc_rxbufs(mpc);
701 	return -ENOMEM;
702 }
703 
704 static int mana_change_mtu(struct net_device *ndev, int new_mtu)
705 {
706 	struct mana_port_context *mpc = netdev_priv(ndev);
707 	unsigned int old_mtu = ndev->mtu;
708 	int err;
709 
710 	/* Pre-allocate buffers to prevent failure in mana_attach later */
711 	err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
712 	if (err) {
713 		netdev_err(ndev, "Insufficient memory for new MTU\n");
714 		return err;
715 	}
716 
717 	err = mana_detach(ndev, false);
718 	if (err) {
719 		netdev_err(ndev, "mana_detach failed: %d\n", err);
720 		goto out;
721 	}
722 
723 	WRITE_ONCE(ndev->mtu, new_mtu);
724 
725 	err = mana_attach(ndev);
726 	if (err) {
727 		netdev_err(ndev, "mana_attach failed: %d\n", err);
728 		WRITE_ONCE(ndev->mtu, old_mtu);
729 	}
730 
731 out:
732 	mana_pre_dealloc_rxbufs(mpc);
733 	return err;
734 }
735 
736 static const struct net_device_ops mana_devops = {
737 	.ndo_open		= mana_open,
738 	.ndo_stop		= mana_close,
739 	.ndo_select_queue	= mana_select_queue,
740 	.ndo_start_xmit		= mana_start_xmit,
741 	.ndo_validate_addr	= eth_validate_addr,
742 	.ndo_get_stats64	= mana_get_stats64,
743 	.ndo_bpf		= mana_bpf,
744 	.ndo_xdp_xmit		= mana_xdp_xmit,
745 	.ndo_change_mtu		= mana_change_mtu,
746 };
747 
748 static void mana_cleanup_port_context(struct mana_port_context *apc)
749 {
 750 	/*
 751 	 * Clear the dentry pointer so that subsequent cleanup attempts don't
 752 	 * try to remove an already removed debugfs entry.
 753 	 */
754 	debugfs_remove(apc->mana_port_debugfs);
755 	apc->mana_port_debugfs = NULL;
756 	kfree(apc->rxqs);
757 	apc->rxqs = NULL;
758 }
759 
760 static void mana_cleanup_indir_table(struct mana_port_context *apc)
761 {
762 	apc->indir_table_sz = 0;
763 	kfree(apc->indir_table);
764 	kfree(apc->rxobj_table);
765 }
766 
767 static int mana_init_port_context(struct mana_port_context *apc)
768 {
769 	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
770 			    GFP_KERNEL);
771 
772 	return !apc->rxqs ? -ENOMEM : 0;
773 }
774 
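/* Send a MANA management request over the GDMA channel, and validate the
 * response status and that it carries a matching device ID and activity ID.
 */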
775 static int mana_send_request(struct mana_context *ac, void *in_buf,
776 			     u32 in_len, void *out_buf, u32 out_len)
777 {
778 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
779 	struct gdma_resp_hdr *resp = out_buf;
780 	struct gdma_req_hdr *req = in_buf;
781 	struct device *dev = gc->dev;
782 	static atomic_t activity_id;
783 	int err;
784 
785 	req->dev_id = gc->mana.dev_id;
786 	req->activity_id = atomic_inc_return(&activity_id);
787 
788 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
789 				   out_buf);
790 	if (err || resp->status) {
791 		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
792 			err, resp->status);
793 		return err ? err : -EPROTO;
794 	}
795 
796 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
797 	    req->activity_id != resp->activity_id) {
798 		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
799 			req->dev_id.as_uint32, resp->dev_id.as_uint32,
800 			req->activity_id, resp->activity_id);
801 		return -EPROTO;
802 	}
803 
804 	return 0;
805 }
806 
807 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
808 				const enum mana_command_code expected_code,
809 				const u32 min_size)
810 {
811 	if (resp_hdr->response.msg_type != expected_code)
812 		return -EPROTO;
813 
814 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
815 		return -EPROTO;
816 
817 	if (resp_hdr->response.msg_size < min_size)
818 		return -EPROTO;
819 
820 	return 0;
821 }
822 
823 static int mana_pf_register_hw_vport(struct mana_port_context *apc)
824 {
825 	struct mana_register_hw_vport_resp resp = {};
826 	struct mana_register_hw_vport_req req = {};
827 	int err;
828 
829 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
830 			     sizeof(req), sizeof(resp));
831 	req.attached_gfid = 1;
832 	req.is_pf_default_vport = 1;
833 	req.allow_all_ether_types = 1;
834 
835 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
836 				sizeof(resp));
837 	if (err) {
838 		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
839 		return err;
840 	}
841 
842 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
843 				   sizeof(resp));
844 	if (err || resp.hdr.status) {
845 		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
846 			   err, resp.hdr.status);
847 		return err ? err : -EPROTO;
848 	}
849 
850 	apc->port_handle = resp.hw_vport_handle;
851 	return 0;
852 }
853 
854 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
855 {
856 	struct mana_deregister_hw_vport_resp resp = {};
857 	struct mana_deregister_hw_vport_req req = {};
858 	int err;
859 
860 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
861 			     sizeof(req), sizeof(resp));
862 	req.hw_vport_handle = apc->port_handle;
863 
864 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
865 				sizeof(resp));
866 	if (err) {
867 		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
868 			   err);
869 		return;
870 	}
871 
872 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
873 				   sizeof(resp));
874 	if (err || resp.hdr.status)
875 		netdev_err(apc->ndev,
876 			   "Failed to deregister hw vPort: %d, 0x%x\n",
877 			   err, resp.hdr.status);
878 }
879 
880 static int mana_pf_register_filter(struct mana_port_context *apc)
881 {
882 	struct mana_register_filter_resp resp = {};
883 	struct mana_register_filter_req req = {};
884 	int err;
885 
886 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
887 			     sizeof(req), sizeof(resp));
888 	req.vport = apc->port_handle;
889 	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
890 
891 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
892 				sizeof(resp));
893 	if (err) {
894 		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
895 		return err;
896 	}
897 
898 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
899 				   sizeof(resp));
900 	if (err || resp.hdr.status) {
901 		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
902 			   err, resp.hdr.status);
903 		return err ? err : -EPROTO;
904 	}
905 
906 	apc->pf_filter_handle = resp.filter_handle;
907 	return 0;
908 }
909 
910 static void mana_pf_deregister_filter(struct mana_port_context *apc)
911 {
912 	struct mana_deregister_filter_resp resp = {};
913 	struct mana_deregister_filter_req req = {};
914 	int err;
915 
916 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
917 			     sizeof(req), sizeof(resp));
918 	req.filter_handle = apc->pf_filter_handle;
919 
920 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
921 				sizeof(resp));
922 	if (err) {
923 		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
924 			   err);
925 		return;
926 	}
927 
928 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
929 				   sizeof(resp));
930 	if (err || resp.hdr.status)
931 		netdev_err(apc->ndev,
932 			   "Failed to deregister filter: %d, 0x%x\n",
933 			   err, resp.hdr.status);
934 }
935 
936 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
937 				 u32 proto_minor_ver, u32 proto_micro_ver,
938 				 u16 *max_num_vports)
939 {
940 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
941 	struct mana_query_device_cfg_resp resp = {};
942 	struct mana_query_device_cfg_req req = {};
943 	struct device *dev = gc->dev;
944 	int err = 0;
945 
946 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
947 			     sizeof(req), sizeof(resp));
948 
949 	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
950 
951 	req.proto_major_ver = proto_major_ver;
952 	req.proto_minor_ver = proto_minor_ver;
953 	req.proto_micro_ver = proto_micro_ver;
954 
955 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
956 	if (err) {
957 		dev_err(dev, "Failed to query config: %d", err);
958 		return err;
959 	}
960 
961 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
962 				   sizeof(resp));
963 	if (err || resp.hdr.status) {
964 		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
965 			resp.hdr.status);
966 		if (!err)
967 			err = -EPROTO;
968 		return err;
969 	}
970 
971 	*max_num_vports = resp.max_num_vports;
972 
973 	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
974 		gc->adapter_mtu = resp.adapter_mtu;
975 	else
976 		gc->adapter_mtu = ETH_FRAME_LEN;
977 
978 	debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
979 
980 	return 0;
981 }
982 
983 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
984 				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
985 {
986 	struct mana_query_vport_cfg_resp resp = {};
987 	struct mana_query_vport_cfg_req req = {};
988 	int err;
989 
990 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
991 			     sizeof(req), sizeof(resp));
992 
993 	req.vport_index = vport_index;
994 
995 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
996 				sizeof(resp));
997 	if (err)
998 		return err;
999 
1000 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
1001 				   sizeof(resp));
1002 	if (err)
1003 		return err;
1004 
1005 	if (resp.hdr.status)
1006 		return -EPROTO;
1007 
1008 	*max_sq = resp.max_num_sq;
1009 	*max_rq = resp.max_num_rq;
1010 	if (resp.num_indirection_ent > 0 &&
1011 	    resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
1012 	    is_power_of_2(resp.num_indirection_ent)) {
1013 		*num_indir_entry = resp.num_indirection_ent;
1014 	} else {
1015 		netdev_warn(apc->ndev,
1016 			    "Setting indirection table size to default %d for vPort %d\n",
1017 			    MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
1018 		*num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
1019 	}
1020 
1021 	apc->port_handle = resp.vport;
1022 	ether_addr_copy(apc->mac_addr, resp.mac_addr);
1023 
1024 	return 0;
1025 }
1026 
1027 void mana_uncfg_vport(struct mana_port_context *apc)
1028 {
1029 	mutex_lock(&apc->vport_mutex);
1030 	apc->vport_use_count--;
1031 	WARN_ON(apc->vport_use_count < 0);
1032 	mutex_unlock(&apc->vport_mutex);
1033 }
1034 EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");
1035 
1036 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
1037 		   u32 doorbell_pg_id)
1038 {
1039 	struct mana_config_vport_resp resp = {};
1040 	struct mana_config_vport_req req = {};
1041 	int err;
1042 
1043 	/* This function is used to program the Ethernet port in the hardware
1044 	 * table. It can be called from the Ethernet driver or the RDMA driver.
1045 	 *
 1046 	 * physical port. The driver checks the port usage before programming
1047 	 * physical port. The driver checks on the port usage before programming
1048 	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1049 	 * device to kernel NET layer (Ethernet driver).
1050 	 *
1051 	 * Because the RDMA driver doesn't know in advance which QP type the
1052 	 * user will create, it exposes the device with all its ports. The user
 1053 	 * may not be able to create a RAW QP on a port if this port is already
 1054 	 * in use by the Ethernet driver from the kernel.
1055 	 *
1056 	 * This physical port limitation only applies to the RAW QP. For RC QP,
1057 	 * the hardware doesn't have this limitation. The user can create RC
1058 	 * QPs on a physical port up to the hardware limits independent of the
1059 	 * Ethernet usage on the same port.
1060 	 */
1061 	mutex_lock(&apc->vport_mutex);
1062 	if (apc->vport_use_count > 0) {
1063 		mutex_unlock(&apc->vport_mutex);
1064 		return -EBUSY;
1065 	}
1066 	apc->vport_use_count++;
1067 	mutex_unlock(&apc->vport_mutex);
1068 
1069 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1070 			     sizeof(req), sizeof(resp));
1071 	req.vport = apc->port_handle;
1072 	req.pdid = protection_dom_id;
1073 	req.doorbell_pageid = doorbell_pg_id;
1074 
1075 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1076 				sizeof(resp));
1077 	if (err) {
1078 		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1079 		goto out;
1080 	}
1081 
1082 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1083 				   sizeof(resp));
1084 	if (err || resp.hdr.status) {
1085 		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1086 			   err, resp.hdr.status);
1087 		if (!err)
1088 			err = -EPROTO;
1089 
1090 		goto out;
1091 	}
1092 
1093 	apc->tx_shortform_allowed = resp.short_form_allowed;
1094 	apc->tx_vp_offset = resp.tx_vport_offset;
1095 
1096 	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
1097 		    apc->port_handle, protection_dom_id, doorbell_pg_id);
1098 out:
1099 	if (err)
1100 		mana_uncfg_vport(apc);
1101 
1102 	return err;
1103 }
1104 EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");
1105 
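/* Configure RX steering for the vPort: enable/disable RX and RSS, and
 * optionally update the default RX object, the hash key and the indirection
 * table of RX objects.
 */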
1106 static int mana_cfg_vport_steering(struct mana_port_context *apc,
1107 				   enum TRI_STATE rx,
1108 				   bool update_default_rxobj, bool update_key,
1109 				   bool update_tab)
1110 {
1111 	struct mana_cfg_rx_steer_req_v2 *req;
1112 	struct mana_cfg_rx_steer_resp resp = {};
1113 	struct net_device *ndev = apc->ndev;
1114 	u32 req_buf_size;
1115 	int err;
1116 
1117 	req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
1118 	req = kzalloc(req_buf_size, GFP_KERNEL);
1119 	if (!req)
1120 		return -ENOMEM;
1121 
1122 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1123 			     sizeof(resp));
1124 
1125 	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
1126 
1127 	req->vport = apc->port_handle;
1128 	req->num_indir_entries = apc->indir_table_sz;
1129 	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
1130 					 indir_tab);
1131 	req->rx_enable = rx;
1132 	req->rss_enable = apc->rss_state;
1133 	req->update_default_rxobj = update_default_rxobj;
1134 	req->update_hashkey = update_key;
1135 	req->update_indir_tab = update_tab;
1136 	req->default_rxobj = apc->default_rxobj;
1137 	req->cqe_coalescing_enable = 0;
1138 
1139 	if (update_key)
1140 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1141 
1142 	if (update_tab)
1143 		memcpy(req->indir_tab, apc->rxobj_table,
1144 		       flex_array_size(req, indir_tab, req->num_indir_entries));
1145 
1146 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1147 				sizeof(resp));
1148 	if (err) {
1149 		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1150 		goto out;
1151 	}
1152 
1153 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1154 				   sizeof(resp));
1155 	if (err) {
1156 		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1157 		goto out;
1158 	}
1159 
1160 	if (resp.hdr.status) {
1161 		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1162 			   resp.hdr.status);
1163 		err = -EPROTO;
1164 	}
1165 
1166 	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1167 		    apc->port_handle, apc->indir_table_sz);
1168 out:
1169 	kfree(req);
1170 	return err;
1171 }
1172 
1173 int mana_create_wq_obj(struct mana_port_context *apc,
1174 		       mana_handle_t vport,
1175 		       u32 wq_type, struct mana_obj_spec *wq_spec,
1176 		       struct mana_obj_spec *cq_spec,
1177 		       mana_handle_t *wq_obj)
1178 {
1179 	struct mana_create_wqobj_resp resp = {};
1180 	struct mana_create_wqobj_req req = {};
1181 	struct net_device *ndev = apc->ndev;
1182 	int err;
1183 
1184 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1185 			     sizeof(req), sizeof(resp));
1186 	req.vport = vport;
1187 	req.wq_type = wq_type;
1188 	req.wq_gdma_region = wq_spec->gdma_region;
1189 	req.cq_gdma_region = cq_spec->gdma_region;
1190 	req.wq_size = wq_spec->queue_size;
1191 	req.cq_size = cq_spec->queue_size;
1192 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1193 	req.cq_parent_qid = cq_spec->attached_eq;
1194 
1195 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1196 				sizeof(resp));
1197 	if (err) {
1198 		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
1199 		goto out;
1200 	}
1201 
1202 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1203 				   sizeof(resp));
1204 	if (err || resp.hdr.status) {
1205 		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1206 			   resp.hdr.status);
1207 		if (!err)
1208 			err = -EPROTO;
1209 		goto out;
1210 	}
1211 
1212 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1213 		netdev_err(ndev, "Got an invalid WQ object handle\n");
1214 		err = -EPROTO;
1215 		goto out;
1216 	}
1217 
1218 	*wq_obj = resp.wq_obj;
1219 	wq_spec->queue_index = resp.wq_id;
1220 	cq_spec->queue_index = resp.cq_id;
1221 
1222 	return 0;
1223 out:
1224 	return err;
1225 }
1226 EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");
1227 
1228 void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1229 			 mana_handle_t wq_obj)
1230 {
1231 	struct mana_destroy_wqobj_resp resp = {};
1232 	struct mana_destroy_wqobj_req req = {};
1233 	struct net_device *ndev = apc->ndev;
1234 	int err;
1235 
1236 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1237 			     sizeof(req), sizeof(resp));
1238 	req.wq_type = wq_type;
1239 	req.wq_obj_handle = wq_obj;
1240 
1241 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1242 				sizeof(resp));
1243 	if (err) {
1244 		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
1245 		return;
1246 	}
1247 
1248 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1249 				   sizeof(resp));
1250 	if (err || resp.hdr.status)
1251 		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
1252 			   resp.hdr.status);
1253 }
1254 EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");
1255 
1256 static void mana_destroy_eq(struct mana_context *ac)
1257 {
1258 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1259 	struct gdma_queue *eq;
1260 	int i;
1261 
1262 	if (!ac->eqs)
1263 		return;
1264 
1265 	debugfs_remove_recursive(ac->mana_eqs_debugfs);
1266 	ac->mana_eqs_debugfs = NULL;
1267 
1268 	for (i = 0; i < gc->max_num_queues; i++) {
1269 		eq = ac->eqs[i].eq;
1270 		if (!eq)
1271 			continue;
1272 
1273 		mana_gd_destroy_queue(gc, eq);
1274 	}
1275 
1276 	kfree(ac->eqs);
1277 	ac->eqs = NULL;
1278 }
1279 
1280 static void mana_create_eq_debugfs(struct mana_context *ac, int i)
1281 {
1282 	struct mana_eq eq = ac->eqs[i];
1283 	char eqnum[32];
1284 
1285 	sprintf(eqnum, "eq%d", i);
1286 	eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
1287 	debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head);
1288 	debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail);
1289 	debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops);
1290 }
1291 
1292 static int mana_create_eq(struct mana_context *ac)
1293 {
1294 	struct gdma_dev *gd = ac->gdma_dev;
1295 	struct gdma_context *gc = gd->gdma_context;
1296 	struct gdma_queue_spec spec = {};
1297 	int err;
1298 	int i;
1299 
1300 	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
1301 			  GFP_KERNEL);
1302 	if (!ac->eqs)
1303 		return -ENOMEM;
1304 
1305 	spec.type = GDMA_EQ;
1306 	spec.monitor_avl_buf = false;
1307 	spec.queue_size = EQ_SIZE;
1308 	spec.eq.callback = NULL;
1309 	spec.eq.context = ac->eqs;
1310 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1311 
1312 	ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);
1313 
1314 	for (i = 0; i < gc->max_num_queues; i++) {
1315 		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
1316 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1317 		if (err) {
1318 			dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
1319 			goto out;
1320 		}
1321 		mana_create_eq_debugfs(ac, i);
1322 	}
1323 
1324 	return 0;
1325 out:
1326 	mana_destroy_eq(ac);
1327 	return err;
1328 }
1329 
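/* Issue a FENCE_RQ request and wait (up to 10 seconds) for the corresponding
 * CQE_RX_OBJECT_FENCE completion, which signals that the hardware has
 * processed the outstanding work on this RQ.
 */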
1330 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1331 {
1332 	struct mana_fence_rq_resp resp = {};
1333 	struct mana_fence_rq_req req = {};
1334 	int err;
1335 
1336 	init_completion(&rxq->fence_event);
1337 
1338 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1339 			     sizeof(req), sizeof(resp));
1340 	req.wq_obj_handle =  rxq->rxobj;
1341 
1342 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1343 				sizeof(resp));
1344 	if (err) {
1345 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1346 			   rxq->rxq_idx, err);
1347 		return err;
1348 	}
1349 
1350 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1351 	if (err || resp.hdr.status) {
1352 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1353 			   rxq->rxq_idx, err, resp.hdr.status);
1354 		if (!err)
1355 			err = -EPROTO;
1356 
1357 		return err;
1358 	}
1359 
1360 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1361 		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1362 			   rxq->rxq_idx);
1363 		return -ETIMEDOUT;
1364 	}
1365 
1366 	return 0;
1367 }
1368 
1369 static void mana_fence_rqs(struct mana_port_context *apc)
1370 {
1371 	unsigned int rxq_idx;
1372 	struct mana_rxq *rxq;
1373 	int err;
1374 
1375 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1376 		rxq = apc->rxqs[rxq_idx];
1377 		err = mana_fence_rq(apc, rxq);
1378 
1379 		/* In case of any error, use sleep instead. */
 1380 		/* In case of any error, sleep briefly instead of waiting on the fence. */
1381 			msleep(100);
1382 	}
1383 }
1384 
1385 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1386 {
1387 	u32 used_space_old;
1388 	u32 used_space_new;
1389 
1390 	used_space_old = wq->head - wq->tail;
1391 	used_space_new = wq->head - (wq->tail + num_units);
1392 
1393 	if (WARN_ON_ONCE(used_space_new > used_space_old))
1394 		return -ERANGE;
1395 
1396 	wq->tail += num_units;
1397 	return 0;
1398 }
1399 
1400 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1401 {
1402 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1403 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1404 	struct device *dev = gc->dev;
1405 	int hsg, i;
1406 
1407 	/* Number of SGEs of linear part */
1408 	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1409 
1410 	for (i = 0; i < hsg; i++)
1411 		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1412 				 DMA_TO_DEVICE);
1413 
1414 	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1415 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1416 			       DMA_TO_DEVICE);
1417 }
1418 
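/* Process TX completions: unmap and free the completed skbs, advance the SQ
 * tail by the consumed WQE units, and wake the TX queue if the port is up,
 * the queue was stopped and enough space is available again.
 */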
1419 static void mana_poll_tx_cq(struct mana_cq *cq)
1420 {
1421 	struct gdma_comp *completions = cq->gdma_comp_buf;
1422 	struct gdma_posted_wqe_info *wqe_info;
1423 	unsigned int pkt_transmitted = 0;
1424 	unsigned int wqe_unit_cnt = 0;
1425 	struct mana_txq *txq = cq->txq;
1426 	struct mana_port_context *apc;
1427 	struct netdev_queue *net_txq;
1428 	struct gdma_queue *gdma_wq;
1429 	unsigned int avail_space;
1430 	struct net_device *ndev;
1431 	struct sk_buff *skb;
1432 	bool txq_stopped;
1433 	int comp_read;
1434 	int i;
1435 
1436 	ndev = txq->ndev;
1437 	apc = netdev_priv(ndev);
1438 
1439 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1440 				    CQE_POLLING_BUFFER);
1441 
1442 	if (comp_read < 1)
1443 		return;
1444 
1445 	for (i = 0; i < comp_read; i++) {
1446 		struct mana_tx_comp_oob *cqe_oob;
1447 
1448 		if (WARN_ON_ONCE(!completions[i].is_sq))
1449 			return;
1450 
1451 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1452 		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1453 				 MANA_CQE_COMPLETION))
1454 			return;
1455 
1456 		switch (cqe_oob->cqe_hdr.cqe_type) {
1457 		case CQE_TX_OKAY:
1458 			break;
1459 
1460 		case CQE_TX_SA_DROP:
1461 		case CQE_TX_MTU_DROP:
1462 		case CQE_TX_INVALID_OOB:
1463 		case CQE_TX_INVALID_ETH_TYPE:
1464 		case CQE_TX_HDR_PROCESSING_ERROR:
1465 		case CQE_TX_VF_DISABLED:
1466 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1467 		case CQE_TX_VPORT_DISABLED:
1468 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1469 			if (net_ratelimit())
1470 				netdev_err(ndev, "TX: CQE error %d\n",
1471 					   cqe_oob->cqe_hdr.cqe_type);
1472 
1473 			apc->eth_stats.tx_cqe_err++;
1474 			break;
1475 
1476 		default:
1477 			/* If the CQE type is unknown, log an error,
1478 			 * and still free the SKB, update tail, etc.
1479 			 */
1480 			if (net_ratelimit())
1481 				netdev_err(ndev, "TX: unknown CQE type %d\n",
1482 					   cqe_oob->cqe_hdr.cqe_type);
1483 
1484 			apc->eth_stats.tx_cqe_unknown_type++;
1485 			break;
1486 		}
1487 
1488 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1489 			return;
1490 
1491 		skb = skb_dequeue(&txq->pending_skbs);
1492 		if (WARN_ON_ONCE(!skb))
1493 			return;
1494 
1495 		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1496 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1497 
1498 		mana_unmap_skb(skb, apc);
1499 
1500 		napi_consume_skb(skb, cq->budget);
1501 
1502 		pkt_transmitted++;
1503 	}
1504 
1505 	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1506 		return;
1507 
1508 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1509 
1510 	gdma_wq = txq->gdma_sq;
1511 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1512 
1513 	/* Ensure tail updated before checking q stop */
1514 	smp_mb();
1515 
1516 	net_txq = txq->net_txq;
1517 	txq_stopped = netif_tx_queue_stopped(net_txq);
1518 
1519 	/* Ensure checking txq_stopped before apc->port_is_up. */
1520 	smp_rmb();
1521 
1522 	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1523 		netif_tx_wake_queue(net_txq);
1524 		apc->eth_stats.wake_queue++;
1525 	}
1526 
1527 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1528 		WARN_ON_ONCE(1);
1529 
1530 	cq->work_done = pkt_transmitted;
1531 }
1532 
1533 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1534 {
1535 	struct mana_recv_buf_oob *recv_buf_oob;
1536 	u32 curr_index;
1537 	int err;
1538 
1539 	curr_index = rxq->buf_index++;
1540 	if (rxq->buf_index == rxq->num_rx_buf)
1541 		rxq->buf_index = 0;
1542 
1543 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1544 
1545 	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1546 					&recv_buf_oob->wqe_inf);
1547 	if (WARN_ON_ONCE(err))
1548 		return;
1549 
1550 	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1551 }
1552 
1553 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1554 				      uint pkt_len, struct xdp_buff *xdp)
1555 {
1556 	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1557 
1558 	if (!skb)
1559 		return NULL;
1560 
1561 	if (xdp->data_hard_start) {
1562 		u32 metasize = xdp->data - xdp->data_meta;
1563 
1564 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1565 		skb_put(skb, xdp->data_end - xdp->data);
1566 		if (metasize)
1567 			skb_metadata_set(skb, metasize);
1568 		return skb;
1569 	}
1570 
1571 	skb_reserve(skb, rxq->headroom);
1572 	skb_put(skb, pkt_len);
1573 
1574 	return skb;
1575 }
1576 
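/* Deliver one received buffer: run the XDP program first, then build an skb,
 * fill in checksum, hash and VLAN metadata from the CQE, and hand it to GRO
 * (or transmit it for XDP_TX).
 */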
1577 static void mana_rx_skb(void *buf_va, bool from_pool,
1578 			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1579 {
1580 	struct mana_stats_rx *rx_stats = &rxq->stats;
1581 	struct net_device *ndev = rxq->ndev;
1582 	uint pkt_len = cqe->ppi[0].pkt_len;
1583 	u16 rxq_idx = rxq->rxq_idx;
1584 	struct napi_struct *napi;
1585 	struct xdp_buff xdp = {};
1586 	struct sk_buff *skb;
1587 	u32 hash_value;
1588 	u32 act;
1589 
1590 	rxq->rx_cq.work_done++;
1591 	napi = &rxq->rx_cq.napi;
1592 
1593 	if (!buf_va) {
1594 		++ndev->stats.rx_dropped;
1595 		return;
1596 	}
1597 
1598 	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1599 
1600 	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1601 		return;
1602 
1603 	if (act != XDP_PASS && act != XDP_TX)
1604 		goto drop_xdp;
1605 
1606 	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1607 
1608 	if (!skb)
1609 		goto drop;
1610 
1611 	if (from_pool)
1612 		skb_mark_for_recycle(skb);
1613 
1614 	skb->dev = napi->dev;
1615 
1616 	skb->protocol = eth_type_trans(skb, ndev);
1617 	skb_checksum_none_assert(skb);
1618 	skb_record_rx_queue(skb, rxq_idx);
1619 
1620 	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1621 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1622 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1623 	}
1624 
1625 	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1626 		hash_value = cqe->ppi[0].pkt_hash;
1627 
1628 		if (cqe->rx_hashtype & MANA_HASH_L4)
1629 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1630 		else
1631 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1632 	}
1633 
1634 	if (cqe->rx_vlantag_present) {
1635 		u16 vlan_tci = cqe->rx_vlan_id;
1636 
1637 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1638 	}
1639 
1640 	u64_stats_update_begin(&rx_stats->syncp);
1641 	rx_stats->packets++;
1642 	rx_stats->bytes += pkt_len;
1643 
1644 	if (act == XDP_TX)
1645 		rx_stats->xdp_tx++;
1646 	u64_stats_update_end(&rx_stats->syncp);
1647 
1648 	if (act == XDP_TX) {
1649 		skb_set_queue_mapping(skb, rxq_idx);
1650 		mana_xdp_tx(skb, ndev);
1651 		return;
1652 	}
1653 
1654 	napi_gro_receive(napi, skb);
1655 
1656 	return;
1657 
1658 drop_xdp:
1659 	u64_stats_update_begin(&rx_stats->syncp);
1660 	rx_stats->xdp_drop++;
1661 	u64_stats_update_end(&rx_stats->syncp);
1662 
1663 drop:
1664 	if (from_pool) {
1665 		page_pool_recycle_direct(rxq->page_pool,
1666 					 virt_to_head_page(buf_va));
1667 	} else {
1668 		WARN_ON_ONCE(rxq->xdp_save_va);
1669 		/* Save for reuse */
1670 		rxq->xdp_save_va = buf_va;
1671 	}
1672 
1673 	++ndev->stats.rx_dropped;
1674 
1675 	return;
1676 }
1677 
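/* Allocate and DMA-map one RX buffer: reuse the buffer saved by XDP if any,
 * use the frag allocator when the buffer is larger than a page, and the
 * page pool otherwise.
 */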
1678 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
1679 			     dma_addr_t *da, bool *from_pool, bool is_napi)
1680 {
1681 	struct page *page;
1682 	void *va;
1683 
1684 	*from_pool = false;
1685 
1686 	/* Reuse XDP dropped page if available */
1687 	if (rxq->xdp_save_va) {
1688 		va = rxq->xdp_save_va;
1689 		rxq->xdp_save_va = NULL;
1690 	} else if (rxq->alloc_size > PAGE_SIZE) {
1691 		if (is_napi)
1692 			va = napi_alloc_frag(rxq->alloc_size);
1693 		else
1694 			va = netdev_alloc_frag(rxq->alloc_size);
1695 
1696 		if (!va)
1697 			return NULL;
1698 
1699 		page = virt_to_head_page(va);
1700 		/* Check if the frag falls back to single page */
1701 		if (compound_order(page) < get_order(rxq->alloc_size)) {
1702 			put_page(page);
1703 			return NULL;
1704 		}
1705 	} else {
1706 		page = page_pool_dev_alloc_pages(rxq->page_pool);
1707 		if (!page)
1708 			return NULL;
1709 
1710 		*from_pool = true;
1711 		va = page_to_virt(page);
1712 	}
1713 
1714 	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
1715 			     DMA_FROM_DEVICE);
1716 	if (dma_mapping_error(dev, *da)) {
1717 		if (*from_pool)
1718 			page_pool_put_full_page(rxq->page_pool, page, false);
1719 		else
1720 			put_page(virt_to_head_page(va));
1721 
1722 		return NULL;
1723 	}
1724 
1725 	return va;
1726 }
1727 
1728 /* Allocate frag for rx buffer, and save the old buf */
1729 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
1730 			       struct mana_recv_buf_oob *rxoob, void **old_buf,
1731 			       bool *old_fp)
1732 {
1733 	bool from_pool;
1734 	dma_addr_t da;
1735 	void *va;
1736 
1737 	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
1738 	if (!va)
1739 		return;
1740 
1741 	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
1742 			 DMA_FROM_DEVICE);
1743 	*old_buf = rxoob->buf_va;
1744 	*old_fp = rxoob->from_pool;
1745 
1746 	rxoob->buf_va = va;
1747 	rxoob->sgl[0].address = da;
1748 	rxoob->from_pool = from_pool;
1749 }
1750 
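/* Handle one RX CQE: on a successful receive, refill the buffer slot with a
 * fresh buffer and hand the old one to mana_rx_skb(), then advance the RQ
 * tail and re-post a receive WQE.
 */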
1751 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1752 				struct gdma_comp *cqe)
1753 {
1754 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1755 	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1756 	struct net_device *ndev = rxq->ndev;
1757 	struct mana_recv_buf_oob *rxbuf_oob;
1758 	struct mana_port_context *apc;
1759 	struct device *dev = gc->dev;
1760 	void *old_buf = NULL;
1761 	u32 curr, pktlen;
1762 	bool old_fp;
1763 
1764 	apc = netdev_priv(ndev);
1765 
1766 	switch (oob->cqe_hdr.cqe_type) {
1767 	case CQE_RX_OKAY:
1768 		break;
1769 
1770 	case CQE_RX_TRUNCATED:
1771 		++ndev->stats.rx_dropped;
1772 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1773 		netdev_warn_once(ndev, "Dropped a truncated packet\n");
1774 		goto drop;
1775 
1776 	case CQE_RX_COALESCED_4:
1777 		netdev_err(ndev, "RX coalescing is unsupported\n");
1778 		apc->eth_stats.rx_coalesced_err++;
1779 		return;
1780 
1781 	case CQE_RX_OBJECT_FENCE:
1782 		complete(&rxq->fence_event);
1783 		return;
1784 
1785 	default:
1786 		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1787 			   oob->cqe_hdr.cqe_type);
1788 		apc->eth_stats.rx_cqe_unknown_type++;
1789 		return;
1790 	}
1791 
1792 	pktlen = oob->ppi[0].pkt_len;
1793 
1794 	if (pktlen == 0) {
 1795 		/* Data packets should never have a packet length of zero */
1796 		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1797 			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1798 		return;
1799 	}
1800 
1801 	curr = rxq->buf_index;
1802 	rxbuf_oob = &rxq->rx_oobs[curr];
1803 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1804 
1805 	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
1806 
1807 	/* Unsuccessful refill will have old_buf == NULL.
1808 	 * In this case, mana_rx_skb() will drop the packet.
1809 	 */
1810 	mana_rx_skb(old_buf, old_fp, oob, rxq);
1811 
1812 drop:
1813 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1814 
1815 	mana_post_pkt_rxq(rxq);
1816 }
1817 
1818 static void mana_poll_rx_cq(struct mana_cq *cq)
1819 {
1820 	struct gdma_comp *comp = cq->gdma_comp_buf;
1821 	struct mana_rxq *rxq = cq->rxq;
1822 	int comp_read, i;
1823 
1824 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1825 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1826 
1827 	rxq->xdp_flush = false;
1828 
1829 	for (i = 0; i < comp_read; i++) {
1830 		if (WARN_ON_ONCE(comp[i].is_sq))
1831 			return;
1832 
1833 		/* verify recv cqe references the right rxq */
1834 		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1835 			return;
1836 
1837 		mana_process_rx_cqe(rxq, cq, &comp[i]);
1838 	}
1839 
1840 	if (comp_read > 0) {
1841 		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1842 
1843 		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
1844 	}
1845 
1846 	if (rxq->xdp_flush)
1847 		xdp_do_flush();
1848 }
1849 
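/* Poll the RX or TX CQ from NAPI context. Re-arm the CQ when the work done
 * is under budget; otherwise ring the CQ doorbell periodically as the
 * hardware requires (see the wraparound comment below).
 */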
1850 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1851 {
1852 	struct mana_cq *cq = context;
1853 	int w;
1854 
1855 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1856 
1857 	if (cq->type == MANA_CQ_TYPE_RX)
1858 		mana_poll_rx_cq(cq);
1859 	else
1860 		mana_poll_tx_cq(cq);
1861 
1862 	w = cq->work_done;
1863 	cq->work_done_since_doorbell += w;
1864 
1865 	if (w < cq->budget) {
1866 		mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
1867 		cq->work_done_since_doorbell = 0;
1868 		napi_complete_done(&cq->napi, w);
1869 	} else if (cq->work_done_since_doorbell >
1870 		   cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
1871 		/* MANA hardware requires at least one doorbell ring every 8
1872 		 * wraparounds of CQ even if there is no need to arm the CQ.
1873 		 * This driver rings the doorbell as soon as we have exceeded
1874 		 * 4 wraparounds.
1875 		 */
1876 		mana_gd_ring_cq(gdma_queue, 0);
1877 		cq->work_done_since_doorbell = 0;
1878 	}
1879 
1880 	return w;
1881 }
1882 
1883 static int mana_poll(struct napi_struct *napi, int budget)
1884 {
1885 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1886 	int w;
1887 
1888 	cq->work_done = 0;
1889 	cq->budget = budget;
1890 
1891 	w = mana_cq_handler(cq, cq->gdma_cq);
1892 
1893 	return min(w, budget);
1894 }
1895 
1896 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1897 {
1898 	struct mana_cq *cq = context;
1899 
1900 	napi_schedule_irqoff(&cq->napi);
1901 }
1902 
1903 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1904 {
1905 	struct gdma_dev *gd = apc->ac->gdma_dev;
1906 
1907 	if (!cq->gdma_cq)
1908 		return;
1909 
1910 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1911 }
1912 
1913 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1914 {
1915 	struct gdma_dev *gd = apc->ac->gdma_dev;
1916 
1917 	if (!txq->gdma_sq)
1918 		return;
1919 
1920 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1921 }
1922 
1923 static void mana_destroy_txq(struct mana_port_context *apc)
1924 {
1925 	struct napi_struct *napi;
1926 	int i;
1927 
1928 	if (!apc->tx_qp)
1929 		return;
1930 
1931 	for (i = 0; i < apc->num_queues; i++) {
1932 		debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
1933 		apc->tx_qp[i].mana_tx_debugfs = NULL;
1934 
1935 		napi = &apc->tx_qp[i].tx_cq.napi;
1936 		if (apc->tx_qp[i].txq.napi_initialized) {
1937 			napi_synchronize(napi);
1938 			napi_disable(napi);
1939 			netif_napi_del(napi);
1940 			apc->tx_qp[i].txq.napi_initialized = false;
1941 		}
1942 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1943 
1944 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1945 
1946 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1947 	}
1948 
1949 	kfree(apc->tx_qp);
1950 	apc->tx_qp = NULL;
1951 }
1952 
1953 static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
1954 {
1955 	struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
1956 	char qnum[32];
1957 
1958 	sprintf(qnum, "TX-%d", idx);
1959 	tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
1960 	debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
1961 			   &tx_qp->txq.gdma_sq->head);
1962 	debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
1963 			   &tx_qp->txq.gdma_sq->tail);
1964 	debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
1965 			   &tx_qp->txq.pending_skbs.qlen);
1966 	debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
1967 			   &tx_qp->tx_cq.gdma_cq->head);
1968 	debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
1969 			   &tx_qp->tx_cq.gdma_cq->tail);
1970 	debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
1971 			   &tx_qp->tx_cq.budget);
1972 	debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
1973 			    tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
1974 	debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
1975 			    tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
1976 }
1977 
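/* mana_create_txq() - creates one SQ plus its completion CQ per TX queue:
 * the GDMA queues are allocated first, a hardware WQ object is then created
 * from their DMA regions, and finally debugfs entries and TX NAPI are set
 * up and the CQ is armed. On any failure the whole set is torn down.
 */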
1978 static int mana_create_txq(struct mana_port_context *apc,
1979 			   struct net_device *net)
1980 {
1981 	struct mana_context *ac = apc->ac;
1982 	struct gdma_dev *gd = ac->gdma_dev;
1983 	struct mana_obj_spec wq_spec;
1984 	struct mana_obj_spec cq_spec;
1985 	struct gdma_queue_spec spec;
1986 	struct gdma_context *gc;
1987 	struct mana_txq *txq;
1988 	struct mana_cq *cq;
1989 	u32 txq_size;
1990 	u32 cq_size;
1991 	int err;
1992 	int i;
1993 
1994 	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1995 			     GFP_KERNEL);
1996 	if (!apc->tx_qp)
1997 		return -ENOMEM;
1998 
1999 	/* The minimum size of a WQE is 32 bytes, hence
2000 	 * apc->tx_queue_size represents the maximum number of WQEs
2001 	 * the SQ can store. This value is then used to size other queues
2002 	 * to prevent overflow.
2003 	 * Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
2004 	 * value of apc->tx_queue_size is 128, which makes txq_size
2005 	 * 128 * 32 = 4096, and all higher values of apc->tx_queue_size
2006 	 * are powers of two.
2007 	 */
2008 	txq_size = apc->tx_queue_size * 32;
2009 
2010 	cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
2011 
2012 	gc = gd->gdma_context;
2013 
2014 	for (i = 0; i < apc->num_queues; i++) {
2015 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2016 
2017 		/* Create SQ */
2018 		txq = &apc->tx_qp[i].txq;
2019 
2020 		u64_stats_init(&txq->stats.syncp);
2021 		txq->ndev = net;
2022 		txq->net_txq = netdev_get_tx_queue(net, i);
2023 		txq->vp_offset = apc->tx_vp_offset;
2024 		txq->napi_initialized = false;
2025 		skb_queue_head_init(&txq->pending_skbs);
2026 
2027 		memset(&spec, 0, sizeof(spec));
2028 		spec.type = GDMA_SQ;
2029 		spec.monitor_avl_buf = true;
2030 		spec.queue_size = txq_size;
2031 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2032 		if (err)
2033 			goto out;
2034 
2035 		/* Create SQ's CQ */
2036 		cq = &apc->tx_qp[i].tx_cq;
2037 		cq->type = MANA_CQ_TYPE_TX;
2038 
2039 		cq->txq = txq;
2040 
2041 		memset(&spec, 0, sizeof(spec));
2042 		spec.type = GDMA_CQ;
2043 		spec.monitor_avl_buf = false;
2044 		spec.queue_size = cq_size;
2045 		spec.cq.callback = mana_schedule_napi;
2046 		spec.cq.parent_eq = ac->eqs[i].eq;
2047 		spec.cq.context = cq;
2048 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2049 		if (err)
2050 			goto out;
2051 
2052 		memset(&wq_spec, 0, sizeof(wq_spec));
2053 		memset(&cq_spec, 0, sizeof(cq_spec));
2054 
2055 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2056 		wq_spec.queue_size = txq->gdma_sq->queue_size;
2057 
2058 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2059 		cq_spec.queue_size = cq->gdma_cq->queue_size;
2060 		cq_spec.modr_ctx_id = 0;
2061 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2062 
2063 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2064 					 &wq_spec, &cq_spec,
2065 					 &apc->tx_qp[i].tx_object);
2066 
2067 		if (err)
2068 			goto out;
2069 
2070 		txq->gdma_sq->id = wq_spec.queue_index;
2071 		cq->gdma_cq->id = cq_spec.queue_index;
2072 
2073 		txq->gdma_sq->mem_info.dma_region_handle =
2074 			GDMA_INVALID_DMA_REGION;
2075 		cq->gdma_cq->mem_info.dma_region_handle =
2076 			GDMA_INVALID_DMA_REGION;
2077 
2078 		txq->gdma_txq_id = txq->gdma_sq->id;
2079 
2080 		cq->gdma_id = cq->gdma_cq->id;
2081 
2082 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2083 			err = -EINVAL;
2084 			goto out;
2085 		}
2086 
2087 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2088 
2089 		mana_create_txq_debugfs(apc, i);
2090 
2091 		netif_napi_add_tx(net, &cq->napi, mana_poll);
2092 		napi_enable(&cq->napi);
2093 		txq->napi_initialized = true;
2094 
2095 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2096 	}
2097 
2098 	return 0;
2099 out:
2100 	netdev_err(net, "Failed to create %d TX queues, %d\n",
2101 		   apc->num_queues, err);
2102 	mana_destroy_txq(apc);
2103 	return err;
2104 }
2105 
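/* mana_destroy_rxq() - releases everything mana_create_rxq() set up: the
 * debugfs entries, NAPI (if it was initialized), the XDP rxq registration,
 * the hardware WQ object, the completion queue, all posted receive buffers,
 * the page pool and the RQ itself, and finally the rxq allocation.
 */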
2106 static void mana_destroy_rxq(struct mana_port_context *apc,
2107 			     struct mana_rxq *rxq, bool napi_initialized)
2108 
2109 {
2110 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2111 	struct mana_recv_buf_oob *rx_oob;
2112 	struct device *dev = gc->dev;
2113 	struct napi_struct *napi;
2114 	struct page *page;
2115 	int i;
2116 
2117 	if (!rxq)
2118 		return;
2119 
2120 	debugfs_remove_recursive(rxq->mana_rx_debugfs);
2121 	rxq->mana_rx_debugfs = NULL;
2122 
2123 	napi = &rxq->rx_cq.napi;
2124 
2125 	if (napi_initialized) {
2126 		napi_synchronize(napi);
2127 
2128 		napi_disable(napi);
2129 
2130 		netif_napi_del(napi);
2131 	}
2132 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
2133 
2134 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2135 
2136 	mana_deinit_cq(apc, &rxq->rx_cq);
2137 
2138 	if (rxq->xdp_save_va)
2139 		put_page(virt_to_head_page(rxq->xdp_save_va));
2140 
2141 	for (i = 0; i < rxq->num_rx_buf; i++) {
2142 		rx_oob = &rxq->rx_oobs[i];
2143 
2144 		if (!rx_oob->buf_va)
2145 			continue;
2146 
2147 		dma_unmap_single(dev, rx_oob->sgl[0].address,
2148 				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2149 
2150 		page = virt_to_head_page(rx_oob->buf_va);
2151 
2152 		if (rx_oob->from_pool)
2153 			page_pool_put_full_page(rxq->page_pool, page, false);
2154 		else
2155 			put_page(page);
2156 
2157 		rx_oob->buf_va = NULL;
2158 	}
2159 
2160 	page_pool_destroy(rxq->page_pool);
2161 
2162 	if (rxq->gdma_rq)
2163 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2164 
2165 	kfree(rxq);
2166 }
2167 
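/* mana_fill_rx_oob() - attaches a DMA-mapped receive buffer to one RX OOB,
 * taking it from the pre-allocated buffer list when available and otherwise
 * allocating a fresh fragment (possibly from the page pool).
 */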
2168 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2169 			    struct mana_rxq *rxq, struct device *dev)
2170 {
2171 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2172 	bool from_pool = false;
2173 	dma_addr_t da;
2174 	void *va;
2175 
2176 	if (mpc->rxbufs_pre)
2177 		va = mana_get_rxbuf_pre(rxq, &da);
2178 	else
2179 		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
2180 
2181 	if (!va)
2182 		return -ENOMEM;
2183 
2184 	rx_oob->buf_va = va;
2185 	rx_oob->from_pool = from_pool;
2186 
2187 	rx_oob->sgl[0].address = da;
2188 	rx_oob->sgl[0].size = rxq->datasize;
2189 	rx_oob->sgl[0].mem_key = mem_key;
2190 
2191 	return 0;
2192 }
2193 
2194 #define MANA_WQE_HEADER_SIZE 16
2195 #define MANA_WQE_SGE_SIZE 16
2196 
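/* mana_alloc_rx_wqe() - prepares one receive WQE per RX buffer, attaching a
 * buffer to each OOB, and accumulates the RQ and CQ sizes needed to hold
 * them all; each WQE is MANA_WQE_HEADER_SIZE plus one MANA_WQE_SGE_SIZE per
 * SGE, rounded up to a multiple of 32 bytes.
 */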
2197 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2198 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2199 {
2200 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2201 	struct mana_recv_buf_oob *rx_oob;
2202 	struct device *dev = gc->dev;
2203 	u32 buf_idx;
2204 	int ret;
2205 
2206 	WARN_ON(rxq->datasize == 0);
2207 
2208 	*rxq_size = 0;
2209 	*cq_size = 0;
2210 
2211 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2212 		rx_oob = &rxq->rx_oobs[buf_idx];
2213 		memset(rx_oob, 0, sizeof(*rx_oob));
2214 
2215 		rx_oob->num_sge = 1;
2216 
2217 		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2218 				       dev);
2219 		if (ret)
2220 			return ret;
2221 
2222 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2223 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2224 		rx_oob->wqe_req.inline_oob_size = 0;
2225 		rx_oob->wqe_req.inline_oob_data = NULL;
2226 		rx_oob->wqe_req.flags = 0;
2227 		rx_oob->wqe_req.client_data_unit = 0;
2228 
2229 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2230 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2231 		*cq_size += COMP_ENTRY_SIZE;
2232 	}
2233 
2234 	return 0;
2235 }
2236 
2237 static int mana_push_wqe(struct mana_rxq *rxq)
2238 {
2239 	struct mana_recv_buf_oob *rx_oob;
2240 	u32 buf_idx;
2241 	int err;
2242 
2243 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2244 		rx_oob = &rxq->rx_oobs[buf_idx];
2245 
2246 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2247 					    &rx_oob->wqe_inf);
2248 		if (err)
2249 			return -ENOSPC;
2250 	}
2251 
2252 	return 0;
2253 }
2254 
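/* mana_create_page_pool() - creates the page pool used for RX buffers,
 * sized to the RX queue depth and bound to the queue's NAPI context and
 * the NUMA node of the device.
 */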
2255 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2256 {
2257 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2258 	struct page_pool_params pprm = {};
2259 	int ret;
2260 
2261 	pprm.pool_size = mpc->rx_queue_size;
2262 	pprm.nid = gc->numa_node;
2263 	pprm.napi = &rxq->rx_cq.napi;
2264 	pprm.netdev = rxq->ndev;
2265 
2266 	rxq->page_pool = page_pool_create(&pprm);
2267 
2268 	if (IS_ERR(rxq->page_pool)) {
2269 		ret = PTR_ERR(rxq->page_pool);
2270 		rxq->page_pool = NULL;
2271 		return ret;
2272 	}
2273 
2274 	return 0;
2275 }
2276 
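/* mana_create_rxq() - allocates and initializes one RX queue: the page
 * pool, the receive WQEs, the GDMA RQ and its CQ, the hardware WQ object,
 * the posted receive buffers, NAPI and the XDP rxq registration. Returns
 * NULL on failure after undoing any partial setup.
 */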
2277 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2278 					u32 rxq_idx, struct mana_eq *eq,
2279 					struct net_device *ndev)
2280 {
2281 	struct gdma_dev *gd = apc->ac->gdma_dev;
2282 	struct mana_obj_spec wq_spec;
2283 	struct mana_obj_spec cq_spec;
2284 	struct gdma_queue_spec spec;
2285 	struct mana_cq *cq = NULL;
2286 	struct gdma_context *gc;
2287 	u32 cq_size, rq_size;
2288 	struct mana_rxq *rxq;
2289 	int err;
2290 
2291 	gc = gd->gdma_context;
2292 
2293 	rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
2294 		      GFP_KERNEL);
2295 	if (!rxq)
2296 		return NULL;
2297 
2298 	rxq->ndev = ndev;
2299 	rxq->num_rx_buf = apc->rx_queue_size;
2300 	rxq->rxq_idx = rxq_idx;
2301 	rxq->rxobj = INVALID_MANA_HANDLE;
2302 
2303 	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2304 			   &rxq->headroom);
2305 
2306 	/* Create page pool for RX queue */
2307 	err = mana_create_page_pool(rxq, gc);
2308 	if (err) {
2309 		netdev_err(ndev, "Create page pool err:%d\n", err);
2310 		goto out;
2311 	}
2312 
2313 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2314 	if (err)
2315 		goto out;
2316 
2317 	rq_size = MANA_PAGE_ALIGN(rq_size);
2318 	cq_size = MANA_PAGE_ALIGN(cq_size);
2319 
2320 	/* Create RQ */
2321 	memset(&spec, 0, sizeof(spec));
2322 	spec.type = GDMA_RQ;
2323 	spec.monitor_avl_buf = true;
2324 	spec.queue_size = rq_size;
2325 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2326 	if (err)
2327 		goto out;
2328 
2329 	/* Create RQ's CQ */
2330 	cq = &rxq->rx_cq;
2331 	cq->type = MANA_CQ_TYPE_RX;
2332 	cq->rxq = rxq;
2333 
2334 	memset(&spec, 0, sizeof(spec));
2335 	spec.type = GDMA_CQ;
2336 	spec.monitor_avl_buf = false;
2337 	spec.queue_size = cq_size;
2338 	spec.cq.callback = mana_schedule_napi;
2339 	spec.cq.parent_eq = eq->eq;
2340 	spec.cq.context = cq;
2341 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2342 	if (err)
2343 		goto out;
2344 
2345 	memset(&wq_spec, 0, sizeof(wq_spec));
2346 	memset(&cq_spec, 0, sizeof(cq_spec));
2347 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2348 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2349 
2350 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2351 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2352 	cq_spec.modr_ctx_id = 0;
2353 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2354 
2355 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2356 				 &wq_spec, &cq_spec, &rxq->rxobj);
2357 	if (err)
2358 		goto out;
2359 
2360 	rxq->gdma_rq->id = wq_spec.queue_index;
2361 	cq->gdma_cq->id = cq_spec.queue_index;
2362 
2363 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2364 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2365 
2366 	rxq->gdma_id = rxq->gdma_rq->id;
2367 	cq->gdma_id = cq->gdma_cq->id;
2368 
2369 	err = mana_push_wqe(rxq);
2370 	if (err)
2371 		goto out;
2372 
2373 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2374 		err = -EINVAL;
2375 		goto out;
2376 	}
2377 
2378 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2379 
2380 	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
2381 
2382 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2383 				 cq->napi.napi_id));
2384 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2385 					   rxq->page_pool));
2386 
2387 	napi_enable(&cq->napi);
2388 
2389 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2390 out:
2391 	if (!err)
2392 		return rxq;
2393 
2394 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2395 
2396 	mana_destroy_rxq(apc, rxq, false);
2397 
2398 	/* rxq and its embedded CQ were already cleaned up and freed by
2399 	 * mana_destroy_rxq() above, so cq must not be touched here. */
2400 
2401 	return NULL;
2402 }
2403 
2404 static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
2405 {
2406 	struct mana_rxq *rxq;
2407 	char qnum[32];
2408 
2409 	rxq = apc->rxqs[idx];
2410 
2411 	sprintf(qnum, "RX-%d", idx);
2412 	rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2413 	debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
2414 	debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
2415 	debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
2416 	debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
2417 			   &rxq->rx_cq.gdma_cq->head);
2418 	debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
2419 			   &rxq->rx_cq.gdma_cq->tail);
2420 	debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
2421 	debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
2422 	debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
2423 			    &mana_dbg_q_fops);
2424 }
2425 
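/* mana_add_rx_queues() - creates one RX queue per configured channel, each
 * attached to its own EQ, and records the first RQ object as the default
 * receive object for the port.
 */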
2426 static int mana_add_rx_queues(struct mana_port_context *apc,
2427 			      struct net_device *ndev)
2428 {
2429 	struct mana_context *ac = apc->ac;
2430 	struct mana_rxq *rxq;
2431 	int err = 0;
2432 	int i;
2433 
2434 	for (i = 0; i < apc->num_queues; i++) {
2435 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2436 		if (!rxq) {
2437 			err = -ENOMEM;
2438 			netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
2439 			goto out;
2440 		}
2441 
2442 		u64_stats_init(&rxq->stats.syncp);
2443 
2444 		apc->rxqs[i] = rxq;
2445 
2446 		mana_create_rxq_debugfs(apc, i);
2447 	}
2448 
2449 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2450 out:
2451 	return err;
2452 }
2453 
2454 static void mana_destroy_vport(struct mana_port_context *apc)
2455 {
2456 	struct gdma_dev *gd = apc->ac->gdma_dev;
2457 	struct mana_rxq *rxq;
2458 	u32 rxq_idx;
2459 
2460 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2461 		rxq = apc->rxqs[rxq_idx];
2462 		if (!rxq)
2463 			continue;
2464 
2465 		mana_destroy_rxq(apc, rxq, true);
2466 		apc->rxqs[rxq_idx] = NULL;
2467 	}
2468 
2469 	mana_destroy_txq(apc);
2470 	mana_uncfg_vport(apc);
2471 
2472 	if (gd->gdma_context->is_pf)
2473 		mana_pf_deregister_hw_vport(apc);
2474 }
2475 
2476 static int mana_create_vport(struct mana_port_context *apc,
2477 			     struct net_device *net)
2478 {
2479 	struct gdma_dev *gd = apc->ac->gdma_dev;
2480 	int err;
2481 
2482 	apc->default_rxobj = INVALID_MANA_HANDLE;
2483 
2484 	if (gd->gdma_context->is_pf) {
2485 		err = mana_pf_register_hw_vport(apc);
2486 		if (err)
2487 			return err;
2488 	}
2489 
2490 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2491 	if (err)
2492 		return err;
2493 
2494 	return mana_create_txq(apc, net);
2495 }
2496 
2497 static int mana_rss_table_alloc(struct mana_port_context *apc)
2498 {
2499 	if (!apc->indir_table_sz) {
2500 		netdev_err(apc->ndev,
2501 			   "Indirection table size not set for vPort %d\n",
2502 			   apc->port_idx);
2503 		return -EINVAL;
2504 	}
2505 
2506 	apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
2507 	if (!apc->indir_table)
2508 		return -ENOMEM;
2509 
2510 	apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL);
2511 	if (!apc->rxobj_table) {
2512 		kfree(apc->indir_table);
2513 		return -ENOMEM;
2514 	}
2515 
2516 	return 0;
2517 }
2518 
2519 static void mana_rss_table_init(struct mana_port_context *apc)
2520 {
2521 	int i;
2522 
2523 	for (i = 0; i < apc->indir_table_sz; i++)
2524 		apc->indir_table[i] =
2525 			ethtool_rxfh_indir_default(i, apc->num_queues);
2526 }
2527 
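/* mana_config_rss() - optionally refreshes the RX object table from the
 * queue-index indirection table, then pushes the vPort steering
 * configuration to the hardware and fences the RQs so the change has taken
 * effect before returning.
 */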
2528 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2529 		    bool update_hash, bool update_tab)
2530 {
2531 	u32 queue_idx;
2532 	int err;
2533 	int i;
2534 
2535 	if (update_tab) {
2536 		for (i = 0; i < apc->indir_table_sz; i++) {
2537 			queue_idx = apc->indir_table[i];
2538 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2539 		}
2540 	}
2541 
2542 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2543 	if (err)
2544 		return err;
2545 
2546 	mana_fence_rqs(apc);
2547 
2548 	return 0;
2549 }
2550 
2551 void mana_query_gf_stats(struct mana_port_context *apc)
2552 {
2553 	struct mana_query_gf_stat_resp resp = {};
2554 	struct mana_query_gf_stat_req req = {};
2555 	struct net_device *ndev = apc->ndev;
2556 	int err;
2557 
2558 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2559 			     sizeof(req), sizeof(resp));
2560 	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
2561 	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
2562 			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
2563 			STATISTICS_FLAGS_HC_RX_BYTES |
2564 			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
2565 			STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
2566 			STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
2567 			STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
2568 			STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
2569 			STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
2570 			STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
2571 			STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
2572 			STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
2573 			STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
2574 			STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
2575 			STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
2576 			STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
2577 			STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
2578 			STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
2579 			STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
2580 			STATISTICS_FLAGS_HC_TX_BYTES |
2581 			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2582 			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2583 			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2584 			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2585 			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2586 			STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
2587 			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
2588 
2589 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2590 				sizeof(resp));
2591 	if (err) {
2592 		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
2593 		return;
2594 	}
2595 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2596 				   sizeof(resp));
2597 	if (err || resp.hdr.status) {
2598 		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
2599 			   resp.hdr.status);
2600 		return;
2601 	}
2602 
2603 	apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
2604 	apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
2605 	apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
2606 	apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
2607 	apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
2608 	apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
2609 	apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
2610 	apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
2611 	apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
2612 	apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
2613 	apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
2614 	apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
2615 					     resp.tx_err_inval_vport_offset_pkt;
2616 	apc->eth_stats.hc_tx_err_vlan_enforcement =
2617 					     resp.tx_err_vlan_enforcement;
2618 	apc->eth_stats.hc_tx_err_eth_type_enforcement =
2619 					     resp.tx_err_ethtype_enforcement;
2620 	apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
2621 	apc->eth_stats.hc_tx_err_sqpdid_enforcement =
2622 					     resp.tx_err_SQPDID_enforcement;
2623 	apc->eth_stats.hc_tx_err_cqpdid_enforcement =
2624 					     resp.tx_err_CQPDID_enforcement;
2625 	apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
2626 	apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
2627 	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
2628 	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2629 	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2630 	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2631 	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2632 	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2633 	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2634 	apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
2635 }
2636 
2637 static int mana_init_port(struct net_device *ndev)
2638 {
2639 	struct mana_port_context *apc = netdev_priv(ndev);
2640 	struct gdma_dev *gd = apc->ac->gdma_dev;
2641 	u32 max_txq, max_rxq, max_queues;
2642 	int port_idx = apc->port_idx;
2643 	struct gdma_context *gc;
2644 	char vport[32];
2645 	int err;
2646 
2647 	err = mana_init_port_context(apc);
2648 	if (err)
2649 		return err;
2650 
2651 	gc = gd->gdma_context;
2652 
2653 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2654 				   &apc->indir_table_sz);
2655 	if (err) {
2656 		netdev_err(ndev, "Failed to query info for vPort %d\n",
2657 			   port_idx);
2658 		goto reset_apc;
2659 	}
2660 
2661 	max_queues = min_t(u32, max_txq, max_rxq);
2662 	if (apc->max_queues > max_queues)
2663 		apc->max_queues = max_queues;
2664 
2665 	if (apc->num_queues > apc->max_queues)
2666 		apc->num_queues = apc->max_queues;
2667 
2668 	eth_hw_addr_set(ndev, apc->mac_addr);
2669 	sprintf(vport, "vport%d", port_idx);
2670 	apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
2671 	return 0;
2672 
2673 reset_apc:
2674 	mana_cleanup_port_context(apc);
2675 	return err;
2676 }
2677 
2678 int mana_alloc_queues(struct net_device *ndev)
2679 {
2680 	struct mana_port_context *apc = netdev_priv(ndev);
2681 	struct gdma_dev *gd = apc->ac->gdma_dev;
2682 	int err;
2683 
2684 	err = mana_create_vport(apc, ndev);
2685 	if (err) {
2686 		netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
2687 		return err;
2688 	}
2689 
2690 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
2691 	if (err) {
2692 		netdev_err(ndev,
2693 			   "netif_set_real_num_tx_queues() failed for ndev with num_queues %u: %d\n",
2694 			   apc->num_queues, err);
2695 		goto destroy_vport;
2696 	}
2697 
2698 	err = mana_add_rx_queues(apc, ndev);
2699 	if (err)
2700 		goto destroy_vport;
2701 
2702 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2703 
2704 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
2705 	if (err) {
2706 		netdev_err(ndev,
2707 			   "netif_set_real_num_rx_queues() failed for ndev with num_queues %u: %d\n",
2708 			   apc->num_queues, err);
2709 		goto destroy_vport;
2710 	}
2711 
2712 	mana_rss_table_init(apc);
2713 
2714 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2715 	if (err) {
2716 		netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
2717 		goto destroy_vport;
2718 	}
2719 
2720 	if (gd->gdma_context->is_pf) {
2721 		err = mana_pf_register_filter(apc);
2722 		if (err)
2723 			goto destroy_vport;
2724 	}
2725 
2726 	mana_chn_setxdp(apc, mana_xdp_get(apc));
2727 
2728 	return 0;
2729 
2730 destroy_vport:
2731 	mana_destroy_vport(apc);
2732 	return err;
2733 }
2734 
2735 int mana_attach(struct net_device *ndev)
2736 {
2737 	struct mana_port_context *apc = netdev_priv(ndev);
2738 	int err;
2739 
2740 	ASSERT_RTNL();
2741 
2742 	err = mana_init_port(ndev);
2743 	if (err)
2744 		return err;
2745 
2746 	if (apc->port_st_save) {
2747 		err = mana_alloc_queues(ndev);
2748 		if (err) {
2749 			mana_cleanup_port_context(apc);
2750 			return err;
2751 		}
2752 	}
2753 
2754 	apc->port_is_up = apc->port_st_save;
2755 
2756 	/* Ensure port state updated before txq state */
2757 	smp_wmb();
2758 
2759 	if (apc->port_is_up)
2760 		netif_carrier_on(ndev);
2761 
2762 	netif_device_attach(ndev);
2763 
2764 	return 0;
2765 }
2766 
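/* mana_dealloc_queues() - quiesces and destroys the data path of a port
 * that is already marked down: it unhooks XDP, waits for in-flight TX
 * packets to drain (falling back to an FLR if the hardware stops
 * responding), frees any still-pending skbs, disables RSS steering and
 * finally destroys the vPort.
 */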
2767 static int mana_dealloc_queues(struct net_device *ndev)
2768 {
2769 	struct mana_port_context *apc = netdev_priv(ndev);
2770 	unsigned long timeout = jiffies + 120 * HZ;
2771 	struct gdma_dev *gd = apc->ac->gdma_dev;
2772 	struct mana_txq *txq;
2773 	struct sk_buff *skb;
2774 	int i, err;
2775 	u32 tsleep;
2776 
2777 	if (apc->port_is_up)
2778 		return -EINVAL;
2779 
2780 	mana_chn_setxdp(apc, NULL);
2781 
2782 	if (gd->gdma_context->is_pf)
2783 		mana_pf_deregister_filter(apc);
2784 
2785 	/* No packet can be transmitted now since apc->port_is_up is false.
2786 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2787 	 * a txq because it may not see apc->port_is_up being cleared to
2788 	 * false in time, but that doesn't matter since mana_start_xmit()
2789 	 * drops any new packets while apc->port_is_up is false.
2790 	 *
2791 	 * Drain all the in-flight TX packets.
2792 	 * A total timeout of 120 seconds is used for all the queues; it
2793 	 * breaks the wait loop when the hardware is not responding. The
2794 	 * value of 120 was chosen to accommodate the maximum number of
2795 	 * queues.
2796 	 */
2797 
2798 	for (i = 0; i < apc->num_queues; i++) {
2799 		txq = &apc->tx_qp[i].txq;
2800 		tsleep = 1000;
2801 		while (atomic_read(&txq->pending_sends) > 0 &&
2802 		       time_before(jiffies, timeout)) {
2803 			usleep_range(tsleep, tsleep + 1000);
2804 			tsleep <<= 1;
2805 		}
2806 		if (atomic_read(&txq->pending_sends)) {
2807 			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
2808 			if (err) {
2809 				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
2810 					   err, atomic_read(&txq->pending_sends),
2811 					   txq->gdma_txq_id);
2812 			}
2813 			break;
2814 		}
2815 	}
2816 
2817 	for (i = 0; i < apc->num_queues; i++) {
2818 		txq = &apc->tx_qp[i].txq;
2819 		while ((skb = skb_dequeue(&txq->pending_skbs))) {
2820 			mana_unmap_skb(skb, apc);
2821 			dev_kfree_skb_any(skb);
2822 		}
2823 		atomic_set(&txq->pending_sends, 0);
2824 	}
2825 	/* The queues can no longer be woken up, because at this point
2826 	 * mana_poll_tx_cq() cannot be running anymore.
2827 	 */
2828 
2829 	apc->rss_state = TRI_STATE_FALSE;
2830 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2831 	if (err) {
2832 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2833 		return err;
2834 	}
2835 
2836 	mana_destroy_vport(apc);
2837 
2838 	return 0;
2839 }
2840 
2841 int mana_detach(struct net_device *ndev, bool from_close)
2842 {
2843 	struct mana_port_context *apc = netdev_priv(ndev);
2844 	int err;
2845 
2846 	ASSERT_RTNL();
2847 
2848 	apc->port_st_save = apc->port_is_up;
2849 	apc->port_is_up = false;
2850 
2851 	/* Ensure port state updated before txq state */
2852 	smp_wmb();
2853 
2854 	netif_tx_disable(ndev);
2855 	netif_carrier_off(ndev);
2856 
2857 	if (apc->port_st_save) {
2858 		err = mana_dealloc_queues(ndev);
2859 		if (err) {
2860 			netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
2861 			return err;
2862 		}
2863 	}
2864 
2865 	if (!from_close) {
2866 		netif_device_detach(ndev);
2867 		mana_cleanup_port_context(apc);
2868 	}
2869 
2870 	return 0;
2871 }
2872 
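/* mana_probe_port() - allocates the net_device for one vPort, fills in the
 * default queue sizes, MTU limits and feature flags, queries the port
 * configuration from the hardware, allocates the RSS tables and registers
 * the netdev.
 */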
2873 static int mana_probe_port(struct mana_context *ac, int port_idx,
2874 			   struct net_device **ndev_storage)
2875 {
2876 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2877 	struct mana_port_context *apc;
2878 	struct net_device *ndev;
2879 	int err;
2880 
2881 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2882 				 gc->max_num_queues);
2883 	if (!ndev)
2884 		return -ENOMEM;
2885 
2886 	*ndev_storage = ndev;
2887 
2888 	apc = netdev_priv(ndev);
2889 	apc->ac = ac;
2890 	apc->ndev = ndev;
2891 	apc->max_queues = gc->max_num_queues;
2892 	apc->num_queues = gc->max_num_queues;
2893 	apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
2894 	apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
2895 	apc->port_handle = INVALID_MANA_HANDLE;
2896 	apc->pf_filter_handle = INVALID_MANA_HANDLE;
2897 	apc->port_idx = port_idx;
2898 
2899 	mutex_init(&apc->vport_mutex);
2900 	apc->vport_use_count = 0;
2901 
2902 	ndev->netdev_ops = &mana_devops;
2903 	ndev->ethtool_ops = &mana_ethtool_ops;
2904 	ndev->mtu = ETH_DATA_LEN;
2905 	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
2906 	ndev->min_mtu = ETH_MIN_MTU;
2907 	ndev->needed_headroom = MANA_HEADROOM;
2908 	ndev->dev_port = port_idx;
2909 	SET_NETDEV_DEV(ndev, gc->dev);
2910 
2911 	netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
2912 
2913 	netif_carrier_off(ndev);
2914 
2915 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2916 
2917 	err = mana_init_port(ndev);
2918 	if (err)
2919 		goto free_net;
2920 
2921 	err = mana_rss_table_alloc(apc);
2922 	if (err)
2923 		goto reset_apc;
2924 
2925 	netdev_lockdep_set_classes(ndev);
2926 
2927 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2928 	ndev->hw_features |= NETIF_F_RXCSUM;
2929 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2930 	ndev->hw_features |= NETIF_F_RXHASH;
2931 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
2932 			 NETIF_F_HW_VLAN_CTAG_RX;
2933 	ndev->vlan_features = ndev->features;
2934 	xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
2935 			      NETDEV_XDP_ACT_REDIRECT |
2936 			      NETDEV_XDP_ACT_NDO_XMIT);
2937 
2938 	err = register_netdev(ndev);
2939 	if (err) {
2940 		netdev_err(ndev, "Unable to register netdev.\n");
2941 		goto free_indir;
2942 	}
2943 
2944 	return 0;
2945 
2946 free_indir:
2947 	mana_cleanup_indir_table(apc);
2948 reset_apc:
2949 	mana_cleanup_port_context(apc);
2950 free_net:
2951 	*ndev_storage = NULL;
2952 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2953 	free_netdev(ndev);
2954 	return err;
2955 }
2956 
2957 static void adev_release(struct device *dev)
2958 {
2959 	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
2960 
2961 	kfree(madev);
2962 }
2963 
2964 static void remove_adev(struct gdma_dev *gd)
2965 {
2966 	struct auxiliary_device *adev = gd->adev;
2967 	int id = adev->id;
2968 
2969 	auxiliary_device_delete(adev);
2970 	auxiliary_device_uninit(adev);
2971 
2972 	mana_adev_idx_free(id);
2973 	gd->adev = NULL;
2974 }
2975 
2976 static int add_adev(struct gdma_dev *gd)
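/* add_adev() - creates and registers the auxiliary device (named "rdma")
 * that other drivers can bind to on top of this GDMA device; on success the
 * auxiliary device owns the mana_adev allocation and releases it through
 * adev_release().
 */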
2977 {
2978 	struct auxiliary_device *adev;
2979 	struct mana_adev *madev;
2980 	int ret;
2981 
2982 	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
2983 	if (!madev)
2984 		return -ENOMEM;
2985 
2986 	adev = &madev->adev;
2987 	ret = mana_adev_idx_alloc();
2988 	if (ret < 0)
2989 		goto idx_fail;
2990 	adev->id = ret;
2991 
2992 	adev->name = "rdma";
2993 	adev->dev.parent = gd->gdma_context->dev;
2994 	adev->dev.release = adev_release;
2995 	madev->mdev = gd;
2996 
2997 	ret = auxiliary_device_init(adev);
2998 	if (ret)
2999 		goto init_fail;
3000 
3001 	/* madev is owned by the auxiliary device */
3002 	madev = NULL;
3003 	ret = auxiliary_device_add(adev);
3004 	if (ret)
3005 		goto add_fail;
3006 
3007 	gd->adev = adev;
3008 	dev_dbg(gd->gdma_context->dev,
3009 		"Auxiliary device added successfully\n");
3010 	return 0;
3011 
3012 add_fail:
3013 	auxiliary_device_uninit(adev);
3014 
3015 init_fail:
3016 	mana_adev_idx_free(adev->id);
3017 
3018 idx_fail:
3019 	kfree(madev);
3020 
3021 	return ret;
3022 }
3023 
3024 int mana_probe(struct gdma_dev *gd, bool resuming)
3025 {
3026 	struct gdma_context *gc = gd->gdma_context;
3027 	struct mana_context *ac = gd->driver_data;
3028 	struct device *dev = gc->dev;
3029 	u16 num_ports = 0;
3030 	int err;
3031 	int i;
3032 
3033 	dev_info(dev,
3034 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
3035 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
3036 
3037 	err = mana_gd_register_device(gd);
3038 	if (err)
3039 		return err;
3040 
3041 	if (!resuming) {
3042 		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
3043 		if (!ac)
3044 			return -ENOMEM;
3045 
3046 		ac->gdma_dev = gd;
3047 		gd->driver_data = ac;
3048 	}
3049 
3050 	err = mana_create_eq(ac);
3051 	if (err) {
3052 		dev_err(dev, "Failed to create EQs: %d\n", err);
3053 		goto out;
3054 	}
3055 
3056 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
3057 				    MANA_MICRO_VERSION, &num_ports);
3058 	if (err)
3059 		goto out;
3060 
3061 	if (!resuming) {
3062 		ac->num_ports = num_ports;
3063 	} else {
3064 		if (ac->num_ports != num_ports) {
3065 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
3066 				ac->num_ports, num_ports);
3067 			err = -EPROTO;
3068 			goto out;
3069 		}
3070 	}
3071 
3072 	if (ac->num_ports == 0)
3073 		dev_err(dev, "Failed to detect any vPort\n");
3074 
3075 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
3076 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
3077 
3078 	if (!resuming) {
3079 		for (i = 0; i < ac->num_ports; i++) {
3080 			err = mana_probe_port(ac, i, &ac->ports[i]);
3081 			/* Log the port for which the probe failed and stop
3082 			 * probing the subsequent ports.
3083 			 * Note that the ports probed successfully keep running,
3084 			 * unless add_adev() fails too.
3085 			 */
3086 			if (err) {
3087 				dev_err(dev, "Probe Failed for port %d\n", i);
3088 				break;
3089 			}
3090 		}
3091 	} else {
3092 		for (i = 0; i < ac->num_ports; i++) {
3093 			rtnl_lock();
3094 			err = mana_attach(ac->ports[i]);
3095 			rtnl_unlock();
3096 			/* Log the port for which the attach failed and stop
3097 			 * attaching the subsequent ports.
3098 			 * Note that the ports attached successfully keep running,
3099 			 * unless add_adev() fails too.
3100 			 */
3101 			if (err) {
3102 				dev_err(dev, "Attach Failed for port %d\n", i);
3103 				break;
3104 			}
3105 		}
3106 	}
3107 
3108 	err = add_adev(gd);
3109 out:
3110 	if (err) {
3111 		mana_remove(gd, false);
3112 	} else {
3113 		dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
3114 			gd, gd->dev_id.as_uint32, ac->num_ports,
3115 			gd->dev_id.type, gd->dev_id.instance);
3116 		dev_dbg(dev, "%s succeeded\n", __func__);
3117 	}
3118 
3119 	return err;
3120 }
3121 
3122 void mana_remove(struct gdma_dev *gd, bool suspending)
3123 {
3124 	struct gdma_context *gc = gd->gdma_context;
3125 	struct mana_context *ac = gd->driver_data;
3126 	struct mana_port_context *apc;
3127 	struct device *dev = gc->dev;
3128 	struct net_device *ndev;
3129 	int err;
3130 	int i;
3131 
3132 	/* adev currently doesn't support suspending, always remove it */
3133 	if (gd->adev)
3134 		remove_adev(gd);
3135 
3136 	for (i = 0; i < ac->num_ports; i++) {
3137 		ndev = ac->ports[i];
3138 		if (!ndev) {
3139 			if (i == 0)
3140 				dev_err(dev, "No net device to remove\n");
3141 			goto out;
3142 		}
3143 		apc = netdev_priv(ndev);
3144 
3145 		/* All cleanup actions should be done while holding rtnl_lock();
3146 		 * otherwise other functions may access partially cleaned-up data.
3147 		 */
3148 		rtnl_lock();
3149 
3150 		err = mana_detach(ndev, false);
3151 		if (err)
3152 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
3153 				   i, err);
3154 
3155 		if (suspending) {
3156 			/* No need to unregister the ndev. */
3157 			rtnl_unlock();
3158 			continue;
3159 		}
3160 
3161 		unregister_netdevice(ndev);
3162 		mana_cleanup_indir_table(apc);
3163 
3164 		rtnl_unlock();
3165 
3166 		free_netdev(ndev);
3167 	}
3168 
3169 	mana_destroy_eq(ac);
3170 out:
3171 	mana_gd_deregister_device(gd);
3172 
3173 	if (suspending)
3174 		return;
3175 
3176 	gd->driver_data = NULL;
3177 	gd->gdma_context = NULL;
3178 	kfree(ac);
3179 	dev_dbg(dev, "%s succeeded\n", __func__);
3180 }
3181 
3182 struct net_device *mana_get_primary_netdev(struct mana_context *ac,
3183 					   u32 port_index,
3184 					   netdevice_tracker *tracker)
3185 {
3186 	struct net_device *ndev;
3187 
3188 	if (port_index >= ac->num_ports)
3189 		return NULL;
3190 
3191 	rcu_read_lock();
3192 
3193 	/* If mana is used in netvsc, the upper netdevice should be returned. */
3194 	ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
3195 
3196 	/* If there is no upper device, use the parent Ethernet device */
3197 	if (!ndev)
3198 		ndev = ac->ports[port_index];
3199 
3200 	netdev_hold(ndev, tracker, GFP_ATOMIC);
3201 	rcu_read_unlock();
3202 
3203 	return ndev;
3204 }
3205 EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
3206