// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <uapi/linux/bpf.h>

#include <linux/debugfs.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>
#include <net/mana/hw_channel.h>

static DEFINE_IDA(mana_adev_ida);

static int mana_adev_idx_alloc(void)
{
	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
}

static void mana_adev_idx_free(int idx)
{
	ida_free(&mana_adev_ida, idx);
}

static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
			       loff_t *pos)
{
	struct gdma_queue *gdma_q = filp->private_data;

	return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
				       gdma_q->queue_size);
}

static const struct file_operations mana_dbg_q_fops = {
	.owner  = THIS_MODULE,
	.open   = simple_open,
	.read   = mana_dbg_q_read,
};
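
/* These fops back the per-queue dump files created later in this file
 * (e.g. the "eq_dump" debugfs entry): reading such a file returns the raw
 * queue memory via simple_read_from_buffer().
 */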

static bool mana_en_need_log(struct mana_port_context *apc, int err)
{
	if (apc && apc->ac && apc->ac->gdma_dev &&
	    apc->ac->gdma_dev->gdma_context)
		return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
	else
		return true;
}

static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
			     bool from_pool)
{
	if (from_pool)
		page_pool_put_full_page(rxq->page_pool, page, false);
	else
		put_page(page);
}

/* Microsoft Azure Network Adapter (MANA) functions */

static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err) {
		netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
		return err;
	}

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
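	/* (Pairs with the smp_rmb() before the queue-wake check in
	 * mana_poll_tx_cq().)
	 */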
	smp_wmb();

	netif_tx_wake_all_queues(ndev);
	netdev_dbg(ndev, "%s successful\n", __func__);
	return 0;
}

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

static void mana_link_state_handle(struct work_struct *w)
{
	struct mana_context *ac;
	struct net_device *ndev;
	u32 link_event;
	bool link_up;
	int i;

	ac = container_of(w, struct mana_context, link_change_work);

	rtnl_lock();

	link_event = READ_ONCE(ac->link_event);

	if (link_event == HWC_DATA_HW_LINK_CONNECT)
		link_up = true;
	else if (link_event == HWC_DATA_HW_LINK_DISCONNECT)
		link_up = false;
	else
		goto out;

	/* Process all ports */
	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev)
			continue;

		if (link_up) {
			netif_carrier_on(ndev);

			__netdev_notify_peers(ndev);
		} else {
			netif_carrier_off(ndev);
		}
	}

out:
	rtnl_unlock();
}

static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
{
	ash->dma_handle[sg_i] = da;
	ash->size[sg_i] = sge_len;

	tp->wqe_req.sgl[sg_i].address = da;
	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
	tp->wqe_req.sgl[sg_i].size = sge_len;
}

static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp, int gso_hs)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	int hsg = 1; /* num of SGEs of linear part */
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int skb_hlen = skb_headlen(skb);
	int sge0_len, sge1_len = 0;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int sg_i;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;

	if (gso_hs && gso_hs < skb_hlen) {
		sge0_len = gso_hs;
		sge1_len = skb_hlen - gso_hs;
	} else {
		sge0_len = skb_hlen;
	}

	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);

	if (sge1_len) {
		sg_i = 1;
		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
				    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, da))
			goto frag_err;

		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
		hsg = 2;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg_i = hsg + i;

		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, da))
			goto frag_err;

		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
			     gd->gpa_mkey);
	}

	return 0;

frag_err:
	if (net_ratelimit())
		netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
			   skb->len);
	for (i = sg_i - 1; i >= hsg; i--)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);

	for (i = hsg - 1; i >= 0; i--)
		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);

	return -ENOMEM;
}

/* Handle the case when GSO SKB linear length is too large.
 * MANA NIC requires GSO packets to put only the packet header to SGE0.
 * So, we need 2 SGEs for the skb linear part which contains more than the
 * header.
 * Return a positive value for the number of SGEs, or a negative value
 * for an error.
 */
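/* Example (illustrative numbers): with gso_hs = 54 and skb_headlen() =
 * 1514, SGE0 carries the 54 header bytes and a second SGE carries the
 * remaining 1460 linear bytes, so one extra SGE is counted on top of
 * 1 + nr_frags.
 */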
static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
			     int gso_hs)
{
	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
	int skb_hlen = skb_headlen(skb);

	if (gso_hs < skb_hlen) {
		num_sge++;
	} else if (gso_hs > skb_hlen) {
		if (net_ratelimit())
			netdev_err(ndev,
				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
				   gso_hs, skb_hlen);

		return -EINVAL;
	}

	return num_sge;
}

/* Get the GSO packet's header size */
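/* For a plain (un-encapsulated) TCPv4 TSO skb this is typically
 * ETH_HLEN + IP header + TCP header, e.g. 14 + 20 + 20 = 54 bytes with
 * no options; for UDP (SKB_GSO_UDP_L4) it counts only up to the end of
 * the UDP header.
 */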
static int mana_get_gso_hs(struct sk_buff *skb)
{
	int gso_hs;

	if (skb->encapsulation) {
		gso_hs = skb_inner_tcp_all_headers(skb);
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			gso_hs = skb_transport_offset(skb) +
				 sizeof(struct udphdr);
		} else {
			gso_hs = skb_tcp_all_headers(skb);
		}
	}

	return gso_hs;
}

static void mana_per_port_queue_reset_work_handler(struct work_struct *work)
{
	struct mana_port_context *apc = container_of(work,
						     struct mana_port_context,
						     queue_reset_work);
	struct net_device *ndev = apc->ndev;
	int err;

	rtnl_lock();

	/* Pre-allocate buffers to prevent failure in mana_attach later */
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	if (err) {
		netdev_err(ndev, "Insufficient memory for reset post tx stall detection\n");
		goto out;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto dealloc_pre_rxbufs;
	}

	err = mana_attach(ndev);
	if (err)
		netdev_err(ndev, "mana_attach failed: %d\n", err);

dealloc_pre_rxbufs:
	mana_pre_dealloc_rxbufs(apc);
out:
	rtnl_unlock();
}

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	int gso_hs = 0; /* zero for non-GSO pkts */
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats_tx *tx_stats;
	struct gdma_queue *gdma_sq;
	int err, len, num_gso_seg;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;
	tx_stats = &txq->stats;

	BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
	if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
	    skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
		/* A GSO skb exceeding the hardware SGE limit is not expected
		 * here, as it is handled in the mana_features_check() callback.
		 */
		if (skb_linearize(skb)) {
			netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
					 skb_shinfo(skb)->nr_frags,
					 skb_is_gso(skb));
			goto tx_drop_count;
		}
		apc->eth_stats.tx_linear_pkt_cnt++;
	}

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	if (skb_vlan_tag_present(skb)) {
		pkt_fmt = MANA_LONG_PKT_FMT;
		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->short_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->long_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	}

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		int num_sge;

		gso_hs = mana_get_gso_hs(skb);

		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
		if (num_sge > 0)
			pkg.wqe_req.num_sge = num_sge;
		else
			goto tx_drop_count;

		u64_stats_update_begin(&tx_stats->syncp);
		if (skb->encapsulation) {
			tx_stats->tso_inner_packets++;
			tx_stats->tso_inner_bytes += skb->len - gso_hs;
		} else {
			tx_stats->tso_packets++;
			tx_stats->tso_bytes += skb->len - gso_hs;
		}
		u64_stats_update_end(&tx_stats->syncp);

		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->csum_partial++;
		u64_stats_update_end(&tx_stats->syncp);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto tx_drop_count;
		}
	}

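	/* Use the inline SGL array when the SGE count fits; otherwise fall
	 * back to a heap allocation (GFP_ATOMIC, since the xmit path must
	 * not sleep).
	 */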
	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->mana_map_err++;
		u64_stats_update_end(&tx_stats->syncp);
		goto free_sgl_ptr;
	}

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		mana_unmap_skb(skb, apc);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		goto free_sgl_ptr;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
	skb = NULL;

	/* Populate the packet and byte counters based on post-GSO packet
	 * calculations
	 */
	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets += num_gso_seg;
	tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
static netdev_features_t mana_features_check(struct sk_buff *skb,
					     struct net_device *ndev,
					     netdev_features_t features)
{
	if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
		/* Exceeds HW SGE limit.
		 * GSO case:
		 *   Disable GSO so the stack will software-segment the skb
		 *   into smaller skbs that fit the SGE budget.
		 * Non-GSO case:
		 *   The xmit path will attempt skb_linearize() as a fallback.
		 */
		features &= ~NETIF_F_GSO_MASK;
	}
	return features;
}
#endif

static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	if (apc->ac->hwc_timeout_occurred)
		netdev_warn_once(ndev, "HWC timeout occurred\n");

	st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

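/* The RSS indirection table size is a power of two (enforced in
 * mana_query_vport_cfg()), so "hash & (apc->indir_table_sz - 1)" below
 * is a cheap modulo; e.g. a 64-entry table maps hash 0x1234abcd to
 * entry 0x0d.
 */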
static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}

static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}

/* Release pre-allocated RX buffers */
void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
	struct device *dev;
	int i;

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	if (!mpc->rxbufs_pre)
		goto out1;

	if (!mpc->das_pre)
		goto out2;

	while (mpc->rxbpre_total) {
		i = --mpc->rxbpre_total;
		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
	}

	kfree(mpc->das_pre);
	mpc->das_pre = NULL;

out2:
	kfree(mpc->rxbufs_pre);
	mpc->rxbufs_pre = NULL;

out1:
	mpc->rxbpre_datasize = 0;
	mpc->rxbpre_alloc_size = 0;
	mpc->rxbpre_headroom = 0;
}

/* Get a buffer from the pre-allocated RX buffers */
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
{
	struct net_device *ndev = rxq->ndev;
	struct mana_port_context *mpc;
	void *va;

	mpc = netdev_priv(ndev);

	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
		netdev_err(ndev, "No RX pre-allocated bufs\n");
		return NULL;
	}

	/* Check sizes to catch unexpected coding errors */
	if (mpc->rxbpre_datasize != rxq->datasize) {
		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
			   mpc->rxbpre_datasize, rxq->datasize);
		return NULL;
	}

	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
			   mpc->rxbpre_alloc_size, rxq->alloc_size);
		return NULL;
	}

	if (mpc->rxbpre_headroom != rxq->headroom) {
		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
			   mpc->rxbpre_headroom, rxq->headroom);
		return NULL;
	}

	mpc->rxbpre_total--;

	*da = mpc->das_pre[mpc->rxbpre_total];
	va = mpc->rxbufs_pre[mpc->rxbpre_total];
	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;

	/* Deallocate the array after all buffers are gone */
	if (!mpc->rxbpre_total)
		mana_pre_dealloc_rxbufs(mpc);

	return va;
}

/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
			       int mtu, u32 *datasize, u32 *alloc_size,
			       u32 *headroom, u32 *frag_count)
{
	u32 len, buf_size;

	/* Calculate datasize first (consistent across all cases) */
	*datasize = mtu + ETH_HLEN;

	/* For XDP and jumbo frames make sure only one packet fits per page */
	if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
		if (mana_xdp_get(apc)) {
			*headroom = XDP_PACKET_HEADROOM;
			*alloc_size = PAGE_SIZE;
		} else {
			*headroom = 0; /* no support for XDP */
			*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
						     *headroom);
		}

		*frag_count = 1;
		return;
	}

	/* Standard MTU case - optimize for multiple packets per page */
	*headroom = 0;

	/* Calculate base buffer size needed */
	len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
	buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);

	/* Calculate how many packets can fit in a page */
	*frag_count = PAGE_SIZE / buf_size;
	*alloc_size = buf_size;
}
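
/* Illustrative arithmetic (actual values depend on MANA_RXBUF_PAD,
 * MANA_RX_FRAG_ALIGNMENT and SKB_DATA_ALIGN): with a 4K page and a
 * buf_size that works out to 2K for a 1500-byte MTU, frag_count is
 * PAGE_SIZE / buf_size = 2 packets per page.
 */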

int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
{
	struct device *dev;
	struct page *page;
	dma_addr_t da;
	int num_rxb;
	void *va;
	int i;

	mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
			   &mpc->rxbpre_frag_count);

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	num_rxb = num_queues * mpc->rx_queue_size;

	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
	if (!mpc->rxbufs_pre)
		goto error;

	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
	if (!mpc->das_pre)
		goto error;

	mpc->rxbpre_total = 0;

	for (i = 0; i < num_rxb; i++) {
		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
		if (!page)
			goto error;

		va = page_to_virt(page);

		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, da)) {
			put_page(page);
			goto error;
		}

		mpc->rxbufs_pre[i] = va;
		mpc->das_pre[i] = da;
		mpc->rxbpre_total = i + 1;
	}

	return 0;

error:
	netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
	mana_pre_dealloc_rxbufs(mpc);
	return -ENOMEM;
}

static int mana_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	unsigned int old_mtu = ndev->mtu;
	int err;

	/* Pre-allocate buffers to prevent failure in mana_attach later */
	err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new MTU\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	WRITE_ONCE(ndev->mtu, new_mtu);

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		WRITE_ONCE(ndev->mtu, old_mtu);
	}

out:
	mana_pre_dealloc_rxbufs(mpc);
	return err;
}

static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct mana_port_context *apc = netdev_priv(netdev);
	struct mana_context *ac = apc->ac;
	struct gdma_context *gc = ac->gdma_dev->gdma_context;

	/* Already in service, hence a TX queue reset is not required. */
	if (gc->in_service)
		return;

	/* Note: if queue reset work is already pending for this port (apc),
	 * subsequent requests queued from here are ignored, because we use
	 * the same work instance per port (apc).
	 */
	queue_work(ac->per_port_queue_reset_wq, &apc->queue_reset_work);
}

static int mana_shaper_set(struct net_shaper_binding *binding,
			   const struct net_shaper *shaper,
			   struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(binding->netdev);
	u32 old_speed, rate;
	int err;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
		NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
		return -EINVAL;
	}

	if (apc->handle.id && shaper->handle.id != apc->handle.id) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
		return -EOPNOTSUPP;
	}

	if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
		NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
		return -EINVAL;
	}

	rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
	rate = div_u64(rate, 1000);	      /* Convert Kbps to Mbps */
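	/* e.g. bw_max = 200,000,000 bps -> 200,000 Kbps -> rate = 200 Mbps */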

	/* Get current speed */
	err = mana_query_link_cfg(apc);
	old_speed = (err) ? SPEED_UNKNOWN : apc->speed;

	if (!err) {
		err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
		apc->speed = (err) ? old_speed : rate;
		apc->handle = (err) ? apc->handle : shaper->handle;
	}

	return err;
}

static int mana_shaper_del(struct net_shaper_binding *binding,
			   const struct net_shaper_handle *handle,
			   struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(binding->netdev);
	int err;

	err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);

	if (!err) {
		/* Reset mana port context parameters */
		apc->handle.id = 0;
		apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
		apc->speed = apc->max_speed;
	}

	return err;
}

static void mana_shaper_cap(struct net_shaper_binding *binding,
			    enum net_shaper_scope scope,
			    unsigned long *flags)
{
	*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
		 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
}

static const struct net_shaper_ops mana_shaper_ops = {
	.set = mana_shaper_set,
	.delete = mana_shaper_del,
	.capabilities = mana_shaper_cap,
};

static const struct net_device_ops mana_devops = {
	.ndo_open		= mana_open,
	.ndo_stop		= mana_close,
	.ndo_select_queue	= mana_select_queue,
#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
	.ndo_features_check	= mana_features_check,
#endif
	.ndo_start_xmit		= mana_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
	.ndo_bpf		= mana_bpf,
	.ndo_xdp_xmit		= mana_xdp_xmit,
	.ndo_change_mtu		= mana_change_mtu,
	.ndo_tx_timeout		= mana_tx_timeout,
	.net_shaper_ops         = &mana_shaper_ops,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	/*
	 * Make sure subsequent cleanup attempts don't end up removing an
	 * already-removed dentry pointer.
	 */
	debugfs_remove(apc->mana_port_debugfs);
	apc->mana_port_debugfs = NULL;
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static void mana_cleanup_indir_table(struct mana_port_context *apc)
{
	apc->indir_table_sz = 0;
	kfree(apc->indir_table);
	kfree(apc->rxobj_table);
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		if (err == -EOPNOTSUPP)
			return err;

		if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
		    mana_need_log(gc, err))
			dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
				err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_pf_register_hw_vport(struct mana_port_context *apc)
{
	struct mana_register_hw_vport_resp resp = {};
	struct mana_register_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.attached_gfid = 1;
	req.is_pf_default_vport = 1;
	req.allow_all_ether_types = 1;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->port_handle = resp.hw_vport_handle;
	return 0;
}

static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
{
	struct mana_deregister_hw_vport_resp resp = {};
	struct mana_deregister_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.hw_vport_handle = apc->port_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		if (mana_en_need_log(apc, err))
			netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
				   err);

		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_pf_register_filter(struct mana_port_context *apc)
{
	struct mana_register_filter_resp resp = {};
	struct mana_register_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->pf_filter_handle = resp.filter_handle;
	return 0;
}

static void mana_pf_deregister_filter(struct mana_port_context *apc)
{
	struct mana_deregister_filter_resp resp = {};
	struct mana_deregister_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.filter_handle = apc->pf_filter_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		if (mana_en_need_log(apc, err))
			netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
				   err);

		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister filter: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports, u8 *bm_hostmode)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));

	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;

	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
		gc->adapter_mtu = resp.adapter_mtu;
	else
		gc->adapter_mtu = ETH_FRAME_LEN;

	if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
		*bm_hostmode = resp.bm_hostmode;
	else
		*bm_hostmode = 0;

	debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);

	return 0;
}

static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	if (resp.num_indirection_ent > 0 &&
	    resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
	    is_power_of_2(resp.num_indirection_ent)) {
		*num_indir_entry = resp.num_indirection_ent;
	} else {
		netdev_warn(apc->ndev,
			    "Setting indirection table size to default %d for vPort %d\n",
			    MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
		*num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
	}

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

void mana_uncfg_vport(struct mana_port_context *apc)
{
	mutex_lock(&apc->vport_mutex);
	apc->vport_use_count--;
	WARN_ON(apc->vport_use_count < 0);
	mutex_unlock(&apc->vport_mutex);
}
EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	/* This function is used to program the Ethernet port in the hardware
	 * table. It can be called from the Ethernet driver or the RDMA driver.
	 *
	 * For Ethernet usage, the hardware supports only one active user on a
	 * physical port. The driver checks on the port usage before programming
	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
	 * device to kernel NET layer (Ethernet driver).
	 *
	 * Because the RDMA driver doesn't know in advance which QP type the
	 * user will create, it exposes the device with all its ports. The user
	 * may not be able to create RAW QP on a port if this port is already
	 * in use by the Ethernet driver from the kernel.
	 *
	 * This physical port limitation only applies to the RAW QP. For RC QP,
	 * the hardware doesn't have this limitation. The user can create RC
	 * QPs on a physical port up to the hardware limits independent of the
	 * Ethernet usage on the same port.
	 */
	mutex_lock(&apc->vport_mutex);
	if (apc->vport_use_count > 0) {
		mutex_unlock(&apc->vport_mutex);
		return -EBUSY;
	}
	apc->vport_use_count++;
	mutex_unlock(&apc->vport_mutex);

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;

	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
		    apc->port_handle, protection_dom_id, doorbell_pg_id);
out:
	if (err)
		mana_uncfg_vport(apc);

	return err;
}
EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");

static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	u32 req_buf_size;
	int err;

	req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = apc->port_handle;
	req->num_indir_entries = apc->indir_table_sz;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;
	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;
	req->default_rxobj = apc->default_rxobj;
	req->cqe_coalescing_enable = 0;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab)
		memcpy(req->indir_tab, apc->rxobj_table,
		       flex_array_size(req, indir_tab, req->num_indir_entries));

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
				sizeof(resp));
	if (err) {
		if (mana_en_need_log(apc, err))
			netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);

		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
				   sizeof(resp));
	if (err) {
		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
	}

	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
		    apc->port_handle, apc->indir_table_sz);
out:
	kfree(req);
	return err;
}

int mana_query_link_cfg(struct mana_port_context *apc)
{
	struct net_device *ndev = apc->ndev;
	struct mana_query_link_config_resp resp = {};
	struct mana_query_link_config_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport = apc->port_handle;
	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));

	if (err) {
		if (err == -EOPNOTSUPP) {
			netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
			return err;
		}
		netdev_err(ndev, "Failed to query link config: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
				   sizeof(resp));

	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	if (resp.qos_unconfigured) {
		err = -EINVAL;
		return err;
	}
	apc->speed = resp.link_speed_mbps;
	apc->max_speed = resp.qos_speed_mbps;
	return 0;
}

int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
		      int enable_clamping)
{
	struct mana_set_bw_clamp_resp resp = {};
	struct mana_set_bw_clamp_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.link_speed_mbps = speed;
	req.enable_clamping = enable_clamping;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));

	if (err) {
		if (err == -EOPNOTSUPP) {
			netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
			return err;
		}
		netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d",
			   speed, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
				   sizeof(resp));

	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	if (resp.qos_unconfigured)
		netdev_info(ndev, "QoS is unconfigured\n");

	return 0;
}

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}
EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		if (mana_en_need_log(apc, err))
			netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);

		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}
EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");

static void mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_queue *eq;
	int i;

	if (!ac->eqs)
		return;

	debugfs_remove_recursive(ac->mana_eqs_debugfs);
	ac->mana_eqs_debugfs = NULL;

	for (i = 0; i < gc->max_num_queues; i++) {
		eq = ac->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(ac->eqs);
	ac->eqs = NULL;
}

static void mana_create_eq_debugfs(struct mana_context *ac, int i)
{
	struct mana_eq eq = ac->eqs[i];
	char eqnum[32];

	sprintf(eqnum, "eq%d", i);
	eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
	debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head);
	debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail);
	debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops);
}

static int mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
			  GFP_KERNEL);
	if (!ac->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);

	for (i = 0; i < gc->max_num_queues; i++) {
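		/* MSI-X index 0 is normally claimed by the hardware channel
		 * (HWC), hence the "+ 1" offset when spreading the EQs across
		 * the usable MSI-X vectors.
		 */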
		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err) {
			dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
			goto out;
		}
		mana_create_eq_debugfs(ac, i);
	}

	return 0;
out:
	mana_destroy_eq(ac);
	return err;
}

static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int rxq_idx;
	struct mana_rxq *rxq;
	int err;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		err = mana_fence_rq(apc, rxq);

		/* In case of any error, use sleep instead. */
		if (err)
			msleep(100);
	}
}

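/* The head/tail counters are free-running u32 values, so unsigned
 * wraparound keeps "head - tail" correct; e.g. (illustrative) with
 * head = 5 after wrapping and tail = 0xfffffffe, head - tail = 7 units
 * are in flight.
 */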
mana_move_wq_tail(struct gdma_queue * wq,u32 num_units)1719 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1720 {
1721 	u32 used_space_old;
1722 	u32 used_space_new;
1723 
1724 	used_space_old = wq->head - wq->tail;
1725 	used_space_new = wq->head - (wq->tail + num_units);
1726 
1727 	if (WARN_ON_ONCE(used_space_new > used_space_old))
1728 		return -ERANGE;
1729 
1730 	wq->tail += num_units;
1731 	return 0;
1732 }
1733 
mana_unmap_skb(struct sk_buff * skb,struct mana_port_context * apc)1734 void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1735 {
1736 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1737 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1738 	struct device *dev = gc->dev;
1739 	int hsg, i;
1740 
1741 	/* Number of SGEs of linear part */
1742 	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1743 
1744 	for (i = 0; i < hsg; i++)
1745 		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1746 				 DMA_TO_DEVICE);
1747 
1748 	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1749 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1750 			       DMA_TO_DEVICE);
1751 }
1752 
mana_poll_tx_cq(struct mana_cq * cq)1753 static void mana_poll_tx_cq(struct mana_cq *cq)
1754 {
1755 	struct gdma_comp *completions = cq->gdma_comp_buf;
1756 	struct gdma_posted_wqe_info *wqe_info;
1757 	unsigned int pkt_transmitted = 0;
1758 	unsigned int wqe_unit_cnt = 0;
1759 	struct mana_txq *txq = cq->txq;
1760 	struct mana_port_context *apc;
1761 	struct netdev_queue *net_txq;
1762 	struct gdma_queue *gdma_wq;
1763 	unsigned int avail_space;
1764 	struct net_device *ndev;
1765 	struct sk_buff *skb;
1766 	bool txq_stopped;
1767 	int comp_read;
1768 	int i;
1769 
1770 	ndev = txq->ndev;
1771 	apc = netdev_priv(ndev);
1772 
1773 	/* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
1774 	 * doorbell can be rung in time for the hardware's requirement
1775 	 * of at least one doorbell ring every 8 wraparounds.
1776 	 */
1777 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1778 				    min((cq->gdma_cq->queue_size /
1779 					  COMP_ENTRY_SIZE) * 4,
1780 					 CQE_POLLING_BUFFER));
1781 
1782 	if (comp_read < 1)
1783 		return;
1784 
1785 	for (i = 0; i < comp_read; i++) {
1786 		struct mana_tx_comp_oob *cqe_oob;
1787 
1788 		if (WARN_ON_ONCE(!completions[i].is_sq))
1789 			return;
1790 
1791 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1792 		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1793 				 MANA_CQE_COMPLETION))
1794 			return;
1795 
1796 		switch (cqe_oob->cqe_hdr.cqe_type) {
1797 		case CQE_TX_OKAY:
1798 			break;
1799 
1800 		case CQE_TX_SA_DROP:
1801 		case CQE_TX_MTU_DROP:
1802 		case CQE_TX_INVALID_OOB:
1803 		case CQE_TX_INVALID_ETH_TYPE:
1804 		case CQE_TX_HDR_PROCESSING_ERROR:
1805 		case CQE_TX_VF_DISABLED:
1806 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1807 		case CQE_TX_VPORT_DISABLED:
1808 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1809 			if (net_ratelimit())
1810 				netdev_err(ndev, "TX: CQE error %d\n",
1811 					   cqe_oob->cqe_hdr.cqe_type);
1812 
1813 			apc->eth_stats.tx_cqe_err++;
1814 			break;
1815 
1816 		default:
1817 			/* If the CQE type is unknown, log an error,
1818 			 * and still free the SKB, update tail, etc.
1819 			 */
1820 			if (net_ratelimit())
1821 				netdev_err(ndev, "TX: unknown CQE type %d\n",
1822 					   cqe_oob->cqe_hdr.cqe_type);
1823 
1824 			apc->eth_stats.tx_cqe_unknown_type++;
1825 			break;
1826 		}
1827 
1828 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1829 			return;
1830 
1831 		skb = skb_dequeue(&txq->pending_skbs);
1832 		if (WARN_ON_ONCE(!skb))
1833 			return;
1834 
1835 		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1836 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1837 
1838 		mana_unmap_skb(skb, apc);
1839 
1840 		napi_consume_skb(skb, cq->budget);
1841 
1842 		pkt_transmitted++;
1843 	}
1844 
1845 	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1846 		return;
1847 
1848 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1849 
1850 	gdma_wq = txq->gdma_sq;
1851 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1852 
1853 	/* Ensure tail updated before checking q stop */
1854 	smp_mb();
1855 
1856 	net_txq = txq->net_txq;
1857 	txq_stopped = netif_tx_queue_stopped(net_txq);
1858 
1859 	/* Ensure checking txq_stopped before apc->port_is_up. */
1860 	smp_rmb();
1861 
1862 	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1863 		netif_tx_wake_queue(net_txq);
1864 		apc->eth_stats.wake_queue++;
1865 	}
1866 
1867 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1868 		WARN_ON_ONCE(1);
1869 
1870 	cq->work_done = pkt_transmitted;
1871 }
1872 
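/* Repost the receive WQE at the current buffer index and advance the index. */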
1873 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1874 {
1875 	struct mana_recv_buf_oob *recv_buf_oob;
1876 	u32 curr_index;
1877 	int err;
1878 
1879 	curr_index = rxq->buf_index++;
1880 	if (rxq->buf_index == rxq->num_rx_buf)
1881 		rxq->buf_index = 0;
1882 
1883 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1884 
1885 	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1886 					&recv_buf_oob->wqe_inf);
1887 	if (WARN_ON_ONCE(err))
1888 		return;
1889 
1890 	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1891 }
1892 
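/* Wrap an RX buffer in an skb. If XDP has run on this buffer, honor the
 * (possibly adjusted) data pointers and metadata; otherwise just reserve
 * the headroom and set the packet length.
 */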
1893 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1894 				      uint pkt_len, struct xdp_buff *xdp)
1895 {
1896 	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1897 
1898 	if (!skb)
1899 		return NULL;
1900 
1901 	if (xdp->data_hard_start) {
1902 		u32 metasize = xdp->data - xdp->data_meta;
1903 
1904 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1905 		skb_put(skb, xdp->data_end - xdp->data);
1906 		if (metasize)
1907 			skb_metadata_set(skb, metasize);
1908 		return skb;
1909 	}
1910 
1911 	skb_reserve(skb, rxq->headroom);
1912 	skb_put(skb, pkt_len);
1913 
1914 	return skb;
1915 }
1916 
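/* Deliver one received buffer: run XDP first, then build an skb and fill
 * in checksum, RSS hash, and VLAN tag from the CQE before passing it to
 * GRO (or to mana_xdp_tx() for XDP_TX).
 */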
1917 static void mana_rx_skb(void *buf_va, bool from_pool,
1918 			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1919 {
1920 	struct mana_stats_rx *rx_stats = &rxq->stats;
1921 	struct net_device *ndev = rxq->ndev;
1922 	uint pkt_len = cqe->ppi[0].pkt_len;
1923 	u16 rxq_idx = rxq->rxq_idx;
1924 	struct napi_struct *napi;
1925 	struct xdp_buff xdp = {};
1926 	struct sk_buff *skb;
1927 	u32 hash_value;
1928 	u32 act;
1929 
1930 	rxq->rx_cq.work_done++;
1931 	napi = &rxq->rx_cq.napi;
1932 
1933 	if (!buf_va) {
1934 		++ndev->stats.rx_dropped;
1935 		return;
1936 	}
1937 
1938 	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1939 
1940 	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1941 		return;
1942 
1943 	if (act != XDP_PASS && act != XDP_TX)
1944 		goto drop_xdp;
1945 
1946 	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1947 
1948 	if (!skb)
1949 		goto drop;
1950 
1951 	if (from_pool)
1952 		skb_mark_for_recycle(skb);
1953 
1954 	skb->dev = napi->dev;
1955 
1956 	skb->protocol = eth_type_trans(skb, ndev);
1957 	skb_checksum_none_assert(skb);
1958 	skb_record_rx_queue(skb, rxq_idx);
1959 
1960 	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1961 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1962 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1963 	}
1964 
1965 	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1966 		hash_value = cqe->ppi[0].pkt_hash;
1967 
1968 		if (cqe->rx_hashtype & MANA_HASH_L4)
1969 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1970 		else
1971 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1972 	}
1973 
1974 	if (cqe->rx_vlantag_present) {
1975 		u16 vlan_tci = cqe->rx_vlan_id;
1976 
1977 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1978 	}
1979 
1980 	u64_stats_update_begin(&rx_stats->syncp);
1981 	rx_stats->packets++;
1982 	rx_stats->bytes += pkt_len;
1983 
1984 	if (act == XDP_TX)
1985 		rx_stats->xdp_tx++;
1986 	u64_stats_update_end(&rx_stats->syncp);
1987 
1988 	if (act == XDP_TX) {
1989 		skb_set_queue_mapping(skb, rxq_idx);
1990 		mana_xdp_tx(skb, ndev);
1991 		return;
1992 	}
1993 
1994 	napi_gro_receive(napi, skb);
1995 
1996 	return;
1997 
1998 drop_xdp:
1999 	u64_stats_update_begin(&rx_stats->syncp);
2000 	rx_stats->xdp_drop++;
2001 	u64_stats_update_end(&rx_stats->syncp);
2002 
2003 drop:
2004 	if (from_pool) {
2005 		if (rxq->frag_count == 1)
2006 			page_pool_recycle_direct(rxq->page_pool,
2007 						 virt_to_head_page(buf_va));
2008 		else
2009 			page_pool_free_va(rxq->page_pool, buf_va, true);
2010 	} else {
2011 		WARN_ON_ONCE(rxq->xdp_save_va);
2012 		/* Save for reuse */
2013 		rxq->xdp_save_va = buf_va;
2014 	}
2015 
2016 	++ndev->stats.rx_dropped;
2017 
2018 	return;
2019 }
2020 
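/* Allocate an RX buffer and return its VA and DMA address. Queues with one
 * fragment per page (jumbo frames, XDP) take a full page and map it here;
 * otherwise a page-pool fragment is used, with the DMA mapping owned by
 * the pool.
 */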
2021 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
2022 			     dma_addr_t *da, bool *from_pool)
2023 {
2024 	struct page *page;
2025 	u32 offset;
2026 	void *va;
2027 	*from_pool = false;
2028 
2029 	/* Don't use fragments for jumbo frames or XDP where it's 1 fragment
2030 	 * per page.
2031 	 */
2032 	if (rxq->frag_count == 1) {
2033 		/* Reuse XDP dropped page if available */
2034 		if (rxq->xdp_save_va) {
2035 			va = rxq->xdp_save_va;
2036 			page = virt_to_head_page(va);
2037 			rxq->xdp_save_va = NULL;
2038 		} else {
2039 			page = page_pool_dev_alloc_pages(rxq->page_pool);
2040 			if (!page)
2041 				return NULL;
2042 
2043 			*from_pool = true;
2044 			va = page_to_virt(page);
2045 		}
2046 
2047 		*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
2048 				     DMA_FROM_DEVICE);
2049 		if (dma_mapping_error(dev, *da)) {
2050 			mana_put_rx_page(rxq, page, *from_pool);
2051 			return NULL;
2052 		}
2053 
2054 		return va;
2055 	}
2056 
2057 	page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
2058 					 rxq->alloc_size);
2059 	if (!page)
2060 		return NULL;
2061 
2062 	va  = page_to_virt(page) + offset;
2063 	*da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
2064 	*from_pool = true;
2065 
2066 	return va;
2067 }
2068 
2069 /* Allocate a new frag for the RX buffer, and return the old buf to the caller */
2070 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
2071 			       struct mana_recv_buf_oob *rxoob, void **old_buf,
2072 			       bool *old_fp)
2073 {
2074 	bool from_pool;
2075 	dma_addr_t da;
2076 	void *va;
2077 
2078 	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
2079 	if (!va)
2080 		return;
2081 	if (!rxoob->from_pool || rxq->frag_count == 1)
2082 		dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
2083 				 DMA_FROM_DEVICE);
2084 	*old_buf = rxoob->buf_va;
2085 	*old_fp = rxoob->from_pool;
2086 
2087 	rxoob->buf_va = va;
2088 	rxoob->sgl[0].address = da;
2089 	rxoob->from_pool = from_pool;
2090 }
2091 
2092 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
2093 				struct gdma_comp *cqe)
2094 {
2095 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
2096 	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
2097 	struct net_device *ndev = rxq->ndev;
2098 	struct mana_recv_buf_oob *rxbuf_oob;
2099 	struct mana_port_context *apc;
2100 	struct device *dev = gc->dev;
2101 	void *old_buf = NULL;
2102 	u32 curr, pktlen;
2103 	bool old_fp;
2104 
2105 	apc = netdev_priv(ndev);
2106 
2107 	switch (oob->cqe_hdr.cqe_type) {
2108 	case CQE_RX_OKAY:
2109 		break;
2110 
2111 	case CQE_RX_TRUNCATED:
2112 		++ndev->stats.rx_dropped;
2113 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
2114 		netdev_warn_once(ndev, "Dropped a truncated packet\n");
2115 		goto drop;
2116 
2117 	case CQE_RX_COALESCED_4:
2118 		netdev_err(ndev, "RX coalescing is unsupported\n");
2119 		apc->eth_stats.rx_coalesced_err++;
2120 		return;
2121 
2122 	case CQE_RX_OBJECT_FENCE:
2123 		complete(&rxq->fence_event);
2124 		return;
2125 
2126 	default:
2127 		netdev_err(ndev, "Unknown RX CQE type = %d\n",
2128 			   oob->cqe_hdr.cqe_type);
2129 		apc->eth_stats.rx_cqe_unknown_type++;
2130 		return;
2131 	}
2132 
2133 	pktlen = oob->ppi[0].pkt_len;
2134 
2135 	if (pktlen == 0) {
2136 		/* Data packets should never have a packet length of zero */
2137 		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
2138 			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
2139 		return;
2140 	}
2141 
2142 	curr = rxq->buf_index;
2143 	rxbuf_oob = &rxq->rx_oobs[curr];
2144 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
2145 
2146 	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
2147 
2148 	/* Unsuccessful refill will have old_buf == NULL.
2149 	 * In this case, mana_rx_skb() will drop the packet.
2150 	 */
2151 	mana_rx_skb(old_buf, old_fp, oob, rxq);
2152 
2153 drop:
2154 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
2155 
2156 	mana_post_pkt_rxq(rxq);
2157 }
2158 
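/* Reap RX completions, limited to 4 CQ wraparounds per poll, then ring the
 * RQ doorbell once for all the reposted buffers.
 */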
2159 static void mana_poll_rx_cq(struct mana_cq *cq)
2160 {
2161 	struct gdma_comp *comp = cq->gdma_comp_buf;
2162 	struct mana_rxq *rxq = cq->rxq;
2163 	int comp_read, i;
2164 
2165 	/* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
2166 	 * doorbell can be rung in time for the hardware's requirement
2167 	 * of at least one doorbell ring every 8 wraparounds.
2168 	 */
2169 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp,
2170 				    min((cq->gdma_cq->queue_size /
2171 					  COMP_ENTRY_SIZE) * 4,
2172 					 CQE_POLLING_BUFFER));
2173 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
2174 
2175 	rxq->xdp_flush = false;
2176 
2177 	for (i = 0; i < comp_read; i++) {
2178 		if (WARN_ON_ONCE(comp[i].is_sq))
2179 			return;
2180 
2181 		/* verify recv cqe references the right rxq */
2182 		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
2183 			return;
2184 
2185 		mana_process_rx_cqe(rxq, cq, &comp[i]);
2186 	}
2187 
2188 	if (comp_read > 0) {
2189 		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
2190 
2191 		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
2192 	}
2193 
2194 	if (rxq->xdp_flush)
2195 		xdp_do_flush();
2196 }
2197 
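/* Common CQ handler: dispatch to the RX or TX poller, then either re-arm
 * the CQ (work completed under budget) or ring the doorbell without arming
 * to satisfy the hardware's 8-wraparound requirement.
 */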
2198 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
2199 {
2200 	struct mana_cq *cq = context;
2201 	int w;
2202 
2203 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
2204 
2205 	if (cq->type == MANA_CQ_TYPE_RX)
2206 		mana_poll_rx_cq(cq);
2207 	else
2208 		mana_poll_tx_cq(cq);
2209 
2210 	w = cq->work_done;
2211 	cq->work_done_since_doorbell += w;
2212 
2213 	if (w < cq->budget) {
2214 		mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
2215 		cq->work_done_since_doorbell = 0;
2216 		napi_complete_done(&cq->napi, w);
2217 	} else if (cq->work_done_since_doorbell >=
2218 		   (cq->gdma_cq->queue_size / COMP_ENTRY_SIZE) * 4) {
2219 		/* MANA hardware requires at least one doorbell ring every 8
2220 		 * wraparounds of CQ even if there is no need to arm the CQ.
2221 		 * This driver rings the doorbell as soon as it has processed
2222 		 * 4 wraparounds.
2223 		 */
2224 		mana_gd_ring_cq(gdma_queue, 0);
2225 		cq->work_done_since_doorbell = 0;
2226 	}
2227 
2228 	return w;
2229 }
2230 
2231 static int mana_poll(struct napi_struct *napi, int budget)
2232 {
2233 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
2234 	int w;
2235 
2236 	cq->work_done = 0;
2237 	cq->budget = budget;
2238 
2239 	w = mana_cq_handler(cq, cq->gdma_cq);
2240 
2241 	return min(w, budget);
2242 }
2243 
2244 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
2245 {
2246 	struct mana_cq *cq = context;
2247 
2248 	napi_schedule_irqoff(&cq->napi);
2249 }
2250 
2251 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
2252 {
2253 	struct gdma_dev *gd = apc->ac->gdma_dev;
2254 
2255 	if (!cq->gdma_cq)
2256 		return;
2257 
2258 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
2259 }
2260 
2261 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
2262 {
2263 	struct gdma_dev *gd = apc->ac->gdma_dev;
2264 
2265 	if (!txq->gdma_sq)
2266 		return;
2267 
2268 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
2269 }
2270 
2271 static void mana_destroy_txq(struct mana_port_context *apc)
2272 {
2273 	struct napi_struct *napi;
2274 	int i;
2275 
2276 	if (!apc->tx_qp)
2277 		return;
2278 
2279 	for (i = 0; i < apc->num_queues; i++) {
2280 		debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
2281 		apc->tx_qp[i].mana_tx_debugfs = NULL;
2282 
2283 		napi = &apc->tx_qp[i].tx_cq.napi;
2284 		if (apc->tx_qp[i].txq.napi_initialized) {
2285 			napi_synchronize(napi);
2286 			napi_disable_locked(napi);
2287 			netif_napi_del_locked(napi);
2288 			apc->tx_qp[i].txq.napi_initialized = false;
2289 		}
2290 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2291 
2292 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2293 
2294 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2295 	}
2296 
2297 	kfree(apc->tx_qp);
2298 	apc->tx_qp = NULL;
2299 }
2300 
2301 static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
2302 {
2303 	struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
2304 	char qnum[32];
2305 
2306 	sprintf(qnum, "TX-%d", idx);
2307 	tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2308 	debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
2309 			   &tx_qp->txq.gdma_sq->head);
2310 	debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
2311 			   &tx_qp->txq.gdma_sq->tail);
2312 	debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
2313 			   &tx_qp->txq.pending_skbs.qlen);
2314 	debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
2315 			   &tx_qp->tx_cq.gdma_cq->head);
2316 	debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
2317 			   &tx_qp->tx_cq.gdma_cq->tail);
2318 	debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
2319 			   &tx_qp->tx_cq.budget);
2320 	debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
2321 			    tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
2322 	debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
2323 			    tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
2324 }
2325 
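/* Create one SQ and its completion queue per TX queue, bind each pair to a
 * HW object via mana_create_wq_obj(), and hook the CQ into NAPI.
 */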
2326 static int mana_create_txq(struct mana_port_context *apc,
2327 			   struct net_device *net)
2328 {
2329 	struct mana_context *ac = apc->ac;
2330 	struct gdma_dev *gd = ac->gdma_dev;
2331 	struct mana_obj_spec wq_spec;
2332 	struct mana_obj_spec cq_spec;
2333 	struct gdma_queue_spec spec;
2334 	struct gdma_context *gc;
2335 	struct mana_txq *txq;
2336 	struct mana_cq *cq;
2337 	u32 txq_size;
2338 	u32 cq_size;
2339 	int err;
2340 	int i;
2341 
2342 	apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues);
2343 	if (!apc->tx_qp)
2344 		return -ENOMEM;
2345 
2346 	/*  The minimum size of a WQE is 32 bytes, hence
2347 	 *  apc->tx_queue_size represents the maximum number of WQEs
2348 	 *  the SQ can store. This value is then used to size other queues
2349 	 *  to prevent overflow.
2350 	 *  Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
2351 	 *  value of apc->tx_queue_size is 128, which makes txq_size
2352 	 *  128 * 32 = 4096, and all larger values of apc->tx_queue_size
2353 	 *  are powers of two.
2354 	 */
2355 	txq_size = apc->tx_queue_size * 32;
2356 
2357 	cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
2358 
2359 	gc = gd->gdma_context;
2360 
2361 	for (i = 0; i < apc->num_queues; i++) {
2362 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2363 
2364 		/* Create SQ */
2365 		txq = &apc->tx_qp[i].txq;
2366 
2367 		u64_stats_init(&txq->stats.syncp);
2368 		txq->ndev = net;
2369 		txq->net_txq = netdev_get_tx_queue(net, i);
2370 		txq->vp_offset = apc->tx_vp_offset;
2371 		txq->napi_initialized = false;
2372 		skb_queue_head_init(&txq->pending_skbs);
2373 
2374 		memset(&spec, 0, sizeof(spec));
2375 		spec.type = GDMA_SQ;
2376 		spec.monitor_avl_buf = true;
2377 		spec.queue_size = txq_size;
2378 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2379 		if (err)
2380 			goto out;
2381 
2382 		/* Create SQ's CQ */
2383 		cq = &apc->tx_qp[i].tx_cq;
2384 		cq->type = MANA_CQ_TYPE_TX;
2385 
2386 		cq->txq = txq;
2387 
2388 		memset(&spec, 0, sizeof(spec));
2389 		spec.type = GDMA_CQ;
2390 		spec.monitor_avl_buf = false;
2391 		spec.queue_size = cq_size;
2392 		spec.cq.callback = mana_schedule_napi;
2393 		spec.cq.parent_eq = ac->eqs[i].eq;
2394 		spec.cq.context = cq;
2395 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2396 		if (err)
2397 			goto out;
2398 
2399 		memset(&wq_spec, 0, sizeof(wq_spec));
2400 		memset(&cq_spec, 0, sizeof(cq_spec));
2401 
2402 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2403 		wq_spec.queue_size = txq->gdma_sq->queue_size;
2404 
2405 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2406 		cq_spec.queue_size = cq->gdma_cq->queue_size;
2407 		cq_spec.modr_ctx_id = 0;
2408 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2409 
2410 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2411 					 &wq_spec, &cq_spec,
2412 					 &apc->tx_qp[i].tx_object);
2413 
2414 		if (err)
2415 			goto out;
2416 
2417 		txq->gdma_sq->id = wq_spec.queue_index;
2418 		cq->gdma_cq->id = cq_spec.queue_index;
2419 
2420 		txq->gdma_sq->mem_info.dma_region_handle =
2421 			GDMA_INVALID_DMA_REGION;
2422 		cq->gdma_cq->mem_info.dma_region_handle =
2423 			GDMA_INVALID_DMA_REGION;
2424 
2425 		txq->gdma_txq_id = txq->gdma_sq->id;
2426 
2427 		cq->gdma_id = cq->gdma_cq->id;
2428 
2429 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2430 			err = -EINVAL;
2431 			goto out;
2432 		}
2433 
2434 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2435 
2436 		mana_create_txq_debugfs(apc, i);
2437 
2438 		set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
2439 		netif_napi_add_locked(net, &cq->napi, mana_poll);
2440 		napi_enable_locked(&cq->napi);
2441 		txq->napi_initialized = true;
2442 
2443 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2444 	}
2445 
2446 	return 0;
2447 out:
2448 	netdev_err(net, "Failed to create %d TX queues, %d\n",
2449 		   apc->num_queues, err);
2450 	mana_destroy_txq(apc);
2451 	return err;
2452 }
2453 
2454 static void mana_destroy_rxq(struct mana_port_context *apc,
2455 			     struct mana_rxq *rxq, bool napi_initialized)
2456 
2457 {
2458 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2459 	struct mana_recv_buf_oob *rx_oob;
2460 	struct device *dev = gc->dev;
2461 	struct napi_struct *napi;
2462 	struct page *page;
2463 	int i;
2464 
2465 	if (!rxq)
2466 		return;
2467 
2468 	debugfs_remove_recursive(rxq->mana_rx_debugfs);
2469 	rxq->mana_rx_debugfs = NULL;
2470 
2471 	napi = &rxq->rx_cq.napi;
2472 
2473 	if (napi_initialized) {
2474 		napi_synchronize(napi);
2475 
2476 		napi_disable_locked(napi);
2477 		netif_napi_del_locked(napi);
2478 	}
2479 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
2480 
2481 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2482 
2483 	mana_deinit_cq(apc, &rxq->rx_cq);
2484 
2485 	if (rxq->xdp_save_va)
2486 		put_page(virt_to_head_page(rxq->xdp_save_va));
2487 
2488 	for (i = 0; i < rxq->num_rx_buf; i++) {
2489 		rx_oob = &rxq->rx_oobs[i];
2490 
2491 		if (!rx_oob->buf_va)
2492 			continue;
2493 
2494 		page = virt_to_head_page(rx_oob->buf_va);
2495 
2496 		if (rxq->frag_count == 1 || !rx_oob->from_pool) {
2497 			dma_unmap_single(dev, rx_oob->sgl[0].address,
2498 					 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2499 			mana_put_rx_page(rxq, page, rx_oob->from_pool);
2500 		} else {
2501 			page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
2502 		}
2503 
2504 		rx_oob->buf_va = NULL;
2505 	}
2506 
2507 	page_pool_destroy(rxq->page_pool);
2508 
2509 	if (rxq->gdma_rq)
2510 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2511 
2512 	kfree(rxq);
2513 }
2514 
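/* Fill one receive WQE OOB with a freshly allocated RX buffer, preferring
 * pre-allocated buffers when they are available.
 */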
2515 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2516 			    struct mana_rxq *rxq, struct device *dev)
2517 {
2518 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2519 	bool from_pool = false;
2520 	dma_addr_t da;
2521 	void *va;
2522 
2523 	if (mpc->rxbufs_pre)
2524 		va = mana_get_rxbuf_pre(rxq, &da);
2525 	else
2526 		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
2527 
2528 	if (!va)
2529 		return -ENOMEM;
2530 
2531 	rx_oob->buf_va = va;
2532 	rx_oob->from_pool = from_pool;
2533 
2534 	rx_oob->sgl[0].address = da;
2535 	rx_oob->sgl[0].size = rxq->datasize;
2536 	rx_oob->sgl[0].mem_key = mem_key;
2537 
2538 	return 0;
2539 }
2540 
2541 #define MANA_WQE_HEADER_SIZE 16
2542 #define MANA_WQE_SGE_SIZE 16
2543 
2544 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2545 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2546 {
2547 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2548 	struct mana_recv_buf_oob *rx_oob;
2549 	struct device *dev = gc->dev;
2550 	u32 buf_idx;
2551 	int ret;
2552 
2553 	WARN_ON(rxq->datasize == 0);
2554 
2555 	*rxq_size = 0;
2556 	*cq_size = 0;
2557 
2558 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2559 		rx_oob = &rxq->rx_oobs[buf_idx];
2560 		memset(rx_oob, 0, sizeof(*rx_oob));
2561 
2562 		rx_oob->num_sge = 1;
2563 
2564 		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2565 				       dev);
2566 		if (ret)
2567 			return ret;
2568 
2569 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2570 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2571 		rx_oob->wqe_req.inline_oob_size = 0;
2572 		rx_oob->wqe_req.inline_oob_data = NULL;
2573 		rx_oob->wqe_req.flags = 0;
2574 		rx_oob->wqe_req.client_data_unit = 0;
2575 
2576 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2577 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2578 		*cq_size += COMP_ENTRY_SIZE;
2579 	}
2580 
2581 	return 0;
2582 }
2583 
2584 static int mana_push_wqe(struct mana_rxq *rxq)
2585 {
2586 	struct mana_recv_buf_oob *rx_oob;
2587 	u32 buf_idx;
2588 	int err;
2589 
2590 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2591 		rx_oob = &rxq->rx_oobs[buf_idx];
2592 
2593 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2594 					    &rx_oob->wqe_inf);
2595 		if (err)
2596 			return -ENOSPC;
2597 	}
2598 
2599 	return 0;
2600 }
2601 
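/* Create the page pool of an RX queue. One page backs frag_count buffers,
 * so the pool holds rx_queue_size / frag_count (+ 1) pages; in fragment
 * mode the pool also owns the DMA mapping and the device sync.
 */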
2602 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2603 {
2604 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2605 	struct page_pool_params pprm = {};
2606 	int ret;
2607 
2608 	pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
2609 	pprm.nid = gc->numa_node;
2610 	pprm.napi = &rxq->rx_cq.napi;
2611 	pprm.netdev = rxq->ndev;
2612 	pprm.order = get_order(rxq->alloc_size);
2613 	pprm.queue_idx = rxq->rxq_idx;
2614 	pprm.dev = gc->dev;
2615 
2616 	/* Let the page pool do the DMA mapping when page sharing with
2617 	 * multiple fragments is enabled for RX buffers.
2618 	 */
2619 	if (rxq->frag_count > 1) {
2620 		pprm.flags =  PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2621 		pprm.max_len = PAGE_SIZE;
2622 		pprm.dma_dir = DMA_FROM_DEVICE;
2623 	}
2624 
2625 	rxq->page_pool = page_pool_create(&pprm);
2626 
2627 	if (IS_ERR(rxq->page_pool)) {
2628 		ret = PTR_ERR(rxq->page_pool);
2629 		rxq->page_pool = NULL;
2630 		return ret;
2631 	}
2632 
2633 	return 0;
2634 }
2635 
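/* Create one RX queue: its page pool, buffers and WQEs, the RQ, the RQ's
 * completion queue, the HW WQ object, and the NAPI/XDP registration.
 */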
2636 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2637 					u32 rxq_idx, struct mana_eq *eq,
2638 					struct net_device *ndev)
2639 {
2640 	struct gdma_dev *gd = apc->ac->gdma_dev;
2641 	struct mana_obj_spec wq_spec;
2642 	struct mana_obj_spec cq_spec;
2643 	struct gdma_queue_spec spec;
2644 	struct mana_cq *cq = NULL;
2645 	struct gdma_context *gc;
2646 	u32 cq_size, rq_size;
2647 	struct mana_rxq *rxq;
2648 	int err;
2649 
2650 	gc = gd->gdma_context;
2651 
2652 	rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size);
2653 	if (!rxq)
2654 		return NULL;
2655 
2656 	rxq->ndev = ndev;
2657 	rxq->num_rx_buf = apc->rx_queue_size;
2658 	rxq->rxq_idx = rxq_idx;
2659 	rxq->rxobj = INVALID_MANA_HANDLE;
2660 
2661 	mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2662 			   &rxq->headroom, &rxq->frag_count);
2663 	/* Create page pool for RX queue */
2664 	err = mana_create_page_pool(rxq, gc);
2665 	if (err) {
2666 		netdev_err(ndev, "Create page pool err:%d\n", err);
2667 		goto out;
2668 	}
2669 
2670 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2671 	if (err)
2672 		goto out;
2673 
2674 	rq_size = MANA_PAGE_ALIGN(rq_size);
2675 	cq_size = MANA_PAGE_ALIGN(cq_size);
2676 
2677 	/* Create RQ */
2678 	memset(&spec, 0, sizeof(spec));
2679 	spec.type = GDMA_RQ;
2680 	spec.monitor_avl_buf = true;
2681 	spec.queue_size = rq_size;
2682 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2683 	if (err)
2684 		goto out;
2685 
2686 	/* Create RQ's CQ */
2687 	cq = &rxq->rx_cq;
2688 	cq->type = MANA_CQ_TYPE_RX;
2689 	cq->rxq = rxq;
2690 
2691 	memset(&spec, 0, sizeof(spec));
2692 	spec.type = GDMA_CQ;
2693 	spec.monitor_avl_buf = false;
2694 	spec.queue_size = cq_size;
2695 	spec.cq.callback = mana_schedule_napi;
2696 	spec.cq.parent_eq = eq->eq;
2697 	spec.cq.context = cq;
2698 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2699 	if (err)
2700 		goto out;
2701 
2702 	memset(&wq_spec, 0, sizeof(wq_spec));
2703 	memset(&cq_spec, 0, sizeof(cq_spec));
2704 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2705 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2706 
2707 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2708 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2709 	cq_spec.modr_ctx_id = 0;
2710 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2711 
2712 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2713 				 &wq_spec, &cq_spec, &rxq->rxobj);
2714 	if (err)
2715 		goto out;
2716 
2717 	rxq->gdma_rq->id = wq_spec.queue_index;
2718 	cq->gdma_cq->id = cq_spec.queue_index;
2719 
2720 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2721 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2722 
2723 	rxq->gdma_id = rxq->gdma_rq->id;
2724 	cq->gdma_id = cq->gdma_cq->id;
2725 
2726 	err = mana_push_wqe(rxq);
2727 	if (err)
2728 		goto out;
2729 
2730 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2731 		err = -EINVAL;
2732 		goto out;
2733 	}
2734 
2735 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2736 
2737 	netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
2738 
2739 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2740 				 cq->napi.napi_id));
2741 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2742 					   rxq->page_pool));
2743 
2744 	napi_enable_locked(&cq->napi);
2745 
2746 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2747 out:
2748 	if (!err)
2749 		return rxq;
2750 
2751 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2752 
2753 	mana_destroy_rxq(apc, rxq, false);
2754 
2755 	if (cq)
2756 		mana_deinit_cq(apc, cq);
2757 
2758 	return NULL;
2759 }
2760 
2761 static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
2762 {
2763 	struct mana_rxq *rxq;
2764 	char qnum[32];
2765 
2766 	rxq = apc->rxqs[idx];
2767 
2768 	sprintf(qnum, "RX-%d", idx);
2769 	rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2770 	debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
2771 	debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
2772 	debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
2773 	debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
2774 			   &rxq->rx_cq.gdma_cq->head);
2775 	debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
2776 			   &rxq->rx_cq.gdma_cq->tail);
2777 	debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
2778 	debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
2779 	debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
2780 			    &mana_dbg_q_fops);
2781 }
2782 
2783 static int mana_add_rx_queues(struct mana_port_context *apc,
2784 			      struct net_device *ndev)
2785 {
2786 	struct mana_context *ac = apc->ac;
2787 	struct mana_rxq *rxq;
2788 	int err = 0;
2789 	int i;
2790 
2791 	for (i = 0; i < apc->num_queues; i++) {
2792 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2793 		if (!rxq) {
2794 			err = -ENOMEM;
2795 			netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
2796 			goto out;
2797 		}
2798 
2799 		u64_stats_init(&rxq->stats.syncp);
2800 
2801 		apc->rxqs[i] = rxq;
2802 
2803 		mana_create_rxq_debugfs(apc, i);
2804 	}
2805 
2806 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2807 out:
2808 	return err;
2809 }
2810 
2811 static void mana_destroy_vport(struct mana_port_context *apc)
2812 {
2813 	struct gdma_dev *gd = apc->ac->gdma_dev;
2814 	struct mana_rxq *rxq;
2815 	u32 rxq_idx;
2816 
2817 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2818 		rxq = apc->rxqs[rxq_idx];
2819 		if (!rxq)
2820 			continue;
2821 
2822 		mana_destroy_rxq(apc, rxq, true);
2823 		apc->rxqs[rxq_idx] = NULL;
2824 	}
2825 
2826 	mana_destroy_txq(apc);
2827 	mana_uncfg_vport(apc);
2828 
2829 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
2830 		mana_pf_deregister_hw_vport(apc);
2831 }
2832 
2833 static int mana_create_vport(struct mana_port_context *apc,
2834 			     struct net_device *net)
2835 {
2836 	struct gdma_dev *gd = apc->ac->gdma_dev;
2837 	int err;
2838 
2839 	apc->default_rxobj = INVALID_MANA_HANDLE;
2840 
2841 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
2842 		err = mana_pf_register_hw_vport(apc);
2843 		if (err)
2844 			return err;
2845 	}
2846 
2847 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2848 	if (err)
2849 		return err;
2850 
2851 	return mana_create_txq(apc, net);
2852 }
2853 
2854 static int mana_rss_table_alloc(struct mana_port_context *apc)
2855 {
2856 	if (!apc->indir_table_sz) {
2857 		netdev_err(apc->ndev,
2858 			   "Indirection table size not set for vPort %d\n",
2859 			   apc->port_idx);
2860 		return -EINVAL;
2861 	}
2862 
2863 	apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
2864 	if (!apc->indir_table)
2865 		return -ENOMEM;
2866 
2867 	apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz);
2868 	if (!apc->rxobj_table) {
2869 		kfree(apc->indir_table);
2870 		return -ENOMEM;
2871 	}
2872 
2873 	return 0;
2874 }
2875 
2876 static void mana_rss_table_init(struct mana_port_context *apc)
2877 {
2878 	int i;
2879 
2880 	for (i = 0; i < apc->indir_table_sz; i++)
2881 		apc->indir_table[i] =
2882 			ethtool_rxfh_indir_default(i, apc->num_queues);
2883 }
2884 
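/* Push the RSS configuration to the HW: optionally refresh the rxobj table
 * from the indirection table, update the vPort steering, then fence the
 * RQs so completions for the old configuration are drained.
 */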
2885 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2886 		    bool update_hash, bool update_tab)
2887 {
2888 	u32 queue_idx;
2889 	int err;
2890 	int i;
2891 
2892 	if (update_tab) {
2893 		for (i = 0; i < apc->indir_table_sz; i++) {
2894 			queue_idx = apc->indir_table[i];
2895 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2896 		}
2897 	}
2898 
2899 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2900 	if (err)
2901 		return err;
2902 
2903 	mana_fence_rqs(apc);
2904 
2905 	return 0;
2906 }
2907 
2908 int mana_query_gf_stats(struct mana_context *ac)
2909 {
2910 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2911 	struct mana_query_gf_stat_resp resp = {};
2912 	struct mana_query_gf_stat_req req = {};
2913 	struct device *dev = gc->dev;
2914 	int err;
2915 
2916 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2917 			     sizeof(req), sizeof(resp));
2918 	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
2919 	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
2920 			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
2921 			STATISTICS_FLAGS_HC_RX_BYTES |
2922 			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
2923 			STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
2924 			STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
2925 			STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
2926 			STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
2927 			STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
2928 			STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
2929 			STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
2930 			STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
2931 			STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
2932 			STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
2933 			STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
2934 			STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
2935 			STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
2936 			STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
2937 			STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
2938 			STATISTICS_FLAGS_HC_TX_BYTES |
2939 			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2940 			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2941 			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2942 			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2943 			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2944 			STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
2945 			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
2946 
2947 	err = mana_send_request(ac, &req, sizeof(req), &resp,
2948 				sizeof(resp));
2949 	if (err) {
2950 		dev_err(dev, "Failed to query GF stats: %d\n", err);
2951 		return err;
2952 	}
2953 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2954 				   sizeof(resp));
2955 	if (err || resp.hdr.status) {
2956 		dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
2957 			resp.hdr.status);
2958 		return err;
2959 	}
2960 
2961 	ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
2962 	ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
2963 	ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
2964 	ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
2965 	ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
2966 	ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
2967 	ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
2968 	ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
2969 	ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
2970 	ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
2971 	ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
2972 	ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
2973 					     resp.tx_err_inval_vport_offset_pkt;
2974 	ac->hc_stats.hc_tx_err_vlan_enforcement =
2975 					     resp.tx_err_vlan_enforcement;
2976 	ac->hc_stats.hc_tx_err_eth_type_enforcement =
2977 					     resp.tx_err_ethtype_enforcement;
2978 	ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
2979 	ac->hc_stats.hc_tx_err_sqpdid_enforcement =
2980 					     resp.tx_err_SQPDID_enforcement;
2981 	ac->hc_stats.hc_tx_err_cqpdid_enforcement =
2982 					     resp.tx_err_CQPDID_enforcement;
2983 	ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
2984 	ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
2985 	ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
2986 	ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2987 	ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2988 	ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2989 	ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2990 	ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2991 	ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2992 	ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
2993 
2994 	return 0;
2995 }
2996 
2997 void mana_query_phy_stats(struct mana_port_context *apc)
2998 {
2999 	struct mana_query_phy_stat_resp resp = {};
3000 	struct mana_query_phy_stat_req req = {};
3001 	struct net_device *ndev = apc->ndev;
3002 	int err;
3003 
3004 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
3005 			     sizeof(req), sizeof(resp));
3006 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
3007 				sizeof(resp));
3008 	if (err)
3009 		return;
3010 
3011 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
3012 				   sizeof(resp));
3013 	if (err || resp.hdr.status) {
3014 		netdev_err(ndev,
3015 			   "Failed to query PHY stats: %d, resp:0x%x\n",
3016 				err, resp.hdr.status);
3017 		return;
3018 	}
3019 
3020 	/* Aggregate drop counters */
3021 	apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
3022 	apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
3023 
3024 	/* Per TC traffic Counters */
3025 	apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
3026 	apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
3027 	apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
3028 	apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
3029 	apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
3030 	apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
3031 	apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
3032 	apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
3033 	apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
3034 	apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
3035 	apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
3036 	apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
3037 	apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
3038 	apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
3039 	apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
3040 	apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
3041 
3042 	/* Per TC byte Counters */
3043 	apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
3044 	apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
3045 	apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
3046 	apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
3047 	apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
3048 	apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
3049 	apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
3050 	apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
3051 	apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
3052 	apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
3053 	apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
3054 	apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
3055 	apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
3056 	apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
3057 	apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
3058 	apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
3059 
3060 	/* Per TC pause Counters */
3061 	apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
3062 	apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
3063 	apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
3064 	apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
3065 	apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
3066 	apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
3067 	apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
3068 	apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
3069 	apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
3070 	apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
3071 	apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
3072 	apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
3073 	apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
3074 	apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
3075 	apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
3076 	apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
3077 }
3078 
3079 static int mana_init_port(struct net_device *ndev)
3080 {
3081 	struct mana_port_context *apc = netdev_priv(ndev);
3082 	struct gdma_dev *gd = apc->ac->gdma_dev;
3083 	u32 max_txq, max_rxq, max_queues;
3084 	int port_idx = apc->port_idx;
3085 	struct gdma_context *gc;
3086 	char vport[32];
3087 	int err;
3088 
3089 	err = mana_init_port_context(apc);
3090 	if (err)
3091 		return err;
3092 
3093 	gc = gd->gdma_context;
3094 
3095 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
3096 				   &apc->indir_table_sz);
3097 	if (err) {
3098 		netdev_err(ndev, "Failed to query info for vPort %d\n",
3099 			   port_idx);
3100 		goto reset_apc;
3101 	}
3102 
3103 	max_queues = min_t(u32, max_txq, max_rxq);
3104 	if (apc->max_queues > max_queues)
3105 		apc->max_queues = max_queues;
3106 
3107 	if (apc->num_queues > apc->max_queues)
3108 		apc->num_queues = apc->max_queues;
3109 
3110 	eth_hw_addr_set(ndev, apc->mac_addr);
3111 	sprintf(vport, "vport%d", port_idx);
3112 	apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
3113 	return 0;
3114 
3115 reset_apc:
3116 	mana_cleanup_port_context(apc);
3117 	return err;
3118 }
3119 
3120 int mana_alloc_queues(struct net_device *ndev)
3121 {
3122 	struct mana_port_context *apc = netdev_priv(ndev);
3123 	struct gdma_dev *gd = apc->ac->gdma_dev;
3124 	int err;
3125 
3126 	err = mana_create_vport(apc, ndev);
3127 	if (err) {
3128 		netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
3129 		return err;
3130 	}
3131 
3132 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
3133 	if (err) {
3134 		netdev_err(ndev,
3135 			   "netif_set_real_num_tx_queues() failed for ndev with num_queues %u : %d\n",
3136 			   apc->num_queues, err);
3137 		goto destroy_vport;
3138 	}
3139 
3140 	err = mana_add_rx_queues(apc, ndev);
3141 	if (err)
3142 		goto destroy_vport;
3143 
3144 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
3145 
3146 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
3147 	if (err) {
3148 		netdev_err(ndev,
3149 			   "netif_set_real_num_rx_queues() failed for ndev with num_queues %u : %d\n",
3150 			   apc->num_queues, err);
3151 		goto destroy_vport;
3152 	}
3153 
3154 	mana_rss_table_init(apc);
3155 
3156 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
3157 	if (err) {
3158 		netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
3159 		goto destroy_vport;
3160 	}
3161 
3162 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
3163 		err = mana_pf_register_filter(apc);
3164 		if (err)
3165 			goto destroy_vport;
3166 	}
3167 
3168 	mana_chn_setxdp(apc, mana_xdp_get(apc));
3169 
3170 	return 0;
3171 
3172 destroy_vport:
3173 	mana_destroy_vport(apc);
3174 	return err;
3175 }
3176 
3177 int mana_attach(struct net_device *ndev)
3178 {
3179 	struct mana_port_context *apc = netdev_priv(ndev);
3180 	int err;
3181 
3182 	ASSERT_RTNL();
3183 
3184 	err = mana_init_port(ndev);
3185 	if (err)
3186 		return err;
3187 
3188 	if (apc->port_st_save) {
3189 		err = mana_alloc_queues(ndev);
3190 		if (err) {
3191 			mana_cleanup_port_context(apc);
3192 			return err;
3193 		}
3194 	}
3195 
3196 	apc->port_is_up = apc->port_st_save;
3197 
3198 	/* Ensure port state updated before txq state */
3199 	smp_wmb();
3200 
3201 	netif_device_attach(ndev);
3202 
3203 	return 0;
3204 }
3205 
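/* Tear down the data path of a port that is already marked down: drain the
 * in-flight TX packets (with an FLR as the last resort), free the pending
 * skbs, disable RSS, and destroy the vPort.
 */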
3206 static int mana_dealloc_queues(struct net_device *ndev)
3207 {
3208 	struct mana_port_context *apc = netdev_priv(ndev);
3209 	unsigned long timeout = jiffies + 120 * HZ;
3210 	struct gdma_dev *gd = apc->ac->gdma_dev;
3211 	struct mana_txq *txq;
3212 	struct sk_buff *skb;
3213 	int i, err;
3214 	u32 tsleep;
3215 
3216 	if (apc->port_is_up)
3217 		return -EINVAL;
3218 
3219 	mana_chn_setxdp(apc, NULL);
3220 
3221 	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
3222 		mana_pf_deregister_filter(apc);
3223 
3224 	/* No packet can be transmitted now since apc->port_is_up is false.
3225 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
3226 	 * a txq because it may not see apc->port_is_up being cleared to false
3227 	 * in time, but it doesn't matter since mana_start_xmit() drops any
3228 	 * new packets due to apc->port_is_up being false.
3229 	 *
3230 	 * Drain all the in-flight TX packets.
3231 	 * A total timeout of 120 seconds across all the queues is used;
3232 	 * it breaks the while loop when the h/w stops responding.
3233 	 * The value of 120 was chosen considering the maximum
3234 	 * number of queues.
3235 	 */
3236 
3237 	for (i = 0; i < apc->num_queues; i++) {
3238 		txq = &apc->tx_qp[i].txq;
3239 		tsleep = 1000;
3240 		while (atomic_read(&txq->pending_sends) > 0 &&
3241 		       time_before(jiffies, timeout)) {
3242 			usleep_range(tsleep, tsleep + 1000);
3243 			tsleep <<= 1;
3244 		}
3245 		if (atomic_read(&txq->pending_sends)) {
3246 			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
3247 			if (err) {
3248 				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
3249 					   err, atomic_read(&txq->pending_sends),
3250 					   txq->gdma_txq_id);
3251 			}
3252 			break;
3253 		}
3254 	}
3255 
3256 	for (i = 0; i < apc->num_queues; i++) {
3257 		txq = &apc->tx_qp[i].txq;
3258 		while ((skb = skb_dequeue(&txq->pending_skbs))) {
3259 			mana_unmap_skb(skb, apc);
3260 			dev_kfree_skb_any(skb);
3261 		}
3262 		atomic_set(&txq->pending_sends, 0);
3263 	}
3264 	/* At this point the queues can no longer be woken up, because
3265 	 * mana_poll_tx_cq() is guaranteed not to be running anymore.
3266 	 */
3267 
3268 	apc->rss_state = TRI_STATE_FALSE;
3269 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
3270 	if (err && mana_en_need_log(apc, err))
3271 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
3272 
3273 	/* Even in the error case, we still need to clean up the vPort */
3274 	mana_destroy_vport(apc);
3275 
3276 	return 0;
3277 }
3278 
3279 int mana_detach(struct net_device *ndev, bool from_close)
3280 {
3281 	struct mana_port_context *apc = netdev_priv(ndev);
3282 	int err;
3283 
3284 	ASSERT_RTNL();
3285 
3286 	apc->port_st_save = apc->port_is_up;
3287 	apc->port_is_up = false;
3288 
3289 	/* Ensure port state updated before txq state */
3290 	smp_wmb();
3291 
3292 	netif_tx_disable(ndev);
3293 
3294 	if (apc->port_st_save) {
3295 		err = mana_dealloc_queues(ndev);
3296 		if (err) {
3297 			netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
3298 			return err;
3299 		}
3300 	}
3301 
3302 	if (!from_close) {
3303 		netif_device_detach(ndev);
3304 		mana_cleanup_port_context(apc);
3305 	}
3306 
3307 	return 0;
3308 }
3309 
3310 static int mana_probe_port(struct mana_context *ac, int port_idx,
3311 			   struct net_device **ndev_storage)
3312 {
3313 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
3314 	struct mana_port_context *apc;
3315 	struct net_device *ndev;
3316 	int err;
3317 
3318 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
3319 				 gc->max_num_queues);
3320 	if (!ndev)
3321 		return -ENOMEM;
3322 
3323 	*ndev_storage = ndev;
3324 
3325 	apc = netdev_priv(ndev);
3326 	apc->ac = ac;
3327 	apc->ndev = ndev;
3328 	apc->max_queues = gc->max_num_queues;
3329 	apc->num_queues = gc->max_num_queues;
3330 	apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
3331 	apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
3332 	apc->port_handle = INVALID_MANA_HANDLE;
3333 	apc->pf_filter_handle = INVALID_MANA_HANDLE;
3334 	apc->port_idx = port_idx;
3335 
3336 	mutex_init(&apc->vport_mutex);
3337 	apc->vport_use_count = 0;
3338 
3339 	ndev->netdev_ops = &mana_devops;
3340 	ndev->ethtool_ops = &mana_ethtool_ops;
3341 	ndev->mtu = ETH_DATA_LEN;
3342 	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
3343 	ndev->min_mtu = ETH_MIN_MTU;
3344 	ndev->needed_headroom = MANA_HEADROOM;
3345 	ndev->dev_port = port_idx;
3346 	/* Recommended timeout based on HW FPGA re-config scenario. */
3347 	ndev->watchdog_timeo = 15 * HZ;
3348 	SET_NETDEV_DEV(ndev, gc->dev);
3349 
3350 	netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
3351 
3352 	netif_carrier_off(ndev);
3353 
3354 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
3355 
3356 	err = mana_init_port(ndev);
3357 	if (err)
3358 		goto free_net;
3359 
3360 	err = mana_rss_table_alloc(apc);
3361 	if (err)
3362 		goto reset_apc;
3363 
3364 	/* Initialize the per port queue reset work.*/
3365 	/* Initialize the per-port queue reset work. */
3366 		  mana_per_port_queue_reset_work_handler);
3367 
3368 	netdev_lockdep_set_classes(ndev);
3369 
3370 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3371 	ndev->hw_features |= NETIF_F_RXCSUM;
3372 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3373 	ndev->hw_features |= NETIF_F_RXHASH;
3374 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
3375 			 NETIF_F_HW_VLAN_CTAG_RX;
3376 	ndev->vlan_features = ndev->features;
3377 	xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
3378 			      NETDEV_XDP_ACT_REDIRECT |
3379 			      NETDEV_XDP_ACT_NDO_XMIT);
3380 
3381 	err = register_netdev(ndev);
3382 	if (err) {
3383 		netdev_err(ndev, "Unable to register netdev.\n");
3384 		goto free_indir;
3385 	}
3386 
3387 	netif_carrier_on(ndev);
3388 
3389 	debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
3390 
3391 	return 0;
3392 
3393 free_indir:
3394 	mana_cleanup_indir_table(apc);
3395 reset_apc:
3396 	mana_cleanup_port_context(apc);
3397 free_net:
3398 	*ndev_storage = NULL;
3399 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
3400 	free_netdev(ndev);
3401 	return err;
3402 }
3403 
3404 static void adev_release(struct device *dev)
3405 {
3406 	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
3407 
3408 	kfree(madev);
3409 }
3410 
3411 static void remove_adev(struct gdma_dev *gd)
3412 {
3413 	struct auxiliary_device *adev = gd->adev;
3414 	int id = adev->id;
3415 
3416 	auxiliary_device_delete(adev);
3417 	auxiliary_device_uninit(adev);
3418 
3419 	mana_adev_idx_free(id);
3420 	gd->adev = NULL;
3421 }
3422 
3423 static int add_adev(struct gdma_dev *gd, const char *name)
3424 {
3425 	struct auxiliary_device *adev;
3426 	struct mana_adev *madev;
3427 	int ret;
3428 
3429 	madev = kzalloc_obj(*madev);
3430 	if (!madev)
3431 		return -ENOMEM;
3432 
3433 	adev = &madev->adev;
3434 	ret = mana_adev_idx_alloc();
3435 	if (ret < 0)
3436 		goto idx_fail;
3437 	adev->id = ret;
3438 
3439 	adev->name = name;
3440 	adev->dev.parent = gd->gdma_context->dev;
3441 	adev->dev.release = adev_release;
3442 	madev->mdev = gd;
3443 
3444 	ret = auxiliary_device_init(adev);
3445 	if (ret)
3446 		goto init_fail;
3447 
3448 	/* madev is owned by the auxiliary device */
3449 	madev = NULL;
3450 	ret = auxiliary_device_add(adev);
3451 	if (ret)
3452 		goto add_fail;
3453 
3454 	gd->adev = adev;
3455 	dev_dbg(gd->gdma_context->dev,
3456 		"Auxiliary device added successfully\n");
3457 	return 0;
3458 
3459 add_fail:
3460 	auxiliary_device_uninit(adev);
3461 
3462 init_fail:
3463 	mana_adev_idx_free(adev->id);
3464 
3465 idx_fail:
3466 	kfree(madev);
3467 
3468 	return ret;
3469 }
3470 
3471 static void mana_rdma_service_handle(struct work_struct *work)
3472 {
3473 	struct mana_service_work *serv_work =
3474 		container_of(work, struct mana_service_work, work);
3475 	struct gdma_dev *gd = serv_work->gdma_dev;
3476 	struct device *dev = gd->gdma_context->dev;
3477 	int ret;
3478 
3479 	if (READ_ONCE(gd->rdma_teardown))
3480 		goto out;
3481 
3482 	switch (serv_work->event) {
3483 	case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
3484 		if (!gd->adev || gd->is_suspended)
3485 			break;
3486 
3487 		remove_adev(gd);
3488 		gd->is_suspended = true;
3489 		break;
3490 
3491 	case GDMA_SERVICE_TYPE_RDMA_RESUME:
3492 		if (!gd->is_suspended)
3493 			break;
3494 
3495 		ret = add_adev(gd, "rdma");
3496 		if (ret)
3497 			dev_err(dev, "Failed to add adev on resume: %d\n", ret);
3498 		else
3499 			gd->is_suspended = false;
3500 		break;
3501 
3502 	default:
3503 		dev_warn(dev, "unknown adev service event %u\n",
3504 			 serv_work->event);
3505 		break;
3506 	}
3507 
3508 out:
3509 	kfree(serv_work);
3510 }
3511 
3512 int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
3513 {
3514 	struct gdma_dev *gd = &gc->mana_ib;
3515 	struct mana_service_work *serv_work;
3516 
3517 	if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3518 		/* RDMA device is not detected on pci */
3519 		return 0;
3520 	}
3521 
3522 	serv_work = kzalloc_obj(*serv_work, GFP_ATOMIC);
3523 	if (!serv_work)
3524 		return -ENOMEM;
3525 
3526 	serv_work->event = event;
3527 	serv_work->gdma_dev = gd;
3528 
3529 	INIT_WORK(&serv_work->work, mana_rdma_service_handle);
3530 	queue_work(gc->service_wq, &serv_work->work);
3531 
3532 	return 0;
3533 }
3534 
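/* mana_rdma_service_event() may be invoked from the HWC event-handling path,
 * hence the GFP_ATOMIC allocation and the deferral to gc->service_wq:
 * add_adev() and remove_adev() both sleep, so they cannot run in the
 * caller's context.  Each event carries its own mana_service_work, freed by
 * the handler once consumed.  A caller does roughly:
 *
 *	mana_rdma_service_event(gc, GDMA_SERVICE_TYPE_RDMA_SUSPEND);
 */
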
3535 #define MANA_GF_STATS_PERIOD (2 * HZ)
3536 
3537 static void mana_gf_stats_work_handler(struct work_struct *work)
3538 {
3539 	struct mana_context *ac =
3540 		container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
3541 	int err;
3542 
3543 	err = mana_query_gf_stats(ac);
3544 	if (err == -ETIMEDOUT) {
3545 		/* HWC timeout detected - reset stats and stop rescheduling */
3546 		ac->hwc_timeout_occurred = true;
3547 		memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
3548 		return;
3549 	}
3550 	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
3551 }
3552 
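/* Self-rearming delayed work: each run of the handler above schedules the
 * next one unless an HWC timeout was seen, so a wedged hardware channel
 * stops the polling rather than queueing doomed requests every
 * MANA_GF_STATS_PERIOD.  mana_probe() arms the first run:
 *
 *	INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
 *	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
 *
 * and mana_remove() stops it with cancel_delayed_work_sync().
 */
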
3553 int mana_probe(struct gdma_dev *gd, bool resuming)
3554 {
3555 	struct gdma_context *gc = gd->gdma_context;
3556 	struct mana_context *ac = gd->driver_data;
3557 	struct mana_port_context *apc = NULL;
3558 	struct device *dev = gc->dev;
3559 	u8 bm_hostmode = 0;
3560 	u16 num_ports = 0;
3561 	int err;
3562 	int i;
3563 
3564 	dev_info(dev,
3565 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
3566 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
3567 
3568 	err = mana_gd_register_device(gd);
3569 	if (err)
3570 		return err;
3571 
3572 	if (!resuming) {
3573 		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
3574 		if (!ac)
3575 			return -ENOMEM;
3576 
3577 		ac->gdma_dev = gd;
3578 		gd->driver_data = ac;
3579 	}
3580 
3581 	err = mana_create_eq(ac);
3582 	if (err) {
3583 		dev_err(dev, "Failed to create EQs: %d\n", err);
3584 		goto out;
3585 	}
3586 
3587 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
3588 				    MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
3589 	if (err)
3590 		goto out;
3591 
3592 	ac->bm_hostmode = bm_hostmode;
3593 
3594 	if (!resuming) {
3595 		ac->num_ports = num_ports;
3596 
3597 		INIT_WORK(&ac->link_change_work, mana_link_state_handle);
3598 	} else {
3599 		if (ac->num_ports != num_ports) {
3600 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
3601 				ac->num_ports, num_ports);
3602 			err = -EPROTO;
3603 			goto out;
3604 		}
3605 
3606 		enable_work(&ac->link_change_work);
3607 	}
3608 
3609 	if (ac->num_ports == 0)
3610 		dev_err(dev, "Failed to detect any vPort\n");
3611 
3612 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
3613 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
3614 
3615 	ac->per_port_queue_reset_wq =
3616 		create_singlethread_workqueue("mana_per_port_queue_reset_wq");
3617 	if (!ac->per_port_queue_reset_wq) {
3618 		dev_err(dev, "Failed to allocate per port queue reset workqueue\n");
3619 		err = -ENOMEM;
3620 		goto out;
3621 	}
3622 
3623 	if (!resuming) {
3624 		for (i = 0; i < ac->num_ports; i++) {
3625 			err = mana_probe_port(ac, i, &ac->ports[i]);
3626 			/* Log the port for which the probe failed and stop
3627 			 * probing subsequent ports.  Ports that probed
3628 			 * successfully keep running, unless add_adev() fails
3629 			 * too.
3630 			 */
3631 			if (err) {
3632 				dev_err(dev, "Probe failed for port %d\n", i);
3633 				break;
3634 			}
3635 		}
3636 	} else {
3637 		for (i = 0; i < ac->num_ports; i++) {
3638 			rtnl_lock();
3639 			apc = netdev_priv(ac->ports[i]);
3640 			enable_work(&apc->queue_reset_work);
3641 			err = mana_attach(ac->ports[i]);
3642 			rtnl_unlock();
3643 			/* Log the port for which the attach failed and stop
3644 			 * attaching subsequent ports.  Ports that attached
3645 			 * successfully keep running, unless add_adev() fails
3646 			 * too.
3647 			 */
3648 			if (err) {
3649 				dev_err(dev, "Attach failed for port %d\n", i);
3650 				break;
3651 			}
3652 		}
3653 	}
3654 
3655 	err = add_adev(gd, "eth");
3656 
3657 	INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
3658 	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
3659 
3660 out:
3661 	if (err) {
3662 		mana_remove(gd, false);
3663 	} else {
3664 		dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
3665 			gd, gd->dev_id.as_uint32, ac->num_ports,
3666 			gd->dev_id.type, gd->dev_id.instance);
3667 		dev_dbg(dev, "%s succeeded\n", __func__);
3668 	}
3669 
3670 	return err;
3671 }
3672 
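/* Probe vs. resume in mana_probe() above: on first probe the per-context
 * works are set up with INIT_WORK() and the netdevs are created via
 * mana_probe_port(); on resume the surviving works are re-armed with
 * enable_work(), pairing with the disable_work_sync() calls in
 * mana_remove(), and the existing netdevs are re-attached under rtnl_lock()
 * via mana_attach().
 */
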
3673 void mana_remove(struct gdma_dev *gd, bool suspending)
3674 {
3675 	struct gdma_context *gc = gd->gdma_context;
3676 	struct mana_context *ac = gd->driver_data;
3677 	struct mana_port_context *apc;
3678 	struct device *dev = gc->dev;
3679 	struct net_device *ndev;
3680 	int err;
3681 	int i;
3682 
3683 	disable_work_sync(&ac->link_change_work);
3684 	cancel_delayed_work_sync(&ac->gf_stats_work);
3685 
3686 	/* adev currently doesn't support suspending, always remove it */
3687 	if (gd->adev)
3688 		remove_adev(gd);
3689 
3690 	for (i = 0; i < ac->num_ports; i++) {
3691 		ndev = ac->ports[i];
3692 		if (!ndev) {
3693 			if (i == 0)
3694 				dev_err(dev, "No net device to remove\n");
3695 			goto out;
3696 		}
3697 
3698 		apc = netdev_priv(ndev);
3699 		disable_work_sync(&apc->queue_reset_work);
3700 
3701 		/* All cleanup actions must run under rtnl_lock(); otherwise
3702 		 * other functions may observe partially cleaned-up data.
3703 		 */
3704 		rtnl_lock();
3705 
3706 		err = mana_detach(ndev, false);
3707 		if (err)
3708 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
3709 				   i, err);
3710 
3711 		if (suspending) {
3712 			/* No need to unregister the ndev. */
3713 			rtnl_unlock();
3714 			continue;
3715 		}
3716 
3717 		unregister_netdevice(ndev);
3718 		mana_cleanup_indir_table(apc);
3719 
3720 		rtnl_unlock();
3721 
3722 		free_netdev(ndev);
3723 	}
3724 
3725 	mana_destroy_eq(ac);
3726 out:
3727 	if (ac->per_port_queue_reset_wq) {
3728 		destroy_workqueue(ac->per_port_queue_reset_wq);
3729 		ac->per_port_queue_reset_wq = NULL;
3730 	}
3731 
3732 	mana_gd_deregister_device(gd);
3733 
3734 	if (suspending)
3735 		return;
3736 
3737 	gd->driver_data = NULL;
3738 	gd->gdma_context = NULL;
3739 	kfree(ac);
3740 	dev_dbg(dev, "%s succeeded\n", __func__);
3741 }
3742 
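/* Teardown order in mana_remove() above matters: the works are disabled
 * before remove_adev() so no stats or service work can observe a
 * half-torn-down device, and unregister_netdevice() plus
 * mana_cleanup_indir_table() stay inside the same rtnl_lock() section as
 * mana_detach() so readers never see a partially cleaned-up port.  When
 * suspending, the netdevs and the mana_context survive for a later
 * mana_probe(gd, true) resume pass.
 */
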
3743 int mana_rdma_probe(struct gdma_dev *gd)
3744 {
3745 	int err = 0;
3746 
3747 	if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3748 		/* No RDMA device was detected on this PCI device */
3749 		return err;
3750 	}
3751 
3752 	err = mana_gd_register_device(gd);
3753 	if (err)
3754 		return err;
3755 
3756 	err = add_adev(gd, "rdma");
3757 	if (err)
3758 		mana_gd_deregister_device(gd);
3759 
3760 	return err;
3761 }
3762 
3763 void mana_rdma_remove(struct gdma_dev *gd)
3764 {
3765 	struct gdma_context *gc = gd->gdma_context;
3766 
3767 	if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3768 		/* No RDMA device was detected on this PCI device */
3769 		return;
3770 	}
3771 
3772 	WRITE_ONCE(gd->rdma_teardown, true);
3773 
3774 	if (gc->service_wq)
3775 		flush_workqueue(gc->service_wq);
3776 
3777 	if (gd->adev)
3778 		remove_adev(gd);
3779 
3780 	mana_gd_deregister_device(gd);
3781 }
3782 
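/* The teardown handshake with mana_rdma_service_handle(): rdma_teardown is
 * set before the service workqueue is flushed, so any queued suspend/resume
 * work observes the flag (via READ_ONCE()) and bails out before it can
 * recreate gd->adev behind this remove.
 */
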
3783 struct net_device *mana_get_primary_netdev(struct mana_context *ac,
3784 					   u32 port_index,
3785 					   netdevice_tracker *tracker)
3786 {
3787 	struct net_device *ndev;
3788 
3789 	if (port_index >= ac->num_ports)
3790 		return NULL;
3791 
3792 	rcu_read_lock();
3793 
3794 	/* If mana is enslaved by a netvsc device, return the upper netdevice. */
3795 	ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
3796 
3797 	/* If there is no upper device, use the parent Ethernet device */
3798 	if (!ndev)
3799 		ndev = ac->ports[port_index];
3800 
3801 	netdev_hold(ndev, tracker, GFP_ATOMIC);
3802 	rcu_read_unlock();
3803 
3804 	return ndev;
3805 }
3806 EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
3807
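/* The netdev returned above carries a tracked reference taken with
 * netdev_hold(); callers must drop it with netdev_put().  A caller sketch
 * (hypothetical, not part of this file):
 *
 *	netdevice_tracker tracker;
 *	struct net_device *ndev;
 *
 *	ndev = mana_get_primary_netdev(ac, 0, &tracker);
 *	if (ndev) {
 *		... use ndev ...
 *		netdev_put(ndev, &tracker);
 *	}
 */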