// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2025 Cisco Systems, Inc.  All rights reserved.

#include <net/netdev_queues.h>
#include "enic_res.h"
#include "enic.h"
#include "enic_wq.h"

#define ENET_CQ_DESC_COMP_NDX_BITS 14
#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)

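/*
 * enic_wq_cq_desc_dec - decode a WQ completion queue descriptor
 *
 * The color bit is read first, behind an rmb(), since hardware writes
 * it last; only then are type, queue number and completed index
 * extracted.  Extended WQs (rings larger than ENIC_MAX_WQ_DESCS_DEFAULT)
 * report a wider, 14-bit completed index, selected via @ext_wq with the
 * ENET_CQ_DESC_COMP_NDX_MASK above.
 */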
static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
				u8 *type, u8 *color, u16 *q_number,
				u16 *completed_index)
{
	const struct cq_desc *desc = desc_arg;
	const u8 type_color = desc->type_color;

	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

	/*
	 * Make sure color bit is read from desc *before* other fields
	 * are read from desc.  Hardware guarantees color bit is last
	 * bit (byte) written.  Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */
	rmb();

	*type = type_color & CQ_DESC_TYPE_MASK;
	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;

	if (ext_wq)
		*completed_index = le16_to_cpu(desc->completed_index) &
			ENET_CQ_DESC_COMP_NDX_MASK;
	else
		*completed_index = le16_to_cpu(desc->completed_index) &
			CQ_DESC_COMP_NDX_MASK;
}

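/*
 * enic_free_wq_buf - unmap and free one completed TX buffer
 *
 * SOP (start-of-packet) buffers were mapped with dma_map_single()
 * (typically the linear skb head); the remaining buffers are page
 * fragments mapped with dma_map_page().  The attached skb, if any,
 * is freed along with its buffer.
 */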
void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
			       DMA_TO_DEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

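/*
 * Per-buffer callback for vnic_wq_service(): account the completion in
 * the per-WQ stats, then unmap and free the buffer.
 */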
static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
			     struct vnic_wq_buf *buf, void *opaque)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	enic->wq[wq->index].stats.cq_work++;
	enic->wq[wq->index].stats.cq_bytes += buf->len;
	enic_free_wq_buf(wq, buf);
}

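/*
 * enic_wq_service - reclaim descriptors of one WQ up to completed_index
 *
 * Runs under the per-WQ lock.  Once enough descriptors are free to hold
 * a worst-case packet (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS), a stopped
 * TX subqueue is woken back up.
 */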
static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			    u8 type, u16 q_number, u16 completed_index)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq[q_number].lock);

	vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
			completed_index, enic_wq_free_buf, NULL);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
	    && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
		netif_wake_subqueue(enic->netdev, q_number);
		enic->wq[q_number].stats.wake++;
	}

	spin_unlock(&enic->wq[q_number].lock);
}

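/*
 * enic_wq_cq_service - service up to @work_to_do TX completions
 *
 * Ownership is tracked by the color bit: hardware toggles the color it
 * writes on every pass around the ring, so a descriptor whose color
 * differs from cq->last_color is newly written and still unserviced.
 * Returns the number of completions processed.
 */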
unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
				unsigned int work_to_do)
{
	struct vnic_cq *cq = &enic->cq[cq_index];
	u16 q_number, completed_index;
	unsigned int work_done = 0;
	struct cq_desc *cq_desc;
	u8 type, color;
	bool ext_wq;

	ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;

	cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
	enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
			    &q_number, &completed_index);

	while (color != cq->last_color) {
		enic_wq_service(cq->vdev, cq_desc, type, q_number,
				completed_index);

		vnic_cq_inc_to_clean(cq);

		if (++work_done >= work_to_do)
			break;

		cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
		enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
				    &q_number, &completed_index);
	}

	return work_done;
}
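
/*
 * Usage sketch (assumed, not part of this file): a NAPI poll handler
 * is the expected caller, invoking enic_wq_cq_service() with its
 * budget and completing NAPI once fewer completions than the budget
 * were processed, e.g.:
 *
 *	wq_work_done = enic_wq_cq_service(enic, cq_index, budget);
 *	if (wq_work_done < budget)
 *		napi_complete_done(napi, wq_work_done);
 */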