xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Physical Function ethernet driver
 *
 * Copyright (C) 2023 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <net/tso.h>

#include "cn10k.h"
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"

#define OTX2_QOS_MAX_LEAF_NODES 16

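/* Release the software state behind an SQB aura/pool: free the pool's
 * stack and flow-control (fc_addr) qmem regions for the given pool index,
 * if the pool array exists.
 */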
static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)
{
	struct otx2_pool *pool;

	if (!pfvf->qset.pool)
		return;

	pool = &pfvf->qset.pool[pool_id];
	qmem_free(pfvf->dev, pool->stack);
	qmem_free(pfvf->dev, pool->fc_addr);
	pool->stack = NULL;
	pool->fc_addr = NULL;
}

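/* Set up the NPA aura and pool that back the send queue buffers (SQBs) of
 * one QoS SQ: size them, initialize both contexts over the mailbox, then
 * allocate the SQBs and hand their IOVAs to the aura.
 */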
static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	int pool_id, stack_pages, num_sqbs;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	dma_addr_t bufptr;
	int err, ptr;
	u64 iova, pa;

	/* Calculate the number of SQBs needed.
	 *
	 * For a 128-byte SQE and a 4K SQB, 31 SQEs fit in one SQB.
	 * The last SQE slot is used for pointing to the next SQB.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
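	/* Worked example (assuming the 4K SQB and 128-byte SQE noted above):
	 * 4096 / 128 - 1 = 31 usable SQEs per SQB, so e.g. sqe_cnt = 1024
	 * rounds up to (1024 + 31) / 31 = 34 SQBs.
	 */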

	/* Get the number of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
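	/* This is a ceiling division; for example, if hw->stack_pg_ptrs were
	 * 512 pointers per stack page, the 34 SQBs above would fit in a
	 * single stack page.
	 */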

	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
	pool = &pfvf->qset.pool[pool_id];

	/* Initialize aura context */
	err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
	if (err)
		return err;

	/* Initialize pool context */
	err = otx2_pool_init(pfvf, pool_id, stack_pages,
			     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
	if (err)
		goto aura_free;

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto pool_free;

	/* Allocate pointers and free them to aura/pool */
	sq = &qset->sq[qidx];
	sq->sqb_count = 0;
	sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
	if (!sq->sqb_ptrs) {
		err = -ENOMEM;
		goto pool_free;
	}

	for (ptr = 0; ptr < num_sqbs; ptr++) {
		err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
		if (err)
			goto sqb_free;
		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
		sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
	}

	return 0;

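	/* Error unwind: pull each SQB already handed to the aura back out,
	 * unmap and free its page, then release the pool stack and
	 * flow-control memory and reset the mailbox to drop the queued
	 * context messages.
	 */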
sqb_free:
	while (ptr--) {
		if (!sq->sqb_ptrs[ptr])
			continue;
		iova = sq->sqb_ptrs[ptr];
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
		put_page(virt_to_page(phys_to_virt(pa)));
		otx2_aura_allocptr(pfvf, pool_id);
	}
	sq->sqb_count = 0;
	kfree(sq->sqb_ptrs);
pool_free:
	qmem_free(pfvf->dev, pool->stack);
aura_free:
	qmem_free(pfvf->dev, pool->fc_addr);
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	return err;
}

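/* Release everything a QoS SQ owns: unmap and free every SQB handed to the
 * hardware, then free the SQE ring, TSO header area, scatter/gather list,
 * SQB pointer array and timestamp region, and clear the software queue
 * state.
 */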
static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	u64 iova, pa;
	int sqb;

	sq = &qset->sq[qidx];
	if (!sq->sqb_ptrs)
		return;
	for (sqb = 0; sqb < sq->sqb_count; sqb++) {
		if (!sq->sqb_ptrs[sqb])
			continue;
		iova = sq->sqb_ptrs[sqb];
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
		put_page(virt_to_page(phys_to_virt(pa)));
	}

	sq->sqb_count = 0;

	sq = &qset->sq[qidx];
	qmem_free(pfvf->dev, sq->sqe);
	qmem_free(pfvf->dev, sq->tso_hdrs);
	kfree(sq->sg);
	kfree(sq->sqb_ptrs);
	qmem_free(pfvf->dev, sq->timestamps);

	memset((void *)sq, 0, sizeof(*sq));
}

/* If the SQ still has SQEs in flight (head != tail in NIX_LF_SQ_OP_STATUS),
 * give the hardware a short window to drain them before teardown.
 */
static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
{
	int sqe_tail, sqe_head;
	u64 incr, *ptr, val;

	ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	incr = (u64)qidx << 32;
	val = otx2_atomic64_add(incr, ptr);
	sqe_head = (val >> 20) & 0x3F;
	sqe_tail = (val >> 28) & 0x3F;
	if (sqe_head != sqe_tail)
		usleep_range(50, 60);
}

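/* Queue mailbox requests that disable the hardware contexts behind one QoS
 * SQ: the NIX SQ context (via the CN10K or legacy AQ message, depending on
 * the silicon), plus the NPA aura and pool contexts, then issue them with a
 * single mailbox sync.
 */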
static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
{
	struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
	struct npa_aq_enq_req *aura_aq;
	struct npa_aq_enq_req *pool_aq;
	struct nix_aq_enq_req *sq_aq;

	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_sq_aq)
			return -ENOMEM;
		cn10k_sq_aq->qidx = qidx;
		cn10k_sq_aq->sq.ena = 0;
		cn10k_sq_aq->sq_mask.ena = 1;
		cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
	} else {
		sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!sq_aq)
			return -ENOMEM;
		sq_aq->qidx = qidx;
		sq_aq->sq.ena = 0;
		sq_aq->sq_mask.ena = 1;
		sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		sq_aq->op = NIX_AQ_INSTOP_WRITE;
	}

	aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aura_aq) {
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		return -ENOMEM;
	}

	aura_aq->aura_id = aura_id;
	aura_aq->aura.ena = 0;
	aura_aq->aura_mask.ena = 1;
	aura_aq->ctype = NPA_AQ_CTYPE_AURA;
	aura_aq->op = NPA_AQ_INSTOP_WRITE;

	pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!pool_aq) {
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		return -ENOMEM;
	}

	pool_aq->aura_id = aura_id;
	pool_aq->pool.ena = 0;
	pool_aq->pool_mask.ena = 1;

	pool_aq->ctype = NPA_AQ_CTYPE_POOL;
	pool_aq->op = NPA_AQ_INSTOP_WRITE;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

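/* Find the first free QoS send queue index in qos_sq_bmap, or return
 * -ENOSPC when all tc_tx_queues slots are in use. This helper only scans
 * the bitmap; it does not mark the returned index as busy.
 */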
int otx2_qos_get_qid(struct otx2_nic *pfvf)
{
	int qidx;

	qidx = find_first_zero_bit(pfvf->qos.qos_sq_bmap,
				   pfvf->hw.tc_tx_queues);

	return qidx == pfvf->hw.tc_tx_queues ? -ENOSPC : qidx;
}

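/* Return a QoS send queue index to the free pool by clearing its bit. */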
void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx)
{
	clear_bit(qidx, pfvf->qos.qos_sq_bmap);
}

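/* Bring up one QoS send queue: map the QoS index into the SQ space after
 * the regular (non-QoS) queues, build its aura/pool and SQBs, then
 * initialize the SQ context. Fails with -EPERM while the interface is down.
 */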
int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_hw *hw = &pfvf->hw;
	int pool_id, sq_idx, err;

	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
		return -EPERM;

	sq_idx = hw->non_qos_queues + qidx;

	mutex_lock(&pfvf->mbox.lock);
	err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx);
	if (err)
		goto out;

	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
	err = otx2_sq_init(pfvf, sq_idx, pool_id);
	if (err)
		goto out;
out:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

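/* Tear down one QoS send queue: give in-flight SQEs a brief chance to
 * drain, flush the SMQ, clean up completed TX CQEs, disable the
 * SQ/aura/pool contexts over the mailbox, and free the SQBs and aura/pool
 * software state. A no-op if the interface is already down, since the
 * queues were freed on that path.
 */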
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_cq_queue *cq;
	int pool_id, sq_idx;

	sq_idx = hw->non_qos_queues + qidx;

	/* If the DOWN flag is set, the SQs have already been freed */
	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
		return;

	sq = &pfvf->qset.sq[sq_idx];
	if (!sq->sqb_ptrs)
		return;

	if (sq_idx < hw->non_qos_queues ||
	    sq_idx >= otx2_get_total_tx_queues(pfvf)) {
		netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n");
		return;
	}

	cq = &qset->cq[pfvf->hw.rx_queues + sq_idx];
	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);

	otx2_qos_sqb_flush(pfvf, sq_idx);
	otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
	otx2_cleanup_tx_cqes(pfvf, cq);

	mutex_lock(&pfvf->mbox.lock);
	otx2_qos_ctx_disable(pfvf, sq_idx, pool_id);
	mutex_unlock(&pfvf->mbox.lock);

	otx2_qos_sq_free_sqbs(pfvf, sq_idx);
	otx2_qos_aura_pool_free(pfvf, pool_id);
}
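
/* Illustrative usage (not part of this file): a caller such as the HTB
 * offload code is expected to pair these helpers roughly as sketched
 * below. The surrounding locals and error handling are hypothetical.
 *
 *	int qidx, err;
 *
 *	qidx = otx2_qos_get_qid(pfvf);
 *	if (qidx < 0)
 *		return qidx;
 *	set_bit(qidx, pfvf->qos.qos_sq_bmap);
 *
 *	err = otx2_qos_enable_sq(pfvf, qidx);
 *	if (err) {
 *		otx2_qos_free_qid(pfvf, qidx);
 *		return err;
 *	}
 *
 *	...
 *
 *	otx2_qos_disable_sq(pfvf, qidx);
 *	otx2_qos_free_qid(pfvf, qidx);
 */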
297