xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c (revision a23e1966932464e1c5226cb9ac4ce1d5fc10ba22)
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include "otx2_common.h"

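/* PFC priorities are serviced by the send queue of the same index, so a
 * priority can only be enabled if a TX queue exists for it.
 */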
static int otx2_check_pfc_config(struct otx2_nic *pfvf)
{
	u8 tx_queues = pfvf->hw.tx_queues, prio;
	u8 pfc_en = pfvf->pfc_en;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		if ((pfc_en & (1 << prio)) &&
		    prio > tx_queues - 1) {
			dev_warn(pfvf->dev,
				 "Increase number of tx queues from %d to %d to support PFC.\n",
				 tx_queues, prio + 1);
			return -EINVAL;
		}
	}

	return 0;
}

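/* Program the TL schedulers of every priority that has PFC enabled and a
 * scheduler tree already allocated.
 */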
int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, lvl, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* Either PFC bit is not set
		 * or tx scheduler is not allocated for the priority
		 */
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Configure the scheduler for the TLs */
		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			err = otx2_txschq_config(pfvf, lvl, prio, true);
			if (err) {
				dev_err(pfvf->dev,
					"%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
					__func__, lvl, prio);
				return err;
			}
		}
	}

	return 0;
}

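/* Allocate a dedicated transmit scheduler (one schq per level up to the
 * link config level) for the given priority and record it in
 * pfc_schq_list.
 */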
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int lvl, rc;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level up to the configured link config
	 * level. The remaining levels can reuse the schedulers in
	 * hw.txschq_list.
	 */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
		req->schq[lvl] = 1;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		return rc;

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
		if (!rsp->schq[lvl])
			return -ENOSPC;

		pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
	}

	/* Set the Tx schedulers for the remaining levels to the ones in
	 * hw.txschq_list as those are common to all priorities.
	 */
	for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];

	pfvf->pfc_alloc_status[prio] = true;
	return 0;
}

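/* Allocate transmit schedulers for every PFC-enabled priority that does
 * not have one yet.
 */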
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
{
	u8 pfc_en = pfvf->pfc_en;
	u8 pfc_bit_set;
	int err, prio;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}

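/* Free the TLx nodes of this priority and mark it as unallocated. */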
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
	int lvl;

	/* Free the PFC TLx nodes */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
		otx2_txschq_free_one(pfvf, lvl,
				     pfvf->pfc_schq_list[lvl][prio]);

	pfvf->pfc_alloc_status[prio] = false;
	return 0;
}

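/* Re-point the SQ of this priority at its SMQ by rewriting the SQ context
 * through the admin queue, pausing transmission while the update is sent.
 */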
static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
{
	struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
	struct net_device *dev = pfvf->netdev;
	bool if_up = netif_running(dev);
	struct nix_aq_enq_req *sq_aq;

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_stop_all_queues(pfvf->netdev);
		else
			netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
	}

	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		cn10k_sq_aq->qidx = prio;
		cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		cn10k_sq_aq->sq.ena = 1;
		cn10k_sq_aq->sq_mask.ena = 1;
		cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
		cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	} else {
		sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		sq_aq->qidx = prio;
		sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		sq_aq->sq.ena = 1;
		sq_aq->sq_mask.ena = 1;
		sq_aq->sq_mask.smq = GENMASK(8, 0);
		sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	}

	otx2_sync_mbox_msg(&pfvf->mbox);

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_start_all_queues(pfvf->netdev);
		else
			netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
	}

	return 0;
}

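/* Reconcile the scheduler state with the current pfc_en bitmap: free the
 * schedulers of priorities that got disabled, allocate schedulers for newly
 * enabled ones and refresh the SQ to SMQ mappings.
 */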
int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
{
	bool if_up = netif_running(pfvf->netdev);
	u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
	struct mbox *mbox = &pfvf->mbox;
	int err, prio;

	mutex_lock(&mbox->lock);
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* tx scheduler was created but user wants to disable now */
		if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
			mutex_unlock(&mbox->lock);
			if (if_up)
				netif_tx_stop_all_queues(pfvf->netdev);

			otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
			if (if_up)
				netif_tx_start_all_queues(pfvf->netdev);

			/* delete the schq */
			err = otx2_pfc_txschq_stop_one(pfvf, prio);
			if (err) {
				dev_err(pfvf->dev,
					"%s failed to stop PFC tx schedulers for priority: %d\n",
					__func__, prio);
				return err;
			}

			mutex_lock(&mbox->lock);
			goto update_sq_smq_map;
		}

		/* Either PFC bit is not set
		 * or Tx scheduler is already mapped for the priority
		 */
		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev,
				"%s failed to allocate PFC tx schedulers for priority: %d\n",
				__func__, prio);
			return err;
		}

update_sq_smq_map:
		err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping\n", __func__, prio);
			return err;
		}
	}

	err = otx2_pfc_txschq_config(pfvf);
	mutex_unlock(&mbox->lock);
	if (err)
		return err;

	return 0;
}

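/* Free the dedicated transmit schedulers of all PFC-enabled priorities. */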
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Delete the existing scheduler */
		err = otx2_pfc_txschq_stop_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}

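/* Send the current pfc_en bitmap to the MAC via the cgx_prio_flow_ctrl_cfg
 * mailbox message. Not applicable to LBK VFs.
 */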
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
	struct cgx_pfc_cfg *req;
	struct cgx_pfc_rsp *rsp;
	int err = 0;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	if (pfvf->pfc_en) {
		req->rx_pause = true;
		req->tx_pause = true;
	} else {
		req->rx_pause = false;
		req->tx_pause = false;
	}
	req->pfc_en = pfvf->pfc_en;

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pfc_rsp *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp)) {
			err = PTR_ERR(rsp);
			goto unlock;
		}

		if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
			dev_warn(pfvf->dev,
				 "Failed to config PFC\n");
			err = -EPERM;
		}
	}
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

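/* Map receive queue 'qidx' to the backpressure ID of 'vlan_prio' by
 * updating both its CQ context and its Aura context.
 */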
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
			       bool pfc_enable)
{
	bool if_up = netif_running(pfvf->netdev);
	struct npa_aq_enq_req *npa_aq;
	struct nix_aq_enq_req *aq;
	int err = 0;

	if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
		dev_warn(pfvf->dev,
			 "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
			 pfvf->queue_to_pfc_map[qidx], qidx);
		return;
	}

	if (if_up) {
		netif_tx_stop_all_queues(pfvf->netdev);
		netif_carrier_off(pfvf->netdev);
	}

	pfvf->queue_to_pfc_map[qidx] = vlan_prio;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq) {
		err = -ENOMEM;
		goto out;
	}

	aq->cq.bpid = pfvf->bpid[vlan_prio];
	aq->cq_mask.bpid = GENMASK(8, 0);

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	otx2_sync_mbox_msg(&pfvf->mbox);

	npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!npa_aq) {
		err = -ENOMEM;
		goto out;
	}
	npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
	npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);

	/* Fill NPA AQ info */
	npa_aq->aura_id = qidx;
	npa_aq->ctype = NPA_AQ_CTYPE_AURA;
	npa_aq->op = NPA_AQ_INSTOP_WRITE;
	otx2_sync_mbox_msg(&pfvf->mbox);

out:
	if (if_up) {
		netif_carrier_on(pfvf->netdev);
		netif_tx_start_all_queues(pfvf->netdev);
	}

	if (err)
		dev_warn(pfvf->dev,
			 "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
			 qidx, err);
}

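/* dcbnl ieee_getpfc: report the PFC capability and the currently enabled
 * priorities.
 */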
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
	pfc->pfc_en = pfvf->pfc_en;

	return 0;
}

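/* dcbnl ieee_setpfc: validate and apply a new PFC enable bitmap, restoring
 * the previous configuration if any step fails.
 */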
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	u8 old_pfc_en;
	int err;

	old_pfc_en = pfvf->pfc_en;
	pfvf->pfc_en = pfc->pfc_en;

	if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
		goto process_pfc;

	/* Check if the PFC configuration can be
	 * supported by the tx queue configuration
	 */
	err = otx2_check_pfc_config(pfvf);
	if (err) {
		pfvf->pfc_en = old_pfc_en;
		return err;
	}

process_pfc:
	err = otx2_config_priority_flow_ctrl(pfvf);
	if (err) {
		pfvf->pfc_en = old_pfc_en;
		return err;
	}

	/* Request per channel BPIDs */
	if (pfc->pfc_en)
		otx2_nix_config_bp(pfvf, true);

	err = otx2_pfc_txschq_update(pfvf);
	if (err) {
		if (pfc->pfc_en)
			otx2_nix_config_bp(pfvf, false);

		otx2_pfc_txschq_stop(pfvf);
		pfvf->pfc_en = old_pfc_en;
		otx2_config_priority_flow_ctrl(pfvf);
		dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
		return err;
	}

	return 0;
}

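/* Only host-managed IEEE DCBX is supported. */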
static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}

static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
{
	return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}

static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
	.ieee_getpfc	= otx2_dcbnl_ieee_getpfc,
	.ieee_setpfc	= otx2_dcbnl_ieee_setpfc,
	.getdcbx	= otx2_dcbnl_getdcbx,
	.setdcbx	= otx2_dcbnl_setdcbx,
};

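/* Allocate the per-queue priority map and register the DCB ops. */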
int otx2_dcbnl_set_ops(struct net_device *dev)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
					      GFP_KERNEL);
	if (!pfvf->queue_to_pfc_map)
		return -ENOMEM;
	dev->dcbnl_ops = &otx2_dcbnl_ops;

	return 0;
}
475