// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include "otx2_common.h"

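/* Ensure every priority with PFC enabled maps to an existing TX queue;
 * the SQ index is derived from the priority, so priority 'prio' needs at
 * least 'prio + 1' TX queues.
 */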
static int otx2_check_pfc_config(struct otx2_nic *pfvf)
{
	u8 tx_queues = pfvf->hw.tx_queues, prio;
	u8 pfc_en = pfvf->pfc_en;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		if ((pfc_en & (1 << prio)) &&
		    prio > tx_queues - 1) {
			dev_warn(pfvf->dev,
				 "Increase number of tx queues from %d to %d to support PFC.\n",
				 tx_queues, prio + 1);
			return -EINVAL;
		}
	}

	return 0;
}

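/* Program the TX scheduler (TL) hierarchy for every priority that has PFC
 * enabled and already owns a dedicated scheduler tree.
 */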
int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, lvl, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* Either PFC bit is not set
		 * or tx scheduler is not allocated for the priority
		 */
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Configure the transmit levels (TLs) for this priority */
		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			err = otx2_txschq_config(pfvf, lvl, prio, true);
			if (err) {
				dev_err(pfvf->dev,
					"%s: failed to configure PFC tx schq for lvl:%d, prio:%d\n",
					__func__, lvl, prio);
				return err;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(otx2_pfc_txschq_config);

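/* Allocate one TX scheduler queue per level, up to the link config level,
 * for the given priority and record the handles in pfc_schq_list. Levels
 * above the link config level reuse the default hw.txschq_list entries.
 */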
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int lvl, rc;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level, up to the configured link config
	 * level. The remaining levels can reuse the schedulers already in
	 * hw.txschq_list.
	 */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
		req->schq[lvl] = 1;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		return rc;

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
		if (!rsp->schq[lvl])
			return -ENOSPC;

		pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
	}

	/* For the remaining levels reuse the schedulers from
	 * hw.txschq_list, as those are common to all priorities.
	 */
	for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];

	pfvf->pfc_alloc_status[prio] = true;
	return 0;
}

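/* Allocate dedicated TX scheduler trees for every priority that has PFC
 * enabled but no schedulers allocated yet.
 */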
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
{
	u8 pfc_en = pfvf->pfc_en;
	u8 pfc_bit_set;
	int err, prio;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(otx2_pfc_txschq_alloc);

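/* Free the per-priority PFC TX scheduler queues and mark the priority as
 * unallocated.
 */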
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
	int lvl;

	/* free PFC TLx nodes */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
		otx2_txschq_free_one(pfvf, lvl,
				     pfvf->pfc_schq_list[lvl][prio]);

	pfvf->pfc_alloc_status[prio] = false;
	return 0;
}

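/* Re-attach the send queue of the given priority to its SMQ: pause the
 * affected TX queue(s), rewrite the SQ context's SMQ field through the
 * admin queue (CN10K or legacy layout), then resume transmission.
 */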
static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
{
	struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
	struct net_device *dev = pfvf->netdev;
	bool if_up = netif_running(dev);
	struct nix_aq_enq_req *sq_aq;

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_stop_all_queues(pfvf->netdev);
		else
			netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
	}

	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		cn10k_sq_aq->qidx = prio;
		cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		cn10k_sq_aq->sq.ena = 1;
		cn10k_sq_aq->sq_mask.ena = 1;
		cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
		cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	} else {
		sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		sq_aq->qidx = prio;
		sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		sq_aq->sq.ena = 1;
		sq_aq->sq_mask.ena = 1;
		sq_aq->sq_mask.smq = GENMASK(8, 0);
		sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	}

	otx2_sync_mbox_msg(&pfvf->mbox);

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_start_all_queues(pfvf->netdev);
		else
			netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
	}

	return 0;
}

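/* Reconcile the TX scheduler state with the current PFC bitmap: tear down
 * schedulers for priorities where PFC was disabled, allocate them for
 * newly enabled priorities, remap the affected SQs to their SMQs and
 * finally push the scheduler configuration to hardware.
 */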
int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
{
	bool if_up = netif_running(pfvf->netdev);
	u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
	struct mbox *mbox = &pfvf->mbox;
	int err, prio;

	mutex_lock(&mbox->lock);
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* tx scheduler was created but user wants to disable now */
		if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
			mutex_unlock(&mbox->lock);
			if (if_up)
				netif_tx_stop_all_queues(pfvf->netdev);

			otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
			if (if_up)
				netif_tx_start_all_queues(pfvf->netdev);

			/* delete the schq */
			err = otx2_pfc_txschq_stop_one(pfvf, prio);
			if (err) {
				dev_err(pfvf->dev,
					"%s failed to stop PFC tx schedulers for priority: %d\n",
					__func__, prio);
				return err;
			}

			mutex_lock(&mbox->lock);
			goto update_sq_smq_map;
		}

		/* Either PFC bit is not set
		 * or Tx scheduler is already mapped for the priority
		 */
		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev,
				"%s failed to allocate PFC tx schedulers for priority: %d\n",
				__func__, prio);
			return err;
		}

update_sq_smq_map:
		err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev, "%s failed to map PFC Tx schq for sq:%d\n",
				__func__, prio);
			return err;
		}
	}

	err = otx2_pfc_txschq_config(pfvf);
	mutex_unlock(&mbox->lock);

	return err;
}
EXPORT_SYMBOL_GPL(otx2_pfc_txschq_update);

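/* Tear down the dedicated TX schedulers of every PFC-enabled priority that
 * currently has schedulers allocated.
 */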
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Delete the existing scheduler */
		err = otx2_pfc_txschq_stop_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(otx2_pfc_txschq_stop);

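/* Request PFC (per-priority pause) on the MAC through the AF mailbox and
 * verify from the response that RX/TX pause were applied as requested.
 * LBK VFs have no physical link, so nothing is done for them.
 */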
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
	struct cgx_pfc_cfg *req;
	struct cgx_pfc_rsp *rsp;
	int err = 0;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Enable both Rx and Tx pause whenever any priority has PFC enabled */
	req->rx_pause = !!pfvf->pfc_en;
	req->tx_pause = !!pfvf->pfc_en;
	req->pfc_en = pfvf->pfc_en;

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pfc_rsp *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp)) {
			err = PTR_ERR(rsp);
			goto unlock;
		}

		if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
			dev_warn(pfvf->dev,
				 "Failed to config PFC\n");
			err = -EPERM;
		}
	}
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
EXPORT_SYMBOL_GPL(otx2_config_priority_flow_ctrl);

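/* Map or unmap a VLAN priority to an RX queue for PFC: program the
 * priority's backpressure ID (BPID) into the queue's CQ context and the
 * matching aura context, pausing the interface while the contexts are
 * rewritten.
 */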
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
			       bool pfc_enable)
{
	bool if_up = netif_running(pfvf->netdev);
	struct npa_aq_enq_req *npa_aq;
	struct nix_aq_enq_req *aq;
	int err = 0;

	if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
		dev_warn(pfvf->dev,
			 "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
			 pfvf->queue_to_pfc_map[qidx], qidx);
		return;
	}

	if (if_up) {
		netif_tx_stop_all_queues(pfvf->netdev);
		netif_carrier_off(pfvf->netdev);
	}

	pfvf->queue_to_pfc_map[qidx] = vlan_prio;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq) {
		err = -ENOMEM;
		goto out;
	}

	aq->cq.bpid = pfvf->bpid[vlan_prio];
	aq->cq_mask.bpid = GENMASK(8, 0);

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	otx2_sync_mbox_msg(&pfvf->mbox);

	npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!npa_aq) {
		err = -ENOMEM;
		goto out;
	}
	npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
	npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);

	/* Fill NPA AQ info */
	npa_aq->aura_id = qidx;
	npa_aq->ctype = NPA_AQ_CTYPE_AURA;
	npa_aq->op = NPA_AQ_INSTOP_WRITE;
	otx2_sync_mbox_msg(&pfvf->mbox);

out:
	if (if_up) {
		netif_carrier_on(pfvf->netdev);
		netif_tx_start_all_queues(pfvf->netdev);
	}

	if (err)
		dev_warn(pfvf->dev,
			 "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
			 qidx, err);
}
EXPORT_SYMBOL_GPL(otx2_update_bpid_in_rqctx);

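/* dcbnl_rtnl_ops: report the PFC capability and the current enable bitmap */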
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
	pfc->pfc_en = pfvf->pfc_en;

	return 0;
}

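/* dcbnl_rtnl_ops: apply a new PFC enable bitmap. Validate it against the
 * TX queue count, program the MAC, request per-channel backpressure and
 * rebuild the TX schedulers; the previous bitmap is restored on failure.
 */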
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	u8 old_pfc_en;
	int err;

	old_pfc_en = pfvf->pfc_en;
	pfvf->pfc_en = pfc->pfc_en;

	if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
		goto process_pfc;

	/* Check if the PFC configuration can be
	 * supported by the tx queue configuration
	 */
	err = otx2_check_pfc_config(pfvf);
	if (err) {
		pfvf->pfc_en = old_pfc_en;
		return err;
	}

process_pfc:
	err = otx2_config_priority_flow_ctrl(pfvf);
	if (err) {
		pfvf->pfc_en = old_pfc_en;
		return err;
	}

	/* Request per-channel backpressure IDs (BPIDs) */
	if (pfc->pfc_en)
		otx2_nix_config_bp(pfvf, true);

	err = otx2_pfc_txschq_update(pfvf);
	if (err) {
		if (pfc->pfc_en)
			otx2_nix_config_bp(pfvf, false);

		otx2_pfc_txschq_stop(pfvf);
		pfvf->pfc_en = old_pfc_en;
		otx2_config_priority_flow_ctrl(pfvf);
		dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
		return err;
	}

	return 0;
}

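/* Only host-managed IEEE DCBX is supported; any other mode is rejected */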
static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}

static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
{
	return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}

static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
	.ieee_getpfc	= otx2_dcbnl_ieee_getpfc,
	.ieee_setpfc	= otx2_dcbnl_ieee_setpfc,
	.getdcbx	= otx2_dcbnl_getdcbx,
	.setdcbx	= otx2_dcbnl_setdcbx,
};

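/* Allocate the per-RQ priority map and register the dcbnl callbacks */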
int otx2_dcbnl_set_ops(struct net_device *dev)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
					      GFP_KERNEL);
	if (!pfvf->queue_to_pfc_map)
		return -ENOMEM;
	dev->dcbnl_ops = &otx2_dcbnl_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(otx2_dcbnl_set_ops);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Marvell RVU dcbnl");
MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");