xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
34 #include <linux/units.h>
35 #include "en.h"
36 #include "en/port.h"
37 #include "en/port_buffer.h"
38 
39 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
40 
41 #define MLX5E_100MB_TO_KB (100 * MEGA / KILO)
42 #define MLX5E_1GB_TO_KB   (GIGA / KILO)
43 
44 #define MLX5E_CEE_STATE_UP    1
45 #define MLX5E_CEE_STATE_DOWN  0
46 
47 /* Max supported cable length is 1000 meters */
48 #define MLX5E_MAX_CABLE_LENGTH 1000
49 
50 enum {
51 	MLX5E_VENDOR_TC_GROUP_NUM = 7,
52 	MLX5E_LOWEST_PRIO_GROUP   = 0,
53 };
54 
55 enum {
56 	MLX5_DCB_CHG_RESET,
57 	MLX5_DCB_NO_CHG,
58 	MLX5_DCB_CHG_NO_RESET,
59 };
60 
61 #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg)  && \
62 				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
63 				   MLX5_CAP_QCAM_REG(mdev, qpdpm))
64 
65 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
66 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
67 
68 /* If dcbx mode is non-host set the dcbx mode to host.
69  */
mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv * priv,enum mlx5_dcbx_oper_mode mode)70 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
71 				     enum mlx5_dcbx_oper_mode mode)
72 {
73 	struct mlx5_core_dev *mdev = priv->mdev;
74 	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
75 	int err;
76 
77 	err = mlx5_query_port_dcbx_param(mdev, param);
78 	if (err)
79 		return err;
80 
81 	MLX5_SET(dcbx_param, param, version_admin, mode);
82 	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
83 		MLX5_SET(dcbx_param, param, willing_admin, 1);
84 
85 	return mlx5_set_port_dcbx_param(mdev, param);
86 }
87 
mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv * priv)88 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
89 {
90 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
91 	int err;
92 
93 	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
94 		return 0;
95 
96 	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
97 		return 0;
98 
99 	err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
100 	if (err)
101 		return err;
102 
103 	dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
104 	return 0;
105 }
106 
/* Fill @ets with the ETS configuration currently programmed in firmware:
 * per-priority TC mapping, per-TC TX bandwidth allocation, and the
 * transmission selection algorithm (TSA) derived from the TC-group layout.
 *
 * Returns 0 on success, -EOPNOTSUPP when ETS is not supported, or a
 * negative errno from the firmware queries.
 */
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	bool is_tc_group_6_exist = false;
	bool is_zero_bw_ets_tc = false;
	int err = 0;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	/* priority -> TC mapping comes straight from the port registers */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
		if (err)
			return err;
	}

	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets->ets_cap; i++) {
		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
		if (err)
			return err;

		/* Group LOWEST_PRIO_GROUP + 1 is only used when zero-BW ETS
		 * TCs exist (see mlx5e_build_tc_group()), so a partial-BW TC
		 * in that group implies the zero-BW layout is in effect.
		 */
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
			is_zero_bw_ets_tc = true;

		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
			is_tc_group_6_exist = true;
	}

	/* Report 0% ETS TCs if they exist */
	if (is_zero_bw_ets_tc) {
		for (i = 0; i < ets->ets_cap; i++)
			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
				ets->tc_tx_bw[i] = 0;
	}

	/* Update tc_tsa based on fw setting */
	for (i = 0; i < ets->ets_cap; i++) {
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
			 !is_tc_group_6_exist)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
	}
	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));

	return err;
}
164 
/* Assign each TC to a hardware TC group according to its TSA. */
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
	bool any_tc_mapped_to_ets = false;
	bool ets_zero_bw = false;
	int strict_group;
	int tc;

	for (tc = 0; tc <= max_tc; tc++) {
		if (ets->tc_tsa[tc] != IEEE_8021QAZ_TSA_ETS)
			continue;
		any_tc_mapped_to_ets = true;
		if (!ets->tc_tx_bw[tc])
			ets_zero_bw = true;
	}

	/* Strict groups rank above the ETS group(s):
	 * group 0 - zero-BW ETS TCs (when present), otherwise all ETS TCs
	 * group 1 - non-zero-BW ETS TCs (only when zero-BW TCs exist)
	 * higher  - one group per strict-priority TC
	 */
	strict_group = MLX5E_LOWEST_PRIO_GROUP +
		       (any_tc_mapped_to_ets ? 1 : 0) +
		       (ets_zero_bw ? 1 : 0);

	for (tc = 0; tc <= max_tc; tc++) {
		switch (ets->tc_tsa[tc]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_group[tc] = MLX5E_VENDOR_TC_GROUP_NUM;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			tc_group[tc] = strict_group++;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (ets->tc_tx_bw[tc] && ets_zero_bw)
				tc_group[tc] = MLX5E_LOWEST_PRIO_GROUP + 1;
			else
				tc_group[tc] = MLX5E_LOWEST_PRIO_GROUP;
			break;
		}
	}
}
203 
/* Compute the per-TC TX bandwidth percentages handed to firmware.
 * Zero-BW ETS TCs equally share 100% within their own group; the
 * remainder of an uneven split is given to the last such TC so the
 * group still sums to 100%.
 */
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
				 u8 *tc_group, int max_tc)
{
	int bw_for_ets_zero_bw_tc = 0;
	int last_ets_zero_bw_tc = -1;
	int num_ets_zero_bw = 0;
	int tc;

	for (tc = 0; tc <= max_tc; tc++) {
		if (ets->tc_tsa[tc] == IEEE_8021QAZ_TSA_ETS &&
		    !ets->tc_tx_bw[tc]) {
			num_ets_zero_bw++;
			last_ets_zero_bw_tc = tc;
		}
	}

	if (num_ets_zero_bw)
		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;

	for (tc = 0; tc <= max_tc; tc++) {
		switch (ets->tc_tsa[tc]) {
		case IEEE_8021QAZ_TSA_VENDOR:
		case IEEE_8021QAZ_TSA_STRICT:
			/* Vendor and strict TCs always get the full share */
			tc_tx_bw[tc] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (ets->tc_tx_bw[tc])
				tc_tx_bw[tc] = ets->tc_tx_bw[tc];
			else
				tc_tx_bw[tc] = bw_for_ets_zero_bw_tc;
			break;
		}
	}

	/* Make sure the total bw for the ets zero bw group is 100% */
	if (last_ets_zero_bw_tc != -1)
		tc_tx_bw[last_ets_zero_bw_tc] +=
			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
244 
/* If there are any TCs with ETS BW 0:
 *   Set ETS group #1 for all ETS TCs with non-zero BW. Their sum must be 100%.
 *   Set group #0 for all the ETS TCs with zero BW and
 *     equally split the 100% BW between them.
 *   Report both group #0 and #1 as ETS type.
 *     All the TCs in group #0 will be reported with 0% BW.
 */
mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv * priv,struct ieee_ets * ets)252 static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
253 {
254 	struct mlx5_core_dev *mdev = priv->mdev;
255 	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
256 	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
257 	int max_tc = mlx5_max_tc(mdev);
258 	int err, i;
259 
260 	mlx5e_build_tc_group(ets, tc_group, max_tc);
261 	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
262 
263 	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
264 	if (err)
265 		return err;
266 
267 	err = mlx5_set_port_tc_group(mdev, tc_group);
268 	if (err)
269 		return err;
270 
271 	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
272 
273 	if (err)
274 		return err;
275 
276 	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
277 
278 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
279 		netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
280 			   __func__, i, ets->prio_tc[i]);
281 		netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
282 			   __func__, i, tc_tx_bw[i], tc_group[i]);
283 	}
284 
285 	return err;
286 }
287 
/* Sanity-check an IEEE ETS request before programming firmware. */
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
				    struct ieee_ets *ets,
				    bool zero_sum_allowed)
{
	bool have_ets_tc = false;
	int bw_sum = 0;
	int i;

	/* Every priority must map to a valid TC */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
			netdev_err(netdev,
				   "Failed to validate ETS: priority value greater than max(%d)\n",
				   MLX5E_MAX_PRIORITY);
			return -EINVAL;
		}
	}

	/* ETS TCs together must account for exactly 100% bandwidth */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->tc_tsa[i] != IEEE_8021QAZ_TSA_ETS)
			continue;
		have_ets_tc = true;
		bw_sum += ets->tc_tx_bw[i];
	}

	if (have_ets_tc && bw_sum != 100) {
		/* An all-zero sum is only reported when not explicitly allowed */
		if (bw_sum || !zero_sum_allowed)
			netdev_err(netdev,
				   "Failed to validate ETS: BW sum is illegal\n");
		return -EINVAL;
	}
	return 0;
}
322 
mlx5e_dcbnl_ieee_setets(struct net_device * netdev,struct ieee_ets * ets)323 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
324 				   struct ieee_ets *ets)
325 {
326 	struct mlx5e_priv *priv = netdev_priv(netdev);
327 	int err;
328 
329 	if (!MLX5_CAP_GEN(priv->mdev, ets))
330 		return -EOPNOTSUPP;
331 
332 	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
333 	if (err)
334 		return err;
335 
336 	err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
337 	if (err)
338 		return err;
339 
340 	return 0;
341 }
342 
mlx5e_dcbnl_ieee_getpfc(struct net_device * dev,struct ieee_pfc * pfc)343 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
344 				   struct ieee_pfc *pfc)
345 {
346 	struct mlx5e_priv *priv = netdev_priv(dev);
347 	struct mlx5_core_dev *mdev = priv->mdev;
348 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
349 	int i;
350 
351 	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
352 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
353 		pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
354 		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
355 	}
356 
357 	if (MLX5_BUFFER_SUPPORTED(mdev))
358 		pfc->delay = priv->dcbx.cable_len;
359 
360 	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
361 }
362 
mlx5e_dcbnl_ieee_setpfc(struct net_device * dev,struct ieee_pfc * pfc)363 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
364 				   struct ieee_pfc *pfc)
365 {
366 	u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
367 	struct mlx5e_priv *priv = netdev_priv(dev);
368 	struct mlx5_core_dev *mdev = priv->mdev;
369 	u32 old_cable_len = priv->dcbx.cable_len;
370 	struct ieee_pfc pfc_new;
371 	u32 changed = 0;
372 	u8 curr_pfc_en;
373 	int ret = 0;
374 
375 	/* pfc_en */
376 	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
377 	if (pfc->pfc_en != curr_pfc_en) {
378 		ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
379 		if (ret)
380 			return ret;
381 		mlx5_toggle_port_link(mdev);
382 		changed |= MLX5E_PORT_BUFFER_PFC;
383 	}
384 
385 	if (pfc->delay &&
386 	    pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
387 	    pfc->delay != priv->dcbx.cable_len) {
388 		priv->dcbx.cable_len = pfc->delay;
389 		changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
390 	}
391 
392 	if (MLX5_BUFFER_SUPPORTED(mdev)) {
393 		pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
394 		ret = mlx5_query_port_buffer_ownership(mdev,
395 						       &buffer_ownership);
396 		if (ret)
397 			netdev_err(dev,
398 				   "%s, Failed to get buffer ownership: %d\n",
399 				   __func__, ret);
400 
401 		if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
402 			ret = mlx5e_port_manual_buffer_config(priv, changed,
403 							      dev->mtu, &pfc_new,
404 							      NULL, NULL);
405 
406 		if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
407 			priv->dcbx.cable_len = old_cable_len;
408 	}
409 
410 	if (!ret) {
411 		netdev_dbg(dev,
412 			   "%s: PFC per priority bit mask: 0x%x\n",
413 			   __func__, pfc->pfc_en);
414 	}
415 	return ret;
416 }
417 
/* DCBNL getdcbx: report the cached DCBX capability flags. */
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return priv->dcbx.cap;
}
424 
/* DCBNL setdcbx: change the DCBX capability flags / operating mode.
 *
 * mode == 0 requests firmware-controlled (auto) DCBX; any other mode must
 * be host-controlled and not LLD-managed.
 *
 * Returns 0 on success, 1 on failure (dcbnl convention for u8 handlers).
 */
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_dcbx *dcbx = &priv->dcbx;

	/* LLD-managed DCBX is not supported */
	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
		/* Already firmware controlled - nothing to do */
		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
			return 0;

		/* set dcbx to fw controlled */
		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
			dcbx->cap &= ~DCB_CAP_DCBX_HOST;
			return 0;
		}

		return 1;
	}

	if (!(mode & DCB_CAP_DCBX_HOST))
		return 1;

	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
		return 1;

	dcbx->cap = mode;

	return 0;
}
457 
/* IEEE setapp: install a DSCP -> priority mapping (APP entry).
 *
 * Only DSCP-selector entries with a DSCP value below MLX5E_MAX_DSCP are
 * accepted. Installing the first entry switches the port trust state from
 * PCP to DSCP; if programming the mapping fails, trust is rolled back to
 * PCP.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct dcb_app temp;
	bool is_new;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Save the old entry info */
	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	temp.protocol = app->protocol;
	temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];

	/* Check if need to switch to dscp trust state */
	if (!priv->dcbx.dscp_app_cnt) {
		err =  mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
		if (err)
			return err;
	}

	/* Skip the fw command if new and old mapping are the same */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
		err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
		if (err)
			goto fw_err;
	}

	/* Delete the old entry if exists */
	is_new = false;
	err = dcb_ieee_delapp(dev, &temp);
	if (err)
		is_new = true;	/* nothing deleted -> this DSCP is new */

	/* Add new entry and update counter */
	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	if (is_new)
		priv->dcbx.dscp_app_cnt++;

	return err;

fw_err:
	/* Programming the mapping failed - fall back to PCP trust */
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
512 
/* IEEE delapp: remove a DSCP -> priority mapping (APP entry).
 *
 * The entry must match the mapping currently cached in dscp2prio.
 * Removing the last entry switches the port trust state back to PCP.
 *
 * Returns 0 on success, -ENOENT when no matching entry exists, or a
 * negative errno from firmware.
 */
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err;

	if  (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	     !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Skip if no dscp app entry */
	if (!priv->dcbx.dscp_app_cnt)
		return -ENOENT;

	/* Check if the entry matches fw setting */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
		return -ENOENT;

	/* Delete the app entry */
	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	/* Reset the priority mapping back to zero */
	err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
	if (err)
		goto fw_err;

	priv->dcbx.dscp_app_cnt--;

	/* Check if need to switch to pcp trust state */
	if (!priv->dcbx.dscp_app_cnt)
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);

	return err;

fw_err:
	/* Firmware reset failed - fall back to PCP trust */
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
556 
mlx5e_dcbnl_ieee_getmaxrate(struct net_device * netdev,struct ieee_maxrate * maxrate)557 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
558 				       struct ieee_maxrate *maxrate)
559 {
560 	struct mlx5e_priv *priv    = netdev_priv(netdev);
561 	struct mlx5_core_dev *mdev = priv->mdev;
562 	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
563 	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
564 	int err;
565 	int i;
566 
567 	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
568 	if (err)
569 		return err;
570 
571 	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
572 
573 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
574 		switch (max_bw_unit[i]) {
575 		case MLX5_100_MBPS_UNIT:
576 			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB_TO_KB;
577 			break;
578 		case MLX5_GBPS_UNIT:
579 			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB_TO_KB;
580 			break;
581 		case MLX5_BW_NO_LIMIT:
582 			break;
583 		default:
584 			WARN(true, "non-supported BW unit");
585 			break;
586 		}
587 	}
588 
589 	return 0;
590 }
591 
/* IEEE setmaxrate: program per-TC rate limits.
 *
 * Rates arrive in Kbps; firmware takes a (value, unit) pair where the
 * unit is either 100 Mbps or 1 Gbps. Values are rounded down, but a
 * small non-zero request is clamped up to 1 so it still limits the TC.
 *
 * Returns 0 on success, -EINVAL when a rate exceeds the representable
 * maximum (U8_MAX Gbps), or a negative errno from firmware.
 */
static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
				       struct ieee_maxrate *maxrate)
{
	struct mlx5e_priv *priv    = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
	u64 upper_limit_100mbps;
	u64 upper_limit_gbps;
	int i;
	/* Scale/name table indexed by fw unit, used only for debug output */
	struct {
		int scale;
		const char *units_str;
	} units[] = {
		[MLX5_100_MBPS_UNIT] = {
			.scale = 100,
			.units_str = "Mbps",
		},
		[MLX5_GBPS_UNIT] = {
			.scale = 1,
			.units_str = "Gbps",
		},
	};

	memset(max_bw_value, 0, sizeof(max_bw_value));
	memset(max_bw_unit, 0, sizeof(max_bw_unit));
	/* Largest rates representable in each fw unit (in Kbps) */
	upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB;
	upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB;

	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
		if (!maxrate->tc_maxrate[i]) {
			/* Zero request means no rate limit on this TC */
			max_bw_unit[i]  = MLX5_BW_NO_LIMIT;
			continue;
		}
		if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) {
			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
						  MLX5E_100MB_TO_KB);
			/* Never round a non-zero request down to "unlimited" */
			max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
			max_bw_unit[i]  = MLX5_100_MBPS_UNIT;
		} else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
						  MLX5E_1GB_TO_KB);
			max_bw_unit[i]  = MLX5_GBPS_UNIT;
		} else {
			netdev_err(netdev,
				   "tc_%d maxrate %llu Kbps exceeds limit %llu\n",
				   i, maxrate->tc_maxrate[i],
				   upper_limit_gbps);
			return -EINVAL;
		}
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
			   max_bw_value[i] * units[max_bw_unit[i]].scale,
			   units[max_bw_unit[i]].units_str);
	}

	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
652 
/* CEE setall: commit the locally cached CEE configuration (priority
 * groups, PG bandwidth and PFC) to firmware in one shot.
 *
 * Returns MLX5_DCB_CHG_RESET when the configuration was applied,
 * MLX5_DCB_NO_CHG on any failure (dcbnl CEE u8 return convention).
 */
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct ieee_ets ets;
	struct ieee_pfc pfc;
	int err = -EOPNOTSUPP;
	int i;

	if (!MLX5_CAP_GEN(mdev, ets))
		goto out;

	memset(&ets, 0, sizeof(ets));
	memset(&pfc, 0, sizeof(pfc));

	/* Translate the cached CEE config into an IEEE ETS request */
	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_tsa[i]   = IEEE_8021QAZ_TSA_ETS;
		ets.prio_tc[i]  = cee_cfg->prio_to_pg_map[i];
		netdev_dbg(netdev,
			   "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
			   __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
			   ets.prio_tc[i]);
	}

	/* A zero BW sum is tolerated here (nothing configured yet) */
	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
	if (err)
		goto out;

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set ETS: %d\n", __func__, err);
		goto out;
	}

	/* Set PFC */
	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
	if (!cee_cfg->pfc_enable)
		pfc.pfc_en = 0;
	else
		/* Build the enable bitmap from the per-priority settings */
		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;

	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set PFC: %d\n", __func__, err);
		goto out;
	}
out:
	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}
709 
/* CEE getstate: CEE is always reported as enabled on this device. */
static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
{
	return MLX5E_CEE_STATE_UP;
}
714 
/* CEE getpermhwaddr: report the permanent MAC address of the port. */
static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
				      u8 *perm_addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!perm_addr)
		return;

	/* Pre-fill with all-ones; mlx5_query_mac_address() overwrites the
	 * leading bytes with the queried MAC.
	 */
	memset(perm_addr, 0xff, MAX_ADDR_LEN);

	mlx5_query_mac_address(priv->mdev, perm_addr);
}
727 
/* CEE: cache a priority -> priority-group (TC) mapping.
 * Only the mapping is stored; prio_type, bw_pct and up_map are ignored.
 */
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
				     int priority, u8 prio_type,
				     u8 pgid, u8 bw_pct, u8 up_map)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}
	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	cee_cfg->prio_to_pg_map[priority] = pgid;
}
749 
/* CEE: cache the TX bandwidth percentage of a priority group. */
static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
				      int pgid, u8 bw_pct)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	cee_cfg->pg_bw_pct[pgid] = bw_pct;
}
764 
/* CEE: report the TC that @priority maps to; the other outputs are
 * always reported as zero.
 */
static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
				     int priority, u8 *prio_type,
				     u8 *pgid, u8 *bw_pct, u8 *up_map)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
		netdev_err(netdev, "%s, ets is not supported\n", __func__);
		return;
	}
	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	/* Only the priority -> PG mapping is meaningful here */
	*prio_type = 0;
	*bw_pct = 0;
	*up_map = 0;

	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
		*pgid = 0;
}
790 
/* CEE: report the TX bandwidth percentage of priority group @pgid.
 *
 * Reports 0 when the ETS query fails instead of leaking uninitialized
 * stack data (the original left @ets uninitialized and ignored the
 * mlx5e_dcbnl_ieee_getets() return value).
 */
static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
				      int pgid, u8 *bw_pct)
{
	struct ieee_ets ets;

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	/* Zero-fill so a failed query reports 0% instead of stack garbage */
	memset(&ets, 0, sizeof(ets));
	if (mlx5e_dcbnl_ieee_getets(netdev, &ets))
		*bw_pct = 0;
	else
		*bw_pct = ets.tc_tx_bw[pgid];
}
805 
/* CEE: cache the PFC on/off setting for one priority. */
static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
				  int priority, u8 setting)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	/* setting is boolean: 0 = PFC off, 1 = PFC on */
	if (setting > 1)
		return;

	cee_cfg->pfc_setting[priority] = setting;
}
823 
824 static int
mlx5e_dcbnl_get_priority_pfc(struct net_device * netdev,int priority,u8 * setting)825 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
826 			     int priority, u8 *setting)
827 {
828 	struct ieee_pfc pfc;
829 	int err;
830 
831 	err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
832 
833 	if (err)
834 		*setting = 0;
835 	else
836 		*setting = (pfc.pfc_en >> priority) & 0x01;
837 
838 	return err;
839 }
840 
/* CEE: report the live PFC state (not the cached CEE config) for one
 * priority.
 */
static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
				  int priority, u8 *setting)
{
	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	if (!setting)
		return;

	mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
}
855 
/* CEE getcap: report device DCB capabilities.
 * Returns 0 when @capid is recognized, 1 otherwise (dcbnl convention).
 */
static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
			     int capid, u8 *cap)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 rval = 0;

	switch (capid) {
	case DCB_CAP_ATTR_PG:
	case DCB_CAP_ATTR_PFC:
		*cap = true;
		break;
	case DCB_CAP_ATTR_UP2TC:
	case DCB_CAP_ATTR_GSP:
	case DCB_CAP_ATTR_BCN:
		*cap = false;
		break;
	case DCB_CAP_ATTR_PG_TCS:
	case DCB_CAP_ATTR_PFC_TCS:
		/* Encoded as a power of two per the dcbnl convention */
		*cap = 1 << mlx5_max_tc(mdev);
		break;
	case DCB_CAP_ATTR_DCBX:
		*cap = priv->dcbx.cap |
		       DCB_CAP_DCBX_VER_CEE |
		       DCB_CAP_DCBX_VER_IEEE;
		break;
	default:
		*cap = 0;
		rval = 1;
		break;
	}

	return rval;
}
898 
/* CEE getnumtcs: report the number of supported traffic classes. */
static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
				 int tcs_id, u8 *num)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* PG and PFC both span all supported TCs */
	if (tcs_id != DCB_NUMTCS_ATTR_PG && tcs_id != DCB_NUMTCS_ATTR_PFC)
		return -EINVAL;

	*num = mlx5_max_tc(mdev) + 1;
	return 0;
}
916 
mlx5e_dcbnl_getpfcstate(struct net_device * netdev)917 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
918 {
919 	struct ieee_pfc pfc;
920 
921 	if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
922 		return MLX5E_CEE_STATE_DOWN;
923 
924 	return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
925 }
926 
/* CEE: cache the global PFC enable flag; applied later by setall. */
static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	/* Accept only the two defined CEE states */
	if (state != MLX5E_CEE_STATE_UP && state != MLX5E_CEE_STATE_DOWN)
		return;

	cee_cfg->pfc_enable = state;
}
937 
mlx5e_dcbnl_getbuffer(struct net_device * dev,struct dcbnl_buffer * dcb_buffer)938 static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
939 				 struct dcbnl_buffer *dcb_buffer)
940 {
941 	struct mlx5e_priv *priv = netdev_priv(dev);
942 	struct mlx5_core_dev *mdev = priv->mdev;
943 	struct mlx5e_port_buffer port_buffer;
944 	u8 buffer[MLX5E_MAX_PRIORITY];
945 	int i, err;
946 
947 	if (!MLX5_BUFFER_SUPPORTED(mdev))
948 		return -EOPNOTSUPP;
949 
950 	err = mlx5e_port_query_priority2buffer(mdev, buffer);
951 	if (err)
952 		return err;
953 
954 	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
955 		dcb_buffer->prio2buffer[i] = buffer[i];
956 
957 	err = mlx5e_port_query_buffer(priv, &port_buffer);
958 	if (err)
959 		return err;
960 
961 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
962 		dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
963 	dcb_buffer->total_size = port_buffer.port_buffer_size -
964 				 port_buffer.internal_buffers_size;
965 
966 	return 0;
967 }
968 
/* DCBNL setbuffer: apply user-requested buffer sizes and priority ->
 * buffer mapping. Only the pieces that differ from the current
 * configuration are passed down, so unchanged state is not rewritten.
 *
 * Returns 0 on success (including a no-op request) or a negative errno.
 */
static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
	u32 *buffer_size = NULL;
	u8 *prio2buffer = NULL;
	u32 changed = 0;
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
		mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);

	err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
	if (err)
		return err;

	/* Pass the prio2buffer table down only if any entry changed */
	for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
		if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
			changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
			prio2buffer = dcb_buffer->prio2buffer;
			break;
		}
	}

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	/* Pass the size table down only if any network buffer changed */
	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
		if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
			changed |= MLX5E_PORT_BUFFER_SIZE;
			buffer_size = dcb_buffer->buffer_size;
			break;
		}
	}

	if (!changed)
		return 0;

	err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
					      buffer_size, prio2buffer);
	return err;
}
1021 
/* DCBNL callback table: IEEE 802.1Qaz interfaces plus legacy CEE ones. */
static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
	.ieee_getets	= mlx5e_dcbnl_ieee_getets,
	.ieee_setets	= mlx5e_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
	.ieee_getpfc	= mlx5e_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx5e_dcbnl_ieee_setpfc,
	.ieee_setapp    = mlx5e_dcbnl_ieee_setapp,
	.ieee_delapp    = mlx5e_dcbnl_ieee_delapp,
	.getdcbx	= mlx5e_dcbnl_getdcbx,
	.setdcbx	= mlx5e_dcbnl_setdcbx,
	.dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
	.dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,

/* CEE interfaces */
	.setall         = mlx5e_dcbnl_setall,
	.getstate       = mlx5e_dcbnl_getstate,
	.getpermhwaddr  = mlx5e_dcbnl_getpermhwaddr,

	.setpgtccfgtx   = mlx5e_dcbnl_setpgtccfgtx,
	.setpgbwgcfgtx  = mlx5e_dcbnl_setpgbwgcfgtx,
	.getpgtccfgtx   = mlx5e_dcbnl_getpgtccfgtx,
	.getpgbwgcfgtx  = mlx5e_dcbnl_getpgbwgcfgtx,

	.setpfccfg      = mlx5e_dcbnl_setpfccfg,
	.getpfccfg      = mlx5e_dcbnl_getpfccfg,
	.getcap         = mlx5e_dcbnl_getcap,
	.getnumtcs      = mlx5e_dcbnl_getnumtcs,
	.getpfcstate    = mlx5e_dcbnl_getpfcstate,
	.setpfcstate    = mlx5e_dcbnl_setpfcstate,
};
1053 
mlx5e_dcbnl_build_netdev(struct net_device * netdev)1054 void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
1055 {
1056 	struct mlx5e_priv *priv = netdev_priv(netdev);
1057 	struct mlx5_core_dev *mdev = priv->mdev;
1058 
1059 	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
1060 		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1061 }
1062 
mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv * priv,enum mlx5_dcbx_oper_mode * mode)1063 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
1064 					enum mlx5_dcbx_oper_mode *mode)
1065 {
1066 	u32 out[MLX5_ST_SZ_DW(dcbx_param)];
1067 
1068 	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
1069 
1070 	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
1071 		*mode = MLX5_GET(dcbx_param, out, version_oper);
1072 
1073 	/* From driver's point of view, we only care if the mode
1074 	 * is host (HOST) or non-host (AUTO)
1075 	 */
1076 	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
1077 		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
1078 }
1079 
mlx5e_ets_init(struct mlx5e_priv * priv)1080 static void mlx5e_ets_init(struct mlx5e_priv *priv)
1081 {
1082 	struct ieee_ets ets;
1083 	int err;
1084 	int i;
1085 
1086 	if (!MLX5_CAP_GEN(priv->mdev, ets))
1087 		return;
1088 
1089 	memset(&ets, 0, sizeof(ets));
1090 	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
1091 	for (i = 0; i < ets.ets_cap; i++) {
1092 		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
1093 		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
1094 		ets.prio_tc[i] = i;
1095 	}
1096 
1097 	if (ets.ets_cap > 1) {
1098 		/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
1099 		ets.prio_tc[0] = 1;
1100 		ets.prio_tc[1] = 0;
1101 	}
1102 
1103 	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
1104 	if (err)
1105 		netdev_err(priv->netdev,
1106 			   "%s, Failed to init ETS: %d\n", __func__, err);
1107 }
1108 
/* Actions for mlx5e_dcbnl_dscp_app(): install or remove the DSCP app list */
enum {
	INIT,
	DELETE,
};
1113 
mlx5e_dcbnl_dscp_app(struct mlx5e_priv * priv,int action)1114 static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
1115 {
1116 	struct dcb_app temp;
1117 	int i;
1118 
1119 	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
1120 		return;
1121 
1122 	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
1123 		return;
1124 
1125 	/* No SEL_DSCP entry in non DSCP state */
1126 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
1127 		return;
1128 
1129 	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
1130 	for (i = 0; i < MLX5E_MAX_DSCP; i++) {
1131 		temp.protocol = i;
1132 		temp.priority = priv->dcbx_dp.dscp2prio[i];
1133 		if (action == INIT)
1134 			dcb_ieee_setapp(priv->netdev, &temp);
1135 		else
1136 			dcb_ieee_delapp(priv->netdev, &temp);
1137 	}
1138 
1139 	priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
1140 }
1141 
/* Register the driver's DSCP->priority map as dcbnl APP entries */
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, INIT);
}
1146 
/* Remove the driver's DSCP->priority dcbnl APP entries */
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, DELETE);
}
1151 
/* Recompute params->tx_min_inline_mode for the given trust state.
 * DSCP trust needs the IP header visible to HW, so an L2 inline mode is
 * promoted to IP; otherwise the queried minimum is kept as-is.
 */
static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
						       struct mlx5e_params *params,
						       u8 trust_state)
{
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
	if (params->tx_min_inline_mode == MLX5_INLINE_MODE_L2 &&
	    trust_state == MLX5_QPTS_TRUST_DSCP)
		params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}
1161 
mlx5e_update_trust_state_hw(struct mlx5e_priv * priv,void * context)1162 static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
1163 {
1164 	u8 *trust_state = context;
1165 	int err;
1166 
1167 	err = mlx5_set_trust_state(priv->mdev, *trust_state);
1168 	if (err)
1169 		return err;
1170 	WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);
1171 
1172 	return 0;
1173 }
1174 
/* Switch the port trust state (PCP vs DSCP), reopening the channels only
 * when the derived TX min-inline mode actually changes.
 * Takes netdev lock then state_lock; returns 0 or a negative errno.
 */
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
	struct mlx5e_params new_params;
	bool reset = true;
	int err;

	netdev_lock(priv->netdev);
	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
						   trust_state);

	/* Skip if tx_min_inline is the same */
	if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
		reset = false;

	/* mlx5e_update_trust_state_hw() records the new state on success */
	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_update_trust_state_hw,
				       &trust_state, reset);

	mutex_unlock(&priv->state_lock);
	netdev_unlock(priv->netdev);

	return err;
}
1201 
/* Program one DSCP->priority mapping into HW; mirror it into the driver's
 * cache only when the device accepted it. Returns 0 or a negative errno.
 */
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
{
	int err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);

	if (!err)
		priv->dcbx_dp.dscp2prio[dscp] = prio;

	return err;
}
1213 
/* Initialize the driver's trust state from HW: read the port trust state
 * and DSCP->priority map, flush any stale APP entries, and derive the
 * matching TX min-inline mode. Returns 0 or a negative errno.
 */
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 trust_state;
	int err;

	if (!MLX5_DSCP_SUPPORTED(mdev)) {
		/* Without DSCP support, PCP is the only possible trust mode. */
		WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
		return 0;
	}

	err = mlx5_query_trust_state(priv->mdev, &trust_state);
	if (err)
		return err;
	WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);

	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
		/*
		 * Align the driver state with the register state.
		 * Temporary state change is required to enable the app list reset.
		 */
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
		mlx5e_dcbnl_delete_app(priv);
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
	}

	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
						   priv->dcbx_dp.trust_state);

	err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
	if (err)
		return err;

	return 0;
}
1249 
1250 #define MLX5E_BUFFER_CELL_SHIFT 7
1251 
mlx5e_query_port_buffers_cell_size(struct mlx5e_priv * priv)1252 static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
1253 {
1254 	struct mlx5_core_dev *mdev = priv->mdev;
1255 	u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
1256 	u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
1257 
1258 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1259 		return (1 << MLX5E_BUFFER_CELL_SHIFT);
1260 
1261 	if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
1262 				 MLX5_REG_SBCAM, 0, 0))
1263 		return (1 << MLX5E_BUFFER_CELL_SHIFT);
1264 
1265 	return MLX5_GET(sbcam_reg, out, cap_cell_size);
1266 }
1267 
mlx5e_dcbnl_initialize(struct mlx5e_priv * priv)1268 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
1269 {
1270 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
1271 
1272 	mlx5e_trust_initialize(priv);
1273 
1274 	if (!MLX5_CAP_GEN(priv->mdev, qos))
1275 		return;
1276 
1277 	if (MLX5_CAP_GEN(priv->mdev, dcbx))
1278 		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
1279 
1280 	priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
1281 			 DCB_CAP_DCBX_VER_IEEE;
1282 	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
1283 		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
1284 
1285 	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
1286 	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
1287 
1288 	mlx5e_ets_init(priv);
1289 }
1290