xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c (revision 3e5aa52b45c73470092f00d219e947f32cce340c)
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
34 #include <linux/units.h>
35 #include "en.h"
36 #include "en/port.h"
37 #include "en/port_buffer.h"
38 
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */

/* Conversion factors from the firmware ETS rate-limit units
 * (100 Mbps and 1 Gbps) to the Kbps values used by dcbnl maxrate.
 */
#define MLX5E_100MB_TO_KB (100 * MEGA / KILO)
#define MLX5E_1GB_TO_KB   (GIGA / KILO)

/* CEE operational state values reported by getstate/getpfcstate. */
#define MLX5E_CEE_STATE_UP    1
#define MLX5E_CEE_STATE_DOWN  0

/* Max supported cable length is 1000 meters */
#define MLX5E_MAX_CABLE_LENGTH 1000
49 
enum {
	/* Hardware TC group reserved for vendor (default) TCs. */
	MLX5E_VENDOR_TC_GROUP_NUM = 7,
	/* Lowest-priority hardware TC group, used for ETS TCs. */
	MLX5E_LOWEST_PRIO_GROUP   = 0,
};

/* Return codes for the CEE "setall" commit path. */
enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};
60 
/* Human-readable scaling of the firmware rate-limit units, used only
 * for debug logging in mlx5e_dcbnl_ieee_setmaxrate().
 */
static const struct {
	int scale;             /* multiplier applied to the raw fw value */
	const char *units_str; /* unit label for the scaled value */
} mlx5e_bw_units[] = {
	[MLX5_100_MBPS_UNIT] = {
		.scale = 100,
		.units_str = "Mbps",
	},
	[MLX5_GBPS_UNIT] = {
		.scale = 1,
		.units_str = "Gbps",
	},
};
74 
/* DSCP trust support requires the QCAM register block with both the
 * trust-state (qpts) and DSCP->priority mapping (qpdpm) registers.
 */
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg)  && \
				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
				   MLX5_CAP_QCAM_REG(mdev, qpdpm))

/* Forward declarations; definitions appear later in this file. */
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
81 
82 /* If dcbx mode is non-host set the dcbx mode to host.
83  */
84 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
85 				     enum mlx5_dcbx_oper_mode mode)
86 {
87 	struct mlx5_core_dev *mdev = priv->mdev;
88 	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
89 	int err;
90 
91 	err = mlx5_query_port_dcbx_param(mdev, param);
92 	if (err)
93 		return err;
94 
95 	MLX5_SET(dcbx_param, param, version_admin, mode);
96 	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
97 		MLX5_SET(dcbx_param, param, willing_admin, 1);
98 
99 	return mlx5_set_port_dcbx_param(mdev, param);
100 }
101 
102 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
103 {
104 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
105 	int err;
106 
107 	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
108 		return 0;
109 
110 	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
111 		return 0;
112 
113 	err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
114 	if (err)
115 		return err;
116 
117 	dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
118 	return 0;
119 }
120 
/* ieee_getets: report the ETS configuration read back from firmware:
 * priority->TC mapping, per-TC TX bandwidth and the TSA per TC.
 * Also refreshes the cached priv->dcbx.tc_tsa from the fw state.
 */
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	bool is_tc_group_6_exist = false;
	bool is_zero_bw_ets_tc = false;
	int err = 0;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	/* Priority -> TC mapping. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
		if (err)
			return err;
	}

	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets->ets_cap; i++) {
		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
		if (err)
			return err;

		/* A TC with partial BW in group (LOWEST+1) implies group
		 * LOWEST holds zero-BW ETS TCs (see mlx5e_build_tc_group).
		 */
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
			is_zero_bw_ets_tc = true;

		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
			is_tc_group_6_exist = true;
	}

	/* Report 0% ets tc if it exists */
	if (is_zero_bw_ets_tc) {
		for (i = 0; i < ets->ets_cap; i++)
			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
				ets->tc_tx_bw[i] = 0;
	}

	/* Update tc_tsa based on fw setting */
	for (i = 0; i < ets->ets_cap; i++) {
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
			 !is_tc_group_6_exist)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
	}
	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));

	return err;
}
178 
179 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
180 {
181 	bool any_tc_mapped_to_ets = false;
182 	bool ets_zero_bw = false;
183 	int strict_group;
184 	int i;
185 
186 	for (i = 0; i <= max_tc; i++) {
187 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
188 			any_tc_mapped_to_ets = true;
189 			if (!ets->tc_tx_bw[i])
190 				ets_zero_bw = true;
191 		}
192 	}
193 
194 	/* strict group has higher priority than ets group */
195 	strict_group = MLX5E_LOWEST_PRIO_GROUP;
196 	if (any_tc_mapped_to_ets)
197 		strict_group++;
198 	if (ets_zero_bw)
199 		strict_group++;
200 
201 	for (i = 0; i <= max_tc; i++) {
202 		switch (ets->tc_tsa[i]) {
203 		case IEEE_8021QAZ_TSA_VENDOR:
204 			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
205 			break;
206 		case IEEE_8021QAZ_TSA_STRICT:
207 			tc_group[i] = strict_group++;
208 			break;
209 		case IEEE_8021QAZ_TSA_ETS:
210 			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
211 			if (ets->tc_tx_bw[i] && ets_zero_bw)
212 				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
213 			break;
214 		}
215 	}
216 }
217 
218 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
219 				 u8 *tc_group, int max_tc)
220 {
221 	int bw_for_ets_zero_bw_tc = 0;
222 	int last_ets_zero_bw_tc = -1;
223 	int num_ets_zero_bw = 0;
224 	int i;
225 
226 	for (i = 0; i <= max_tc; i++) {
227 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
228 		    !ets->tc_tx_bw[i]) {
229 			num_ets_zero_bw++;
230 			last_ets_zero_bw_tc = i;
231 		}
232 	}
233 
234 	if (num_ets_zero_bw)
235 		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
236 
237 	for (i = 0; i <= max_tc; i++) {
238 		switch (ets->tc_tsa[i]) {
239 		case IEEE_8021QAZ_TSA_VENDOR:
240 			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
241 			break;
242 		case IEEE_8021QAZ_TSA_STRICT:
243 			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
244 			break;
245 		case IEEE_8021QAZ_TSA_ETS:
246 			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
247 				      ets->tc_tx_bw[i] :
248 				      bw_for_ets_zero_bw_tc;
249 			break;
250 		}
251 	}
252 
253 	/* Make sure the total bw for ets zero bw group is 100% */
254 	if (last_ets_zero_bw_tc != -1)
255 		tc_tx_bw[last_ets_zero_bw_tc] +=
256 			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
257 }
258 
/* If there are ETS BW 0,
 *   Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
 *   Set group #0 to all the ETS BW 0 tcs and
 *     equally splits the 100% BW between them
 *   Report both group #0 and #1 as ETS type.
 *     All the tcs in group #0 will be reported with 0% BW.
 *
 * Programs the prio->TC mapping, TC groups and TC BW allocation to
 * the port, in that order, and caches the TSA config on success.
 */
static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	int max_tc = mlx5_max_tc(mdev);
	int err, i;

	/* Translate the IEEE config to hardware groups and BW shares. */
	mlx5e_build_tc_group(ets, tc_group, max_tc);
	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);

	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
	if (err)
		return err;

	err = mlx5_set_port_tc_group(mdev, tc_group);
	if (err)
		return err;

	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);

	if (err)
		return err;

	/* Cache the applied TSA so ieee_getets can report it. */
	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
			   __func__, i, ets->prio_tc[i]);
		netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
			   __func__, i, tc_tx_bw[i], tc_group[i]);
	}

	return err;
}
301 
302 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
303 				    struct ieee_ets *ets,
304 				    bool zero_sum_allowed)
305 {
306 	bool have_ets_tc = false;
307 	int bw_sum = 0;
308 	int i;
309 
310 	/* Validate Priority */
311 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
312 		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
313 			netdev_err(netdev,
314 				   "Failed to validate ETS: priority value greater than max(%d)\n",
315 				    MLX5E_MAX_PRIORITY);
316 			return -EINVAL;
317 		}
318 	}
319 
320 	/* Validate Bandwidth Sum */
321 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
322 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
323 			have_ets_tc = true;
324 			bw_sum += ets->tc_tx_bw[i];
325 		}
326 	}
327 
328 	if (have_ets_tc && bw_sum != 100) {
329 		if (bw_sum || (!bw_sum && !zero_sum_allowed))
330 			netdev_err(netdev,
331 				   "Failed to validate ETS: BW sum is illegal\n");
332 		return -EINVAL;
333 	}
334 	return 0;
335 }
336 
337 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
338 				   struct ieee_ets *ets)
339 {
340 	struct mlx5e_priv *priv = netdev_priv(netdev);
341 	int err;
342 
343 	if (!MLX5_CAP_GEN(priv->mdev, ets))
344 		return -EOPNOTSUPP;
345 
346 	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
347 	if (err)
348 		return err;
349 
350 	err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
351 	if (err)
352 		return err;
353 
354 	return 0;
355 }
356 
357 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
358 				   struct ieee_pfc *pfc)
359 {
360 	struct mlx5e_priv *priv = netdev_priv(dev);
361 	struct mlx5_core_dev *mdev = priv->mdev;
362 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
363 	int i;
364 
365 	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
366 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
367 		pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
368 		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
369 	}
370 
371 	if (MLX5_BUFFER_SUPPORTED(mdev))
372 		pfc->delay = priv->dcbx.cable_len;
373 
374 	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
375 }
376 
377 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
378 				   struct ieee_pfc *pfc)
379 {
380 	u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
381 	struct mlx5e_priv *priv = netdev_priv(dev);
382 	struct mlx5_core_dev *mdev = priv->mdev;
383 	u32 old_cable_len = priv->dcbx.cable_len;
384 	struct ieee_pfc pfc_new;
385 	u32 changed = 0;
386 	u8 curr_pfc_en;
387 	int ret = 0;
388 
389 	/* pfc_en */
390 	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
391 	if (pfc->pfc_en != curr_pfc_en) {
392 		ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
393 		if (ret)
394 			return ret;
395 		mlx5_toggle_port_link(mdev);
396 		changed |= MLX5E_PORT_BUFFER_PFC;
397 	}
398 
399 	if (pfc->delay &&
400 	    pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
401 	    pfc->delay != priv->dcbx.cable_len) {
402 		priv->dcbx.cable_len = pfc->delay;
403 		changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
404 	}
405 
406 	if (MLX5_BUFFER_SUPPORTED(mdev)) {
407 		pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
408 		ret = mlx5_query_port_buffer_ownership(mdev,
409 						       &buffer_ownership);
410 		if (ret)
411 			netdev_err(dev,
412 				   "%s, Failed to get buffer ownership: %d\n",
413 				   __func__, ret);
414 
415 		if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
416 			ret = mlx5e_port_manual_buffer_config(priv, changed,
417 							      dev->mtu, &pfc_new,
418 							      NULL, NULL);
419 
420 		if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
421 			priv->dcbx.cable_len = old_cable_len;
422 	}
423 
424 	if (!ret) {
425 		netdev_dbg(dev,
426 			   "%s: PFC per priority bit mask: 0x%x\n",
427 			   __func__, pfc->pfc_en);
428 	}
429 	return ret;
430 }
431 
432 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
433 {
434 	struct mlx5e_priv *priv = netdev_priv(dev);
435 
436 	return priv->dcbx.cap;
437 }
438 
439 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
440 {
441 	struct mlx5e_priv *priv = netdev_priv(dev);
442 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
443 
444 	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
445 		return 1;
446 
447 	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
448 		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
449 			return 0;
450 
451 		/* set dcbx to fw controlled */
452 		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
453 			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
454 			dcbx->cap &= ~DCB_CAP_DCBX_HOST;
455 			return 0;
456 		}
457 
458 		return 1;
459 	}
460 
461 	if (!(mode & DCB_CAP_DCBX_HOST))
462 		return 1;
463 
464 	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
465 		return 1;
466 
467 	dcbx->cap = mode;
468 
469 	return 0;
470 }
471 
/* ieee_setapp: install a DSCP->priority APP entry.
 *
 * Switches the port to DSCP trust on the first entry, programs the
 * mapping into firmware, and keeps the kernel APP table and
 * dscp_app_cnt in sync.  If the firmware update fails the port is
 * reverted to PCP trust.
 */
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct dcb_app temp;
	bool is_new;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	/* Only DSCP selector entries within the supported range. */
	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Save the old entry info */
	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	temp.protocol = app->protocol;
	temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];

	/* Check if need to switch to dscp trust state */
	if (!priv->dcbx.dscp_app_cnt) {
		err =  mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
		if (err)
			return err;
	}

	/* Skip the fw command if new and old mapping are the same */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
		err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
		if (err)
			goto fw_err;
	}

	/* Delete the old entry if exists */
	is_new = false;
	err = dcb_ieee_delapp(dev, &temp);
	if (err)
		is_new = true;

	/* Add new entry and update counter */
	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	/* Only count distinct DSCP values, not re-mapped ones. */
	if (is_new)
		priv->dcbx.dscp_app_cnt++;

	return err;

fw_err:
	/* Firmware rejected the mapping - fall back to PCP trust. */
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
526 
/* ieee_delapp: remove a DSCP->priority APP entry.
 *
 * The entry must match the current firmware mapping.  The firmware
 * mapping for that DSCP is reset to priority 0, and the port drops
 * back to PCP trust once the last entry is removed.
 */
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err;

	if  (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	     !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Skip if no dscp app entry */
	if (!priv->dcbx.dscp_app_cnt)
		return -ENOENT;

	/* Check if the entry matches fw setting */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
		return -ENOENT;

	/* Delete the app entry */
	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	/* Reset the priority mapping back to zero */
	err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
	if (err)
		goto fw_err;

	priv->dcbx.dscp_app_cnt--;

	/* Check if need to switch to pcp trust state */
	if (!priv->dcbx.dscp_app_cnt)
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);

	return err;

fw_err:
	/* Firmware update failed - fall back to PCP trust. */
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
570 
571 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
572 				       struct ieee_maxrate *maxrate)
573 {
574 	struct mlx5e_priv *priv    = netdev_priv(netdev);
575 	struct mlx5_core_dev *mdev = priv->mdev;
576 	u16 max_bw_value[IEEE_8021QAZ_MAX_TCS];
577 	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
578 	int err;
579 	int i;
580 
581 	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
582 	if (err)
583 		return err;
584 
585 	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
586 
587 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
588 		switch (max_bw_unit[i]) {
589 		case MLX5_100_MBPS_UNIT:
590 			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB_TO_KB;
591 			break;
592 		case MLX5_GBPS_UNIT:
593 			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB_TO_KB;
594 			break;
595 		case MLX5_BW_NO_LIMIT:
596 			break;
597 		default:
598 			WARN(true, "non-supported BW unit");
599 			break;
600 		}
601 	}
602 
603 	return 0;
604 }
605 
/* ieee_setmaxrate: program per-TC rate limits.  Rates up to the
 * 100 Mbps-unit ceiling are encoded in 100 Mbps units (rounded down,
 * minimum 1 unit so a non-zero request never becomes unlimited),
 * larger rates in 1 Gbps units; a zero rate means no limit.
 */
static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
				       struct ieee_maxrate *maxrate)
{
	struct mlx5e_priv *priv    = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
	int i;

	memset(max_bw_value, 0, sizeof(max_bw_value));
	memset(max_bw_unit, 0, sizeof(max_bw_unit));

	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
		u64 rate = maxrate->tc_maxrate[i];

		if (!rate) {
			max_bw_unit[i]  = MLX5_BW_NO_LIMIT;
			continue;
		}
		if (rate <= priv->dcbx.upper_limit_100mbps) {
			max_bw_value[i] = div_u64(rate, MLX5E_100MB_TO_KB);
			/* Clamp to 1 so a small rate is not treated as unlimited. */
			max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
			max_bw_unit[i]  = MLX5_100_MBPS_UNIT;
		} else if (rate <= priv->dcbx.upper_limit_gbps) {
			max_bw_value[i] = div_u64(rate, MLX5E_1GB_TO_KB);
			max_bw_unit[i]  = MLX5_GBPS_UNIT;
		} else {
			netdev_err(netdev,
				   "tc_%d maxrate %llu Kbps exceeds limit %llu\n",
				   i, rate, priv->dcbx.upper_limit_gbps);
			return -EINVAL;
		}
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		u8 unit = max_bw_unit[i];

		/* NOTE(review): a TC left at MLX5_BW_NO_LIMIT indexes
		 * mlx5e_bw_units[] at an entry that is not explicitly
		 * initialized (NULL units_str, printed as "(null)") -
		 * confirm MLX5_BW_NO_LIMIT is a valid index of the table.
		 */
		netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
			   max_bw_value[i] * mlx5e_bw_units[unit].scale,
			   mlx5e_bw_units[unit].units_str);
	}

	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
650 
/* CEE "setall" commit: apply the staged CEE configuration (priority
 * groups, PG bandwidths and PFC) to hardware in one shot.
 *
 * Returns MLX5_DCB_CHG_RESET when the configuration was applied and
 * MLX5_DCB_NO_CHG on any failure (including missing ETS support).
 */
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct ieee_ets ets;
	struct ieee_pfc pfc;
	int err = -EOPNOTSUPP;
	int i;

	if (!MLX5_CAP_GEN(mdev, ets))
		goto out;

	memset(&ets, 0, sizeof(ets));
	memset(&pfc, 0, sizeof(pfc));

	/* Translate the staged CEE PG settings to an IEEE ETS config. */
	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_tsa[i]   = IEEE_8021QAZ_TSA_ETS;
		ets.prio_tc[i]  = cee_cfg->prio_to_pg_map[i];
		netdev_dbg(netdev,
			   "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
			   __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
			   ets.prio_tc[i]);
	}

	/* An all-zero BW sum is tolerated on this path (zero_sum_allowed). */
	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
	if (err)
		goto out;

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set ETS: %d\n", __func__, err);
		goto out;
	}

	/* Set PFC */
	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
	if (!cee_cfg->pfc_enable)
		pfc.pfc_en = 0;
	else
		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;

	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set PFC: %d\n", __func__, err);
		goto out;
	}
out:
	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}
707 
708 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
709 {
710 	return MLX5E_CEE_STATE_UP;
711 }
712 
713 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
714 				      u8 *perm_addr)
715 {
716 	struct mlx5e_priv *priv = netdev_priv(netdev);
717 
718 	if (!perm_addr)
719 		return;
720 
721 	memset(perm_addr, 0xff, MAX_ADDR_LEN);
722 
723 	mlx5_query_mac_address(priv->mdev, perm_addr);
724 }
725 
726 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
727 				     int priority, u8 prio_type,
728 				     u8 pgid, u8 bw_pct, u8 up_map)
729 {
730 	struct mlx5e_priv *priv = netdev_priv(netdev);
731 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
732 
733 	if (priority >= CEE_DCBX_MAX_PRIO) {
734 		netdev_err(netdev,
735 			   "%s, priority is out of range\n", __func__);
736 		return;
737 	}
738 
739 	if (pgid >= CEE_DCBX_MAX_PGS) {
740 		netdev_err(netdev,
741 			   "%s, priority group is out of range\n", __func__);
742 		return;
743 	}
744 
745 	cee_cfg->prio_to_pg_map[priority] = pgid;
746 }
747 
748 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
749 				      int pgid, u8 bw_pct)
750 {
751 	struct mlx5e_priv *priv = netdev_priv(netdev);
752 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
753 
754 	if (pgid >= CEE_DCBX_MAX_PGS) {
755 		netdev_err(netdev,
756 			   "%s, priority group is out of range\n", __func__);
757 		return;
758 	}
759 
760 	cee_cfg->pg_bw_pct[pgid] = bw_pct;
761 }
762 
763 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
764 				     int priority, u8 *prio_type,
765 				     u8 *pgid, u8 *bw_pct, u8 *up_map)
766 {
767 	struct mlx5e_priv *priv = netdev_priv(netdev);
768 	struct mlx5_core_dev *mdev = priv->mdev;
769 
770 	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
771 		netdev_err(netdev, "%s, ets is not supported\n", __func__);
772 		return;
773 	}
774 
775 	if (priority >= CEE_DCBX_MAX_PRIO) {
776 		netdev_err(netdev,
777 			   "%s, priority is out of range\n", __func__);
778 		return;
779 	}
780 
781 	*prio_type = 0;
782 	*bw_pct = 0;
783 	*up_map = 0;
784 
785 	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
786 		*pgid = 0;
787 }
788 
789 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
790 				      int pgid, u8 *bw_pct)
791 {
792 	struct ieee_ets ets;
793 
794 	if (pgid >= CEE_DCBX_MAX_PGS) {
795 		netdev_err(netdev,
796 			   "%s, priority group is out of range\n", __func__);
797 		return;
798 	}
799 
800 	mlx5e_dcbnl_ieee_getets(netdev, &ets);
801 	*bw_pct = ets.tc_tx_bw[pgid];
802 }
803 
804 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
805 				  int priority, u8 setting)
806 {
807 	struct mlx5e_priv *priv = netdev_priv(netdev);
808 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
809 
810 	if (priority >= CEE_DCBX_MAX_PRIO) {
811 		netdev_err(netdev,
812 			   "%s, priority is out of range\n", __func__);
813 		return;
814 	}
815 
816 	if (setting > 1)
817 		return;
818 
819 	cee_cfg->pfc_setting[priority] = setting;
820 }
821 
822 static int
823 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
824 			     int priority, u8 *setting)
825 {
826 	struct ieee_pfc pfc;
827 	int err;
828 
829 	err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
830 
831 	if (err)
832 		*setting = 0;
833 	else
834 		*setting = (pfc.pfc_en >> priority) & 0x01;
835 
836 	return err;
837 }
838 
839 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
840 				  int priority, u8 *setting)
841 {
842 	if (priority >= CEE_DCBX_MAX_PRIO) {
843 		netdev_err(netdev,
844 			   "%s, priority is out of range\n", __func__);
845 		return;
846 	}
847 
848 	if (!setting)
849 		return;
850 
851 	mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
852 }
853 
854 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
855 			     int capid, u8 *cap)
856 {
857 	struct mlx5e_priv *priv = netdev_priv(netdev);
858 	struct mlx5_core_dev *mdev = priv->mdev;
859 	u8 rval = 0;
860 
861 	switch (capid) {
862 	case DCB_CAP_ATTR_PG:
863 		*cap = true;
864 		break;
865 	case DCB_CAP_ATTR_PFC:
866 		*cap = true;
867 		break;
868 	case DCB_CAP_ATTR_UP2TC:
869 		*cap = false;
870 		break;
871 	case DCB_CAP_ATTR_PG_TCS:
872 		*cap = 1 << mlx5_max_tc(mdev);
873 		break;
874 	case DCB_CAP_ATTR_PFC_TCS:
875 		*cap = 1 << mlx5_max_tc(mdev);
876 		break;
877 	case DCB_CAP_ATTR_GSP:
878 		*cap = false;
879 		break;
880 	case DCB_CAP_ATTR_BCN:
881 		*cap = false;
882 		break;
883 	case DCB_CAP_ATTR_DCBX:
884 		*cap = priv->dcbx.cap |
885 		       DCB_CAP_DCBX_VER_CEE |
886 		       DCB_CAP_DCBX_VER_IEEE;
887 		break;
888 	default:
889 		*cap = 0;
890 		rval = 1;
891 		break;
892 	}
893 
894 	return rval;
895 }
896 
897 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
898 				 int tcs_id, u8 *num)
899 {
900 	struct mlx5e_priv *priv = netdev_priv(netdev);
901 	struct mlx5_core_dev *mdev = priv->mdev;
902 
903 	switch (tcs_id) {
904 	case DCB_NUMTCS_ATTR_PG:
905 	case DCB_NUMTCS_ATTR_PFC:
906 		*num = mlx5_max_tc(mdev) + 1;
907 		break;
908 	default:
909 		return -EINVAL;
910 	}
911 
912 	return 0;
913 }
914 
915 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
916 {
917 	struct ieee_pfc pfc;
918 
919 	if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
920 		return MLX5E_CEE_STATE_DOWN;
921 
922 	return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
923 }
924 
925 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
926 {
927 	struct mlx5e_priv *priv = netdev_priv(netdev);
928 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
929 
930 	if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
931 		return;
932 
933 	cee_cfg->pfc_enable = state;
934 }
935 
/* dcbnl getbuffer: report the priority->buffer mapping and the sizes
 * of the network port buffers.  The device-internal buffers are
 * excluded from total_size.
 */
static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 buffer[MLX5E_MAX_PRIORITY];
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	err = mlx5e_port_query_priority2buffer(mdev, buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		dcb_buffer->prio2buffer[i] = buffer[i];

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	/* Only the network buffers are exposed to user space. */
	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
		dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
	dcb_buffer->total_size = port_buffer.port_buffer_size -
				 port_buffer.internal_buffers_size;

	return 0;
}
966 
/* dcbnl setbuffer: apply a user-supplied buffer configuration.
 *
 * Compares the requested priority->buffer mapping and network buffer
 * sizes against the current state and programs only what actually
 * changed; a fully matching request is a no-op.
 */
static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
	u32 *buffer_size = NULL;
	u8 *prio2buffer = NULL;
	u32 changed = 0;
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
		mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);

	err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
	if (err)
		return err;

	/* Detect a change in the priority->buffer mapping. */
	for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
		if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
			changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
			prio2buffer = dcb_buffer->prio2buffer;
			break;
		}
	}

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	/* Detect a change in any network buffer size. */
	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
		if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
			changed |= MLX5E_PORT_BUFFER_SIZE;
			buffer_size = dcb_buffer->buffer_size;
			break;
		}
	}

	if (!changed)
		return 0;

	err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
					      buffer_size, prio2buffer);
	return err;
}
1019 
/* DCBNL entry points: IEEE 802.1Qaz handlers first, then the legacy
 * CEE interfaces, which stage state in priv->dcbx.cee_cfg and commit
 * it to hardware via .setall.
 */
static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
	.ieee_getets	= mlx5e_dcbnl_ieee_getets,
	.ieee_setets	= mlx5e_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
	.ieee_getpfc	= mlx5e_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx5e_dcbnl_ieee_setpfc,
	.ieee_setapp    = mlx5e_dcbnl_ieee_setapp,
	.ieee_delapp    = mlx5e_dcbnl_ieee_delapp,
	.getdcbx	= mlx5e_dcbnl_getdcbx,
	.setdcbx	= mlx5e_dcbnl_setdcbx,
	.dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
	.dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,

/* CEE interfaces */
	.setall         = mlx5e_dcbnl_setall,
	.getstate       = mlx5e_dcbnl_getstate,
	.getpermhwaddr  = mlx5e_dcbnl_getpermhwaddr,

	.setpgtccfgtx   = mlx5e_dcbnl_setpgtccfgtx,
	.setpgbwgcfgtx  = mlx5e_dcbnl_setpgbwgcfgtx,
	.getpgtccfgtx   = mlx5e_dcbnl_getpgtccfgtx,
	.getpgbwgcfgtx  = mlx5e_dcbnl_getpgbwgcfgtx,

	.setpfccfg      = mlx5e_dcbnl_setpfccfg,
	.getpfccfg      = mlx5e_dcbnl_getpfccfg,
	.getcap         = mlx5e_dcbnl_getcap,
	.getnumtcs      = mlx5e_dcbnl_getnumtcs,
	.getpfcstate    = mlx5e_dcbnl_getpfcstate,
	.setpfcstate    = mlx5e_dcbnl_setpfcstate,
};
1051 
1052 void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
1053 {
1054 	struct mlx5e_priv *priv = netdev_priv(netdev);
1055 	struct mlx5_core_dev *mdev = priv->mdev;
1056 
1057 	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
1058 		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1059 }
1060 
1061 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
1062 					enum mlx5_dcbx_oper_mode *mode)
1063 {
1064 	u32 out[MLX5_ST_SZ_DW(dcbx_param)];
1065 
1066 	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
1067 
1068 	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
1069 		*mode = MLX5_GET(dcbx_param, out, version_oper);
1070 
1071 	/* From driver's point of view, we only care if the mode
1072 	 * is host (HOST) or non-host (AUTO)
1073 	 */
1074 	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
1075 		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
1076 }
1077 
/* Program the default ETS configuration at init: every TC in vendor
 * mode with full bandwidth and prio i -> tclass i, except that
 * priorities 0 and 1 are swapped when more than one TC exists.
 */
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	struct ieee_ets ets;
	int err;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return;

	memset(&ets, 0, sizeof(ets));
	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets.ets_cap; i++) {
		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		ets.prio_tc[i] = i;
	}

	if (ets.ets_cap > 1) {
		/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
		ets.prio_tc[0] = 1;
		ets.prio_tc[1] = 0;
	}

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err)
		netdev_err(priv->netdev,
			   "%s, Failed to init ETS: %d\n", __func__, err);
}
1106 
/* Actions for mlx5e_dcbnl_dscp_app(): install (INIT) or remove (DELETE)
 * the DSCP application entries in the kernel DCB app table.
 */
enum {
	INIT,
	DELETE,
};
1111 
1112 static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
1113 {
1114 	struct dcb_app temp;
1115 	int i;
1116 
1117 	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
1118 		return;
1119 
1120 	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
1121 		return;
1122 
1123 	/* No SEL_DSCP entry in non DSCP state */
1124 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
1125 		return;
1126 
1127 	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
1128 	for (i = 0; i < MLX5E_MAX_DSCP; i++) {
1129 		temp.protocol = i;
1130 		temp.priority = priv->dcbx_dp.dscp2prio[i];
1131 		if (action == INIT)
1132 			dcb_ieee_setapp(priv->netdev, &temp);
1133 		else
1134 			dcb_ieee_delapp(priv->netdev, &temp);
1135 	}
1136 
1137 	priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
1138 }
1139 
/* Register the driver's DSCP->priority mappings in the kernel DCB app table */
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, INIT);
}
1144 
/* Remove the driver's DSCP->priority mappings from the kernel DCB app table */
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, DELETE);
}
1149 
1150 static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
1151 						       struct mlx5e_params *params,
1152 						       u8 trust_state)
1153 {
1154 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
1155 	if (trust_state == MLX5_QPTS_TRUST_DSCP &&
1156 	    params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
1157 		params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
1158 }
1159 
1160 static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
1161 {
1162 	u8 *trust_state = context;
1163 	int err;
1164 
1165 	err = mlx5_set_trust_state(priv->mdev, *trust_state);
1166 	if (err)
1167 		return err;
1168 	WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);
1169 
1170 	return 0;
1171 }
1172 
1173 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
1174 {
1175 	struct mlx5e_params new_params;
1176 	bool reset = true;
1177 	int err;
1178 
1179 	netdev_lock(priv->netdev);
1180 	mutex_lock(&priv->state_lock);
1181 
1182 	new_params = priv->channels.params;
1183 	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
1184 						   trust_state);
1185 
1186 	/* Skip if tx_min_inline is the same */
1187 	if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
1188 		reset = false;
1189 
1190 	err = mlx5e_safe_switch_params(priv, &new_params,
1191 				       mlx5e_update_trust_state_hw,
1192 				       &trust_state, reset);
1193 
1194 	mutex_unlock(&priv->state_lock);
1195 	netdev_unlock(priv->netdev);
1196 
1197 	return err;
1198 }
1199 
1200 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
1201 {
1202 	int err;
1203 
1204 	err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
1205 	if (err)
1206 		return err;
1207 
1208 	priv->dcbx_dp.dscp2prio[dscp] = prio;
1209 	return err;
1210 }
1211 
/* Initialize the driver's trust state and DSCP->priority shadow table from
 * HW at probe time, and derive the matching TX min-inline mode.
 * Returns 0 on success or a negative errno from the HW queries.
 */
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 trust_state;
	int err;

	/* Without DSCP support the trust state can only be PCP. */
	if (!MLX5_DSCP_SUPPORTED(mdev)) {
		WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
		return 0;
	}

	err = mlx5_query_trust_state(priv->mdev, &trust_state);
	if (err)
		return err;
	WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);

	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
		/*
		 * Align the driver state with the register state.
		 * Temporary state change is required to enable the app list reset.
		 */
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
		mlx5e_dcbnl_delete_app(priv);
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
	}

	/* Derive tx_min_inline_mode for the trust state read from HW. */
	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
						   priv->dcbx_dp.trust_state);

	/* Populate the shadow DSCP->priority table from HW. */
	err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
	if (err)
		return err;

	return 0;
}
1247 
1248 #define MLX5E_BUFFER_CELL_SHIFT 7
1249 
1250 static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
1251 {
1252 	struct mlx5_core_dev *mdev = priv->mdev;
1253 	u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
1254 	u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
1255 
1256 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1257 		return (1 << MLX5E_BUFFER_CELL_SHIFT);
1258 
1259 	if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
1260 				 MLX5_REG_SBCAM, 0, 0))
1261 		return (1 << MLX5E_BUFFER_CELL_SHIFT);
1262 
1263 	return MLX5_GET(sbcam_reg, out, cap_cell_size);
1264 }
1265 
1266 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
1267 {
1268 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
1269 	bool max_bw_msb_supported;
1270 	u16 type_max;
1271 
1272 	mlx5e_trust_initialize(priv);
1273 
1274 	if (!MLX5_CAP_GEN(priv->mdev, qos))
1275 		return;
1276 
1277 	if (MLX5_CAP_GEN(priv->mdev, dcbx))
1278 		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
1279 
1280 	priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
1281 			 DCB_CAP_DCBX_VER_IEEE;
1282 	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
1283 		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
1284 
1285 	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
1286 	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
1287 
1288 	max_bw_msb_supported = MLX5_CAP_QCAM_FEATURE(priv->mdev,
1289 						     qetcr_qshr_max_bw_val_msb);
1290 	type_max = max_bw_msb_supported ? U16_MAX : U8_MAX;
1291 	priv->dcbx.upper_limit_100mbps = type_max * MLX5E_100MB_TO_KB;
1292 	priv->dcbx.upper_limit_gbps = type_max * MLX5E_1GB_TO_KB;
1293 
1294 	mlx5e_ets_init(priv);
1295 }
1296