/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/device.h>
#include <linux/netdevice.h>
#include "en.h"
#include "en/port.h"
#include "en/port_buffer.h"

#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */

#define MLX5E_100MB (100000)
#define MLX5E_1GB   (1000000)

#define MLX5E_CEE_STATE_UP    1
#define MLX5E_CEE_STATE_DOWN  0

/* Max supported cable length is 1000 meters */
#define MLX5E_MAX_CABLE_LENGTH 1000

enum {
        MLX5E_VENDOR_TC_GROUP_NUM = 7,
        MLX5E_LOWEST_PRIO_GROUP   = 0,
};

enum {
        MLX5_DCB_CHG_RESET,
        MLX5_DCB_NO_CHG,
        MLX5_DCB_CHG_NO_RESET,
};

#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
                                   MLX5_CAP_QCAM_REG(mdev, qpts) && \
                                   MLX5_CAP_QCAM_REG(mdev, qpdpm))

static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);

/* If dcbx mode is non-host, set the dcbx mode to host. */
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
                                     enum mlx5_dcbx_oper_mode mode)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 param[MLX5_ST_SZ_DW(dcbx_param)];
        int err;

        err = mlx5_query_port_dcbx_param(mdev, param);
        if (err)
                return err;

        MLX5_SET(dcbx_param, param, version_admin, mode);
        if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
                MLX5_SET(dcbx_param, param, willing_admin, 1);

        return mlx5_set_port_dcbx_param(mdev, param);
}

static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
{
        struct mlx5e_dcbx *dcbx = &priv->dcbx;
        int err;

        if (!MLX5_CAP_GEN(priv->mdev, dcbx))
                return 0;

        if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
                return 0;

        err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
        if (err)
                return err;

        dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
        return 0;
}

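/* Reconstruct the IEEE ETS configuration from firmware state: read the
 * prio->tc mapping, each TC's group and BW allocation, and derive tc_tsa.
 * TCs that landed in the zero-BW ETS group are reported with 0% BW.
 */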
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
                                   struct ieee_ets *ets)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 tc_group[IEEE_8021QAZ_MAX_TCS];
        bool is_tc_group_6_exist = false;
        bool is_zero_bw_ets_tc = false;
        int err = 0;
        int i;

        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return -EOPNOTSUPP;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
                if (err)
                        return err;
        }

        ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
        for (i = 0; i < ets->ets_cap; i++) {
                err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
                if (err)
                        return err;

                err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
                if (err)
                        return err;

                if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
                    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
                        is_zero_bw_ets_tc = true;

                if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
                        is_tc_group_6_exist = true;
        }

        /* Report 0% BW for zero-BW ETS TCs, if such TCs exist */
        if (is_zero_bw_ets_tc) {
                for (i = 0; i < ets->ets_cap; i++)
                        if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
                                ets->tc_tx_bw[i] = 0;
        }

        /* Update tc_tsa based on the fw setting */
        for (i = 0; i < ets->ets_cap; i++) {
                if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
                        priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
                else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
                         !is_tc_group_6_exist)
                        priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
        }
        memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));

        return err;
}

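/* Map each TC's TSA type to a hardware TC group: vendor TCs go to the vendor
 * group, strict-priority TCs get their own groups above the ETS group(s), and
 * ETS TCs share the lowest group (or the one above it when zero-BW ETS TCs
 * need a group of their own).
 */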
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
        bool any_tc_mapped_to_ets = false;
        bool ets_zero_bw = false;
        int strict_group;
        int i;

        for (i = 0; i <= max_tc; i++) {
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
                        any_tc_mapped_to_ets = true;
                        if (!ets->tc_tx_bw[i])
                                ets_zero_bw = true;
                }
        }

        /* strict group has higher priority than ets group */
        strict_group = MLX5E_LOWEST_PRIO_GROUP;
        if (any_tc_mapped_to_ets)
                strict_group++;
        if (ets_zero_bw)
                strict_group++;

        for (i = 0; i <= max_tc; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_VENDOR:
                        tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
                        break;
                case IEEE_8021QAZ_TSA_STRICT:
                        tc_group[i] = strict_group++;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
                        if (ets->tc_tx_bw[i] && ets_zero_bw)
                                tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
                        break;
                }
        }
}

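/* Compute the per-TC BW allocation passed to firmware: vendor and strict TCs
 * get 100% of their group, ETS TCs keep their configured share, and ETS TCs
 * configured with 0% split 100% of the zero-BW group equally between them
 * (the remainder goes to the last such TC so the group still sums to 100%).
 */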
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
                                 u8 *tc_group, int max_tc)
{
        int bw_for_ets_zero_bw_tc = 0;
        int last_ets_zero_bw_tc = -1;
        int num_ets_zero_bw = 0;
        int i;

        for (i = 0; i <= max_tc; i++) {
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
                    !ets->tc_tx_bw[i]) {
                        num_ets_zero_bw++;
                        last_ets_zero_bw_tc = i;
                }
        }

        if (num_ets_zero_bw)
                bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;

        for (i = 0; i <= max_tc; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_VENDOR:
                        tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                        break;
                case IEEE_8021QAZ_TSA_STRICT:
                        tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        tc_tx_bw[i] = ets->tc_tx_bw[i] ?
                                      ets->tc_tx_bw[i] :
                                      bw_for_ets_zero_bw_tc;
                        break;
                }
        }

        /* Make sure the total bw for ets zero bw group is 100% */
        if (last_ets_zero_bw_tc != -1)
                tc_tx_bw[last_ets_zero_bw_tc] +=
                        MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}

/* If any ETS TC is configured with 0% BW:
 * - Assign ETS group #1 to all ETS TCs with non-zero BW; their sum must be 100%.
 * - Assign group #0 to all ETS TCs with 0% BW and split 100% BW equally
 *   between them.
 * - Report both group #0 and group #1 as ETS type.
 * - All the TCs in group #0 will be reported with 0% BW.
 */
static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
        u8 tc_group[IEEE_8021QAZ_MAX_TCS];
        int max_tc = mlx5_max_tc(mdev);
        int err, i;

        mlx5e_build_tc_group(ets, tc_group, max_tc);
        mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);

        err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
        if (err)
                return err;

        err = mlx5_set_port_tc_group(mdev, tc_group);
        if (err)
                return err;

        err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
        if (err)
                return err;

        memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
                           __func__, i, ets->prio_tc[i]);
                netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
                           __func__, i, tc_tx_bw[i], tc_group[i]);
        }

        return err;
}

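/* Sanity-check an ETS configuration before programming it: every priority
 * must map to a valid TC, and if any TC is of ETS type the BW shares must
 * sum to 100%. zero_sum_allowed only suppresses the error message for an
 * all-zero BW sum (used by the CEE path); the configuration is still rejected.
 */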
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
                                    struct ieee_ets *ets,
                                    bool zero_sum_allowed)
{
        bool have_ets_tc = false;
        int bw_sum = 0;
        int i;

        /* Validate Priority */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
                        netdev_err(netdev,
                                   "Failed to validate ETS: priority value greater than max(%d)\n",
                                   MLX5E_MAX_PRIORITY);
                        return -EINVAL;
                }
        }

        /* Validate Bandwidth Sum */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
                        have_ets_tc = true;
                        bw_sum += ets->tc_tx_bw[i];
                }
        }

        if (have_ets_tc && bw_sum != 100) {
                if (bw_sum || (!bw_sum && !zero_sum_allowed))
                        netdev_err(netdev,
                                   "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
        }
        return 0;
}

static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
                                   struct ieee_ets *ets)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return -EOPNOTSUPP;

        err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
        if (err)
                return err;

        err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
        if (err)
                return err;

        return 0;
}

static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
                                   struct ieee_pfc *pfc)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        int i;

        pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
                pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
        }

        if (MLX5_BUFFER_SUPPORTED(mdev))
                pfc->delay = priv->dcbx.cable_len;

        return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}

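/* Apply an IEEE PFC request: program the per-priority PFC bitmap (toggling
 * the port link when it changes), cache the cable length carried in
 * pfc->delay, and, when the port buffers are SW owned, recompute the manual
 * buffer configuration. The cable length is rolled back if that step fails.
 */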
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
                                   struct ieee_pfc *pfc)
{
        u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 old_cable_len = priv->dcbx.cable_len;
        struct ieee_pfc pfc_new;
        u32 changed = 0;
        u8 curr_pfc_en;
        int ret = 0;

        /* pfc_en */
        mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
        if (pfc->pfc_en != curr_pfc_en) {
                ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
                if (ret)
                        return ret;
                mlx5_toggle_port_link(mdev);
                changed |= MLX5E_PORT_BUFFER_PFC;
        }

        if (pfc->delay &&
            pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
            pfc->delay != priv->dcbx.cable_len) {
                priv->dcbx.cable_len = pfc->delay;
                changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
        }

        if (MLX5_BUFFER_SUPPORTED(mdev)) {
                pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
                ret = mlx5_query_port_buffer_ownership(mdev,
                                                       &buffer_ownership);
                if (ret)
                        netdev_err(dev,
                                   "%s, Failed to get buffer ownership: %d\n",
                                   __func__, ret);

                if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
                        ret = mlx5e_port_manual_buffer_config(priv, changed,
                                                              dev->mtu, &pfc_new,
                                                              NULL, NULL);

                if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
                        priv->dcbx.cable_len = old_cable_len;
        }

        if (!ret) {
                netdev_dbg(dev,
                           "%s: PFC per priority bit mask: 0x%x\n",
                           __func__, pfc->pfc_en);
        }
        return ret;
}

static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return priv->dcbx.cap;
}

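/* Change the DCBX mode. LLD_MANAGED is not supported. A zero mode means
 * "let the firmware control DCBX" (AUTO); otherwise DCB_CAP_DCBX_HOST must
 * be set and the device is switched to host-controlled mode. Returns nonzero
 * on failure, per the dcbnl convention.
 */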
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_dcbx *dcbx = &priv->dcbx;

        if (mode & DCB_CAP_DCBX_LLD_MANAGED)
                return 1;

        if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
                if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
                        return 0;

                /* set dcbx to fw controlled */
                if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
                        dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
                        dcbx->cap &= ~DCB_CAP_DCBX_HOST;
                        return 0;
                }

                return 1;
        }

        if (!(mode & DCB_CAP_DCBX_HOST))
                return 1;

        if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
                return 1;

        dcbx->cap = mode;

        return 0;
}

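/* Add (or update) a DSCP->priority APP entry. The first DSCP entry switches
 * the port trust state from PCP to DSCP; the firmware mapping is only
 * rewritten when it actually changes, and the old APP table entry is replaced
 * so that each DSCP value has at most one priority.
 */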
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct dcb_app temp;
        bool is_new;
        int err;

        if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
            !MLX5_DSCP_SUPPORTED(priv->mdev))
                return -EOPNOTSUPP;

        if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
            (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;

        /* Save the old entry info */
        temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
        temp.protocol = app->protocol;
        temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];

        /* Check if need to switch to dscp trust state */
        if (!priv->dcbx.dscp_app_cnt) {
                err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
                if (err)
                        return err;
        }

        /* Skip the fw command if new and old mapping are the same */
        if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
                err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
                if (err)
                        goto fw_err;
        }

        /* Delete the old entry if exists */
        is_new = false;
        err = dcb_ieee_delapp(dev, &temp);
        if (err)
                is_new = true;

        /* Add new entry and update counter */
        err = dcb_ieee_setapp(dev, app);
        if (err)
                return err;

        if (is_new)
                priv->dcbx.dscp_app_cnt++;

        return err;

fw_err:
        mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
        return err;
}

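/* Remove a DSCP->priority APP entry and reset its firmware mapping to
 * priority 0. When the last DSCP entry is removed, the port trust state
 * falls back to PCP.
 */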
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int err;

        if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
            !MLX5_DSCP_SUPPORTED(priv->mdev))
                return -EOPNOTSUPP;

        if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
            (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;

        /* Skip if no dscp app entry */
        if (!priv->dcbx.dscp_app_cnt)
                return -ENOENT;

        /* Check if the entry matches fw setting */
        if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
                return -ENOENT;

        /* Delete the app entry */
        err = dcb_ieee_delapp(dev, app);
        if (err)
                return err;

        /* Reset the priority mapping back to zero */
        err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
        if (err)
                goto fw_err;

        priv->dcbx.dscp_app_cnt--;

        /* Check if need to switch to pcp trust state */
        if (!priv->dcbx.dscp_app_cnt)
                err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);

        return err;

fw_err:
        mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
        return err;
}

static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
                                       struct ieee_maxrate *maxrate)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
        u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
        int err;
        int i;

        err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
        if (err)
                return err;

        memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));

        for (i = 0; i <= mlx5_max_tc(mdev); i++) {
                switch (max_bw_unit[i]) {
                case MLX5_100_MBPS_UNIT:
                        maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
                        break;
                case MLX5_GBPS_UNIT:
                        maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
                        break;
                case MLX5_BW_NO_LIMIT:
                        break;
                default:
                        WARN(true, "non-supported BW unit");
                        break;
                }
        }

        return 0;
}

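/* Translate the per-TC maxrate (in Kbps) into the hardware rate-limit
 * encoding: an 8-bit value in either 100 Mbps or 1 Gbps units. Rates up to
 * 255 * 100 Mbps use the finer unit, larger rates up to 255 Gbps use the
 * coarser one, and anything beyond that is rejected.
 */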
static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
                                       struct ieee_maxrate *maxrate)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
        u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
        __u64 upper_limit_mbps;
        __u64 upper_limit_gbps;
        int i;
        struct {
                int scale;
                const char *units_str;
        } units[] = {
                [MLX5_100_MBPS_UNIT] = {
                        .scale = 100,
                        .units_str = "Mbps",
                },
                [MLX5_GBPS_UNIT] = {
                        .scale = 1,
                        .units_str = "Gbps",
                },
        };

        memset(max_bw_value, 0, sizeof(max_bw_value));
        memset(max_bw_unit, 0, sizeof(max_bw_unit));
        upper_limit_mbps = 255 * MLX5E_100MB;
        upper_limit_gbps = 255 * MLX5E_1GB;

        for (i = 0; i <= mlx5_max_tc(mdev); i++) {
                if (!maxrate->tc_maxrate[i]) {
                        max_bw_unit[i] = MLX5_BW_NO_LIMIT;
                        continue;
                }
                if (maxrate->tc_maxrate[i] <= upper_limit_mbps) {
                        max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
                                                  MLX5E_100MB);
                        max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
                        max_bw_unit[i] = MLX5_100_MBPS_UNIT;
                } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
                        max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
                                                  MLX5E_1GB);
                        max_bw_unit[i] = MLX5_GBPS_UNIT;
                } else {
                        netdev_err(netdev,
                                   "tc_%d maxrate %llu Kbps exceeds limit %llu\n",
                                   i, maxrate->tc_maxrate[i],
                                   upper_limit_gbps);
                        return -EINVAL;
                }
        }

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
                           max_bw_value[i] * units[max_bw_unit[i]].scale,
                           units[max_bw_unit[i]].units_str);
        }

        return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}

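/* CEE "set all" hook: convert the configuration cached by the CEE set_*
 * callbacks (priority groups, group BW, PFC bits) into an IEEE ETS/PFC
 * configuration and program it. Returns MLX5_DCB_CHG_RESET on success and
 * MLX5_DCB_NO_CHG on any failure.
 */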
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct ieee_ets ets;
        struct ieee_pfc pfc;
        int err = -EOPNOTSUPP;
        int i;

        if (!MLX5_CAP_GEN(mdev, ets))
                goto out;

        memset(&ets, 0, sizeof(ets));
        memset(&pfc, 0, sizeof(pfc));

        ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
        for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
                ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
                ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
                ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
                ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
                netdev_dbg(netdev,
                           "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
                           __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
                           ets.prio_tc[i]);
        }

        err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
        if (err)
                goto out;

        err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
        if (err) {
                netdev_err(netdev,
                           "%s, Failed to set ETS: %d\n", __func__, err);
                goto out;
        }

        /* Set PFC */
        pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
        if (!cee_cfg->pfc_enable)
                pfc.pfc_en = 0;
        else
                for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
                        pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;

        err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
        if (err) {
                netdev_err(netdev,
                           "%s, Failed to set PFC: %d\n", __func__, err);
                goto out;
        }
out:
        return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}

static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
{
        return MLX5E_CEE_STATE_UP;
}

static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
                                      u8 *perm_addr)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        if (!perm_addr)
                return;

        memset(perm_addr, 0xff, MAX_ADDR_LEN);

        mlx5_query_mac_address(priv->mdev, perm_addr);
}

static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
                                     int priority, u8 prio_type,
                                     u8 pgid, u8 bw_pct, u8 up_map)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

        if (priority >= CEE_DCBX_MAX_PRIO) {
                netdev_err(netdev,
                           "%s, priority is out of range\n", __func__);
                return;
        }

        if (pgid >= CEE_DCBX_MAX_PGS) {
                netdev_err(netdev,
                           "%s, priority group is out of range\n", __func__);
                return;
        }

        cee_cfg->prio_to_pg_map[priority] = pgid;
}

static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
                                      int pgid, u8 bw_pct)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

        if (pgid >= CEE_DCBX_MAX_PGS) {
                netdev_err(netdev,
                           "%s, priority group is out of range\n", __func__);
                return;
        }

        cee_cfg->pg_bw_pct[pgid] = bw_pct;
}

static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
                                     int priority, u8 *prio_type,
                                     u8 *pgid, u8 *bw_pct, u8 *up_map)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(priv->mdev, ets)) {
                netdev_err(netdev, "%s, ets is not supported\n", __func__);
                return;
        }

        if (priority >= CEE_DCBX_MAX_PRIO) {
                netdev_err(netdev,
                           "%s, priority is out of range\n", __func__);
                return;
        }

        *prio_type = 0;
        *bw_pct = 0;
        *up_map = 0;

        if (mlx5_query_port_prio_tc(mdev, priority, pgid))
                *pgid = 0;
}

static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
                                      int pgid, u8 *bw_pct)
{
        struct ieee_ets ets;

        if (pgid >= CEE_DCBX_MAX_PGS) {
                netdev_err(netdev,
                           "%s, priority group is out of range\n", __func__);
                return;
        }

        mlx5e_dcbnl_ieee_getets(netdev, &ets);
        *bw_pct = ets.tc_tx_bw[pgid];
}

static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
                                  int priority, u8 setting)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

        if (priority >= CEE_DCBX_MAX_PRIO) {
                netdev_err(netdev,
                           "%s, priority is out of range\n", __func__);
                return;
        }

        if (setting > 1)
                return;

        cee_cfg->pfc_setting[priority] = setting;
}

static int
mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
                             int priority, u8 *setting)
{
        struct ieee_pfc pfc;
        int err;

        err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);

        if (err)
                *setting = 0;
        else
                *setting = (pfc.pfc_en >> priority) & 0x01;

        return err;
}

static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
                                  int priority, u8 *setting)
{
        if (priority >= CEE_DCBX_MAX_PRIO) {
                netdev_err(netdev,
                           "%s, priority is out of range\n", __func__);
                return;
        }

        if (!setting)
                return;

        mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
}

static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
                             int capid, u8 *cap)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 rval = 0;

        switch (capid) {
        case DCB_CAP_ATTR_PG:
                *cap = true;
                break;
        case DCB_CAP_ATTR_PFC:
                *cap = true;
                break;
        case DCB_CAP_ATTR_UP2TC:
                *cap = false;
                break;
        case DCB_CAP_ATTR_PG_TCS:
                *cap = 1 << mlx5_max_tc(mdev);
                break;
        case DCB_CAP_ATTR_PFC_TCS:
                *cap = 1 << mlx5_max_tc(mdev);
                break;
        case DCB_CAP_ATTR_GSP:
                *cap = false;
                break;
        case DCB_CAP_ATTR_BCN:
                *cap = false;
                break;
        case DCB_CAP_ATTR_DCBX:
                *cap = priv->dcbx.cap |
                       DCB_CAP_DCBX_VER_CEE |
                       DCB_CAP_DCBX_VER_IEEE;
                break;
        default:
                *cap = 0;
                rval = 1;
                break;
        }

        return rval;
}

static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
                                 int tcs_id, u8 *num)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        switch (tcs_id) {
        case DCB_NUMTCS_ATTR_PG:
        case DCB_NUMTCS_ATTR_PFC:
                *num = mlx5_max_tc(mdev) + 1;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
{
        struct ieee_pfc pfc;

        if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
                return MLX5E_CEE_STATE_DOWN;

        return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
}

static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

        if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
                return;

        cee_cfg->pfc_enable = state;
}

static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
                                 struct dcbnl_buffer *dcb_buffer)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_port_buffer port_buffer;
        u8 buffer[MLX5E_MAX_PRIORITY];
        int i, err;

        if (!MLX5_BUFFER_SUPPORTED(mdev))
                return -EOPNOTSUPP;

        err = mlx5e_port_query_priority2buffer(mdev, buffer);
        if (err)
                return err;

        for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
                dcb_buffer->prio2buffer[i] = buffer[i];

        err = mlx5e_port_query_buffer(priv, &port_buffer);
        if (err)
                return err;

        for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
                dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
        dcb_buffer->total_size = port_buffer.port_buffer_size -
                                 port_buffer.internal_buffers_size;

        return 0;
}

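/* Apply a dcbnl buffer configuration: compare the requested prio->buffer
 * mapping and per-buffer sizes against the current firmware state and, only
 * if something actually changed, reprogram the port buffers manually.
 */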
static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
                                 struct dcbnl_buffer *dcb_buffer)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_port_buffer port_buffer;
        u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
        u32 *buffer_size = NULL;
        u8 *prio2buffer = NULL;
        u32 changed = 0;
        int i, err;

        if (!MLX5_BUFFER_SUPPORTED(mdev))
                return -EOPNOTSUPP;

        for (i = 0; i < DCBX_MAX_BUFFERS; i++)
                mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);

        for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
                mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);

        err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
        if (err)
                return err;

        for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
                if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
                        changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
                        prio2buffer = dcb_buffer->prio2buffer;
                        break;
                }
        }

        err = mlx5e_port_query_buffer(priv, &port_buffer);
        if (err)
                return err;

        for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
                        changed |= MLX5E_PORT_BUFFER_SIZE;
                        buffer_size = dcb_buffer->buffer_size;
                        break;
                }
        }

        if (!changed)
                return 0;

        err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
                                              buffer_size, prio2buffer);
        return err;
}

static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
        .ieee_getets = mlx5e_dcbnl_ieee_getets,
        .ieee_setets = mlx5e_dcbnl_ieee_setets,
        .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
        .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
        .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
        .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
        .ieee_setapp = mlx5e_dcbnl_ieee_setapp,
        .ieee_delapp = mlx5e_dcbnl_ieee_delapp,
        .getdcbx = mlx5e_dcbnl_getdcbx,
        .setdcbx = mlx5e_dcbnl_setdcbx,
        .dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
        .dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,

        /* CEE interfaces */
        .setall = mlx5e_dcbnl_setall,
        .getstate = mlx5e_dcbnl_getstate,
        .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,

        .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
        .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
        .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
        .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,

        .setpfccfg = mlx5e_dcbnl_setpfccfg,
        .getpfccfg = mlx5e_dcbnl_getpfccfg,
        .getcap = mlx5e_dcbnl_getcap,
        .getnumtcs = mlx5e_dcbnl_getnumtcs,
        .getpfcstate = mlx5e_dcbnl_getpfcstate,
        .setpfcstate = mlx5e_dcbnl_setpfcstate,
};

void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
                netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
}

static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
                                        enum mlx5_dcbx_oper_mode *mode)
{
        u32 out[MLX5_ST_SZ_DW(dcbx_param)];

        *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;

        if (!mlx5_query_port_dcbx_param(priv->mdev, out))
                *mode = MLX5_GET(dcbx_param, out, version_oper);

        /* From driver's point of view, we only care if the mode
         * is host (HOST) or non-host (AUTO)
         */
        if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
                *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
}

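/* Program the default ETS configuration at init time: every TC is vendor
 * type with 100% BW and an identity prio->tc mapping, except that priorities
 * 0 and 1 swap traffic classes when more than one TC is available.
 */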
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
        struct ieee_ets ets;
        int err;
        int i;

        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return;

        memset(&ets, 0, sizeof(ets));
        ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
        for (i = 0; i < ets.ets_cap; i++) {
                ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
                ets.prio_tc[i] = i;
        }

        if (ets.ets_cap > 1) {
                /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
                ets.prio_tc[0] = 1;
                ets.prio_tc[1] = 0;
        }

        err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
        if (err)
                netdev_err(priv->netdev,
                           "%s, Failed to init ETS: %d\n", __func__, err);
}

enum {
        INIT,
        DELETE,
};

static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
{
        struct dcb_app temp;
        int i;

        if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
                return;

        if (!MLX5_DSCP_SUPPORTED(priv->mdev))
                return;

        /* No SEL_DSCP entry in non DSCP state */
        if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
                return;

        temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
        for (i = 0; i < MLX5E_MAX_DSCP; i++) {
                temp.protocol = i;
                temp.priority = priv->dcbx_dp.dscp2prio[i];
                if (action == INIT)
                        dcb_ieee_setapp(priv->netdev, &temp);
                else
                        dcb_ieee_delapp(priv->netdev, &temp);
        }

        priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
}

void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
        mlx5e_dcbnl_dscp_app(priv, INIT);
}

void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
        mlx5e_dcbnl_dscp_app(priv, DELETE);
}

static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
                                                        struct mlx5e_params *params,
                                                        u8 trust_state)
{
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
        if (trust_state == MLX5_QPTS_TRUST_DSCP &&
            params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
                params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}

static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
{
        u8 *trust_state = context;
        int err;

        err = mlx5_set_trust_state(priv->mdev, *trust_state);
        if (err)
                return err;
        WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);

        return 0;
}

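/* Switch the port trust state (PCP vs DSCP). Recomputes the required TX
 * min-inline mode for the new state and switches channel parameters under
 * the state lock; the channels are only reset when the inline mode actually
 * changes.
 */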
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
        struct mlx5e_params new_params;
        bool reset = true;
        int err;

        netdev_lock(priv->netdev);
        mutex_lock(&priv->state_lock);

        new_params = priv->channels.params;
        mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
                                                   trust_state);

        /* Skip if tx_min_inline is the same */
        if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
                reset = false;

        err = mlx5e_safe_switch_params(priv, &new_params,
                                       mlx5e_update_trust_state_hw,
                                       &trust_state, reset);

        mutex_unlock(&priv->state_lock);
        netdev_unlock(priv->netdev);

        return err;
}

static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
{
        int err;

        err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
        if (err)
                return err;

        priv->dcbx_dp.dscp2prio[dscp] = prio;
        return err;
}

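/* Sync the driver's trust state and DSCP->priority table with the current
 * firmware configuration at init time. If firmware reports PCP trust while
 * the driver still holds DSCP APP entries, those stale entries are removed.
 */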
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 trust_state;
        int err;

        if (!MLX5_DSCP_SUPPORTED(mdev)) {
                WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
                return 0;
        }

        err = mlx5_query_trust_state(priv->mdev, &trust_state);
        if (err)
                return err;
        WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);

        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
                /*
                 * Align the driver state with the register state.
                 * Temporary state change is required to enable the app list reset.
                 */
                priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
                mlx5e_dcbnl_delete_app(priv);
                priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
        }

        mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
                                                   priv->dcbx_dp.trust_state);

        err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
        if (err)
                return err;

        return 0;
}

#define MLX5E_BUFFER_CELL_SHIFT 7

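/* Query the port buffer cell size from the SBCAM register. Falls back to a
 * 128-byte cell (1 << MLX5E_BUFFER_CELL_SHIFT) when the register is not
 * supported or the query fails.
 */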
static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
        u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return (1 << MLX5E_BUFFER_CELL_SHIFT);

        if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
                                 MLX5_REG_SBCAM, 0, 0))
                return (1 << MLX5E_BUFFER_CELL_SHIFT);

        return MLX5_GET(sbcam_reg, out, cap_cell_size);
}

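/* One-time DCB initialization: sync the trust state, detect the DCBX
 * operating mode, advertise CEE/IEEE (and HOST when the device is host
 * controlled), cache the buffer cell size and default cable length, and
 * program the default ETS configuration.
 */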
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
        struct mlx5e_dcbx *dcbx = &priv->dcbx;

        mlx5e_trust_initialize(priv);

        if (!MLX5_CAP_GEN(priv->mdev, qos))
                return;

        if (MLX5_CAP_GEN(priv->mdev, dcbx))
                mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);

        priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
                         DCB_CAP_DCBX_VER_IEEE;
        if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
                priv->dcbx.cap |= DCB_CAP_DCBX_HOST;

        priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
        priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;

        mlx5e_ets_init(priv);
}