/*
 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 * 32 */ 33 34 #include <linux/dcbnl.h> 35 #include <linux/math64.h> 36 37 #include "mlx4_en.h" 38 #include "fw_qos.h" 39 40 enum { 41 MLX4_CEE_STATE_DOWN = 0, 42 MLX4_CEE_STATE_UP = 1, 43 }; 44 45 /* Definitions for QCN 46 */ 47 48 struct mlx4_congestion_control_mb_prio_802_1_qau_params { 49 __be32 modify_enable_high; 50 __be32 modify_enable_low; 51 __be32 reserved1; 52 __be32 extended_enable; 53 __be32 rppp_max_rps; 54 __be32 rpg_time_reset; 55 __be32 rpg_byte_reset; 56 __be32 rpg_threshold; 57 __be32 rpg_max_rate; 58 __be32 rpg_ai_rate; 59 __be32 rpg_hai_rate; 60 __be32 rpg_gd; 61 __be32 rpg_min_dec_fac; 62 __be32 rpg_min_rate; 63 __be32 max_time_rise; 64 __be32 max_byte_rise; 65 __be32 max_qdelta; 66 __be32 min_qoffset; 67 __be32 gd_coefficient; 68 __be32 reserved2[5]; 69 __be32 cp_sample_base; 70 __be32 reserved3[39]; 71 }; 72 73 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { 74 __be64 rppp_rp_centiseconds; 75 __be32 reserved1; 76 __be32 ignored_cnm; 77 __be32 rppp_created_rps; 78 __be32 estimated_total_rate; 79 __be32 max_active_rate_limiter_index; 80 __be32 dropped_cnms_busy_fw; 81 __be32 reserved2; 82 __be32 cnms_handled_successfully; 83 __be32 min_total_limiters_rate; 84 __be32 max_total_limiters_rate; 85 __be32 reserved3[4]; 86 }; 87 88 static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap) 89 { 90 struct mlx4_en_priv *priv = netdev_priv(dev); 91 92 switch (capid) { 93 case DCB_CAP_ATTR_PFC: 94 *cap = true; 95 break; 96 case DCB_CAP_ATTR_DCBX: 97 *cap = priv->dcbx_cap; 98 break; 99 case DCB_CAP_ATTR_PFC_TCS: 100 *cap = 1 << mlx4_max_tc(priv->mdev->dev); 101 break; 102 default: 103 *cap = false; 104 break; 105 } 106 107 return 0; 108 } 109 110 static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev) 111 { 112 struct mlx4_en_priv *priv = netdev_priv(netdev); 113 114 return priv->cee_config.pfc_state; 115 } 116 117 static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state) 118 { 119 struct 
mlx4_en_priv *priv = netdev_priv(netdev); 120 121 priv->cee_config.pfc_state = state; 122 } 123 124 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, 125 u8 *setting) 126 { 127 struct mlx4_en_priv *priv = netdev_priv(netdev); 128 129 *setting = priv->cee_config.dcb_pfc[priority]; 130 } 131 132 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, 133 u8 setting) 134 { 135 struct mlx4_en_priv *priv = netdev_priv(netdev); 136 137 priv->cee_config.dcb_pfc[priority] = setting; 138 priv->cee_config.pfc_state = true; 139 } 140 141 static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) 142 { 143 struct mlx4_en_priv *priv = netdev_priv(netdev); 144 145 if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) 146 return -EINVAL; 147 148 if (tcid == DCB_NUMTCS_ATTR_PFC) 149 *num = mlx4_max_tc(priv->mdev->dev); 150 else 151 *num = 0; 152 153 return 0; 154 } 155 156 static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) 157 { 158 struct mlx4_en_priv *priv = netdev_priv(netdev); 159 struct mlx4_en_dev *mdev = priv->mdev; 160 161 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 162 return 1; 163 164 if (priv->cee_config.pfc_state) { 165 int tc; 166 167 priv->prof->rx_pause = 0; 168 priv->prof->tx_pause = 0; 169 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { 170 u8 tc_mask = 1 << tc; 171 172 switch (priv->cee_config.dcb_pfc[tc]) { 173 case pfc_disabled: 174 priv->prof->tx_ppp &= ~tc_mask; 175 priv->prof->rx_ppp &= ~tc_mask; 176 break; 177 case pfc_enabled_full: 178 priv->prof->tx_ppp |= tc_mask; 179 priv->prof->rx_ppp |= tc_mask; 180 break; 181 case pfc_enabled_tx: 182 priv->prof->tx_ppp |= tc_mask; 183 priv->prof->rx_ppp &= ~tc_mask; 184 break; 185 case pfc_enabled_rx: 186 priv->prof->tx_ppp &= ~tc_mask; 187 priv->prof->rx_ppp |= tc_mask; 188 break; 189 default: 190 break; 191 } 192 } 193 en_dbg(DRV, priv, "Set pfc on\n"); 194 } else { 195 priv->prof->rx_pause = 1; 196 priv->prof->tx_pause = 1; 197 en_dbg(DRV, 
priv, "Set pfc off\n"); 198 } 199 200 if (mlx4_SET_PORT_general(mdev->dev, priv->port, 201 priv->rx_skb_size + ETH_FCS_LEN, 202 priv->prof->tx_pause, 203 priv->prof->tx_ppp, 204 priv->prof->rx_pause, 205 priv->prof->rx_ppp)) { 206 en_err(priv, "Failed setting pause params\n"); 207 return 1; 208 } 209 210 return 0; 211 } 212 213 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev) 214 { 215 struct mlx4_en_priv *priv = netdev_priv(dev); 216 217 if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED) 218 return MLX4_CEE_STATE_UP; 219 220 return MLX4_CEE_STATE_DOWN; 221 } 222 223 static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state) 224 { 225 struct mlx4_en_priv *priv = netdev_priv(dev); 226 int num_tcs = 0; 227 228 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 229 return 1; 230 231 if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) 232 return 0; 233 234 if (state) { 235 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 236 num_tcs = IEEE_8021QAZ_MAX_TCS; 237 } else { 238 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; 239 } 240 241 if (mlx4_en_setup_tc(dev, num_tcs)) 242 return 1; 243 244 return 0; 245 } 246 247 /* On success returns a non-zero 802.1p user priority bitmap 248 * otherwise returns 0 as the invalid user priority bitmap to 249 * indicate an error. 
250 */ 251 static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) 252 { 253 struct mlx4_en_priv *priv = netdev_priv(netdev); 254 struct dcb_app app = { 255 .selector = idtype, 256 .protocol = id, 257 }; 258 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 259 return 0; 260 261 return dcb_getapp(netdev, &app); 262 } 263 264 static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype, 265 u16 id, u8 up) 266 { 267 struct mlx4_en_priv *priv = netdev_priv(netdev); 268 struct dcb_app app; 269 270 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 271 return -EINVAL; 272 273 memset(&app, 0, sizeof(struct dcb_app)); 274 app.selector = idtype; 275 app.protocol = id; 276 app.priority = up; 277 278 return dcb_setapp(netdev, &app); 279 } 280 281 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, 282 struct ieee_ets *ets) 283 { 284 struct mlx4_en_priv *priv = netdev_priv(dev); 285 struct ieee_ets *my_ets = &priv->ets; 286 287 if (!my_ets) 288 return -EINVAL; 289 290 ets->ets_cap = IEEE_8021QAZ_MAX_TCS; 291 ets->cbs = my_ets->cbs; 292 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); 293 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); 294 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); 295 296 return 0; 297 } 298 299 static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) 300 { 301 int i; 302 int total_ets_bw = 0; 303 int has_ets_tc = 0; 304 305 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 306 if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) { 307 en_err(priv, "Bad priority in UP <=> TC mapping. 
TC: %d, UP: %d\n", 308 i, ets->prio_tc[i]); 309 return -EINVAL; 310 } 311 312 switch (ets->tc_tsa[i]) { 313 case IEEE_8021QAZ_TSA_STRICT: 314 break; 315 case IEEE_8021QAZ_TSA_ETS: 316 has_ets_tc = 1; 317 total_ets_bw += ets->tc_tx_bw[i]; 318 break; 319 default: 320 en_err(priv, "TC[%d]: Not supported TSA: %d\n", 321 i, ets->tc_tsa[i]); 322 return -EOPNOTSUPP; 323 } 324 } 325 326 if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) { 327 en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n", 328 total_ets_bw); 329 return -EINVAL; 330 } 331 332 return 0; 333 } 334 335 static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, 336 struct ieee_ets *ets, u16 *ratelimit) 337 { 338 struct mlx4_en_dev *mdev = priv->mdev; 339 int num_strict = 0; 340 int i; 341 __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 }; 342 __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 }; 343 344 ets = ets ?: &priv->ets; 345 ratelimit = ratelimit ?: priv->maxrate; 346 347 /* higher TC means higher priority => lower pg */ 348 for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { 349 switch (ets->tc_tsa[i]) { 350 case IEEE_8021QAZ_TSA_STRICT: 351 pg[i] = num_strict++; 352 tc_tx_bw[i] = MLX4_EN_BW_MAX; 353 break; 354 case IEEE_8021QAZ_TSA_ETS: 355 pg[i] = MLX4_EN_TC_ETS; 356 tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN; 357 break; 358 } 359 } 360 361 return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg, 362 ratelimit); 363 } 364 365 static int 366 mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) 367 { 368 struct mlx4_en_priv *priv = netdev_priv(dev); 369 struct mlx4_en_dev *mdev = priv->mdev; 370 int err; 371 372 err = mlx4_en_ets_validate(priv, ets); 373 if (err) 374 return err; 375 376 err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc); 377 if (err) 378 return err; 379 380 err = mlx4_en_config_port_scheduler(priv, ets, NULL); 381 if (err) 382 return err; 383 384 memcpy(&priv->ets, ets, sizeof(priv->ets)); 385 386 return 0; 387 } 388 389 static int 
mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev, 390 struct ieee_pfc *pfc) 391 { 392 struct mlx4_en_priv *priv = netdev_priv(dev); 393 394 pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; 395 pfc->pfc_en = priv->prof->tx_ppp; 396 397 return 0; 398 } 399 400 static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, 401 struct ieee_pfc *pfc) 402 { 403 struct mlx4_en_priv *priv = netdev_priv(dev); 404 struct mlx4_en_port_profile *prof = priv->prof; 405 struct mlx4_en_dev *mdev = priv->mdev; 406 int err; 407 408 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", 409 pfc->pfc_cap, 410 pfc->pfc_en, 411 pfc->mbc, 412 pfc->delay); 413 414 prof->rx_pause = !pfc->pfc_en; 415 prof->tx_pause = !pfc->pfc_en; 416 prof->rx_ppp = pfc->pfc_en; 417 prof->tx_ppp = pfc->pfc_en; 418 419 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 420 priv->rx_skb_size + ETH_FCS_LEN, 421 prof->tx_pause, 422 prof->tx_ppp, 423 prof->rx_pause, 424 prof->rx_ppp); 425 if (err) 426 en_err(priv, "Failed setting pause params\n"); 427 else 428 mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, 429 prof->rx_ppp, prof->rx_pause, 430 prof->tx_ppp, prof->tx_pause); 431 432 return err; 433 } 434 435 static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) 436 { 437 struct mlx4_en_priv *priv = netdev_priv(dev); 438 439 return priv->dcbx_cap; 440 } 441 442 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) 443 { 444 struct mlx4_en_priv *priv = netdev_priv(dev); 445 struct ieee_ets ets = {0}; 446 struct ieee_pfc pfc = {0}; 447 448 if (mode == priv->dcbx_cap) 449 return 0; 450 451 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || 452 ((mode & DCB_CAP_DCBX_VER_IEEE) && 453 (mode & DCB_CAP_DCBX_VER_CEE)) || 454 !(mode & DCB_CAP_DCBX_HOST)) 455 goto err; 456 457 priv->dcbx_cap = mode; 458 459 ets.ets_cap = IEEE_8021QAZ_MAX_TCS; 460 pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS; 461 462 if (mode & DCB_CAP_DCBX_VER_IEEE) { 463 if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) 464 goto err; 465 if 
(mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) 466 goto err; 467 } else if (mode & DCB_CAP_DCBX_VER_CEE) { 468 if (mlx4_en_dcbnl_set_all(dev)) 469 goto err; 470 } else { 471 if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) 472 goto err; 473 if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) 474 goto err; 475 if (mlx4_en_setup_tc(dev, 0)) 476 goto err; 477 } 478 479 return 0; 480 err: 481 return 1; 482 } 483 484 #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ 485 static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev, 486 struct ieee_maxrate *maxrate) 487 { 488 struct mlx4_en_priv *priv = netdev_priv(dev); 489 int i; 490 491 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 492 maxrate->tc_maxrate[i] = 493 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB; 494 495 return 0; 496 } 497 498 static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, 499 struct ieee_maxrate *maxrate) 500 { 501 struct mlx4_en_priv *priv = netdev_priv(dev); 502 u16 tmp[IEEE_8021QAZ_MAX_TCS]; 503 int i, err; 504 505 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 506 /* Convert from Kbps into HW units, rounding result up. 507 * Setting to 0, means unlimited BW. 
508 */ 509 tmp[i] = div_u64(maxrate->tc_maxrate[i] + 510 MLX4_RATELIMIT_UNITS_IN_KB - 1, 511 MLX4_RATELIMIT_UNITS_IN_KB); 512 } 513 514 err = mlx4_en_config_port_scheduler(priv, NULL, tmp); 515 if (err) 516 return err; 517 518 memcpy(priv->maxrate, tmp, sizeof(priv->maxrate)); 519 520 return 0; 521 } 522 523 #define RPG_ENABLE_BIT 31 524 #define CN_TAG_BIT 30 525 526 static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev, 527 struct ieee_qcn *qcn) 528 { 529 struct mlx4_en_priv *priv = netdev_priv(dev); 530 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; 531 struct mlx4_cmd_mailbox *mailbox_out = NULL; 532 u64 mailbox_in_dma = 0; 533 u32 inmod = 0; 534 int i, err; 535 536 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) 537 return -EOPNOTSUPP; 538 539 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); 540 if (IS_ERR(mailbox_out)) 541 return -ENOMEM; 542 hw_qcn = 543 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *) 544 mailbox_out->buf; 545 546 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 547 inmod = priv->port | ((1 << i) << 8) | 548 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); 549 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, 550 mailbox_out->dma, 551 inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS, 552 MLX4_CMD_CONGESTION_CTRL_OPCODE, 553 MLX4_CMD_TIME_CLASS_C, 554 MLX4_CMD_NATIVE); 555 if (err) { 556 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); 557 return err; 558 } 559 560 qcn->rpg_enable[i] = 561 be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT; 562 qcn->rppp_max_rps[i] = 563 be32_to_cpu(hw_qcn->rppp_max_rps); 564 qcn->rpg_time_reset[i] = 565 be32_to_cpu(hw_qcn->rpg_time_reset); 566 qcn->rpg_byte_reset[i] = 567 be32_to_cpu(hw_qcn->rpg_byte_reset); 568 qcn->rpg_threshold[i] = 569 be32_to_cpu(hw_qcn->rpg_threshold); 570 qcn->rpg_max_rate[i] = 571 be32_to_cpu(hw_qcn->rpg_max_rate); 572 qcn->rpg_ai_rate[i] = 573 be32_to_cpu(hw_qcn->rpg_ai_rate); 574 qcn->rpg_hai_rate[i] = 575 
be32_to_cpu(hw_qcn->rpg_hai_rate); 576 qcn->rpg_gd[i] = 577 be32_to_cpu(hw_qcn->rpg_gd); 578 qcn->rpg_min_dec_fac[i] = 579 be32_to_cpu(hw_qcn->rpg_min_dec_fac); 580 qcn->rpg_min_rate[i] = 581 be32_to_cpu(hw_qcn->rpg_min_rate); 582 qcn->cndd_state_machine[i] = 583 priv->cndd_state[i]; 584 } 585 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); 586 return 0; 587 } 588 589 static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev, 590 struct ieee_qcn *qcn) 591 { 592 struct mlx4_en_priv *priv = netdev_priv(dev); 593 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; 594 struct mlx4_cmd_mailbox *mailbox_in = NULL; 595 u64 mailbox_in_dma = 0; 596 u32 inmod = 0; 597 int i, err; 598 #define MODIFY_ENABLE_HIGH_MASK 0xc0000000 599 #define MODIFY_ENABLE_LOW_MASK 0xffc00000 600 601 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) 602 return -EOPNOTSUPP; 603 604 mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev); 605 if (IS_ERR(mailbox_in)) 606 return -ENOMEM; 607 608 mailbox_in_dma = mailbox_in->dma; 609 hw_qcn = 610 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf; 611 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 612 inmod = priv->port | ((1 << i) << 8) | 613 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); 614 615 /* Before updating QCN parameter, 616 * need to set it's modify enable bit to 1 617 */ 618 619 hw_qcn->modify_enable_high = cpu_to_be32( 620 MODIFY_ENABLE_HIGH_MASK); 621 hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK); 622 623 hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT); 624 hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]); 625 hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]); 626 hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]); 627 hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]); 628 hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]); 629 hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]); 630 
hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]); 631 hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]); 632 hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]); 633 hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]); 634 priv->cndd_state[i] = qcn->cndd_state_machine[i]; 635 if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY) 636 hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT); 637 638 err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod, 639 MLX4_CONGESTION_CONTROL_SET_PARAMS, 640 MLX4_CMD_CONGESTION_CTRL_OPCODE, 641 MLX4_CMD_TIME_CLASS_C, 642 MLX4_CMD_NATIVE); 643 if (err) { 644 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); 645 return err; 646 } 647 } 648 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); 649 return 0; 650 } 651 652 static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, 653 struct ieee_qcn_stats *qcn_stats) 654 { 655 struct mlx4_en_priv *priv = netdev_priv(dev); 656 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats; 657 struct mlx4_cmd_mailbox *mailbox_out = NULL; 658 u64 mailbox_in_dma = 0; 659 u32 inmod = 0; 660 int i, err; 661 662 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) 663 return -EOPNOTSUPP; 664 665 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); 666 if (IS_ERR(mailbox_out)) 667 return -ENOMEM; 668 669 hw_qcn_stats = 670 (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *) 671 mailbox_out->buf; 672 673 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 674 inmod = priv->port | ((1 << i) << 8) | 675 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); 676 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, 677 mailbox_out->dma, inmod, 678 MLX4_CONGESTION_CONTROL_GET_STATISTICS, 679 MLX4_CMD_CONGESTION_CTRL_OPCODE, 680 MLX4_CMD_TIME_CLASS_C, 681 MLX4_CMD_NATIVE); 682 if (err) { 683 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); 684 return err; 685 } 686 qcn_stats->rppp_rp_centiseconds[i] = 687 
be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds); 688 qcn_stats->rppp_created_rps[i] = 689 be32_to_cpu(hw_qcn_stats->rppp_created_rps); 690 } 691 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); 692 return 0; 693 } 694 695 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { 696 .ieee_getets = mlx4_en_dcbnl_ieee_getets, 697 .ieee_setets = mlx4_en_dcbnl_ieee_setets, 698 .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, 699 .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, 700 .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, 701 .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, 702 .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, 703 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, 704 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, 705 706 .getstate = mlx4_en_dcbnl_get_state, 707 .setstate = mlx4_en_dcbnl_set_state, 708 .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, 709 .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, 710 .setall = mlx4_en_dcbnl_set_all, 711 .getcap = mlx4_en_dcbnl_getcap, 712 .getnumtcs = mlx4_en_dcbnl_getnumtcs, 713 .getpfcstate = mlx4_en_dcbnl_getpfcstate, 714 .setpfcstate = mlx4_en_dcbnl_setpfcstate, 715 .getapp = mlx4_en_dcbnl_getapp, 716 .setapp = mlx4_en_dcbnl_setapp, 717 718 .getdcbx = mlx4_en_dcbnl_getdcbx, 719 .setdcbx = mlx4_en_dcbnl_setdcbx, 720 }; 721 722 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { 723 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, 724 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, 725 726 .setstate = mlx4_en_dcbnl_set_state, 727 .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, 728 .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, 729 .setall = mlx4_en_dcbnl_set_all, 730 .getnumtcs = mlx4_en_dcbnl_getnumtcs, 731 .getpfcstate = mlx4_en_dcbnl_getpfcstate, 732 .setpfcstate = mlx4_en_dcbnl_setpfcstate, 733 .getapp = mlx4_en_dcbnl_getapp, 734 .setapp = mlx4_en_dcbnl_setapp, 735 736 .getdcbx = mlx4_en_dcbnl_getdcbx, 737 .setdcbx = mlx4_en_dcbnl_setdcbx, 738 }; 739