/*
 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/dcbnl.h>
#include <linux/math64.h>

#include "mlx4_en.h"
#include "fw_qos.h"

enum {
	MLX4_CEE_STATE_DOWN = 0,
	MLX4_CEE_STATE_UP   = 1,
};

/* Definitions for QCN */

struct mlx4_congestion_control_mb_prio_802_1_qau_params {
	__be32 modify_enable_high;
	__be32 modify_enable_low;
	__be32 reserved1;
	__be32 extended_enable;
	__be32 rppp_max_rps;
	__be32 rpg_time_reset;
	__be32 rpg_byte_reset;
	__be32 rpg_threshold;
	__be32 rpg_max_rate;
	__be32 rpg_ai_rate;
	__be32 rpg_hai_rate;
	__be32 rpg_gd;
	__be32 rpg_min_dec_fac;
	__be32 rpg_min_rate;
	__be32 max_time_rise;
	__be32 max_byte_rise;
	__be32 max_qdelta;
	__be32 min_qoffset;
	__be32 gd_coefficient;
	__be32 reserved2[5];
	__be32 cp_sample_base;
	__be32 reserved3[39];
};

struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
	__be64 rppp_rp_centiseconds;
	__be32 reserved1;
	__be32 ignored_cnm;
	__be32 rppp_created_rps;
	__be32 estimated_total_rate;
	__be32 max_active_rate_limiter_index;
	__be32 dropped_cnms_busy_fw;
	__be32 reserved2;
	__be32 cnms_handled_successfully;
	__be32 min_total_limiters_rate;
	__be32 max_total_limiters_rate;
	__be32 reserved3[4];
};

static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	switch (capid) {
	case DCB_CAP_ATTR_PFC:
		*cap = true;
		break;
	case DCB_CAP_ATTR_DCBX:
		*cap = priv->dcbx_cap;
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		*cap = 1 << mlx4_max_tc(priv->mdev->dev);
		break;
	default:
		*cap = false;
		break;
	}

	return 0;
}

static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	return priv->cee_config.pfc_state;
}

static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	priv->cee_config.pfc_state = state;
}
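/*
 * The CEE handlers above and below only update the configuration
 * cached in priv->cee_config; nothing is written to the device until
 * lldpad (or whichever DCB agent is in use) commits the changes
 * through the ->setall hook, mlx4_en_dcbnl_set_all().
 */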
static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
				      u8 *setting)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	*setting = priv->cee_config.dcb_pfc[priority];
}

static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
				      u8 setting)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	priv->cee_config.dcb_pfc[priority] = setting;
	priv->cee_config.pfc_state = true;
}

static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
		return -EINVAL;

	if (tcid == DCB_NUMTCS_ATTR_PFC)
		*num = mlx4_max_tc(priv->mdev->dev);
	else
		*num = 0;

	return 0;
}

static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	struct mlx4_en_port_profile *prof = priv->prof;
	struct mlx4_en_dev *mdev = priv->mdev;
	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;

	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 1;

	if (priv->cee_config.pfc_state) {
		int tc;

		rx_ppp = prof->rx_ppp;
		tx_ppp = prof->tx_ppp;

		for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
			u8 tc_mask = 1 << tc;

			switch (priv->cee_config.dcb_pfc[tc]) {
			case pfc_disabled:
				tx_ppp &= ~tc_mask;
				rx_ppp &= ~tc_mask;
				break;
			case pfc_enabled_full:
				tx_ppp |= tc_mask;
				rx_ppp |= tc_mask;
				break;
			case pfc_enabled_tx:
				tx_ppp |= tc_mask;
				rx_ppp &= ~tc_mask;
				break;
			case pfc_enabled_rx:
				tx_ppp &= ~tc_mask;
				rx_ppp |= tc_mask;
				break;
			default:
				break;
			}
		}
		rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
		tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
	} else {
		rx_ppp = 0;
		tx_ppp = 0;
		rx_pause = prof->rx_pause;
		tx_pause = prof->tx_pause;
	}

	if (mlx4_SET_PORT_general(mdev->dev, priv->port,
				  priv->rx_skb_size + ETH_FCS_LEN,
				  tx_pause, tx_ppp, rx_pause, rx_ppp)) {
		en_err(priv, "Failed setting pause params\n");
		return 1;
	}

	prof->tx_ppp = tx_ppp;
	prof->rx_ppp = rx_ppp;
	prof->tx_pause = tx_pause;
	prof->rx_pause = rx_pause;

	return 0;
}

static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
		return MLX4_CEE_STATE_UP;

	return MLX4_CEE_STATE_DOWN;
}

static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int num_tcs = 0;

	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 1;

	if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
		return 0;

	if (state) {
		priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		num_tcs = IEEE_8021QAZ_MAX_TCS;
	} else {
		priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
	}

	if (mlx4_en_alloc_tx_queue_per_tc(dev, num_tcs))
		return 1;

	return 0;
}
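/*
 * The CEE getapp/setapp handlers below do not touch the hardware:
 * they read and write the kernel's shared DCB application priority
 * table via dcb_getapp()/dcb_setapp(), and are only honored while a
 * CEE DCBX mode is active.
 */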
/* On success returns a non-zero 802.1p user priority bitmap
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	struct dcb_app app = {
		.selector = idtype,
		.protocol = id,
	};

	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 0;

	return dcb_getapp(netdev, &app);
}

static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
				u16 id, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	struct dcb_app app;

	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return -EINVAL;

	memset(&app, 0, sizeof(struct dcb_app));
	app.selector = idtype;
	app.protocol = id;
	app.priority = up;

	return dcb_setapp(netdev, &app);
}

static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
				     struct ieee_ets *ets)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ieee_ets *my_ets = &priv->ets;

	if (!my_ets)
		return -EINVAL;

	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));

	return 0;
}

static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
{
	int i;
	int total_ets_bw = 0;
	int has_ets_tc = 0;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] >= MLX4_EN_NUM_UP_HIGH) {
			en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
			       i, ets->prio_tc[i]);
			return -EINVAL;
		}

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			has_ets_tc = 1;
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			en_err(priv, "TC[%d]: Not supported TSA: %d\n",
			       i, ets->tc_tsa[i]);
			return -EOPNOTSUPP;
		}
	}

	if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
		en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
		       total_ets_bw);
		return -EINVAL;
	}

	return 0;
}
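/*
 * Example of a configuration that passes the validation above
 * (illustrative values, not driver defaults): make TC2 strict and
 * split the ETS bandwidth 60%/40% between TC0 and TC1, so the ETS
 * weights sum to exactly MLX4_EN_BW_MAX (100, per the error message
 * above):
 *
 *	tc_tsa   = { ETS, ETS, STRICT, ... }
 *	tc_tx_bw = {  60,  40,      0, ... }
 */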
static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
					 struct ieee_ets *ets, u16 *ratelimit)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int num_strict = 0;
	int i;
	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };

	ets = ets ?: &priv->ets;
	ratelimit = ratelimit ?: priv->maxrate;

	/* higher TC means higher priority => lower pg */
	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			pg[i] = MLX4_EN_TC_VENDOR;
			tc_tx_bw[i] = MLX4_EN_BW_MAX;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			pg[i] = num_strict++;
			tc_tx_bw[i] = MLX4_EN_BW_MAX;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			pg[i] = MLX4_EN_TC_ETS;
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
			break;
		}
	}

	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
				       ratelimit);
}

static int
mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_ets_validate(priv, ets);
	if (err)
		return err;

	err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
	if (err)
		return err;

	err = mlx4_en_config_port_scheduler(priv, ets, NULL);
	if (err)
		return err;

	memcpy(&priv->ets, ets, sizeof(priv->ets));

	return 0;
}

static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
				     struct ieee_pfc *pfc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
	pfc->pfc_en = priv->prof->tx_ppp;

	return 0;
}

static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
				     struct ieee_pfc *pfc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_port_profile *prof = priv->prof;
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
	int err;

	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
	       pfc->pfc_cap,
	       pfc->pfc_en,
	       pfc->mbc,
	       pfc->delay);

	rx_pause = prof->rx_pause && !pfc->pfc_en;
	tx_pause = prof->tx_pause && !pfc->pfc_en;
	rx_ppp = pfc->pfc_en;
	tx_ppp = pfc->pfc_en;

	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    tx_pause, tx_ppp, rx_pause, rx_ppp);
	if (err) {
		en_err(priv, "Failed setting pause params\n");
		return err;
	}

	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
					rx_ppp, rx_pause, tx_ppp, tx_pause);

	prof->tx_ppp = tx_ppp;
	prof->rx_ppp = rx_ppp;
	prof->rx_pause = rx_pause;
	prof->tx_pause = tx_pause;

	return err;
}

static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->dcbx_cap;
}
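/*
 * mlx4_en_dcbnl_setdcbx() below accepts only host-managed modes and
 * at most one DCBX version at a time: DCB_CAP_DCBX_HOST must be set,
 * DCB_CAP_DCBX_LLD_MANAGED must be clear, and VER_IEEE and VER_CEE
 * are mutually exclusive. On a mode change the existing QoS state is
 * reset (empty ETS/PFC for IEEE, a SETALL commit for CEE).
 */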
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ieee_ets ets = {0};
	struct ieee_pfc pfc = {0};

	if (mode == priv->dcbx_cap)
		return 0;

	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    ((mode & DCB_CAP_DCBX_VER_IEEE) &&
	     (mode & DCB_CAP_DCBX_VER_CEE)) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		goto err;

	priv->dcbx_cap = mode;

	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;

	if (mode & DCB_CAP_DCBX_VER_IEEE) {
		if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
			goto err;
		if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
			goto err;
	} else if (mode & DCB_CAP_DCBX_VER_CEE) {
		if (mlx4_en_dcbnl_set_all(dev))
			goto err;
	} else {
		if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
			goto err;
		if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
			goto err;
		if (mlx4_en_alloc_tx_queue_per_tc(dev, 0))
			goto err;
	}

	return 0;
err:
	return 1;
}

#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
					 struct ieee_maxrate *maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		maxrate->tc_maxrate[i] =
			priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;

	return 0;
}

static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
					 struct ieee_maxrate *maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 tmp[IEEE_8021QAZ_MAX_TCS];
	int i, err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* Convert from Kbps into HW units, rounding result up.
		 * Setting to 0, means unlimited BW.
		 */
		tmp[i] = div_u64(maxrate->tc_maxrate[i] +
				 MLX4_RATELIMIT_UNITS_IN_KB - 1,
				 MLX4_RATELIMIT_UNITS_IN_KB);
	}

	err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
	if (err)
		return err;

	memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));

	return 0;
}

#define RPG_ENABLE_BIT	31
#define CN_TAG_BIT	30

static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
				     struct ieee_qcn *qcn)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
	struct mlx4_cmd_mailbox *mailbox_out = NULL;
	u64 mailbox_in_dma = 0;
	u32 inmod = 0;
	int i, err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_out))
		return -ENOMEM;
	hw_qcn =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
	mailbox_out->buf;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		inmod = priv->port | ((1 << i) << 8) |
			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
				   mailbox_out->dma,
				   inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
				   MLX4_CMD_TIME_CLASS_C,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
			return err;
		}

		qcn->rpg_enable[i] =
			be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
		qcn->rppp_max_rps[i] =
			be32_to_cpu(hw_qcn->rppp_max_rps);
		qcn->rpg_time_reset[i] =
			be32_to_cpu(hw_qcn->rpg_time_reset);
		qcn->rpg_byte_reset[i] =
			be32_to_cpu(hw_qcn->rpg_byte_reset);
		qcn->rpg_threshold[i] =
			be32_to_cpu(hw_qcn->rpg_threshold);
		qcn->rpg_max_rate[i] =
			be32_to_cpu(hw_qcn->rpg_max_rate);
		qcn->rpg_ai_rate[i] =
			be32_to_cpu(hw_qcn->rpg_ai_rate);
		qcn->rpg_hai_rate[i] =
			be32_to_cpu(hw_qcn->rpg_hai_rate);
		qcn->rpg_gd[i] =
			be32_to_cpu(hw_qcn->rpg_gd);
		qcn->rpg_min_dec_fac[i] =
			be32_to_cpu(hw_qcn->rpg_min_dec_fac);
		qcn->rpg_min_rate[i] =
			be32_to_cpu(hw_qcn->rpg_min_rate);
		qcn->cndd_state_machine[i] =
			priv->cndd_state[i];
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
	return 0;
}
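/*
 * Note on the command interface used above and below: each QCN
 * get/set is issued once per priority, with the input modifier
 * packing the port number in bits 0-7, a one-hot priority mask in
 * bits 8-15 and the congestion-control algorithm (802.1Qau reaction
 * point) starting at bit 16. When setting parameters, the firmware
 * applies only fields flagged in the modify_enable_high/low words;
 * the fixed masks below are assumed to cover exactly the fields this
 * driver writes (inferred from the code, not from firmware
 * documentation).
 */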
static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
				     struct ieee_qcn *qcn)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
	struct mlx4_cmd_mailbox *mailbox_in = NULL;
	u64 mailbox_in_dma = 0;
	u32 inmod = 0;
	int i, err;
#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
#define MODIFY_ENABLE_LOW_MASK 0xffc00000

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_in))
		return -ENOMEM;

	mailbox_in_dma = mailbox_in->dma;
	hw_qcn =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		inmod = priv->port | ((1 << i) << 8) |
			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);

		/* Before updating a QCN parameter,
		 * its modify-enable bit must be set to 1.
		 */
		hw_qcn->modify_enable_high = cpu_to_be32(
						MODIFY_ENABLE_HIGH_MASK);
		hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);

		hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
		hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
		hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
		hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
		hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
		hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
		hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
		hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
		hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
		hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
		hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
		priv->cndd_state[i] = qcn->cndd_state_machine[i];
		if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
			hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);

		err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
			       MLX4_CONGESTION_CONTROL_SET_PARAMS,
			       MLX4_CMD_CONGESTION_CTRL_OPCODE,
			       MLX4_CMD_TIME_CLASS_C,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
			return err;
		}
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
	return 0;
}

static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
					  struct ieee_qcn_stats *qcn_stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
	struct mlx4_cmd_mailbox *mailbox_out = NULL;
	u64 mailbox_in_dma = 0;
	u32 inmod = 0;
	int i, err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_out))
		return -ENOMEM;

	hw_qcn_stats =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
	mailbox_out->buf;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		inmod = priv->port | ((1 << i) << 8) |
			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
				   mailbox_out->dma, inmod,
				   MLX4_CONGESTION_CONTROL_GET_STATISTICS,
				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
				   MLX4_CMD_TIME_CLASS_C,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
			return err;
		}
		qcn_stats->rppp_rp_centiseconds[i] =
			be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
		qcn_stats->rppp_created_rps[i] =
			be32_to_cpu(hw_qcn_stats->rppp_created_rps);
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
	return 0;
}
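/*
 * Two operation tables are exported: mlx4_en_dcbnl_ops with the full
 * ETS/maxrate/QCN/PFC and CEE handler set, and mlx4_en_dcbnl_pfc_ops,
 * a reduced set without the ETS, maxrate and QCN handlers. The
 * driver's netdev setup code picks one per device (presumably based
 * on the device's DCB/ETS capabilities; the selection happens outside
 * this file).
 */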
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets		= mlx4_en_dcbnl_ieee_getets,
	.ieee_setets		= mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate	= mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate	= mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getqcn		= mlx4_en_dcbnl_ieee_getqcn,
	.ieee_setqcn		= mlx4_en_dcbnl_ieee_setqcn,
	.ieee_getqcnstats	= mlx4_en_dcbnl_ieee_getqcnstats,
	.ieee_getpfc		= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc		= mlx4_en_dcbnl_ieee_setpfc,

	.getstate	= mlx4_en_dcbnl_get_state,
	.setstate	= mlx4_en_dcbnl_set_state,
	.getpfccfg	= mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg	= mlx4_en_dcbnl_set_pfc_cfg,
	.setall		= mlx4_en_dcbnl_set_all,
	.getcap		= mlx4_en_dcbnl_getcap,
	.getnumtcs	= mlx4_en_dcbnl_getnumtcs,
	.getpfcstate	= mlx4_en_dcbnl_getpfcstate,
	.setpfcstate	= mlx4_en_dcbnl_setpfcstate,
	.getapp		= mlx4_en_dcbnl_getapp,
	.setapp		= mlx4_en_dcbnl_setapp,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};

const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,

	.setstate	= mlx4_en_dcbnl_set_state,
	.getpfccfg	= mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg	= mlx4_en_dcbnl_set_pfc_cfg,
	.setall		= mlx4_en_dcbnl_set_all,
	.getnumtcs	= mlx4_en_dcbnl_getnumtcs,
	.getpfcstate	= mlx4_en_dcbnl_getpfcstate,
	.setpfcstate	= mlx4_en_dcbnl_setpfcstate,
	.getapp		= mlx4_en_dcbnl_getapp,
	.setapp		= mlx4_en_dcbnl_setapp,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};