1 /*- 2 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD$ 26 */ 27 28 #include "en.h" 29 30 #include <sys/eventhandler.h> 31 #include <sys/sockio.h> 32 #include <machine/atomic.h> 33 34 #ifndef ETH_DRIVER_VERSION 35 #define ETH_DRIVER_VERSION "3.5.1" 36 #endif 37 #define DRIVER_RELDATE "April 2019" 38 39 static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver " 40 ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; 41 42 static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs); 43 44 struct mlx5e_channel_param { 45 struct mlx5e_rq_param rq; 46 struct mlx5e_sq_param sq; 47 struct mlx5e_cq_param rx_cq; 48 struct mlx5e_cq_param tx_cq; 49 }; 50 51 struct media { 52 u32 subtype; 53 u64 baudrate; 54 }; 55 56 static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = { 57 58 [MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = { 59 .subtype = IFM_1000_CX_SGMII, 60 .baudrate = IF_Mbps(1000ULL), 61 }, 62 [MLX5E_1000BASE_KX][MLX5E_KX] = { 63 .subtype = IFM_1000_KX, 64 .baudrate = IF_Mbps(1000ULL), 65 }, 66 [MLX5E_10GBASE_CX4][MLX5E_CX4] = { 67 .subtype = IFM_10G_CX4, 68 .baudrate = IF_Gbps(10ULL), 69 }, 70 [MLX5E_10GBASE_KX4][MLX5E_KX4] = { 71 .subtype = IFM_10G_KX4, 72 .baudrate = IF_Gbps(10ULL), 73 }, 74 [MLX5E_10GBASE_KR][MLX5E_KR] = { 75 .subtype = IFM_10G_KR, 76 .baudrate = IF_Gbps(10ULL), 77 }, 78 [MLX5E_20GBASE_KR2][MLX5E_KR2] = { 79 .subtype = IFM_20G_KR2, 80 .baudrate = IF_Gbps(20ULL), 81 }, 82 [MLX5E_40GBASE_CR4][MLX5E_CR4] = { 83 .subtype = IFM_40G_CR4, 84 .baudrate = IF_Gbps(40ULL), 85 }, 86 [MLX5E_40GBASE_KR4][MLX5E_KR4] = { 87 .subtype = IFM_40G_KR4, 88 .baudrate = IF_Gbps(40ULL), 89 }, 90 [MLX5E_56GBASE_R4][MLX5E_R] = { 91 .subtype = IFM_56G_R4, 92 .baudrate = IF_Gbps(56ULL), 93 }, 94 [MLX5E_10GBASE_CR][MLX5E_CR1] = { 95 .subtype = IFM_10G_CR1, 96 .baudrate = IF_Gbps(10ULL), 97 }, 98 [MLX5E_10GBASE_SR][MLX5E_SR] = { 99 .subtype = IFM_10G_SR, 100 .baudrate = IF_Gbps(10ULL), 101 }, 102 [MLX5E_10GBASE_ER_LR][MLX5E_ER] = { 103 .subtype = IFM_10G_ER, 104 .baudrate = IF_Gbps(10ULL), 105 }, 106 [MLX5E_10GBASE_ER_LR][MLX5E_LR] = { 107 .subtype = IFM_10G_LR, 108 .baudrate = IF_Gbps(10ULL), 109 }, 110 [MLX5E_40GBASE_SR4][MLX5E_SR4] = { 111 .subtype = IFM_40G_SR4, 112 .baudrate = IF_Gbps(40ULL), 113 }, 114 [MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = { 115 .subtype = IFM_40G_LR4, 116 .baudrate = IF_Gbps(40ULL), 117 }, 118 [MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = { 119 .subtype = IFM_40G_ER4, 120 .baudrate = IF_Gbps(40ULL), 121 }, 122 [MLX5E_100GBASE_CR4][MLX5E_CR4] = { 123 .subtype = IFM_100G_CR4, 124 .baudrate = IF_Gbps(100ULL), 125 }, 126 [MLX5E_100GBASE_SR4][MLX5E_SR4] = { 127 .subtype = IFM_100G_SR4, 128 .baudrate = IF_Gbps(100ULL), 129 }, 130 [MLX5E_100GBASE_KR4][MLX5E_KR4] = { 131 .subtype = IFM_100G_KR4, 132 .baudrate = IF_Gbps(100ULL), 133 }, 134 [MLX5E_100GBASE_LR4][MLX5E_LR4] = { 135 .subtype = IFM_100G_LR4, 136 .baudrate = IF_Gbps(100ULL), 137 }, 138 [MLX5E_100BASE_TX][MLX5E_TX] = { 139 .subtype = IFM_100_TX, 140 .baudrate = IF_Mbps(100ULL), 141 }, 142 [MLX5E_1000BASE_T][MLX5E_T] = { 143 .subtype = IFM_1000_T, 144 .baudrate = IF_Mbps(1000ULL), 145 }, 146 [MLX5E_10GBASE_T][MLX5E_T] = { 147 .subtype = IFM_10G_T, 148 .baudrate = IF_Gbps(10ULL), 149 }, 150 [MLX5E_25GBASE_CR][MLX5E_CR] = { 151 .subtype = IFM_25G_CR, 152 .baudrate = IF_Gbps(25ULL), 153 }, 154 [MLX5E_25GBASE_KR][MLX5E_KR] = { 155 .subtype = IFM_25G_KR, 156 .baudrate = IF_Gbps(25ULL), 157 }, 158 [MLX5E_25GBASE_SR][MLX5E_SR] = { 159 .subtype = IFM_25G_SR, 160 .baudrate = IF_Gbps(25ULL), 161 }, 162 [MLX5E_50GBASE_CR2][MLX5E_CR2] = { 163 .subtype = IFM_50G_CR2, 164 .baudrate = IF_Gbps(50ULL), 165 }, 166 [MLX5E_50GBASE_KR2][MLX5E_KR2] = { 167 .subtype = IFM_50G_KR2, 168 .baudrate = IF_Gbps(50ULL), 169 }, 170 }; 171 172 static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = { 173 [MLX5E_SGMII_100M][MLX5E_SGMII] = { 174 .subtype = IFM_100_SGMII, 175 .baudrate = IF_Mbps(100), 176 }, 177 [MLX5E_1000BASE_X_SGMII][MLX5E_KX] = { 178 .subtype = IFM_1000_KX, 179 .baudrate = IF_Mbps(1000), 180 }, 181 [MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = { 182 .subtype = IFM_1000_CX_SGMII, 183 .baudrate = IF_Mbps(1000), 184 }, 185 [MLX5E_1000BASE_X_SGMII][MLX5E_CX] = { 186 .subtype = IFM_1000_CX, 187 .baudrate = IF_Mbps(1000), 188 }, 189 [MLX5E_1000BASE_X_SGMII][MLX5E_LX] = { 190 .subtype = IFM_1000_LX, 191 .baudrate = IF_Mbps(1000), 192 }, 193 [MLX5E_1000BASE_X_SGMII][MLX5E_SX] = { 194 .subtype = IFM_1000_SX, 195 .baudrate = IF_Mbps(1000), 196 }, 197 [MLX5E_1000BASE_X_SGMII][MLX5E_T] = { 198 .subtype = IFM_1000_T, 199 .baudrate = IF_Mbps(1000), 200 }, 201 [MLX5E_5GBASE_R][MLX5E_T] = { 202 .subtype = IFM_5000_T, 203 .baudrate = IF_Mbps(5000), 204 }, 205 [MLX5E_5GBASE_R][MLX5E_KR] = { 206 .subtype = IFM_5000_KR, 207 .baudrate = IF_Mbps(5000), 208 }, 209 [MLX5E_5GBASE_R][MLX5E_KR1] = { 210 .subtype = IFM_5000_KR1, 211 .baudrate = IF_Mbps(5000), 212 }, 213 [MLX5E_5GBASE_R][MLX5E_KR_S] = { 214 .subtype = IFM_5000_KR_S, 215 .baudrate = IF_Mbps(5000), 216 }, 217 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = { 218 .subtype = IFM_10G_ER, 219 .baudrate = IF_Gbps(10ULL), 220 }, 221 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = { 222 .subtype = IFM_10G_KR, 223 .baudrate = IF_Gbps(10ULL), 224 }, 225 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = { 226 .subtype = IFM_10G_LR, 227 .baudrate = IF_Gbps(10ULL), 228 }, 229 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = { 230 .subtype = IFM_10G_SR, 231 .baudrate = IF_Gbps(10ULL), 232 }, 233 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = { 234 .subtype = IFM_10G_T, 235 .baudrate = IF_Gbps(10ULL), 236 }, 237 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = { 238 .subtype = IFM_10G_AOC, 239 .baudrate = IF_Gbps(10ULL), 240 }, 241 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = { 242 .subtype = IFM_10G_CR1, 243 .baudrate = IF_Gbps(10ULL), 244 }, 245 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = { 246 .subtype = IFM_40G_CR4, 247 .baudrate = IF_Gbps(40ULL), 248 }, 249 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = { 250 .subtype = IFM_40G_KR4, 251 .baudrate = IF_Gbps(40ULL), 252 }, 253 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = { 254 .subtype = IFM_40G_LR4, 255 .baudrate = IF_Gbps(40ULL), 256 }, 257 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = { 258 .subtype = IFM_40G_SR4, 259 .baudrate = IF_Gbps(40ULL), 260 }, 261 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = { 262 .subtype = IFM_40G_ER4, 263 .baudrate = IF_Gbps(40ULL), 264 }, 265 266 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = { 267 .subtype = IFM_25G_CR, 268 .baudrate = IF_Gbps(25ULL), 269 }, 270 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = { 271 .subtype = IFM_25G_KR, 272 .baudrate = IF_Gbps(25ULL), 273 }, 274 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = { 275 .subtype = IFM_25G_SR, 276 .baudrate = IF_Gbps(25ULL), 277 }, 278 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = { 279 .subtype = IFM_25G_ACC, 280 .baudrate = IF_Gbps(25ULL), 281 }, 282 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = { 283 .subtype = IFM_25G_AOC, 284 .baudrate = IF_Gbps(25ULL), 285 }, 286 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = { 287 .subtype = IFM_25G_CR1, 288 .baudrate = IF_Gbps(25ULL), 289 }, 290 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = { 291 .subtype = IFM_25G_CR_S, 292 .baudrate = IF_Gbps(25ULL), 293 }, 294 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = { 295 .subtype = IFM_5000_KR1, 296 .baudrate = IF_Gbps(25ULL), 297 }, 298 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = { 299 .subtype = IFM_25G_KR_S, 300 .baudrate = IF_Gbps(25ULL), 301 }, 302 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = { 303 .subtype = IFM_25G_LR, 304 .baudrate = IF_Gbps(25ULL), 305 }, 306 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = { 307 .subtype = IFM_25G_T, 308 .baudrate = IF_Gbps(25ULL), 309 }, 310 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = { 311 .subtype = IFM_50G_CR2, 312 .baudrate = IF_Gbps(50ULL), 313 }, 314 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = { 315 .subtype = IFM_50G_KR2, 316 .baudrate = IF_Gbps(50ULL), 317 }, 318 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = { 319 .subtype = IFM_50G_SR2, 320 .baudrate = IF_Gbps(50ULL), 321 }, 322 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = { 323 .subtype = IFM_50G_LR2, 324 .baudrate = IF_Gbps(50ULL), 325 }, 326 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = { 327 .subtype = IFM_50G_LR, 328 .baudrate = IF_Gbps(50ULL), 329 }, 330 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = { 331 .subtype = IFM_50G_SR, 332 .baudrate = IF_Gbps(50ULL), 333 }, 334 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = { 335 .subtype = IFM_50G_CP, 336 .baudrate = IF_Gbps(50ULL), 337 }, 338 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = { 339 .subtype = IFM_50G_FR, 340 .baudrate = IF_Gbps(50ULL), 341 }, 342 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = { 343 .subtype = IFM_50G_KR_PAM4, 344 .baudrate = IF_Gbps(50ULL), 345 }, 346 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = { 347 .subtype = IFM_100G_CR4, 348 .baudrate = IF_Gbps(100ULL), 349 }, 350 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = { 351 .subtype = IFM_100G_KR4, 352 .baudrate = IF_Gbps(100ULL), 353 }, 354 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = { 355 .subtype = IFM_100G_LR4, 356 .baudrate = IF_Gbps(100ULL), 357 }, 358 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = { 359 .subtype = IFM_100G_SR4, 360 .baudrate = IF_Gbps(100ULL), 361 }, 362 [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = { 363 .subtype = IFM_100G_SR2, 364 .baudrate = IF_Gbps(100ULL), 365 }, 366 [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = { 367 .subtype = IFM_100G_CP2, 368 .baudrate = IF_Gbps(100ULL), 369 }, 370 [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = { 371 .subtype = IFM_100G_KR2_PAM4, 372 .baudrate = IF_Gbps(100ULL), 373 }, 374 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = { 375 .subtype = IFM_200G_DR4, 376 .baudrate = IF_Gbps(200ULL), 377 }, 378 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = { 379 .subtype = IFM_200G_LR4, 380 .baudrate = IF_Gbps(200ULL), 381 }, 382 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = { 383 .subtype = IFM_200G_SR4, 384 .baudrate = IF_Gbps(200ULL), 385 }, 386 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = { 387 .subtype = IFM_200G_FR4, 388 .baudrate = IF_Gbps(200ULL), 389 }, 390 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = { 391 .subtype = IFM_200G_CR4_PAM4, 392 .baudrate = IF_Gbps(200ULL), 393 }, 394 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = { 395 .subtype = IFM_200G_KR4_PAM4, 396 .baudrate = IF_Gbps(200ULL), 397 }, 398 }; 399 400 MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet"); 401 402 static void 403 mlx5e_update_carrier(struct mlx5e_priv *priv) 404 { 405 struct mlx5_core_dev *mdev = priv->mdev; 406 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 407 u32 eth_proto_oper; 408 int error; 409 u8 port_state; 410 u8 is_er_type; 411 u8 i, j; 412 bool ext; 413 struct media media_entry = {}; 414 415 port_state = mlx5_query_vport_state(mdev, 416 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); 417 418 if (port_state == VPORT_STATE_UP) { 419 priv->media_status_last |= IFM_ACTIVE; 420 } else { 421 priv->media_status_last &= ~IFM_ACTIVE; 422 priv->media_active_last = IFM_ETHER; 423 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 424 return; 425 } 426 427 error = mlx5_query_port_ptys(mdev, out, sizeof(out), 428 MLX5_PTYS_EN, 1); 429 if (error) { 430 priv->media_active_last = IFM_ETHER; 431 priv->ifp->if_baudrate = 1; 432 if_printf(priv->ifp, "%s: query port ptys failed: " 433 "0x%x\n", __func__, error); 434 return; 435 } 436 437 ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 438 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 439 eth_proto_oper); 440 441 i = ilog2(eth_proto_oper); 442 443 for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) { 444 media_entry = ext ? mlx5e_ext_mode_table[i][j] : 445 mlx5e_mode_table[i][j]; 446 if (media_entry.baudrate != 0) 447 break; 448 } 449 450 if (media_entry.subtype == 0) { 451 if_printf(priv->ifp, "%s: Could not find operational " 452 "media subtype\n", __func__); 453 return; 454 } 455 456 switch (media_entry.subtype) { 457 case IFM_10G_ER: 458 error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); 459 if (error != 0) { 460 if_printf(priv->ifp, "%s: query port pddr failed: %d\n", 461 __func__, error); 462 } 463 if (error != 0 || is_er_type == 0) 464 media_entry.subtype = IFM_10G_LR; 465 break; 466 case IFM_40G_LR4: 467 error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); 468 if (error != 0) { 469 if_printf(priv->ifp, "%s: query port pddr failed: %d\n", 470 __func__, error); 471 } 472 if (error == 0 && is_er_type != 0) 473 media_entry.subtype = IFM_40G_ER4; 474 break; 475 } 476 priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX; 477 priv->ifp->if_baudrate = media_entry.baudrate; 478 479 if_link_state_change(priv->ifp, LINK_STATE_UP); 480 } 481 482 static void 483 mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr) 484 { 485 struct mlx5e_priv *priv = dev->if_softc; 486 487 ifmr->ifm_status = priv->media_status_last; 488 ifmr->ifm_active = priv->media_active_last | 489 (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) | 490 (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0); 491 492 } 493 494 static u32 495 mlx5e_find_link_mode(u32 subtype, bool ext) 496 { 497 u32 i; 498 u32 j; 499 u32 link_mode = 0; 500 u32 speeds_num = 0; 501 struct media media_entry = {}; 502 503 switch (subtype) { 504 case IFM_10G_LR: 505 subtype = IFM_10G_ER; 506 break; 507 case IFM_40G_ER4: 508 subtype = IFM_40G_LR4; 509 break; 510 } 511 512 speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : 513 MLX5E_LINK_SPEEDS_NUMBER; 514 515 for (i = 0; i != speeds_num; i++) { 516 for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) { 517 media_entry = ext ? mlx5e_ext_mode_table[i][j] : 518 mlx5e_mode_table[i][j]; 519 if (media_entry.baudrate == 0) 520 continue; 521 if (media_entry.subtype == subtype) { 522 link_mode |= MLX5E_PROT_MASK(i); 523 } 524 } 525 } 526 527 return (link_mode); 528 } 529 530 static int 531 mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv) 532 { 533 return (mlx5_set_port_pause_and_pfc(priv->mdev, 1, 534 priv->params.rx_pauseframe_control, 535 priv->params.tx_pauseframe_control, 536 priv->params.rx_priority_flow_control, 537 priv->params.tx_priority_flow_control)); 538 } 539 540 static int 541 mlx5e_set_port_pfc(struct mlx5e_priv *priv) 542 { 543 int error; 544 545 if (priv->gone != 0) { 546 error = -ENXIO; 547 } else if (priv->params.rx_pauseframe_control || 548 priv->params.tx_pauseframe_control) { 549 if_printf(priv->ifp, 550 "Global pauseframes must be disabled before " 551 "enabling PFC.\n"); 552 error = -EINVAL; 553 } else { 554 error = mlx5e_set_port_pause_and_pfc(priv); 555 } 556 return (error); 557 } 558 559 static int 560 mlx5e_media_change(struct ifnet *dev) 561 { 562 struct mlx5e_priv *priv = dev->if_softc; 563 struct mlx5_core_dev *mdev = priv->mdev; 564 u32 eth_proto_cap; 565 u32 link_mode; 566 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 567 int was_opened; 568 int locked; 569 int error; 570 bool ext; 571 572 locked = PRIV_LOCKED(priv); 573 if (!locked) 574 PRIV_LOCK(priv); 575 576 if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) { 577 error = EINVAL; 578 goto done; 579 } 580 581 error = mlx5_query_port_ptys(mdev, out, sizeof(out), 582 MLX5_PTYS_EN, 1); 583 if (error != 0) { 584 if_printf(dev, "Query port media capability failed\n"); 585 goto done; 586 } 587 588 ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 589 link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext); 590 591 /* query supported capabilities */ 592 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 593 eth_proto_capability); 594 595 /* check for autoselect */ 596 if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) { 597 link_mode = eth_proto_cap; 598 if (link_mode == 0) { 599 if_printf(dev, "Port media capability is zero\n"); 600 error = EINVAL; 601 goto done; 602 } 603 } else { 604 link_mode = link_mode & eth_proto_cap; 605 if (link_mode == 0) { 606 if_printf(dev, "Not supported link mode requested\n"); 607 error = EINVAL; 608 goto done; 609 } 610 } 611 if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 612 /* check if PFC is enabled */ 613 if (priv->params.rx_priority_flow_control || 614 priv->params.tx_priority_flow_control) { 615 if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n"); 616 error = EINVAL; 617 goto done; 618 } 619 } 620 /* update pauseframe control bits */ 621 priv->params.rx_pauseframe_control = 622 (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0; 623 priv->params.tx_pauseframe_control = 624 (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0; 625 626 /* check if device is opened */ 627 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 628 629 /* reconfigure the hardware */ 630 mlx5_set_port_status(mdev, MLX5_PORT_DOWN); 631 mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext); 632 error = -mlx5e_set_port_pause_and_pfc(priv); 633 if (was_opened) 634 mlx5_set_port_status(mdev, MLX5_PORT_UP); 635 636 done: 637 if (!locked) 638 PRIV_UNLOCK(priv); 639 return (error); 640 } 641 642 static void 643 mlx5e_update_carrier_work(struct work_struct *work) 644 { 645 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, 646 update_carrier_work); 647 648 PRIV_LOCK(priv); 649 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 650 mlx5e_update_carrier(priv); 651 PRIV_UNLOCK(priv); 652 } 653 654 #define MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f) \ 655 s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c); 656 657 #define MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f) \ 658 s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c); 659 660 static void 661 mlx5e_update_pcie_counters(struct mlx5e_priv *priv) 662 { 663 struct mlx5_core_dev *mdev = priv->mdev; 664 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; 665 const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg); 666 void *out; 667 void *in; 668 int err; 669 670 /* allocate firmware request structures */ 671 in = mlx5_vzalloc(sz); 672 out = mlx5_vzalloc(sz); 673 if (in == NULL || out == NULL) 674 goto free_out; 675 676 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); 677 err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 678 if (err != 0) 679 goto free_out; 680 681 MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64) 682 MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) 683 684 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP); 685 err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 686 if (err != 0) 687 goto free_out; 688 689 MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) 690 691 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP); 692 err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 693 if (err != 0) 694 goto free_out; 695 696 MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) 697 698 free_out: 699 /* free firmware request structures */ 700 kvfree(in); 701 kvfree(out); 702 } 703 704 /* 705 * This function reads the physical port counters from the firmware 706 * using a pre-defined layout defined by various MLX5E_PPORT_XXX() 707 * macros. The output is converted from big-endian 64-bit values into 708 * host endian ones and stored in the "priv->stats.pport" structure. 709 */ 710 static void 711 mlx5e_update_pport_counters(struct mlx5e_priv *priv) 712 { 713 struct mlx5_core_dev *mdev = priv->mdev; 714 struct mlx5e_pport_stats *s = &priv->stats.pport; 715 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; 716 u32 *in; 717 u32 *out; 718 const u64 *ptr; 719 unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 720 unsigned x; 721 unsigned y; 722 unsigned z; 723 724 /* allocate firmware request structures */ 725 in = mlx5_vzalloc(sz); 726 out = mlx5_vzalloc(sz); 727 if (in == NULL || out == NULL) 728 goto free_out; 729 730 /* 731 * Get pointer to the 64-bit counter set which is located at a 732 * fixed offset in the output firmware request structure: 733 */ 734 ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set); 735 736 MLX5_SET(ppcnt_reg, in, local_port, 1); 737 738 /* read IEEE802_3 counter group using predefined counter layout */ 739 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); 740 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 741 for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM; 742 x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++) 743 s->arg[y] = be64toh(ptr[x]); 744 745 /* read RFC2819 counter group using predefined counter layout */ 746 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); 747 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 748 for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++) 749 s->arg[y] = be64toh(ptr[x]); 750 751 for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM + 752 MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++) 753 s_debug->arg[y] = be64toh(ptr[x]); 754 755 /* read RFC2863 counter group using predefined counter layout */ 756 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); 757 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 758 for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++) 759 s_debug->arg[y] = be64toh(ptr[x]); 760 761 /* read physical layer stats counter group using predefined counter layout */ 762 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); 763 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 764 for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++) 765 s_debug->arg[y] = be64toh(ptr[x]); 766 767 /* read Extended Ethernet counter group using predefined counter layout */ 768 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP); 769 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 770 for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++) 771 s_debug->arg[y] = be64toh(ptr[x]); 772 773 /* read Extended Statistical Group */ 774 if (MLX5_CAP_GEN(mdev, pcam_reg) && 775 MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) && 776 MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) { 777 /* read Extended Statistical counter group using predefined counter layout */ 778 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); 779 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 780 781 for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++) 782 s_debug->arg[y] = be64toh(ptr[x]); 783 } 784 785 /* read PCIE counters */ 786 mlx5e_update_pcie_counters(priv); 787 788 /* read per-priority counters */ 789 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); 790 791 /* iterate all the priorities */ 792 for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) { 793 MLX5_SET(ppcnt_reg, in, prio_tc, z); 794 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 795 796 /* read per priority stats counter group using predefined counter layout */ 797 for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM / 798 MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++) 799 s->arg[y] = be64toh(ptr[x]); 800 } 801 802 free_out: 803 /* free firmware request structures */ 804 kvfree(in); 805 kvfree(out); 806 } 807 808 static void 809 mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) 810 { 811 u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {}; 812 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; 813 814 if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) 815 return; 816 817 MLX5_SET(query_vnic_env_in, in, opcode, 818 MLX5_CMD_OP_QUERY_VNIC_ENV); 819 MLX5_SET(query_vnic_env_in, in, op_mod, 0); 820 MLX5_SET(query_vnic_env_in, in, other_vport, 0); 821 822 if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0) 823 return; 824 825 priv->stats.vport.rx_steer_missed_packets = 826 MLX5_GET64(query_vnic_env_out, out, 827 vport_env.nic_receive_steering_discard); 828 } 829 830 /* 831 * This function is called regularly to collect all statistics 832 * counters from the firmware. The values can be viewed through the 833 * sysctl interface. Execution is serialized using the priv's global 834 * configuration lock. 835 */ 836 static void 837 mlx5e_update_stats_locked(struct mlx5e_priv *priv) 838 { 839 struct mlx5_core_dev *mdev = priv->mdev; 840 struct mlx5e_vport_stats *s = &priv->stats.vport; 841 struct mlx5e_sq_stats *sq_stats; 842 struct buf_ring *sq_br; 843 #if (__FreeBSD_version < 1100000) 844 struct ifnet *ifp = priv->ifp; 845 #endif 846 847 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; 848 u32 *out; 849 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); 850 u64 tso_packets = 0; 851 u64 tso_bytes = 0; 852 u64 tx_queue_dropped = 0; 853 u64 tx_defragged = 0; 854 u64 tx_offload_none = 0; 855 u64 lro_packets = 0; 856 u64 lro_bytes = 0; 857 u64 sw_lro_queued = 0; 858 u64 sw_lro_flushed = 0; 859 u64 rx_csum_none = 0; 860 u64 rx_wqe_err = 0; 861 u64 rx_packets = 0; 862 u64 rx_bytes = 0; 863 u32 rx_out_of_buffer = 0; 864 int i; 865 int j; 866 867 out = mlx5_vzalloc(outlen); 868 if (out == NULL) 869 goto free_out; 870 871 /* Collect firts the SW counters and then HW for consistency */ 872 for (i = 0; i < priv->params.num_channels; i++) { 873 struct mlx5e_channel *pch = priv->channel + i; 874 struct mlx5e_rq *rq = &pch->rq; 875 struct mlx5e_rq_stats *rq_stats = &pch->rq.stats; 876 877 /* collect stats from LRO */ 878 rq_stats->sw_lro_queued = rq->lro.lro_queued; 879 rq_stats->sw_lro_flushed = rq->lro.lro_flushed; 880 sw_lro_queued += rq_stats->sw_lro_queued; 881 sw_lro_flushed += rq_stats->sw_lro_flushed; 882 lro_packets += rq_stats->lro_packets; 883 lro_bytes += rq_stats->lro_bytes; 884 rx_csum_none += rq_stats->csum_none; 885 rx_wqe_err += rq_stats->wqe_err; 886 rx_packets += rq_stats->packets; 887 rx_bytes += rq_stats->bytes; 888 889 for (j = 0; j < priv->num_tc; j++) { 890 sq_stats = &pch->sq[j].stats; 891 sq_br = pch->sq[j].br; 892 893 tso_packets += sq_stats->tso_packets; 894 tso_bytes += sq_stats->tso_bytes; 895 tx_queue_dropped += sq_stats->dropped; 896 if (sq_br != NULL) 897 tx_queue_dropped += sq_br->br_drops; 898 tx_defragged += sq_stats->defragged; 899 tx_offload_none += sq_stats->csum_offload_none; 900 } 901 } 902 903 /* update counters */ 904 s->tso_packets = tso_packets; 905 s->tso_bytes = tso_bytes; 906 s->tx_queue_dropped = tx_queue_dropped; 907 s->tx_defragged = tx_defragged; 908 s->lro_packets = lro_packets; 909 s->lro_bytes = lro_bytes; 910 s->sw_lro_queued = sw_lro_queued; 911 s->sw_lro_flushed = sw_lro_flushed; 912 s->rx_csum_none = rx_csum_none; 913 s->rx_wqe_err = rx_wqe_err; 914 s->rx_packets = rx_packets; 915 s->rx_bytes = rx_bytes; 916 917 mlx5e_grp_vnic_env_update_stats(priv); 918 919 /* HW counters */ 920 memset(in, 0, sizeof(in)); 921 922 MLX5_SET(query_vport_counter_in, in, opcode, 923 MLX5_CMD_OP_QUERY_VPORT_COUNTER); 924 MLX5_SET(query_vport_counter_in, in, op_mod, 0); 925 MLX5_SET(query_vport_counter_in, in, other_vport, 0); 926 927 memset(out, 0, outlen); 928 929 /* get number of out-of-buffer drops first */ 930 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 && 931 mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id, 932 &rx_out_of_buffer) == 0) { 933 s->rx_out_of_buffer = rx_out_of_buffer; 934 } 935 936 /* get port statistics */ 937 if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) { 938 #define MLX5_GET_CTR(out, x) \ 939 MLX5_GET64(query_vport_counter_out, out, x) 940 941 s->rx_error_packets = 942 MLX5_GET_CTR(out, received_errors.packets); 943 s->rx_error_bytes = 944 MLX5_GET_CTR(out, received_errors.octets); 945 s->tx_error_packets = 946 MLX5_GET_CTR(out, transmit_errors.packets); 947 s->tx_error_bytes = 948 MLX5_GET_CTR(out, transmit_errors.octets); 949 950 s->rx_unicast_packets = 951 MLX5_GET_CTR(out, received_eth_unicast.packets); 952 s->rx_unicast_bytes = 953 MLX5_GET_CTR(out, received_eth_unicast.octets); 954 s->tx_unicast_packets = 955 MLX5_GET_CTR(out, transmitted_eth_unicast.packets); 956 s->tx_unicast_bytes = 957 MLX5_GET_CTR(out, transmitted_eth_unicast.octets); 958 959 s->rx_multicast_packets = 960 MLX5_GET_CTR(out, received_eth_multicast.packets); 961 s->rx_multicast_bytes = 962 MLX5_GET_CTR(out, received_eth_multicast.octets); 963 s->tx_multicast_packets = 964 MLX5_GET_CTR(out, transmitted_eth_multicast.packets); 965 s->tx_multicast_bytes = 966 MLX5_GET_CTR(out, transmitted_eth_multicast.octets); 967 968 s->rx_broadcast_packets = 969 MLX5_GET_CTR(out, received_eth_broadcast.packets); 970 s->rx_broadcast_bytes = 971 MLX5_GET_CTR(out, received_eth_broadcast.octets); 972 s->tx_broadcast_packets = 973 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); 974 s->tx_broadcast_bytes = 975 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 976 977 s->tx_packets = s->tx_unicast_packets + 978 s->tx_multicast_packets + s->tx_broadcast_packets; 979 s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes + 980 s->tx_broadcast_bytes; 981 982 /* Update calculated offload counters */ 983 s->tx_csum_offload = s->tx_packets - tx_offload_none; 984 s->rx_csum_good = s->rx_packets - s->rx_csum_none; 985 } 986 987 /* Get physical port counters */ 988 mlx5e_update_pport_counters(priv); 989 990 s->tx_jumbo_packets = 991 priv->stats.port_stats_debug.tx_stat_p1519to2047octets + 992 priv->stats.port_stats_debug.tx_stat_p2048to4095octets + 993 priv->stats.port_stats_debug.tx_stat_p4096to8191octets + 994 priv->stats.port_stats_debug.tx_stat_p8192to10239octets; 995 996 #if (__FreeBSD_version < 1100000) 997 /* no get_counters interface in fbsd 10 */ 998 ifp->if_ipackets = s->rx_packets; 999 ifp->if_ierrors = priv->stats.pport.in_range_len_errors + 1000 priv->stats.pport.out_of_range_len + 1001 priv->stats.pport.too_long_errors + 1002 priv->stats.pport.check_seq_err + 1003 priv->stats.pport.alignment_err; 1004 ifp->if_iqdrops = s->rx_out_of_buffer; 1005 ifp->if_opackets = s->tx_packets; 1006 ifp->if_oerrors = priv->stats.port_stats_debug.out_discards; 1007 ifp->if_snd.ifq_drops = s->tx_queue_dropped; 1008 ifp->if_ibytes = s->rx_bytes; 1009 ifp->if_obytes = s->tx_bytes; 1010 ifp->if_collisions = 1011 priv->stats.pport.collisions; 1012 #endif 1013 1014 free_out: 1015 kvfree(out); 1016 1017 /* Update diagnostics, if any */ 1018 if (priv->params_ethtool.diag_pci_enable || 1019 priv->params_ethtool.diag_general_enable) { 1020 int error = mlx5_core_get_diagnostics_full(mdev, 1021 priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL, 1022 priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL); 1023 if (error != 0) 1024 if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error); 1025 } 1026 } 1027 1028 static void 1029 mlx5e_update_stats_work(struct work_struct *work) 1030 { 1031 struct mlx5e_priv *priv; 1032 1033 priv = container_of(work, struct mlx5e_priv, update_stats_work); 1034 PRIV_LOCK(priv); 1035 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 1036 mlx5e_update_stats_locked(priv); 1037 PRIV_UNLOCK(priv); 1038 } 1039 1040 static void 1041 mlx5e_update_stats(void *arg) 1042 { 1043 struct mlx5e_priv *priv = arg; 1044 1045 queue_work(priv->wq, &priv->update_stats_work); 1046 1047 callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv); 1048 } 1049 1050 static void 1051 mlx5e_async_event_sub(struct mlx5e_priv *priv, 1052 enum mlx5_dev_event event) 1053 { 1054 switch (event) { 1055 case MLX5_DEV_EVENT_PORT_UP: 1056 case MLX5_DEV_EVENT_PORT_DOWN: 1057 queue_work(priv->wq, &priv->update_carrier_work); 1058 break; 1059 1060 default: 1061 break; 1062 } 1063 } 1064 1065 static void 1066 mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, 1067 enum mlx5_dev_event event, unsigned long param) 1068 { 1069 struct mlx5e_priv *priv = vpriv; 1070 1071 mtx_lock(&priv->async_events_mtx); 1072 if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) 1073 mlx5e_async_event_sub(priv, event); 1074 mtx_unlock(&priv->async_events_mtx); 1075 } 1076 1077 static void 1078 mlx5e_enable_async_events(struct mlx5e_priv *priv) 1079 { 1080 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); 1081 } 1082 1083 static void 1084 mlx5e_disable_async_events(struct mlx5e_priv *priv) 1085 { 1086 mtx_lock(&priv->async_events_mtx); 1087 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); 1088 mtx_unlock(&priv->async_events_mtx); 1089 } 1090 1091 static void mlx5e_calibration_callout(void *arg); 1092 static int mlx5e_calibration_duration = 20; 1093 static int mlx5e_fast_calibration = 1; 1094 static int mlx5e_normal_calibration = 30; 1095 1096 static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW, 0, 1097 "MLX5 timestamp calibration parameteres"); 1098 1099 SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN, 1100 &mlx5e_calibration_duration, 0, 1101 "Duration of initial calibration"); 1102 SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN, 1103 &mlx5e_fast_calibration, 0, 1104 "Recalibration interval during initial calibration"); 1105 SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN, 1106 &mlx5e_normal_calibration, 0, 1107 "Recalibration interval during normal operations"); 1108 1109 /* 1110 * Ignites the calibration process. 1111 */ 1112 static void 1113 mlx5e_reset_calibration_callout(struct mlx5e_priv *priv) 1114 { 1115 1116 if (priv->clbr_done == 0) 1117 mlx5e_calibration_callout(priv); 1118 else 1119 callout_reset_curcpu(&priv->tstmp_clbr, (priv->clbr_done < 1120 mlx5e_calibration_duration ? mlx5e_fast_calibration : 1121 mlx5e_normal_calibration) * hz, mlx5e_calibration_callout, 1122 priv); 1123 } 1124 1125 static uint64_t 1126 mlx5e_timespec2usec(const struct timespec *ts) 1127 { 1128 1129 return ((uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec); 1130 } 1131 1132 static uint64_t 1133 mlx5e_hw_clock(struct mlx5e_priv *priv) 1134 { 1135 struct mlx5_init_seg *iseg; 1136 uint32_t hw_h, hw_h1, hw_l; 1137 1138 iseg = priv->mdev->iseg; 1139 do { 1140 hw_h = ioread32be(&iseg->internal_timer_h); 1141 hw_l = ioread32be(&iseg->internal_timer_l); 1142 hw_h1 = ioread32be(&iseg->internal_timer_h); 1143 } while (hw_h1 != hw_h); 1144 return (((uint64_t)hw_h << 32) | hw_l); 1145 } 1146 1147 /* 1148 * The calibration callout, it runs either in the context of the 1149 * thread which enables calibration, or in callout. It takes the 1150 * snapshot of system and adapter clocks, then advances the pointers to 1151 * the calibration point to allow rx path to read the consistent data 1152 * lockless. 1153 */ 1154 static void 1155 mlx5e_calibration_callout(void *arg) 1156 { 1157 struct mlx5e_priv *priv; 1158 struct mlx5e_clbr_point *next, *curr; 1159 struct timespec ts; 1160 int clbr_curr_next; 1161 1162 priv = arg; 1163 curr = &priv->clbr_points[priv->clbr_curr]; 1164 clbr_curr_next = priv->clbr_curr + 1; 1165 if (clbr_curr_next >= nitems(priv->clbr_points)) 1166 clbr_curr_next = 0; 1167 next = &priv->clbr_points[clbr_curr_next]; 1168 1169 next->base_prev = curr->base_curr; 1170 next->clbr_hw_prev = curr->clbr_hw_curr; 1171 1172 next->clbr_hw_curr = mlx5e_hw_clock(priv); 1173 if (((next->clbr_hw_curr - curr->clbr_hw_curr) >> MLX5E_TSTMP_PREC) == 1174 0) { 1175 if (priv->clbr_done != 0) { 1176 if_printf(priv->ifp, "HW failed tstmp frozen %#jx %#jx," 1177 "disabling\n", 1178 next->clbr_hw_curr, curr->clbr_hw_prev); 1179 priv->clbr_done = 0; 1180 } 1181 atomic_store_rel_int(&curr->clbr_gen, 0); 1182 return; 1183 } 1184 1185 nanouptime(&ts); 1186 next->base_curr = mlx5e_timespec2usec(&ts); 1187 1188 curr->clbr_gen = 0; 1189 atomic_thread_fence_rel(); 1190 priv->clbr_curr = clbr_curr_next; 1191 atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen)); 1192 1193 if (priv->clbr_done < mlx5e_calibration_duration) 1194 priv->clbr_done++; 1195 mlx5e_reset_calibration_callout(priv); 1196 } 1197 1198 static const char *mlx5e_rq_stats_desc[] = { 1199 MLX5E_RQ_STATS(MLX5E_STATS_DESC) 1200 }; 1201 1202 static int 1203 mlx5e_create_rq(struct mlx5e_channel *c, 1204 struct mlx5e_rq_param *param, 1205 struct mlx5e_rq *rq) 1206 { 1207 struct mlx5e_priv *priv = c->priv; 1208 struct mlx5_core_dev *mdev = priv->mdev; 1209 char buffer[16]; 1210 void *rqc = param->rqc; 1211 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); 1212 int wq_sz; 1213 int err; 1214 int i; 1215 u32 nsegs, wqe_sz; 1216 1217 err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); 1218 if (err != 0) 1219 goto done; 1220 1221 /* Create DMA descriptor TAG */ 1222 if ((err = -bus_dma_tag_create( 1223 bus_get_dma_tag(mdev->pdev->dev.bsddev), 1224 1, /* any alignment */ 1225 0, /* no boundary */ 1226 BUS_SPACE_MAXADDR, /* lowaddr */ 1227 BUS_SPACE_MAXADDR, /* highaddr */ 1228 NULL, NULL, /* filter, filterarg */ 1229 nsegs * MLX5E_MAX_RX_BYTES, /* maxsize */ 1230 nsegs, /* nsegments */ 1231 nsegs * MLX5E_MAX_RX_BYTES, /* maxsegsize */ 1232 0, /* flags */ 1233 NULL, NULL, /* lockfunc, lockfuncarg */ 1234 &rq->dma_tag))) 1235 goto done; 1236 1237 err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, 1238 &rq->wq_ctrl); 1239 if (err) 1240 goto err_free_dma_tag; 1241 1242 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; 1243 1244 err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs); 1245 if (err != 0) 1246 goto err_rq_wq_destroy; 1247 1248 wq_sz = mlx5_wq_ll_get_size(&rq->wq); 1249 1250 err = -tcp_lro_init_args(&rq->lro, priv->ifp, TCP_LRO_ENTRIES, wq_sz); 1251 if (err) 1252 goto err_rq_wq_destroy; 1253 1254 rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); 1255 for (i = 0; i != wq_sz; i++) { 1256 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); 1257 int j; 1258 1259 err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map); 1260 if (err != 0) { 1261 while (i--) 1262 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); 1263 goto err_rq_mbuf_free; 1264 } 1265 1266 /* set value for constant fields */ 1267 for (j = 0; j < rq->nsegs; j++) 1268 wqe->data[j].lkey = c->mkey_be; 1269 } 1270 1271 INIT_WORK(&rq->dim.work, mlx5e_dim_work); 1272 if (priv->params.rx_cq_moderation_mode < 2) { 1273 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 1274 } else { 1275 void *cqc = container_of(param, 1276 struct mlx5e_channel_param, rq)->rx_cq.cqc; 1277 1278 switch (MLX5_GET(cqc, cqc, cq_period_mode)) { 1279 case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: 1280 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 1281 break; 1282 case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: 1283 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; 1284 break; 1285 default: 1286 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 1287 break; 1288 } 1289 } 1290 1291 rq->ifp = priv->ifp; 1292 rq->channel = c; 1293 rq->ix = c->ix; 1294 1295 snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix); 1296 mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 1297 buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM, 1298 rq->stats.arg); 1299 return (0); 1300 1301 err_rq_mbuf_free: 1302 free(rq->mbuf, M_MLX5EN); 1303 tcp_lro_free(&rq->lro); 1304 err_rq_wq_destroy: 1305 mlx5_wq_destroy(&rq->wq_ctrl); 1306 err_free_dma_tag: 1307 bus_dma_tag_destroy(rq->dma_tag); 1308 done: 1309 return (err); 1310 } 1311 1312 static void 1313 mlx5e_destroy_rq(struct mlx5e_rq *rq) 1314 { 1315 int wq_sz; 1316 int i; 1317 1318 /* destroy all sysctl nodes */ 1319 sysctl_ctx_free(&rq->stats.ctx); 1320 1321 /* free leftover LRO packets, if any */ 1322 tcp_lro_free(&rq->lro); 1323 1324 wq_sz = mlx5_wq_ll_get_size(&rq->wq); 1325 for (i = 0; i != wq_sz; i++) { 1326 if (rq->mbuf[i].mbuf != NULL) { 1327 bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map); 1328 m_freem(rq->mbuf[i].mbuf); 1329 } 1330 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); 1331 } 1332 free(rq->mbuf, M_MLX5EN); 1333 mlx5_wq_destroy(&rq->wq_ctrl); 1334 bus_dma_tag_destroy(rq->dma_tag); 1335 } 1336 1337 static int 1338 mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) 1339 { 1340 struct mlx5e_channel *c = rq->channel; 1341 struct mlx5e_priv *priv = c->priv; 1342 struct mlx5_core_dev *mdev = priv->mdev; 1343 1344 void *in; 1345 void *rqc; 1346 void *wq; 1347 int inlen; 1348 int err; 1349 1350 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + 1351 sizeof(u64) * rq->wq_ctrl.buf.npages; 1352 in = mlx5_vzalloc(inlen); 1353 if (in == NULL) 1354 return (-ENOMEM); 1355 1356 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 1357 wq = MLX5_ADDR_OF(rqc, rqc, wq); 1358 1359 memcpy(rqc, param->rqc, sizeof(param->rqc)); 1360 1361 MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); 1362 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 1363 MLX5_SET(rqc, rqc, flush_in_error_en, 1); 1364 if (priv->counter_set_id >= 0) 1365 MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id); 1366 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - 1367 PAGE_SHIFT); 1368 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); 1369 1370 mlx5_fill_page_array(&rq->wq_ctrl.buf, 1371 (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); 1372 1373 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); 1374 1375 kvfree(in); 1376 1377 return (err); 1378 } 1379 1380 static int 1381 mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) 1382 { 1383 struct mlx5e_channel *c = rq->channel; 1384 struct mlx5e_priv *priv = c->priv; 1385 struct mlx5_core_dev *mdev = priv->mdev; 1386 1387 void *in; 1388 void *rqc; 1389 int inlen; 1390 int err; 1391 1392 inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 1393 in = mlx5_vzalloc(inlen); 1394 if (in == NULL) 1395 return (-ENOMEM); 1396 1397 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 1398 1399 MLX5_SET(modify_rq_in, in, rqn, rq->rqn); 1400 MLX5_SET(modify_rq_in, in, rq_state, curr_state); 1401 MLX5_SET(rqc, rqc, state, next_state); 1402 1403 err = mlx5_core_modify_rq(mdev, in, inlen); 1404 1405 kvfree(in); 1406 1407 return (err); 1408 } 1409 1410 static void 1411 mlx5e_disable_rq(struct mlx5e_rq *rq) 1412 { 1413 struct mlx5e_channel *c = rq->channel; 1414 struct mlx5e_priv *priv = c->priv; 1415 struct mlx5_core_dev *mdev = priv->mdev; 1416 1417 mlx5_core_destroy_rq(mdev, rq->rqn); 1418 } 1419 1420 static int 1421 mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) 1422 { 1423 struct mlx5e_channel *c = rq->channel; 1424 struct mlx5e_priv *priv = c->priv; 1425 struct mlx5_wq_ll *wq = &rq->wq; 1426 int i; 1427 1428 for (i = 0; i < 1000; i++) { 1429 if (wq->cur_sz >= priv->params.min_rx_wqes) 1430 return (0); 1431 1432 msleep(4); 1433 } 1434 return (-ETIMEDOUT); 1435 } 1436 1437 static int 1438 mlx5e_open_rq(struct mlx5e_channel *c, 1439 struct mlx5e_rq_param *param, 1440 struct mlx5e_rq *rq) 1441 { 1442 int err; 1443 1444 err = mlx5e_create_rq(c, param, rq); 1445 if (err) 1446 return (err); 1447 1448 err = mlx5e_enable_rq(rq, param); 1449 if (err) 1450 goto err_destroy_rq; 1451 1452 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); 1453 if (err) 1454 goto err_disable_rq; 1455 1456 c->rq.enabled = 1; 1457 1458 return (0); 1459 1460 err_disable_rq: 1461 mlx5e_disable_rq(rq); 1462 err_destroy_rq: 1463 mlx5e_destroy_rq(rq); 1464 1465 return (err); 1466 } 1467 1468 static void 1469 mlx5e_close_rq(struct mlx5e_rq *rq) 1470 { 1471 mtx_lock(&rq->mtx); 1472 rq->enabled = 0; 1473 callout_stop(&rq->watchdog); 1474 mtx_unlock(&rq->mtx); 1475 1476 callout_drain(&rq->watchdog); 1477 1478 mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 1479 } 1480 1481 static void 1482 mlx5e_close_rq_wait(struct mlx5e_rq *rq) 1483 { 1484 1485 mlx5e_disable_rq(rq); 1486 mlx5e_close_cq(&rq->cq); 1487 cancel_work_sync(&rq->dim.work); 1488 mlx5e_destroy_rq(rq); 1489 } 1490 1491 void 1492 mlx5e_free_sq_db(struct mlx5e_sq *sq) 1493 { 1494 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 1495 int x; 1496 1497 for (x = 0; x != wq_sz; x++) { 1498 if (sq->mbuf[x].mbuf != NULL) { 1499 bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map); 1500 m_freem(sq->mbuf[x].mbuf); 1501 } 1502 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); 1503 } 1504 free(sq->mbuf, M_MLX5EN); 1505 } 1506 1507 int 1508 mlx5e_alloc_sq_db(struct mlx5e_sq *sq) 1509 { 1510 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 1511 int err; 1512 int x; 1513 1514 sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); 1515 1516 /* Create DMA descriptor MAPs */ 1517 for (x = 0; x != wq_sz; x++) { 1518 err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map); 1519 if (err != 0) { 1520 while (x--) 1521 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); 1522 free(sq->mbuf, M_MLX5EN); 1523 return (err); 1524 } 1525 } 1526 return (0); 1527 } 1528 1529 static const char *mlx5e_sq_stats_desc[] = { 1530 MLX5E_SQ_STATS(MLX5E_STATS_DESC) 1531 }; 1532 1533 void 1534 mlx5e_update_sq_inline(struct mlx5e_sq *sq) 1535 { 1536 sq->max_inline = sq->priv->params.tx_max_inline; 1537 sq->min_inline_mode = sq->priv->params.tx_min_inline_mode; 1538 1539 /* 1540 * Check if trust state is DSCP or if inline mode is NONE which 1541 * indicates CX-5 or newer hardware. 1542 */ 1543 if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP || 1544 sq->min_inline_mode == MLX5_INLINE_MODE_NONE) { 1545 if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert)) 1546 sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN; 1547 else 1548 sq->min_insert_caps = MLX5E_INSERT_NON_VLAN; 1549 } else { 1550 sq->min_insert_caps = 0; 1551 } 1552 } 1553 1554 static void 1555 mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) 1556 { 1557 int i; 1558 1559 for (i = 0; i != c->num_tc; i++) { 1560 mtx_lock(&c->sq[i].lock); 1561 mlx5e_update_sq_inline(&c->sq[i]); 1562 mtx_unlock(&c->sq[i].lock); 1563 } 1564 } 1565 1566 void 1567 mlx5e_refresh_sq_inline(struct mlx5e_priv *priv) 1568 { 1569 int i; 1570 1571 /* check if channels are closed */ 1572 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 1573 return; 1574 1575 for (i = 0; i < priv->params.num_channels; i++) 1576 mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]); 1577 } 1578 1579 static int 1580 mlx5e_create_sq(struct mlx5e_channel *c, 1581 int tc, 1582 struct mlx5e_sq_param *param, 1583 struct mlx5e_sq *sq) 1584 { 1585 struct mlx5e_priv *priv = c->priv; 1586 struct mlx5_core_dev *mdev = priv->mdev; 1587 char buffer[16]; 1588 void *sqc = param->sqc; 1589 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); 1590 int err; 1591 1592 /* Create DMA descriptor TAG */ 1593 if ((err = -bus_dma_tag_create( 1594 bus_get_dma_tag(mdev->pdev->dev.bsddev), 1595 1, /* any alignment */ 1596 0, /* no boundary */ 1597 BUS_SPACE_MAXADDR, /* lowaddr */ 1598 BUS_SPACE_MAXADDR, /* highaddr */ 1599 NULL, NULL, /* filter, filterarg */ 1600 MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */ 1601 MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */ 1602 MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */ 1603 0, /* flags */ 1604 NULL, NULL, /* lockfunc, lockfuncarg */ 1605 &sq->dma_tag))) 1606 goto done; 1607 1608 err = mlx5_alloc_map_uar(mdev, &sq->uar); 1609 if (err) 1610 goto err_free_dma_tag; 1611 1612 err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, 1613 &sq->wq_ctrl); 1614 if (err) 1615 goto err_unmap_free_uar; 1616 1617 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 1618 sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; 1619 1620 err = mlx5e_alloc_sq_db(sq); 1621 if (err) 1622 goto err_sq_wq_destroy; 1623 1624 sq->mkey_be = c->mkey_be; 1625 sq->ifp = priv->ifp; 1626 sq->priv = priv; 1627 sq->tc = tc; 1628 1629 mlx5e_update_sq_inline(sq); 1630 1631 snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc); 1632 mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 1633 buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM, 1634 sq->stats.arg); 1635 1636 return (0); 1637 1638 err_sq_wq_destroy: 1639 mlx5_wq_destroy(&sq->wq_ctrl); 1640 1641 err_unmap_free_uar: 1642 mlx5_unmap_free_uar(mdev, &sq->uar); 1643 1644 err_free_dma_tag: 1645 bus_dma_tag_destroy(sq->dma_tag); 1646 done: 1647 return (err); 1648 } 1649 1650 static void 1651 mlx5e_destroy_sq(struct mlx5e_sq *sq) 1652 { 1653 /* destroy all sysctl nodes */ 1654 sysctl_ctx_free(&sq->stats.ctx); 1655 1656 mlx5e_free_sq_db(sq); 1657 mlx5_wq_destroy(&sq->wq_ctrl); 1658 mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar); 1659 bus_dma_tag_destroy(sq->dma_tag); 1660 } 1661 1662 int 1663 mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param, 1664 int tis_num) 1665 { 1666 void *in; 1667 void *sqc; 1668 void *wq; 1669 int inlen; 1670 int err; 1671 1672 inlen = MLX5_ST_SZ_BYTES(create_sq_in) + 1673 sizeof(u64) * sq->wq_ctrl.buf.npages; 1674 in = mlx5_vzalloc(inlen); 1675 if (in == NULL) 1676 return (-ENOMEM); 1677 1678 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 1679 wq = MLX5_ADDR_OF(sqc, sqc, wq); 1680 1681 memcpy(sqc, param->sqc, sizeof(param->sqc)); 1682 1683 MLX5_SET(sqc, sqc, tis_num_0, tis_num); 1684 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); 1685 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); 1686 MLX5_SET(sqc, sqc, tis_lst_sz, 1); 1687 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 1688 1689 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 1690 MLX5_SET(wq, wq, uar_page, sq->uar.index); 1691 MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - 1692 PAGE_SHIFT); 1693 MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); 1694 1695 mlx5_fill_page_array(&sq->wq_ctrl.buf, 1696 (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); 1697 1698 err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn); 1699 1700 kvfree(in); 1701 1702 return (err); 1703 } 1704 1705 int 1706 mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) 1707 { 1708 void *in; 1709 void *sqc; 1710 int inlen; 1711 int err; 1712 1713 inlen = MLX5_ST_SZ_BYTES(modify_sq_in); 1714 in = mlx5_vzalloc(inlen); 1715 if (in == NULL) 1716 return (-ENOMEM); 1717 1718 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 1719 1720 MLX5_SET(modify_sq_in, in, sqn, sq->sqn); 1721 MLX5_SET(modify_sq_in, in, sq_state, curr_state); 1722 MLX5_SET(sqc, sqc, state, next_state); 1723 1724 err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen); 1725 1726 kvfree(in); 1727 1728 return (err); 1729 } 1730 1731 void 1732 mlx5e_disable_sq(struct mlx5e_sq *sq) 1733 { 1734 1735 mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn); 1736 } 1737 1738 static int 1739 mlx5e_open_sq(struct mlx5e_channel *c, 1740 int tc, 1741 struct mlx5e_sq_param *param, 1742 struct mlx5e_sq *sq) 1743 { 1744 int err; 1745 1746 err = mlx5e_create_sq(c, tc, param, sq); 1747 if (err) 1748 return (err); 1749 1750 err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]); 1751 if (err) 1752 goto err_destroy_sq; 1753 1754 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); 1755 if (err) 1756 goto err_disable_sq; 1757 1758 WRITE_ONCE(sq->running, 1); 1759 1760 return (0); 1761 1762 err_disable_sq: 1763 mlx5e_disable_sq(sq); 1764 err_destroy_sq: 1765 mlx5e_destroy_sq(sq); 1766 1767 return (err); 1768 } 1769 1770 static void 1771 mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep) 1772 { 1773 /* fill up remainder with NOPs */ 1774 while (sq->cev_counter != 0) { 1775 while (!mlx5e_sq_has_room_for(sq, 1)) { 1776 if (can_sleep != 0) { 1777 mtx_unlock(&sq->lock); 1778 msleep(4); 1779 mtx_lock(&sq->lock); 1780 } else { 1781 goto done; 1782 } 1783 } 1784 /* send a single NOP */ 1785 mlx5e_send_nop(sq, 1); 1786 atomic_thread_fence_rel(); 1787 } 1788 done: 1789 /* Check if we need to write the doorbell */ 1790 if (likely(sq->doorbell.d64 != 0)) { 1791 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 1792 sq->doorbell.d64 = 0; 1793 } 1794 } 1795 1796 void 1797 mlx5e_sq_cev_timeout(void *arg) 1798 { 1799 struct mlx5e_sq *sq = arg; 1800 1801 mtx_assert(&sq->lock, MA_OWNED); 1802 1803 /* check next state */ 1804 switch (sq->cev_next_state) { 1805 case MLX5E_CEV_STATE_SEND_NOPS: 1806 /* fill TX ring with NOPs, if any */ 1807 mlx5e_sq_send_nops_locked(sq, 0); 1808 1809 /* check if completed */ 1810 if (sq->cev_counter == 0) { 1811 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 1812 return; 1813 } 1814 break; 1815 default: 1816 /* send NOPs on next timeout */ 1817 sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS; 1818 break; 1819 } 1820 1821 /* restart timer */ 1822 callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq); 1823 } 1824 1825 void 1826 mlx5e_drain_sq(struct mlx5e_sq *sq) 1827 { 1828 int error; 1829 struct mlx5_core_dev *mdev= sq->priv->mdev; 1830 1831 /* 1832 * Check if already stopped. 1833 * 1834 * NOTE: Serialization of this function is managed by the 1835 * caller ensuring the priv's state lock is locked or in case 1836 * of rate limit support, a single thread manages drain and 1837 * resume of SQs. The "running" variable can therefore safely 1838 * be read without any locks. 1839 */ 1840 if (READ_ONCE(sq->running) == 0) 1841 return; 1842 1843 /* don't put more packets into the SQ */ 1844 WRITE_ONCE(sq->running, 0); 1845 1846 /* serialize access to DMA rings */ 1847 mtx_lock(&sq->lock); 1848 1849 /* teardown event factor timer, if any */ 1850 sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS; 1851 callout_stop(&sq->cev_callout); 1852 1853 /* send dummy NOPs in order to flush the transmit ring */ 1854 mlx5e_sq_send_nops_locked(sq, 1); 1855 mtx_unlock(&sq->lock); 1856 1857 /* make sure it is safe to free the callout */ 1858 callout_drain(&sq->cev_callout); 1859 1860 /* wait till SQ is empty or link is down */ 1861 mtx_lock(&sq->lock); 1862 while (sq->cc != sq->pc && 1863 (sq->priv->media_status_last & IFM_ACTIVE) != 0 && 1864 mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1865 mtx_unlock(&sq->lock); 1866 msleep(1); 1867 sq->cq.mcq.comp(&sq->cq.mcq); 1868 mtx_lock(&sq->lock); 1869 } 1870 mtx_unlock(&sq->lock); 1871 1872 /* error out remaining requests */ 1873 error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); 1874 if (error != 0) { 1875 if_printf(sq->ifp, 1876 "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error); 1877 } 1878 1879 /* wait till SQ is empty */ 1880 mtx_lock(&sq->lock); 1881 while (sq->cc != sq->pc && 1882 mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1883 mtx_unlock(&sq->lock); 1884 msleep(1); 1885 sq->cq.mcq.comp(&sq->cq.mcq); 1886 mtx_lock(&sq->lock); 1887 } 1888 mtx_unlock(&sq->lock); 1889 } 1890 1891 static void 1892 mlx5e_close_sq_wait(struct mlx5e_sq *sq) 1893 { 1894 1895 mlx5e_drain_sq(sq); 1896 mlx5e_disable_sq(sq); 1897 mlx5e_destroy_sq(sq); 1898 } 1899 1900 static int 1901 mlx5e_create_cq(struct mlx5e_priv *priv, 1902 struct mlx5e_cq_param *param, 1903 struct mlx5e_cq *cq, 1904 mlx5e_cq_comp_t *comp, 1905 int eq_ix) 1906 { 1907 struct mlx5_core_dev *mdev = priv->mdev; 1908 struct mlx5_core_cq *mcq = &cq->mcq; 1909 int eqn_not_used; 1910 int irqn; 1911 int err; 1912 u32 i; 1913 1914 param->wq.buf_numa_node = 0; 1915 param->wq.db_numa_node = 0; 1916 1917 err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, 1918 &cq->wq_ctrl); 1919 if (err) 1920 return (err); 1921 1922 mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn); 1923 1924 mcq->cqe_sz = 64; 1925 mcq->set_ci_db = cq->wq_ctrl.db.db; 1926 mcq->arm_db = cq->wq_ctrl.db.db + 1; 1927 *mcq->set_ci_db = 0; 1928 *mcq->arm_db = 0; 1929 mcq->vector = eq_ix; 1930 mcq->comp = comp; 1931 mcq->event = mlx5e_cq_error_event; 1932 mcq->irqn = irqn; 1933 mcq->uar = &priv->cq_uar; 1934 1935 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { 1936 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); 1937 1938 cqe->op_own = 0xf1; 1939 } 1940 1941 cq->priv = priv; 1942 1943 return (0); 1944 } 1945 1946 static void 1947 mlx5e_destroy_cq(struct mlx5e_cq *cq) 1948 { 1949 mlx5_wq_destroy(&cq->wq_ctrl); 1950 } 1951 1952 static int 1953 mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix) 1954 { 1955 struct mlx5_core_cq *mcq = &cq->mcq; 1956 void *in; 1957 void *cqc; 1958 int inlen; 1959 int irqn_not_used; 1960 int eqn; 1961 int err; 1962 1963 inlen = MLX5_ST_SZ_BYTES(create_cq_in) + 1964 sizeof(u64) * cq->wq_ctrl.buf.npages; 1965 in = mlx5_vzalloc(inlen); 1966 if (in == NULL) 1967 return (-ENOMEM); 1968 1969 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); 1970 1971 memcpy(cqc, param->cqc, sizeof(param->cqc)); 1972 1973 mlx5_fill_page_array(&cq->wq_ctrl.buf, 1974 (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas)); 1975 1976 mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used); 1977 1978 MLX5_SET(cqc, cqc, c_eqn, eqn); 1979 MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); 1980 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - 1981 PAGE_SHIFT); 1982 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); 1983 1984 err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen); 1985 1986 kvfree(in); 1987 1988 if (err) 1989 return (err); 1990 1991 mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock)); 1992 1993 return (0); 1994 } 1995 1996 static void 1997 mlx5e_disable_cq(struct mlx5e_cq *cq) 1998 { 1999 2000 mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq); 2001 } 2002 2003 int 2004 mlx5e_open_cq(struct mlx5e_priv *priv, 2005 struct mlx5e_cq_param *param, 2006 struct mlx5e_cq *cq, 2007 mlx5e_cq_comp_t *comp, 2008 int eq_ix) 2009 { 2010 int err; 2011 2012 err = mlx5e_create_cq(priv, param, cq, comp, eq_ix); 2013 if (err) 2014 return (err); 2015 2016 err = mlx5e_enable_cq(cq, param, eq_ix); 2017 if (err) 2018 goto err_destroy_cq; 2019 2020 return (0); 2021 2022 err_destroy_cq: 2023 mlx5e_destroy_cq(cq); 2024 2025 return (err); 2026 } 2027 2028 void 2029 mlx5e_close_cq(struct mlx5e_cq *cq) 2030 { 2031 mlx5e_disable_cq(cq); 2032 mlx5e_destroy_cq(cq); 2033 } 2034 2035 static int 2036 mlx5e_open_tx_cqs(struct mlx5e_channel *c, 2037 struct mlx5e_channel_param *cparam) 2038 { 2039 int err; 2040 int tc; 2041 2042 for (tc = 0; tc < c->num_tc; tc++) { 2043 /* open completion queue */ 2044 err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq, 2045 &mlx5e_tx_cq_comp, c->ix); 2046 if (err) 2047 goto err_close_tx_cqs; 2048 } 2049 return (0); 2050 2051 err_close_tx_cqs: 2052 for (tc--; tc >= 0; tc--) 2053 mlx5e_close_cq(&c->sq[tc].cq); 2054 2055 return (err); 2056 } 2057 2058 static void 2059 mlx5e_close_tx_cqs(struct mlx5e_channel *c) 2060 { 2061 int tc; 2062 2063 for (tc = 0; tc < c->num_tc; tc++) 2064 mlx5e_close_cq(&c->sq[tc].cq); 2065 } 2066 2067 static int 2068 mlx5e_open_sqs(struct mlx5e_channel *c, 2069 struct mlx5e_channel_param *cparam) 2070 { 2071 int err; 2072 int tc; 2073 2074 for (tc = 0; tc < c->num_tc; tc++) { 2075 err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); 2076 if (err) 2077 goto err_close_sqs; 2078 } 2079 2080 return (0); 2081 2082 err_close_sqs: 2083 for (tc--; tc >= 0; tc--) 2084 mlx5e_close_sq_wait(&c->sq[tc]); 2085 2086 return (err); 2087 } 2088 2089 static void 2090 mlx5e_close_sqs_wait(struct mlx5e_channel *c) 2091 { 2092 int tc; 2093 2094 for (tc = 0; tc < c->num_tc; tc++) 2095 mlx5e_close_sq_wait(&c->sq[tc]); 2096 } 2097 2098 static void 2099 mlx5e_chan_mtx_init(struct mlx5e_channel *c) 2100 { 2101 int tc; 2102 2103 mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF); 2104 2105 callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0); 2106 2107 for (tc = 0; tc < c->num_tc; tc++) { 2108 struct mlx5e_sq *sq = c->sq + tc; 2109 2110 mtx_init(&sq->lock, "mlx5tx", 2111 MTX_NETWORK_LOCK " TX", MTX_DEF); 2112 mtx_init(&sq->comp_lock, "mlx5comp", 2113 MTX_NETWORK_LOCK " TX", MTX_DEF); 2114 2115 callout_init_mtx(&sq->cev_callout, &sq->lock, 0); 2116 2117 sq->cev_factor = c->priv->params_ethtool.tx_completion_fact; 2118 2119 /* ensure the TX completion event factor is not zero */ 2120 if (sq->cev_factor == 0) 2121 sq->cev_factor = 1; 2122 } 2123 } 2124 2125 static void 2126 mlx5e_chan_mtx_destroy(struct mlx5e_channel *c) 2127 { 2128 int tc; 2129 2130 mtx_destroy(&c->rq.mtx); 2131 2132 for (tc = 0; tc < c->num_tc; tc++) { 2133 mtx_destroy(&c->sq[tc].lock); 2134 mtx_destroy(&c->sq[tc].comp_lock); 2135 } 2136 } 2137 2138 static int 2139 mlx5e_open_channel(struct mlx5e_priv *priv, int ix, 2140 struct mlx5e_channel_param *cparam, 2141 struct mlx5e_channel *c) 2142 { 2143 int err; 2144 2145 memset(c, 0, sizeof(*c)); 2146 2147 c->priv = priv; 2148 c->ix = ix; 2149 /* setup send tag */ 2150 c->tag.type = IF_SND_TAG_TYPE_UNLIMITED; 2151 c->mkey_be = cpu_to_be32(priv->mr.key); 2152 c->num_tc = priv->num_tc; 2153 2154 /* init mutexes */ 2155 mlx5e_chan_mtx_init(c); 2156 2157 /* open transmit completion queue */ 2158 err = mlx5e_open_tx_cqs(c, cparam); 2159 if (err) 2160 goto err_free; 2161 2162 /* open receive completion queue */ 2163 err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq, 2164 &mlx5e_rx_cq_comp, c->ix); 2165 if (err) 2166 goto err_close_tx_cqs; 2167 2168 err = mlx5e_open_sqs(c, cparam); 2169 if (err) 2170 goto err_close_rx_cq; 2171 2172 err = mlx5e_open_rq(c, &cparam->rq, &c->rq); 2173 if (err) 2174 goto err_close_sqs; 2175 2176 /* poll receive queue initially */ 2177 c->rq.cq.mcq.comp(&c->rq.cq.mcq); 2178 2179 return (0); 2180 2181 err_close_sqs: 2182 mlx5e_close_sqs_wait(c); 2183 2184 err_close_rx_cq: 2185 mlx5e_close_cq(&c->rq.cq); 2186 2187 err_close_tx_cqs: 2188 mlx5e_close_tx_cqs(c); 2189 2190 err_free: 2191 /* destroy mutexes */ 2192 mlx5e_chan_mtx_destroy(c); 2193 return (err); 2194 } 2195 2196 static void 2197 mlx5e_close_channel(struct mlx5e_channel *c) 2198 { 2199 mlx5e_close_rq(&c->rq); 2200 } 2201 2202 static void 2203 mlx5e_close_channel_wait(struct mlx5e_channel *c) 2204 { 2205 mlx5e_close_rq_wait(&c->rq); 2206 mlx5e_close_sqs_wait(c); 2207 mlx5e_close_tx_cqs(c); 2208 /* destroy mutexes */ 2209 mlx5e_chan_mtx_destroy(c); 2210 } 2211 2212 static int 2213 mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs) 2214 { 2215 u32 r, n; 2216 2217 r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz : 2218 MLX5E_SW2MB_MTU(priv->ifp->if_mtu); 2219 if (r > MJUM16BYTES) 2220 return (-ENOMEM); 2221 2222 if (r > MJUM9BYTES) 2223 r = MJUM16BYTES; 2224 else if (r > MJUMPAGESIZE) 2225 r = MJUM9BYTES; 2226 else if (r > MCLBYTES) 2227 r = MJUMPAGESIZE; 2228 else 2229 r = MCLBYTES; 2230 2231 /* 2232 * n + 1 must be a power of two, because stride size must be. 2233 * Stride size is 16 * (n + 1), as the first segment is 2234 * control. 2235 */ 2236 for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++) 2237 ; 2238 2239 if (n > MLX5E_MAX_BUSDMA_RX_SEGS) 2240 return (-ENOMEM); 2241 2242 *wqe_sz = r; 2243 *nsegs = n; 2244 return (0); 2245 } 2246 2247 static void 2248 mlx5e_build_rq_param(struct mlx5e_priv *priv, 2249 struct mlx5e_rq_param *param) 2250 { 2251 void *rqc = param->rqc; 2252 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 2253 u32 wqe_sz, nsegs; 2254 2255 mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); 2256 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); 2257 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 2258 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + 2259 nsegs * sizeof(struct mlx5_wqe_data_seg))); 2260 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); 2261 MLX5_SET(wq, wq, pd, priv->pdn); 2262 2263 param->wq.buf_numa_node = 0; 2264 param->wq.db_numa_node = 0; 2265 param->wq.linear = 1; 2266 } 2267 2268 static void 2269 mlx5e_build_sq_param(struct mlx5e_priv *priv, 2270 struct mlx5e_sq_param *param) 2271 { 2272 void *sqc = param->sqc; 2273 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 2274 2275 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); 2276 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 2277 MLX5_SET(wq, wq, pd, priv->pdn); 2278 2279 param->wq.buf_numa_node = 0; 2280 param->wq.db_numa_node = 0; 2281 param->wq.linear = 1; 2282 } 2283 2284 static void 2285 mlx5e_build_common_cq_param(struct mlx5e_priv *priv, 2286 struct mlx5e_cq_param *param) 2287 { 2288 void *cqc = param->cqc; 2289 2290 MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); 2291 } 2292 2293 static void 2294 mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr) 2295 { 2296 2297 *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE); 2298 2299 /* apply LRO restrictions */ 2300 if (priv->params.hw_lro_en && 2301 ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) { 2302 ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO; 2303 } 2304 } 2305 2306 static void 2307 mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, 2308 struct mlx5e_cq_param *param) 2309 { 2310 struct net_dim_cq_moder curr; 2311 void *cqc = param->cqc; 2312 2313 /* 2314 * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE 2315 * format is more beneficial for FreeBSD use case. 2316 * 2317 * Adding support for MLX5_CQE_FORMAT_CSUM will require changes 2318 * in mlx5e_decompress_cqe. 2319 */ 2320 if (priv->params.cqe_zipping_en) { 2321 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH); 2322 MLX5_SET(cqc, cqc, cqe_compression_en, 1); 2323 } 2324 2325 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); 2326 2327 switch (priv->params.rx_cq_moderation_mode) { 2328 case 0: 2329 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2330 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2331 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2332 break; 2333 case 1: 2334 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2335 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2336 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2337 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2338 else 2339 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2340 break; 2341 case 2: 2342 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr); 2343 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2344 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2345 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2346 break; 2347 case 3: 2348 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr); 2349 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2350 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2351 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2352 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2353 else 2354 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2355 break; 2356 default: 2357 break; 2358 } 2359 2360 mlx5e_dim_build_cq_param(priv, param); 2361 2362 mlx5e_build_common_cq_param(priv, param); 2363 } 2364 2365 static void 2366 mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, 2367 struct mlx5e_cq_param *param) 2368 { 2369 void *cqc = param->cqc; 2370 2371 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); 2372 MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec); 2373 MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts); 2374 2375 switch (priv->params.tx_cq_moderation_mode) { 2376 case 0: 2377 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2378 break; 2379 default: 2380 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2381 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2382 else 2383 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2384 break; 2385 } 2386 2387 mlx5e_build_common_cq_param(priv, param); 2388 } 2389 2390 static void 2391 mlx5e_build_channel_param(struct mlx5e_priv *priv, 2392 struct mlx5e_channel_param *cparam) 2393 { 2394 memset(cparam, 0, sizeof(*cparam)); 2395 2396 mlx5e_build_rq_param(priv, &cparam->rq); 2397 mlx5e_build_sq_param(priv, &cparam->sq); 2398 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); 2399 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); 2400 } 2401 2402 static int 2403 mlx5e_open_channels(struct mlx5e_priv *priv) 2404 { 2405 struct mlx5e_channel_param cparam; 2406 int err; 2407 int i; 2408 int j; 2409 2410 mlx5e_build_channel_param(priv, &cparam); 2411 for (i = 0; i < priv->params.num_channels; i++) { 2412 err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); 2413 if (err) 2414 goto err_close_channels; 2415 } 2416 2417 for (j = 0; j < priv->params.num_channels; j++) { 2418 err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq); 2419 if (err) 2420 goto err_close_channels; 2421 } 2422 return (0); 2423 2424 err_close_channels: 2425 while (i--) { 2426 mlx5e_close_channel(&priv->channel[i]); 2427 mlx5e_close_channel_wait(&priv->channel[i]); 2428 } 2429 return (err); 2430 } 2431 2432 static void 2433 mlx5e_close_channels(struct mlx5e_priv *priv) 2434 { 2435 int i; 2436 2437 for (i = 0; i < priv->params.num_channels; i++) 2438 mlx5e_close_channel(&priv->channel[i]); 2439 for (i = 0; i < priv->params.num_channels; i++) 2440 mlx5e_close_channel_wait(&priv->channel[i]); 2441 } 2442 2443 static int 2444 mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) 2445 { 2446 2447 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2448 uint8_t cq_mode; 2449 2450 switch (priv->params.tx_cq_moderation_mode) { 2451 case 0: 2452 case 2: 2453 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2454 break; 2455 default: 2456 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2457 break; 2458 } 2459 2460 return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, 2461 priv->params.tx_cq_moderation_usec, 2462 priv->params.tx_cq_moderation_pkts, 2463 cq_mode)); 2464 } 2465 2466 return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, 2467 priv->params.tx_cq_moderation_usec, 2468 priv->params.tx_cq_moderation_pkts)); 2469 } 2470 2471 static int 2472 mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) 2473 { 2474 2475 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2476 uint8_t cq_mode; 2477 uint8_t dim_mode; 2478 int retval; 2479 2480 switch (priv->params.rx_cq_moderation_mode) { 2481 case 0: 2482 case 2: 2483 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2484 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 2485 break; 2486 default: 2487 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2488 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; 2489 break; 2490 } 2491 2492 /* tear down dynamic interrupt moderation */ 2493 mtx_lock(&rq->mtx); 2494 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 2495 mtx_unlock(&rq->mtx); 2496 2497 /* wait for dynamic interrupt moderation work task, if any */ 2498 cancel_work_sync(&rq->dim.work); 2499 2500 if (priv->params.rx_cq_moderation_mode >= 2) { 2501 struct net_dim_cq_moder curr; 2502 2503 mlx5e_get_default_profile(priv, dim_mode, &curr); 2504 2505 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2506 curr.usec, curr.pkts, cq_mode); 2507 2508 /* set dynamic interrupt moderation mode and zero defaults */ 2509 mtx_lock(&rq->mtx); 2510 rq->dim.mode = dim_mode; 2511 rq->dim.state = 0; 2512 rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE; 2513 mtx_unlock(&rq->mtx); 2514 } else { 2515 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2516 priv->params.rx_cq_moderation_usec, 2517 priv->params.rx_cq_moderation_pkts, 2518 cq_mode); 2519 } 2520 return (retval); 2521 } 2522 2523 return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, 2524 priv->params.rx_cq_moderation_usec, 2525 priv->params.rx_cq_moderation_pkts)); 2526 } 2527 2528 static int 2529 mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) 2530 { 2531 int err; 2532 int i; 2533 2534 err = mlx5e_refresh_rq_params(priv, &c->rq); 2535 if (err) 2536 goto done; 2537 2538 for (i = 0; i != c->num_tc; i++) { 2539 err = mlx5e_refresh_sq_params(priv, &c->sq[i]); 2540 if (err) 2541 goto done; 2542 } 2543 done: 2544 return (err); 2545 } 2546 2547 int 2548 mlx5e_refresh_channel_params(struct mlx5e_priv *priv) 2549 { 2550 int i; 2551 2552 /* check if channels are closed */ 2553 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2554 return (EINVAL); 2555 2556 for (i = 0; i < priv->params.num_channels; i++) { 2557 int err; 2558 2559 err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]); 2560 if (err) 2561 return (err); 2562 } 2563 return (0); 2564 } 2565 2566 static int 2567 mlx5e_open_tis(struct mlx5e_priv *priv, int tc) 2568 { 2569 struct mlx5_core_dev *mdev = priv->mdev; 2570 u32 in[MLX5_ST_SZ_DW(create_tis_in)]; 2571 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 2572 2573 memset(in, 0, sizeof(in)); 2574 2575 MLX5_SET(tisc, tisc, prio, tc); 2576 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); 2577 2578 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); 2579 } 2580 2581 static void 2582 mlx5e_close_tis(struct mlx5e_priv *priv, int tc) 2583 { 2584 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); 2585 } 2586 2587 static int 2588 mlx5e_open_tises(struct mlx5e_priv *priv) 2589 { 2590 int num_tc = priv->num_tc; 2591 int err; 2592 int tc; 2593 2594 for (tc = 0; tc < num_tc; tc++) { 2595 err = mlx5e_open_tis(priv, tc); 2596 if (err) 2597 goto err_close_tises; 2598 } 2599 2600 return (0); 2601 2602 err_close_tises: 2603 for (tc--; tc >= 0; tc--) 2604 mlx5e_close_tis(priv, tc); 2605 2606 return (err); 2607 } 2608 2609 static void 2610 mlx5e_close_tises(struct mlx5e_priv *priv) 2611 { 2612 int num_tc = priv->num_tc; 2613 int tc; 2614 2615 for (tc = 0; tc < num_tc; tc++) 2616 mlx5e_close_tis(priv, tc); 2617 } 2618 2619 static int 2620 mlx5e_open_rqt(struct mlx5e_priv *priv) 2621 { 2622 struct mlx5_core_dev *mdev = priv->mdev; 2623 u32 *in; 2624 u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; 2625 void *rqtc; 2626 int inlen; 2627 int err; 2628 int sz; 2629 int i; 2630 2631 sz = 1 << priv->params.rx_hash_log_tbl_sz; 2632 2633 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 2634 in = mlx5_vzalloc(inlen); 2635 if (in == NULL) 2636 return (-ENOMEM); 2637 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 2638 2639 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 2640 MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 2641 2642 for (i = 0; i < sz; i++) { 2643 int ix = i; 2644 #ifdef RSS 2645 ix = rss_get_indirection_to_bucket(ix); 2646 #endif 2647 /* ensure we don't overflow */ 2648 ix %= priv->params.num_channels; 2649 2650 /* apply receive side scaling stride, if any */ 2651 ix -= ix % (int)priv->params.channels_rsss; 2652 2653 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn); 2654 } 2655 2656 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); 2657 2658 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); 2659 if (!err) 2660 priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); 2661 2662 kvfree(in); 2663 2664 return (err); 2665 } 2666 2667 static void 2668 mlx5e_close_rqt(struct mlx5e_priv *priv) 2669 { 2670 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; 2671 u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; 2672 2673 MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); 2674 MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); 2675 2676 mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); 2677 } 2678 2679 static void 2680 mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt) 2681 { 2682 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 2683 __be32 *hkey; 2684 2685 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 2686 2687 #define ROUGH_MAX_L2_L3_HDR_SZ 256 2688 2689 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2690 MLX5_HASH_FIELD_SEL_DST_IP) 2691 2692 #define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2693 MLX5_HASH_FIELD_SEL_DST_IP |\ 2694 MLX5_HASH_FIELD_SEL_L4_SPORT |\ 2695 MLX5_HASH_FIELD_SEL_L4_DPORT) 2696 2697 #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2698 MLX5_HASH_FIELD_SEL_DST_IP |\ 2699 MLX5_HASH_FIELD_SEL_IPSEC_SPI) 2700 2701 if (priv->params.hw_lro_en) { 2702 MLX5_SET(tirc, tirc, lro_enable_mask, 2703 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 2704 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); 2705 MLX5_SET(tirc, tirc, lro_max_msg_sz, 2706 (priv->params.lro_wqe_sz - 2707 ROUGH_MAX_L2_L3_HDR_SZ) >> 8); 2708 /* TODO: add the option to choose timer value dynamically */ 2709 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 2710 MLX5_CAP_ETH(priv->mdev, 2711 lro_timer_supported_periods[2])); 2712 } 2713 2714 /* setup parameters for hashing TIR type, if any */ 2715 switch (tt) { 2716 case MLX5E_TT_ANY: 2717 MLX5_SET(tirc, tirc, disp_type, 2718 MLX5_TIRC_DISP_TYPE_DIRECT); 2719 MLX5_SET(tirc, tirc, inline_rqn, 2720 priv->channel[0].rq.rqn); 2721 break; 2722 default: 2723 MLX5_SET(tirc, tirc, disp_type, 2724 MLX5_TIRC_DISP_TYPE_INDIRECT); 2725 MLX5_SET(tirc, tirc, indirect_table, 2726 priv->rqtn); 2727 MLX5_SET(tirc, tirc, rx_hash_fn, 2728 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); 2729 hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 2730 #ifdef RSS 2731 /* 2732 * The FreeBSD RSS implementation does currently not 2733 * support symmetric Toeplitz hashes: 2734 */ 2735 MLX5_SET(tirc, tirc, rx_hash_symmetric, 0); 2736 rss_getkey((uint8_t *)hkey); 2737 #else 2738 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2739 hkey[0] = cpu_to_be32(0xD181C62C); 2740 hkey[1] = cpu_to_be32(0xF7F4DB5B); 2741 hkey[2] = cpu_to_be32(0x1983A2FC); 2742 hkey[3] = cpu_to_be32(0x943E1ADB); 2743 hkey[4] = cpu_to_be32(0xD9389E6B); 2744 hkey[5] = cpu_to_be32(0xD1039C2C); 2745 hkey[6] = cpu_to_be32(0xA74499AD); 2746 hkey[7] = cpu_to_be32(0x593D56D9); 2747 hkey[8] = cpu_to_be32(0xF3253C06); 2748 hkey[9] = cpu_to_be32(0x2ADC1FFC); 2749 #endif 2750 break; 2751 } 2752 2753 switch (tt) { 2754 case MLX5E_TT_IPV4_TCP: 2755 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2756 MLX5_L3_PROT_TYPE_IPV4); 2757 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2758 MLX5_L4_PROT_TYPE_TCP); 2759 #ifdef RSS 2760 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) { 2761 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2762 MLX5_HASH_IP); 2763 } else 2764 #endif 2765 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2766 MLX5_HASH_ALL); 2767 break; 2768 2769 case MLX5E_TT_IPV6_TCP: 2770 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2771 MLX5_L3_PROT_TYPE_IPV6); 2772 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2773 MLX5_L4_PROT_TYPE_TCP); 2774 #ifdef RSS 2775 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) { 2776 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2777 MLX5_HASH_IP); 2778 } else 2779 #endif 2780 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2781 MLX5_HASH_ALL); 2782 break; 2783 2784 case MLX5E_TT_IPV4_UDP: 2785 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2786 MLX5_L3_PROT_TYPE_IPV4); 2787 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2788 MLX5_L4_PROT_TYPE_UDP); 2789 #ifdef RSS 2790 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) { 2791 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2792 MLX5_HASH_IP); 2793 } else 2794 #endif 2795 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2796 MLX5_HASH_ALL); 2797 break; 2798 2799 case MLX5E_TT_IPV6_UDP: 2800 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2801 MLX5_L3_PROT_TYPE_IPV6); 2802 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2803 MLX5_L4_PROT_TYPE_UDP); 2804 #ifdef RSS 2805 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) { 2806 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2807 MLX5_HASH_IP); 2808 } else 2809 #endif 2810 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2811 MLX5_HASH_ALL); 2812 break; 2813 2814 case MLX5E_TT_IPV4_IPSEC_AH: 2815 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2816 MLX5_L3_PROT_TYPE_IPV4); 2817 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2818 MLX5_HASH_IP_IPSEC_SPI); 2819 break; 2820 2821 case MLX5E_TT_IPV6_IPSEC_AH: 2822 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2823 MLX5_L3_PROT_TYPE_IPV6); 2824 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2825 MLX5_HASH_IP_IPSEC_SPI); 2826 break; 2827 2828 case MLX5E_TT_IPV4_IPSEC_ESP: 2829 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2830 MLX5_L3_PROT_TYPE_IPV4); 2831 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2832 MLX5_HASH_IP_IPSEC_SPI); 2833 break; 2834 2835 case MLX5E_TT_IPV6_IPSEC_ESP: 2836 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2837 MLX5_L3_PROT_TYPE_IPV6); 2838 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2839 MLX5_HASH_IP_IPSEC_SPI); 2840 break; 2841 2842 case MLX5E_TT_IPV4: 2843 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2844 MLX5_L3_PROT_TYPE_IPV4); 2845 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2846 MLX5_HASH_IP); 2847 break; 2848 2849 case MLX5E_TT_IPV6: 2850 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2851 MLX5_L3_PROT_TYPE_IPV6); 2852 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2853 MLX5_HASH_IP); 2854 break; 2855 2856 default: 2857 break; 2858 } 2859 } 2860 2861 static int 2862 mlx5e_open_tir(struct mlx5e_priv *priv, int tt) 2863 { 2864 struct mlx5_core_dev *mdev = priv->mdev; 2865 u32 *in; 2866 void *tirc; 2867 int inlen; 2868 int err; 2869 2870 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 2871 in = mlx5_vzalloc(inlen); 2872 if (in == NULL) 2873 return (-ENOMEM); 2874 tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context); 2875 2876 mlx5e_build_tir_ctx(priv, tirc, tt); 2877 2878 err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); 2879 2880 kvfree(in); 2881 2882 return (err); 2883 } 2884 2885 static void 2886 mlx5e_close_tir(struct mlx5e_priv *priv, int tt) 2887 { 2888 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); 2889 } 2890 2891 static int 2892 mlx5e_open_tirs(struct mlx5e_priv *priv) 2893 { 2894 int err; 2895 int i; 2896 2897 for (i = 0; i < MLX5E_NUM_TT; i++) { 2898 err = mlx5e_open_tir(priv, i); 2899 if (err) 2900 goto err_close_tirs; 2901 } 2902 2903 return (0); 2904 2905 err_close_tirs: 2906 for (i--; i >= 0; i--) 2907 mlx5e_close_tir(priv, i); 2908 2909 return (err); 2910 } 2911 2912 static void 2913 mlx5e_close_tirs(struct mlx5e_priv *priv) 2914 { 2915 int i; 2916 2917 for (i = 0; i < MLX5E_NUM_TT; i++) 2918 mlx5e_close_tir(priv, i); 2919 } 2920 2921 /* 2922 * SW MTU does not include headers, 2923 * HW MTU includes all headers and checksums. 2924 */ 2925 static int 2926 mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu) 2927 { 2928 struct mlx5e_priv *priv = ifp->if_softc; 2929 struct mlx5_core_dev *mdev = priv->mdev; 2930 int hw_mtu; 2931 int err; 2932 2933 hw_mtu = MLX5E_SW2HW_MTU(sw_mtu); 2934 2935 err = mlx5_set_port_mtu(mdev, hw_mtu); 2936 if (err) { 2937 if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n", 2938 __func__, sw_mtu, err); 2939 return (err); 2940 } 2941 2942 /* Update vport context MTU */ 2943 err = mlx5_set_vport_mtu(mdev, hw_mtu); 2944 if (err) { 2945 if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n", 2946 __func__, err); 2947 } 2948 2949 ifp->if_mtu = sw_mtu; 2950 2951 err = mlx5_query_vport_mtu(mdev, &hw_mtu); 2952 if (err || !hw_mtu) { 2953 /* fallback to port oper mtu */ 2954 err = mlx5_query_port_oper_mtu(mdev, &hw_mtu); 2955 } 2956 if (err) { 2957 if_printf(ifp, "Query port MTU, after setting new " 2958 "MTU value, failed\n"); 2959 return (err); 2960 } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) { 2961 err = -E2BIG, 2962 if_printf(ifp, "Port MTU %d is smaller than " 2963 "ifp mtu %d\n", hw_mtu, sw_mtu); 2964 } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) { 2965 err = -EINVAL; 2966 if_printf(ifp, "Port MTU %d is bigger than " 2967 "ifp mtu %d\n", hw_mtu, sw_mtu); 2968 } 2969 priv->params_ethtool.hw_mtu = hw_mtu; 2970 2971 return (err); 2972 } 2973 2974 int 2975 mlx5e_open_locked(struct ifnet *ifp) 2976 { 2977 struct mlx5e_priv *priv = ifp->if_softc; 2978 int err; 2979 u16 set_id; 2980 2981 /* check if already opened */ 2982 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 2983 return (0); 2984 2985 #ifdef RSS 2986 if (rss_getnumbuckets() > priv->params.num_channels) { 2987 if_printf(ifp, "NOTE: There are more RSS buckets(%u) than " 2988 "channels(%u) available\n", rss_getnumbuckets(), 2989 priv->params.num_channels); 2990 } 2991 #endif 2992 err = mlx5e_open_tises(priv); 2993 if (err) { 2994 if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n", 2995 __func__, err); 2996 return (err); 2997 } 2998 err = mlx5_vport_alloc_q_counter(priv->mdev, 2999 MLX5_INTERFACE_PROTOCOL_ETH, &set_id); 3000 if (err) { 3001 if_printf(priv->ifp, 3002 "%s: mlx5_vport_alloc_q_counter failed: %d\n", 3003 __func__, err); 3004 goto err_close_tises; 3005 } 3006 /* store counter set ID */ 3007 priv->counter_set_id = set_id; 3008 3009 err = mlx5e_open_channels(priv); 3010 if (err) { 3011 if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n", 3012 __func__, err); 3013 goto err_dalloc_q_counter; 3014 } 3015 err = mlx5e_open_rqt(priv); 3016 if (err) { 3017 if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n", 3018 __func__, err); 3019 goto err_close_channels; 3020 } 3021 err = mlx5e_open_tirs(priv); 3022 if (err) { 3023 if_printf(ifp, "%s: mlx5e_open_tir failed, %d\n", 3024 __func__, err); 3025 goto err_close_rqls; 3026 } 3027 err = mlx5e_open_flow_table(priv); 3028 if (err) { 3029 if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n", 3030 __func__, err); 3031 goto err_close_tirs; 3032 } 3033 err = mlx5e_add_all_vlan_rules(priv); 3034 if (err) { 3035 if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n", 3036 __func__, err); 3037 goto err_close_flow_table; 3038 } 3039 set_bit(MLX5E_STATE_OPENED, &priv->state); 3040 3041 mlx5e_update_carrier(priv); 3042 mlx5e_set_rx_mode_core(priv); 3043 3044 return (0); 3045 3046 err_close_flow_table: 3047 mlx5e_close_flow_table(priv); 3048 3049 err_close_tirs: 3050 mlx5e_close_tirs(priv); 3051 3052 err_close_rqls: 3053 mlx5e_close_rqt(priv); 3054 3055 err_close_channels: 3056 mlx5e_close_channels(priv); 3057 3058 err_dalloc_q_counter: 3059 mlx5_vport_dealloc_q_counter(priv->mdev, 3060 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 3061 3062 err_close_tises: 3063 mlx5e_close_tises(priv); 3064 3065 return (err); 3066 } 3067 3068 static void 3069 mlx5e_open(void *arg) 3070 { 3071 struct mlx5e_priv *priv = arg; 3072 3073 PRIV_LOCK(priv); 3074 if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) 3075 if_printf(priv->ifp, 3076 "%s: Setting port status to up failed\n", 3077 __func__); 3078 3079 mlx5e_open_locked(priv->ifp); 3080 priv->ifp->if_drv_flags |= IFF_DRV_RUNNING; 3081 PRIV_UNLOCK(priv); 3082 } 3083 3084 int 3085 mlx5e_close_locked(struct ifnet *ifp) 3086 { 3087 struct mlx5e_priv *priv = ifp->if_softc; 3088 3089 /* check if already closed */ 3090 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3091 return (0); 3092 3093 clear_bit(MLX5E_STATE_OPENED, &priv->state); 3094 3095 mlx5e_set_rx_mode_core(priv); 3096 mlx5e_del_all_vlan_rules(priv); 3097 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 3098 mlx5e_close_flow_table(priv); 3099 mlx5e_close_tirs(priv); 3100 mlx5e_close_rqt(priv); 3101 mlx5e_close_channels(priv); 3102 mlx5_vport_dealloc_q_counter(priv->mdev, 3103 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 3104 mlx5e_close_tises(priv); 3105 3106 return (0); 3107 } 3108 3109 #if (__FreeBSD_version >= 1100000) 3110 static uint64_t 3111 mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt) 3112 { 3113 struct mlx5e_priv *priv = ifp->if_softc; 3114 u64 retval; 3115 3116 /* PRIV_LOCK(priv); XXX not allowed */ 3117 switch (cnt) { 3118 case IFCOUNTER_IPACKETS: 3119 retval = priv->stats.vport.rx_packets; 3120 break; 3121 case IFCOUNTER_IERRORS: 3122 retval = priv->stats.pport.in_range_len_errors + 3123 priv->stats.pport.out_of_range_len + 3124 priv->stats.pport.too_long_errors + 3125 priv->stats.pport.check_seq_err + 3126 priv->stats.pport.alignment_err; 3127 break; 3128 case IFCOUNTER_IQDROPS: 3129 retval = priv->stats.vport.rx_out_of_buffer; 3130 break; 3131 case IFCOUNTER_OPACKETS: 3132 retval = priv->stats.vport.tx_packets; 3133 break; 3134 case IFCOUNTER_OERRORS: 3135 retval = priv->stats.port_stats_debug.out_discards; 3136 break; 3137 case IFCOUNTER_IBYTES: 3138 retval = priv->stats.vport.rx_bytes; 3139 break; 3140 case IFCOUNTER_OBYTES: 3141 retval = priv->stats.vport.tx_bytes; 3142 break; 3143 case IFCOUNTER_IMCASTS: 3144 retval = priv->stats.vport.rx_multicast_packets; 3145 break; 3146 case IFCOUNTER_OMCASTS: 3147 retval = priv->stats.vport.tx_multicast_packets; 3148 break; 3149 case IFCOUNTER_OQDROPS: 3150 retval = priv->stats.vport.tx_queue_dropped; 3151 break; 3152 case IFCOUNTER_COLLISIONS: 3153 retval = priv->stats.pport.collisions; 3154 break; 3155 default: 3156 retval = if_get_counter_default(ifp, cnt); 3157 break; 3158 } 3159 /* PRIV_UNLOCK(priv); XXX not allowed */ 3160 return (retval); 3161 } 3162 #endif 3163 3164 static void 3165 mlx5e_set_rx_mode(struct ifnet *ifp) 3166 { 3167 struct mlx5e_priv *priv = ifp->if_softc; 3168 3169 queue_work(priv->wq, &priv->set_rx_mode_work); 3170 } 3171 3172 static int 3173 mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3174 { 3175 struct mlx5e_priv *priv; 3176 struct ifreq *ifr; 3177 struct ifi2creq i2c; 3178 int error = 0; 3179 int mask = 0; 3180 int size_read = 0; 3181 int module_status; 3182 int module_num; 3183 int max_mtu; 3184 uint8_t read_addr; 3185 3186 priv = ifp->if_softc; 3187 3188 /* check if detaching */ 3189 if (priv == NULL || priv->gone != 0) 3190 return (ENXIO); 3191 3192 switch (command) { 3193 case SIOCSIFMTU: 3194 ifr = (struct ifreq *)data; 3195 3196 PRIV_LOCK(priv); 3197 mlx5_query_port_max_mtu(priv->mdev, &max_mtu); 3198 3199 if (ifr->ifr_mtu >= MLX5E_MTU_MIN && 3200 ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) { 3201 int was_opened; 3202 3203 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3204 if (was_opened) 3205 mlx5e_close_locked(ifp); 3206 3207 /* set new MTU */ 3208 mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu); 3209 3210 if (was_opened) 3211 mlx5e_open_locked(ifp); 3212 } else { 3213 error = EINVAL; 3214 if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n", 3215 MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu)); 3216 } 3217 PRIV_UNLOCK(priv); 3218 break; 3219 case SIOCSIFFLAGS: 3220 if ((ifp->if_flags & IFF_UP) && 3221 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3222 mlx5e_set_rx_mode(ifp); 3223 break; 3224 } 3225 PRIV_LOCK(priv); 3226 if (ifp->if_flags & IFF_UP) { 3227 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3228 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3229 mlx5e_open_locked(ifp); 3230 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3231 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); 3232 } 3233 } else { 3234 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3235 mlx5_set_port_status(priv->mdev, 3236 MLX5_PORT_DOWN); 3237 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 3238 mlx5e_close_locked(ifp); 3239 mlx5e_update_carrier(priv); 3240 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3241 } 3242 } 3243 PRIV_UNLOCK(priv); 3244 break; 3245 case SIOCADDMULTI: 3246 case SIOCDELMULTI: 3247 mlx5e_set_rx_mode(ifp); 3248 break; 3249 case SIOCSIFMEDIA: 3250 case SIOCGIFMEDIA: 3251 case SIOCGIFXMEDIA: 3252 ifr = (struct ifreq *)data; 3253 error = ifmedia_ioctl(ifp, ifr, &priv->media, command); 3254 break; 3255 case SIOCSIFCAP: 3256 ifr = (struct ifreq *)data; 3257 PRIV_LOCK(priv); 3258 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3259 3260 if (mask & IFCAP_TXCSUM) { 3261 ifp->if_capenable ^= IFCAP_TXCSUM; 3262 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3263 3264 if (IFCAP_TSO4 & ifp->if_capenable && 3265 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3266 ifp->if_capenable &= ~IFCAP_TSO4; 3267 ifp->if_hwassist &= ~CSUM_IP_TSO; 3268 if_printf(ifp, 3269 "tso4 disabled due to -txcsum.\n"); 3270 } 3271 } 3272 if (mask & IFCAP_TXCSUM_IPV6) { 3273 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 3274 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3275 3276 if (IFCAP_TSO6 & ifp->if_capenable && 3277 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3278 ifp->if_capenable &= ~IFCAP_TSO6; 3279 ifp->if_hwassist &= ~CSUM_IP6_TSO; 3280 if_printf(ifp, 3281 "tso6 disabled due to -txcsum6.\n"); 3282 } 3283 } 3284 if (mask & IFCAP_NOMAP) 3285 ifp->if_capenable ^= IFCAP_NOMAP; 3286 if (mask & IFCAP_RXCSUM) 3287 ifp->if_capenable ^= IFCAP_RXCSUM; 3288 if (mask & IFCAP_RXCSUM_IPV6) 3289 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 3290 if (mask & IFCAP_TSO4) { 3291 if (!(IFCAP_TSO4 & ifp->if_capenable) && 3292 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3293 if_printf(ifp, "enable txcsum first.\n"); 3294 error = EAGAIN; 3295 goto out; 3296 } 3297 ifp->if_capenable ^= IFCAP_TSO4; 3298 ifp->if_hwassist ^= CSUM_IP_TSO; 3299 } 3300 if (mask & IFCAP_TSO6) { 3301 if (!(IFCAP_TSO6 & ifp->if_capenable) && 3302 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3303 if_printf(ifp, "enable txcsum6 first.\n"); 3304 error = EAGAIN; 3305 goto out; 3306 } 3307 ifp->if_capenable ^= IFCAP_TSO6; 3308 ifp->if_hwassist ^= CSUM_IP6_TSO; 3309 } 3310 if (mask & IFCAP_VLAN_HWFILTER) { 3311 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 3312 mlx5e_disable_vlan_filter(priv); 3313 else 3314 mlx5e_enable_vlan_filter(priv); 3315 3316 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 3317 } 3318 if (mask & IFCAP_VLAN_HWTAGGING) 3319 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3320 if (mask & IFCAP_WOL_MAGIC) 3321 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3322 3323 VLAN_CAPABILITIES(ifp); 3324 /* turn off LRO means also turn of HW LRO - if it's on */ 3325 if (mask & IFCAP_LRO) { 3326 int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3327 bool need_restart = false; 3328 3329 ifp->if_capenable ^= IFCAP_LRO; 3330 3331 /* figure out if updating HW LRO is needed */ 3332 if (!(ifp->if_capenable & IFCAP_LRO)) { 3333 if (priv->params.hw_lro_en) { 3334 priv->params.hw_lro_en = false; 3335 need_restart = true; 3336 } 3337 } else { 3338 if (priv->params.hw_lro_en == false && 3339 priv->params_ethtool.hw_lro != 0) { 3340 priv->params.hw_lro_en = true; 3341 need_restart = true; 3342 } 3343 } 3344 if (was_opened && need_restart) { 3345 mlx5e_close_locked(ifp); 3346 mlx5e_open_locked(ifp); 3347 } 3348 } 3349 if (mask & IFCAP_HWRXTSTMP) { 3350 ifp->if_capenable ^= IFCAP_HWRXTSTMP; 3351 if (ifp->if_capenable & IFCAP_HWRXTSTMP) { 3352 if (priv->clbr_done == 0) 3353 mlx5e_reset_calibration_callout(priv); 3354 } else { 3355 callout_drain(&priv->tstmp_clbr); 3356 priv->clbr_done = 0; 3357 } 3358 } 3359 out: 3360 PRIV_UNLOCK(priv); 3361 break; 3362 3363 case SIOCGI2C: 3364 ifr = (struct ifreq *)data; 3365 3366 /* 3367 * Copy from the user-space address ifr_data to the 3368 * kernel-space address i2c 3369 */ 3370 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 3371 if (error) 3372 break; 3373 3374 if (i2c.len > sizeof(i2c.data)) { 3375 error = EINVAL; 3376 break; 3377 } 3378 3379 PRIV_LOCK(priv); 3380 /* Get module_num which is required for the query_eeprom */ 3381 error = mlx5_query_module_num(priv->mdev, &module_num); 3382 if (error) { 3383 if_printf(ifp, "Query module num failed, eeprom " 3384 "reading is not supported\n"); 3385 error = EINVAL; 3386 goto err_i2c; 3387 } 3388 /* Check if module is present before doing an access */ 3389 module_status = mlx5_query_module_status(priv->mdev, module_num); 3390 if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED && 3391 module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) { 3392 error = EINVAL; 3393 goto err_i2c; 3394 } 3395 /* 3396 * Currently 0XA0 and 0xA2 are the only addresses permitted. 3397 * The internal conversion is as follows: 3398 */ 3399 if (i2c.dev_addr == 0xA0) 3400 read_addr = MLX5E_I2C_ADDR_LOW; 3401 else if (i2c.dev_addr == 0xA2) 3402 read_addr = MLX5E_I2C_ADDR_HIGH; 3403 else { 3404 if_printf(ifp, "Query eeprom failed, " 3405 "Invalid Address: %X\n", i2c.dev_addr); 3406 error = EINVAL; 3407 goto err_i2c; 3408 } 3409 error = mlx5_query_eeprom(priv->mdev, 3410 read_addr, MLX5E_EEPROM_LOW_PAGE, 3411 (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num, 3412 (uint32_t *)i2c.data, &size_read); 3413 if (error) { 3414 if_printf(ifp, "Query eeprom failed, eeprom " 3415 "reading is not supported\n"); 3416 error = EINVAL; 3417 goto err_i2c; 3418 } 3419 3420 if (i2c.len > MLX5_EEPROM_MAX_BYTES) { 3421 error = mlx5_query_eeprom(priv->mdev, 3422 read_addr, MLX5E_EEPROM_LOW_PAGE, 3423 (uint32_t)(i2c.offset + size_read), 3424 (uint32_t)(i2c.len - size_read), module_num, 3425 (uint32_t *)(i2c.data + size_read), &size_read); 3426 } 3427 if (error) { 3428 if_printf(ifp, "Query eeprom failed, eeprom " 3429 "reading is not supported\n"); 3430 error = EINVAL; 3431 goto err_i2c; 3432 } 3433 3434 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 3435 err_i2c: 3436 PRIV_UNLOCK(priv); 3437 break; 3438 3439 default: 3440 error = ether_ioctl(ifp, command, data); 3441 break; 3442 } 3443 return (error); 3444 } 3445 3446 static int 3447 mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 3448 { 3449 /* 3450 * TODO: uncoment once FW really sets all these bits if 3451 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap || 3452 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap || 3453 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return 3454 * -ENOTSUPP; 3455 */ 3456 3457 /* TODO: add more must-to-have features */ 3458 3459 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 3460 return (-ENODEV); 3461 3462 return (0); 3463 } 3464 3465 static u16 3466 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 3467 { 3468 uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U; 3469 3470 bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2; 3471 3472 /* verify against driver hardware limit */ 3473 if (bf_buf_size > MLX5E_MAX_TX_INLINE) 3474 bf_buf_size = MLX5E_MAX_TX_INLINE; 3475 3476 return (bf_buf_size); 3477 } 3478 3479 static int 3480 mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev, 3481 struct mlx5e_priv *priv, 3482 int num_comp_vectors) 3483 { 3484 int err; 3485 3486 /* 3487 * TODO: Consider link speed for setting "log_sq_size", 3488 * "log_rq_size" and "cq_moderation_xxx": 3489 */ 3490 priv->params.log_sq_size = 3491 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 3492 priv->params.log_rq_size = 3493 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; 3494 priv->params.rx_cq_moderation_usec = 3495 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 3496 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : 3497 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 3498 priv->params.rx_cq_moderation_mode = 3499 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0; 3500 priv->params.rx_cq_moderation_pkts = 3501 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 3502 priv->params.tx_cq_moderation_usec = 3503 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 3504 priv->params.tx_cq_moderation_pkts = 3505 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 3506 priv->params.min_rx_wqes = 3507 MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; 3508 priv->params.rx_hash_log_tbl_sz = 3509 (order_base_2(num_comp_vectors) > 3510 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? 3511 order_base_2(num_comp_vectors) : 3512 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; 3513 priv->params.num_tc = 1; 3514 priv->params.default_vlan_prio = 0; 3515 priv->counter_set_id = -1; 3516 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); 3517 3518 err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); 3519 if (err) 3520 return (err); 3521 3522 /* 3523 * hw lro is currently defaulted to off. when it won't anymore we 3524 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)" 3525 */ 3526 priv->params.hw_lro_en = false; 3527 priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 3528 3529 /* 3530 * CQE zipping is currently defaulted to off. when it won't 3531 * anymore we will consider the HW capability: 3532 * "!!MLX5_CAP_GEN(mdev, cqe_compression)" 3533 */ 3534 priv->params.cqe_zipping_en = false; 3535 3536 priv->mdev = mdev; 3537 priv->params.num_channels = num_comp_vectors; 3538 priv->params.channels_rsss = 1; 3539 priv->order_base_2_num_channels = order_base_2(num_comp_vectors); 3540 priv->queue_mapping_channel_mask = 3541 roundup_pow_of_two(num_comp_vectors) - 1; 3542 priv->num_tc = priv->params.num_tc; 3543 priv->default_vlan_prio = priv->params.default_vlan_prio; 3544 3545 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 3546 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 3547 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 3548 3549 return (0); 3550 } 3551 3552 static int 3553 mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, 3554 struct mlx5_core_mr *mkey) 3555 { 3556 struct ifnet *ifp = priv->ifp; 3557 struct mlx5_core_dev *mdev = priv->mdev; 3558 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 3559 void *mkc; 3560 u32 *in; 3561 int err; 3562 3563 in = mlx5_vzalloc(inlen); 3564 if (in == NULL) { 3565 if_printf(ifp, "%s: failed to allocate inbox\n", __func__); 3566 return (-ENOMEM); 3567 } 3568 3569 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 3570 MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA); 3571 MLX5_SET(mkc, mkc, lw, 1); 3572 MLX5_SET(mkc, mkc, lr, 1); 3573 3574 MLX5_SET(mkc, mkc, pd, pdn); 3575 MLX5_SET(mkc, mkc, length64, 1); 3576 MLX5_SET(mkc, mkc, qpn, 0xffffff); 3577 3578 err = mlx5_core_create_mkey(mdev, mkey, in, inlen); 3579 if (err) 3580 if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n", 3581 __func__, err); 3582 3583 kvfree(in); 3584 return (err); 3585 } 3586 3587 static const char *mlx5e_vport_stats_desc[] = { 3588 MLX5E_VPORT_STATS(MLX5E_STATS_DESC) 3589 }; 3590 3591 static const char *mlx5e_pport_stats_desc[] = { 3592 MLX5E_PPORT_STATS(MLX5E_STATS_DESC) 3593 }; 3594 3595 static void 3596 mlx5e_priv_mtx_init(struct mlx5e_priv *priv) 3597 { 3598 mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); 3599 sx_init(&priv->state_lock, "mlx5state"); 3600 callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); 3601 MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); 3602 } 3603 3604 static void 3605 mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv) 3606 { 3607 mtx_destroy(&priv->async_events_mtx); 3608 sx_destroy(&priv->state_lock); 3609 } 3610 3611 static int 3612 sysctl_firmware(SYSCTL_HANDLER_ARGS) 3613 { 3614 /* 3615 * %d.%d%.d the string format. 3616 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536. 3617 * We need at most 5 chars to store that. 3618 * It also has: two "." and NULL at the end, which means we need 18 3619 * (5*3 + 3) chars at most. 3620 */ 3621 char fw[18]; 3622 struct mlx5e_priv *priv = arg1; 3623 int error; 3624 3625 snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), 3626 fw_rev_sub(priv->mdev)); 3627 error = sysctl_handle_string(oidp, fw, sizeof(fw), req); 3628 return (error); 3629 } 3630 3631 static void 3632 mlx5e_disable_tx_dma(struct mlx5e_channel *ch) 3633 { 3634 int i; 3635 3636 for (i = 0; i < ch->num_tc; i++) 3637 mlx5e_drain_sq(&ch->sq[i]); 3638 } 3639 3640 static void 3641 mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq) 3642 { 3643 3644 sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP); 3645 sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8); 3646 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 3647 sq->doorbell.d64 = 0; 3648 } 3649 3650 void 3651 mlx5e_resume_sq(struct mlx5e_sq *sq) 3652 { 3653 int err; 3654 3655 /* check if already enabled */ 3656 if (READ_ONCE(sq->running) != 0) 3657 return; 3658 3659 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR, 3660 MLX5_SQC_STATE_RST); 3661 if (err != 0) { 3662 if_printf(sq->ifp, 3663 "mlx5e_modify_sq() from ERR to RST failed: %d\n", err); 3664 } 3665 3666 sq->cc = 0; 3667 sq->pc = 0; 3668 3669 /* reset doorbell prior to moving from RST to RDY */ 3670 mlx5e_reset_sq_doorbell_record(sq); 3671 3672 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, 3673 MLX5_SQC_STATE_RDY); 3674 if (err != 0) { 3675 if_printf(sq->ifp, 3676 "mlx5e_modify_sq() from RST to RDY failed: %d\n", err); 3677 } 3678 3679 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 3680 WRITE_ONCE(sq->running, 1); 3681 } 3682 3683 static void 3684 mlx5e_enable_tx_dma(struct mlx5e_channel *ch) 3685 { 3686 int i; 3687 3688 for (i = 0; i < ch->num_tc; i++) 3689 mlx5e_resume_sq(&ch->sq[i]); 3690 } 3691 3692 static void 3693 mlx5e_disable_rx_dma(struct mlx5e_channel *ch) 3694 { 3695 struct mlx5e_rq *rq = &ch->rq; 3696 int err; 3697 3698 mtx_lock(&rq->mtx); 3699 rq->enabled = 0; 3700 callout_stop(&rq->watchdog); 3701 mtx_unlock(&rq->mtx); 3702 3703 callout_drain(&rq->watchdog); 3704 3705 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 3706 if (err != 0) { 3707 if_printf(rq->ifp, 3708 "mlx5e_modify_rq() from RDY to RST failed: %d\n", err); 3709 } 3710 3711 while (!mlx5_wq_ll_is_empty(&rq->wq)) { 3712 msleep(1); 3713 rq->cq.mcq.comp(&rq->cq.mcq); 3714 } 3715 3716 /* 3717 * Transitioning into RST state will allow the FW to track less ERR state queues, 3718 * thus reducing the recv queue flushing time 3719 */ 3720 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST); 3721 if (err != 0) { 3722 if_printf(rq->ifp, 3723 "mlx5e_modify_rq() from ERR to RST failed: %d\n", err); 3724 } 3725 } 3726 3727 static void 3728 mlx5e_enable_rx_dma(struct mlx5e_channel *ch) 3729 { 3730 struct mlx5e_rq *rq = &ch->rq; 3731 int err; 3732 3733 rq->wq.wqe_ctr = 0; 3734 mlx5_wq_ll_update_db_record(&rq->wq); 3735 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); 3736 if (err != 0) { 3737 if_printf(rq->ifp, 3738 "mlx5e_modify_rq() from RST to RDY failed: %d\n", err); 3739 } 3740 3741 rq->enabled = 1; 3742 3743 rq->cq.mcq.comp(&rq->cq.mcq); 3744 } 3745 3746 void 3747 mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) 3748 { 3749 int i; 3750 3751 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3752 return; 3753 3754 for (i = 0; i < priv->params.num_channels; i++) { 3755 if (value) 3756 mlx5e_disable_tx_dma(&priv->channel[i]); 3757 else 3758 mlx5e_enable_tx_dma(&priv->channel[i]); 3759 } 3760 } 3761 3762 void 3763 mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) 3764 { 3765 int i; 3766 3767 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3768 return; 3769 3770 for (i = 0; i < priv->params.num_channels; i++) { 3771 if (value) 3772 mlx5e_disable_rx_dma(&priv->channel[i]); 3773 else 3774 mlx5e_enable_rx_dma(&priv->channel[i]); 3775 } 3776 } 3777 3778 static void 3779 mlx5e_add_hw_stats(struct mlx5e_priv *priv) 3780 { 3781 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3782 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0, 3783 sysctl_firmware, "A", "HCA firmware version"); 3784 3785 SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3786 OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, 3787 "Board ID"); 3788 } 3789 3790 static int 3791 mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3792 { 3793 struct mlx5e_priv *priv = arg1; 3794 uint8_t temp[MLX5E_MAX_PRIORITY]; 3795 uint32_t tx_pfc; 3796 int err; 3797 int i; 3798 3799 PRIV_LOCK(priv); 3800 3801 tx_pfc = priv->params.tx_priority_flow_control; 3802 3803 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3804 temp[i] = (tx_pfc >> i) & 1; 3805 3806 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3807 if (err || !req->newptr) 3808 goto done; 3809 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3810 if (err) 3811 goto done; 3812 3813 priv->params.tx_priority_flow_control = 0; 3814 3815 /* range check input value */ 3816 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3817 if (temp[i] > 1) { 3818 err = ERANGE; 3819 goto done; 3820 } 3821 priv->params.tx_priority_flow_control |= (temp[i] << i); 3822 } 3823 3824 /* check if update is required */ 3825 if (tx_pfc != priv->params.tx_priority_flow_control) 3826 err = -mlx5e_set_port_pfc(priv); 3827 done: 3828 if (err != 0) 3829 priv->params.tx_priority_flow_control= tx_pfc; 3830 PRIV_UNLOCK(priv); 3831 3832 return (err); 3833 } 3834 3835 static int 3836 mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3837 { 3838 struct mlx5e_priv *priv = arg1; 3839 uint8_t temp[MLX5E_MAX_PRIORITY]; 3840 uint32_t rx_pfc; 3841 int err; 3842 int i; 3843 3844 PRIV_LOCK(priv); 3845 3846 rx_pfc = priv->params.rx_priority_flow_control; 3847 3848 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3849 temp[i] = (rx_pfc >> i) & 1; 3850 3851 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3852 if (err || !req->newptr) 3853 goto done; 3854 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3855 if (err) 3856 goto done; 3857 3858 priv->params.rx_priority_flow_control = 0; 3859 3860 /* range check input value */ 3861 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3862 if (temp[i] > 1) { 3863 err = ERANGE; 3864 goto done; 3865 } 3866 priv->params.rx_priority_flow_control |= (temp[i] << i); 3867 } 3868 3869 /* check if update is required */ 3870 if (rx_pfc != priv->params.rx_priority_flow_control) 3871 err = -mlx5e_set_port_pfc(priv); 3872 done: 3873 if (err != 0) 3874 priv->params.rx_priority_flow_control= rx_pfc; 3875 PRIV_UNLOCK(priv); 3876 3877 return (err); 3878 } 3879 3880 static void 3881 mlx5e_setup_pauseframes(struct mlx5e_priv *priv) 3882 { 3883 #if (__FreeBSD_version < 1100000) 3884 char path[96]; 3885 #endif 3886 int error; 3887 3888 /* enable pauseframes by default */ 3889 priv->params.tx_pauseframe_control = 1; 3890 priv->params.rx_pauseframe_control = 1; 3891 3892 /* disable ports flow control, PFC, by default */ 3893 priv->params.tx_priority_flow_control = 0; 3894 priv->params.rx_priority_flow_control = 0; 3895 3896 #if (__FreeBSD_version < 1100000) 3897 /* compute path for sysctl */ 3898 snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control", 3899 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3900 3901 /* try to fetch tunable, if any */ 3902 TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control); 3903 3904 /* compute path for sysctl */ 3905 snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control", 3906 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3907 3908 /* try to fetch tunable, if any */ 3909 TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control); 3910 #endif 3911 3912 /* register pauseframe SYSCTLs */ 3913 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3914 OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN, 3915 &priv->params.tx_pauseframe_control, 0, 3916 "Set to enable TX pause frames. Clear to disable."); 3917 3918 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3919 OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN, 3920 &priv->params.rx_pauseframe_control, 0, 3921 "Set to enable RX pause frames. Clear to disable."); 3922 3923 /* register priority flow control, PFC, SYSCTLs */ 3924 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3925 OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3926 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU", 3927 "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable."); 3928 3929 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3930 OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3931 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU", 3932 "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable."); 3933 3934 PRIV_LOCK(priv); 3935 3936 /* range check */ 3937 priv->params.tx_pauseframe_control = 3938 priv->params.tx_pauseframe_control ? 1 : 0; 3939 priv->params.rx_pauseframe_control = 3940 priv->params.rx_pauseframe_control ? 1 : 0; 3941 3942 /* update firmware */ 3943 error = mlx5e_set_port_pause_and_pfc(priv); 3944 if (error == -EINVAL) { 3945 if_printf(priv->ifp, 3946 "Global pauseframes must be disabled before enabling PFC.\n"); 3947 priv->params.rx_priority_flow_control = 0; 3948 priv->params.tx_priority_flow_control = 0; 3949 3950 /* update firmware */ 3951 (void) mlx5e_set_port_pause_and_pfc(priv); 3952 } 3953 PRIV_UNLOCK(priv); 3954 } 3955 3956 static int 3957 mlx5e_ul_snd_tag_alloc(struct ifnet *ifp, 3958 union if_snd_tag_alloc_params *params, 3959 struct m_snd_tag **ppmt) 3960 { 3961 struct mlx5e_priv *priv; 3962 struct mlx5e_channel *pch; 3963 3964 priv = ifp->if_softc; 3965 3966 if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) { 3967 return (EOPNOTSUPP); 3968 } else { 3969 /* keep this code synced with mlx5e_select_queue() */ 3970 u32 ch = priv->params.num_channels; 3971 #ifdef RSS 3972 u32 temp; 3973 3974 if (rss_hash2bucket(params->hdr.flowid, 3975 params->hdr.flowtype, &temp) == 0) 3976 ch = temp % ch; 3977 else 3978 #endif 3979 ch = (params->hdr.flowid % 128) % ch; 3980 3981 /* 3982 * NOTE: The channels array is only freed at detach 3983 * and it safe to return a pointer to the send tag 3984 * inside the channels structure as long as we 3985 * reference the priv. 3986 */ 3987 pch = priv->channel + ch; 3988 3989 /* check if send queue is not running */ 3990 if (unlikely(pch->sq[0].running == 0)) 3991 return (ENXIO); 3992 mlx5e_ref_channel(priv); 3993 MPASS(pch->tag.m_snd_tag.refcount == 0); 3994 m_snd_tag_init(&pch->tag.m_snd_tag, ifp); 3995 *ppmt = &pch->tag.m_snd_tag; 3996 return (0); 3997 } 3998 } 3999 4000 static int 4001 mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params) 4002 { 4003 struct mlx5e_channel *pch = 4004 container_of(pmt, struct mlx5e_channel, tag.m_snd_tag); 4005 4006 params->unlimited.max_rate = -1ULL; 4007 params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]); 4008 return (0); 4009 } 4010 4011 static void 4012 mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt) 4013 { 4014 struct mlx5e_channel *pch = 4015 container_of(pmt, struct mlx5e_channel, tag.m_snd_tag); 4016 4017 mlx5e_unref_channel(pch->priv); 4018 } 4019 4020 static int 4021 mlx5e_snd_tag_alloc(struct ifnet *ifp, 4022 union if_snd_tag_alloc_params *params, 4023 struct m_snd_tag **ppmt) 4024 { 4025 4026 switch (params->hdr.type) { 4027 #ifdef RATELIMIT 4028 case IF_SND_TAG_TYPE_RATE_LIMIT: 4029 return (mlx5e_rl_snd_tag_alloc(ifp, params, ppmt)); 4030 #endif 4031 case IF_SND_TAG_TYPE_UNLIMITED: 4032 return (mlx5e_ul_snd_tag_alloc(ifp, params, ppmt)); 4033 default: 4034 return (EOPNOTSUPP); 4035 } 4036 } 4037 4038 static int 4039 mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params) 4040 { 4041 struct mlx5e_snd_tag *tag = 4042 container_of(pmt, struct mlx5e_snd_tag, m_snd_tag); 4043 4044 switch (tag->type) { 4045 #ifdef RATELIMIT 4046 case IF_SND_TAG_TYPE_RATE_LIMIT: 4047 return (mlx5e_rl_snd_tag_modify(pmt, params)); 4048 #endif 4049 case IF_SND_TAG_TYPE_UNLIMITED: 4050 default: 4051 return (EOPNOTSUPP); 4052 } 4053 } 4054 4055 static int 4056 mlx5e_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params) 4057 { 4058 struct mlx5e_snd_tag *tag = 4059 container_of(pmt, struct mlx5e_snd_tag, m_snd_tag); 4060 4061 switch (tag->type) { 4062 #ifdef RATELIMIT 4063 case IF_SND_TAG_TYPE_RATE_LIMIT: 4064 return (mlx5e_rl_snd_tag_query(pmt, params)); 4065 #endif 4066 case IF_SND_TAG_TYPE_UNLIMITED: 4067 return (mlx5e_ul_snd_tag_query(pmt, params)); 4068 default: 4069 return (EOPNOTSUPP); 4070 } 4071 } 4072 4073 static void 4074 mlx5e_snd_tag_free(struct m_snd_tag *pmt) 4075 { 4076 struct mlx5e_snd_tag *tag = 4077 container_of(pmt, struct mlx5e_snd_tag, m_snd_tag); 4078 4079 switch (tag->type) { 4080 #ifdef RATELIMIT 4081 case IF_SND_TAG_TYPE_RATE_LIMIT: 4082 mlx5e_rl_snd_tag_free(pmt); 4083 break; 4084 #endif 4085 case IF_SND_TAG_TYPE_UNLIMITED: 4086 mlx5e_ul_snd_tag_free(pmt); 4087 break; 4088 default: 4089 break; 4090 } 4091 } 4092 4093 static void * 4094 mlx5e_create_ifp(struct mlx5_core_dev *mdev) 4095 { 4096 struct ifnet *ifp; 4097 struct mlx5e_priv *priv; 4098 u8 dev_addr[ETHER_ADDR_LEN] __aligned(4); 4099 u8 connector_type; 4100 struct sysctl_oid_list *child; 4101 int ncv = mdev->priv.eq_table.num_comp_vectors; 4102 char unit[16]; 4103 struct pfil_head_args pa; 4104 int err; 4105 int i,j; 4106 u32 eth_proto_cap; 4107 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 4108 bool ext = 0; 4109 u32 speeds_num; 4110 struct media media_entry = {}; 4111 4112 if (mlx5e_check_required_hca_cap(mdev)) { 4113 mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n"); 4114 return (NULL); 4115 } 4116 /* 4117 * Try to allocate the priv and make room for worst-case 4118 * number of channel structures: 4119 */ 4120 priv = malloc(sizeof(*priv) + 4121 (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors), 4122 M_MLX5EN, M_WAITOK | M_ZERO); 4123 mlx5e_priv_mtx_init(priv); 4124 4125 ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev); 4126 if (ifp == NULL) { 4127 mlx5_core_err(mdev, "if_alloc() failed\n"); 4128 goto err_free_priv; 4129 } 4130 ifp->if_softc = priv; 4131 if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev)); 4132 ifp->if_mtu = ETHERMTU; 4133 ifp->if_init = mlx5e_open; 4134 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 4135 ifp->if_ioctl = mlx5e_ioctl; 4136 ifp->if_transmit = mlx5e_xmit; 4137 ifp->if_qflush = if_qflush; 4138 #if (__FreeBSD_version >= 1100000) 4139 ifp->if_get_counter = mlx5e_get_counter; 4140 #endif 4141 ifp->if_snd.ifq_maxlen = ifqmaxlen; 4142 /* 4143 * Set driver features 4144 */ 4145 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 4146 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 4147 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 4148 ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 4149 ifp->if_capabilities |= IFCAP_LRO; 4150 ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO; 4151 ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP; 4152 ifp->if_capabilities |= IFCAP_NOMAP; 4153 ifp->if_capabilities |= IFCAP_TXRTLMT; 4154 ifp->if_snd_tag_alloc = mlx5e_snd_tag_alloc; 4155 ifp->if_snd_tag_free = mlx5e_snd_tag_free; 4156 ifp->if_snd_tag_modify = mlx5e_snd_tag_modify; 4157 ifp->if_snd_tag_query = mlx5e_snd_tag_query; 4158 4159 /* set TSO limits so that we don't have to drop TX packets */ 4160 ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 4161 ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */; 4162 ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE; 4163 4164 ifp->if_capenable = ifp->if_capabilities; 4165 ifp->if_hwassist = 0; 4166 if (ifp->if_capenable & IFCAP_TSO) 4167 ifp->if_hwassist |= CSUM_TSO; 4168 if (ifp->if_capenable & IFCAP_TXCSUM) 4169 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 4170 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 4171 ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 4172 4173 /* ifnet sysctl tree */ 4174 sysctl_ctx_init(&priv->sysctl_ctx); 4175 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), 4176 OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name"); 4177 if (priv->sysctl_ifnet == NULL) { 4178 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 4179 goto err_free_sysctl; 4180 } 4181 snprintf(unit, sizeof(unit), "%d", ifp->if_dunit); 4182 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4183 OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit"); 4184 if (priv->sysctl_ifnet == NULL) { 4185 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 4186 goto err_free_sysctl; 4187 } 4188 4189 /* HW sysctl tree */ 4190 child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev)); 4191 priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, 4192 OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw"); 4193 if (priv->sysctl_hw == NULL) { 4194 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 4195 goto err_free_sysctl; 4196 } 4197 4198 err = mlx5e_build_ifp_priv(mdev, priv, ncv); 4199 if (err) { 4200 mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err); 4201 goto err_free_sysctl; 4202 } 4203 4204 /* reuse mlx5core's watchdog workqueue */ 4205 priv->wq = mdev->priv.health.wq_watchdog; 4206 4207 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); 4208 if (err) { 4209 if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n", 4210 __func__, err); 4211 goto err_free_wq; 4212 } 4213 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 4214 if (err) { 4215 if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n", 4216 __func__, err); 4217 goto err_unmap_free_uar; 4218 } 4219 err = mlx5_alloc_transport_domain(mdev, &priv->tdn); 4220 if (err) { 4221 if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n", 4222 __func__, err); 4223 goto err_dealloc_pd; 4224 } 4225 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); 4226 if (err) { 4227 if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n", 4228 __func__, err); 4229 goto err_dealloc_transport_domain; 4230 } 4231 mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); 4232 4233 /* check if we should generate a random MAC address */ 4234 if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && 4235 is_zero_ether_addr(dev_addr)) { 4236 random_ether_addr(dev_addr); 4237 if_printf(ifp, "Assigned random MAC address\n"); 4238 } 4239 #ifdef RATELIMIT 4240 err = mlx5e_rl_init(priv); 4241 if (err) { 4242 if_printf(ifp, "%s: mlx5e_rl_init failed, %d\n", 4243 __func__, err); 4244 goto err_create_mkey; 4245 } 4246 #endif 4247 4248 /* set default MTU */ 4249 mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu); 4250 4251 /* Set default media status */ 4252 priv->media_status_last = IFM_AVALID; 4253 priv->media_active_last = IFM_ETHER | IFM_AUTO | 4254 IFM_ETH_RXPAUSE | IFM_FDX; 4255 4256 /* setup default pauseframes configuration */ 4257 mlx5e_setup_pauseframes(priv); 4258 4259 /* Setup supported medias */ 4260 //TODO: If we failed to query ptys is it ok to proceed?? 4261 if (!mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) { 4262 ext = MLX5_CAP_PCAM_FEATURE(mdev, 4263 ptys_extended_ethernet); 4264 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 4265 eth_proto_capability); 4266 if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) 4267 connector_type = MLX5_GET(ptys_reg, out, 4268 connector_type); 4269 } else { 4270 eth_proto_cap = 0; 4271 if_printf(ifp, "%s: Query port media capability failed," 4272 " %d\n", __func__, err); 4273 } 4274 4275 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 4276 mlx5e_media_change, mlx5e_media_status); 4277 4278 speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER; 4279 for (i = 0; i != speeds_num; i++) { 4280 for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) { 4281 media_entry = ext ? mlx5e_ext_mode_table[i][j] : 4282 mlx5e_mode_table[i][j]; 4283 if (media_entry.baudrate == 0) 4284 continue; 4285 if (MLX5E_PROT_MASK(i) & eth_proto_cap) { 4286 ifmedia_add(&priv->media, 4287 media_entry.subtype | 4288 IFM_ETHER, 0, NULL); 4289 ifmedia_add(&priv->media, 4290 media_entry.subtype | 4291 IFM_ETHER | IFM_FDX | 4292 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 4293 } 4294 } 4295 } 4296 4297 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 4298 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 4299 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 4300 4301 /* Set autoselect by default */ 4302 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 4303 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 4304 ether_ifattach(ifp, dev_addr); 4305 4306 /* Register for VLAN events */ 4307 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 4308 mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 4309 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 4310 mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); 4311 4312 /* Link is down by default */ 4313 if_link_state_change(ifp, LINK_STATE_DOWN); 4314 4315 mlx5e_enable_async_events(priv); 4316 4317 mlx5e_add_hw_stats(priv); 4318 4319 mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4320 "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM, 4321 priv->stats.vport.arg); 4322 4323 mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4324 "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM, 4325 priv->stats.pport.arg); 4326 4327 mlx5e_create_ethtool(priv); 4328 4329 mtx_lock(&priv->async_events_mtx); 4330 mlx5e_update_stats(priv); 4331 mtx_unlock(&priv->async_events_mtx); 4332 4333 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4334 OID_AUTO, "rx_clbr_done", CTLFLAG_RD, 4335 &priv->clbr_done, 0, 4336 "RX timestamps calibration state"); 4337 callout_init(&priv->tstmp_clbr, CALLOUT_DIRECT); 4338 mlx5e_reset_calibration_callout(priv); 4339 4340 pa.pa_version = PFIL_VERSION; 4341 pa.pa_flags = PFIL_IN; 4342 pa.pa_type = PFIL_TYPE_ETHERNET; 4343 pa.pa_headname = ifp->if_xname; 4344 priv->pfil = pfil_head_register(&pa); 4345 4346 return (priv); 4347 4348 #ifdef RATELIMIT 4349 err_create_mkey: 4350 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); 4351 #endif 4352 err_dealloc_transport_domain: 4353 mlx5_dealloc_transport_domain(mdev, priv->tdn); 4354 4355 err_dealloc_pd: 4356 mlx5_core_dealloc_pd(mdev, priv->pdn); 4357 4358 err_unmap_free_uar: 4359 mlx5_unmap_free_uar(mdev, &priv->cq_uar); 4360 4361 err_free_wq: 4362 flush_workqueue(priv->wq); 4363 4364 err_free_sysctl: 4365 sysctl_ctx_free(&priv->sysctl_ctx); 4366 if (priv->sysctl_debug) 4367 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); 4368 if_free(ifp); 4369 4370 err_free_priv: 4371 mlx5e_priv_mtx_destroy(priv); 4372 free(priv, M_MLX5EN); 4373 return (NULL); 4374 } 4375 4376 static void 4377 mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv) 4378 { 4379 struct mlx5e_priv *priv = vpriv; 4380 struct ifnet *ifp = priv->ifp; 4381 4382 /* don't allow more IOCTLs */ 4383 priv->gone = 1; 4384 4385 /* XXX wait a bit to allow IOCTL handlers to complete */ 4386 pause("W", hz); 4387 4388 #ifdef RATELIMIT 4389 /* 4390 * The kernel can have reference(s) via the m_snd_tag's into 4391 * the ratelimit channels, and these must go away before 4392 * detaching: 4393 */ 4394 while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) { 4395 if_printf(priv->ifp, "Waiting for all ratelimit connections " 4396 "to terminate\n"); 4397 pause("W", hz); 4398 } 4399 #endif 4400 /* stop watchdog timer */ 4401 callout_drain(&priv->watchdog); 4402 4403 callout_drain(&priv->tstmp_clbr); 4404 4405 if (priv->vlan_attach != NULL) 4406 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); 4407 if (priv->vlan_detach != NULL) 4408 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); 4409 4410 /* make sure device gets closed */ 4411 PRIV_LOCK(priv); 4412 mlx5e_close_locked(ifp); 4413 PRIV_UNLOCK(priv); 4414 4415 /* wait for all unlimited send tags to go away */ 4416 while (priv->channel_refs != 0) { 4417 if_printf(priv->ifp, "Waiting for all unlimited connections " 4418 "to terminate\n"); 4419 pause("W", hz); 4420 } 4421 4422 /* deregister pfil */ 4423 if (priv->pfil != NULL) { 4424 pfil_head_unregister(priv->pfil); 4425 priv->pfil = NULL; 4426 } 4427 4428 /* unregister device */ 4429 ifmedia_removeall(&priv->media); 4430 ether_ifdetach(ifp); 4431 if_free(ifp); 4432 4433 #ifdef RATELIMIT 4434 mlx5e_rl_cleanup(priv); 4435 #endif 4436 /* destroy all remaining sysctl nodes */ 4437 sysctl_ctx_free(&priv->stats.vport.ctx); 4438 sysctl_ctx_free(&priv->stats.pport.ctx); 4439 if (priv->sysctl_debug) 4440 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); 4441 sysctl_ctx_free(&priv->sysctl_ctx); 4442 4443 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); 4444 mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); 4445 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 4446 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 4447 mlx5e_disable_async_events(priv); 4448 flush_workqueue(priv->wq); 4449 mlx5e_priv_mtx_destroy(priv); 4450 free(priv, M_MLX5EN); 4451 } 4452 4453 static void * 4454 mlx5e_get_ifp(void *vpriv) 4455 { 4456 struct mlx5e_priv *priv = vpriv; 4457 4458 return (priv->ifp); 4459 } 4460 4461 static struct mlx5_interface mlx5e_interface = { 4462 .add = mlx5e_create_ifp, 4463 .remove = mlx5e_destroy_ifp, 4464 .event = mlx5e_async_event, 4465 .protocol = MLX5_INTERFACE_PROTOCOL_ETH, 4466 .get_dev = mlx5e_get_ifp, 4467 }; 4468 4469 void 4470 mlx5e_init(void) 4471 { 4472 mlx5_register_interface(&mlx5e_interface); 4473 } 4474 4475 void 4476 mlx5e_cleanup(void) 4477 { 4478 mlx5_unregister_interface(&mlx5e_interface); 4479 } 4480 4481 static void 4482 mlx5e_show_version(void __unused *arg) 4483 { 4484 4485 printf("%s", mlx5e_version); 4486 } 4487 SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL); 4488 4489 module_init_order(mlx5e_init, SI_ORDER_THIRD); 4490 module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD); 4491 4492 #if (__FreeBSD_version >= 1100000) 4493 MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1); 4494 #endif 4495 MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1); 4496 MODULE_VERSION(mlx5en, 1); 4497