/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <sys/sockio.h>
#include <machine/atomic.h>

#ifndef ETH_DRIVER_VERSION
#define	ETH_DRIVER_VERSION	"3.5.1"
#endif
#define	DRIVER_RELDATE	"April 2019"

static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};

struct media {
	u32	subtype;
	u64	baudrate;
};

static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {

	[MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX][MLX5E_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4][MLX5E_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4][MLX5E_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR][MLX5E_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2][MLX5E_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4][MLX5E_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4][MLX5E_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4][MLX5E_R] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR][MLX5E_CR1] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR][MLX5E_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER_LR][MLX5E_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER_LR][MLX5E_LR] = {
		.subtype = IFM_10G_LR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4][MLX5E_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = {
		.subtype = IFM_40G_ER4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4][MLX5E_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4][MLX5E_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4][MLX5E_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4][MLX5E_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX][MLX5E_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T][MLX5E_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T][MLX5E_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR][MLX5E_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR][MLX5E_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR][MLX5E_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2][MLX5E_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2][MLX5E_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
};

static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
	[MLX5E_SGMII_100M][MLX5E_SGMII] = {
		.subtype = IFM_100_SGMII,
		.baudrate = IF_Mbps(100),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_CX] = {
		.subtype = IFM_1000_CX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_LX] = {
		.subtype = IFM_1000_LX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_SX] = {
		.subtype = IFM_1000_SX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_5GBASE_R][MLX5E_T] = {
		.subtype = IFM_5000_T,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_5GBASE_R][MLX5E_KR] = {
		.subtype = IFM_5000_KR,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_5GBASE_R][MLX5E_KR1] = {
		.subtype = IFM_5000_KR1,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_5GBASE_R][MLX5E_KR_S] = {
		.subtype = IFM_5000_KR_S,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = {
		.subtype = IFM_10G_LR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = {
		.subtype = IFM_10G_AOC,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = {
		.subtype = IFM_40G_ER4,
		.baudrate = IF_Gbps(40ULL),
	},

	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = {
		.subtype = IFM_25G_ACC,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = {
		.subtype = IFM_25G_AOC,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = {
		.subtype = IFM_25G_CR1,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = {
		.subtype = IFM_25G_CR_S,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = {
		.subtype = IFM_25G_KR1,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = {
		.subtype = IFM_25G_KR_S,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = {
		.subtype = IFM_25G_LR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = {
		.subtype = IFM_25G_T,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = {
		.subtype = IFM_50G_SR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = {
		.subtype = IFM_50G_LR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = {
		.subtype = IFM_50G_LR,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = {
		.subtype = IFM_50G_SR,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = {
		.subtype = IFM_50G_CP,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = {
		.subtype = IFM_50G_FR,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = {
		.subtype = IFM_50G_KR_PAM4,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = {
		.subtype = IFM_100G_SR2,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = {
		.subtype = IFM_100G_CP2,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = {
		.subtype = IFM_100G_KR2_PAM4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = {
		.subtype = IFM_200G_DR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = {
		.subtype = IFM_200G_LR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = {
		.subtype = IFM_200G_SR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = {
		.subtype = IFM_200G_FR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = {
		.subtype = IFM_200G_CR4_PAM4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = {
		.subtype = IFM_200G_KR4_PAM4,
		.baudrate = IF_Gbps(200ULL),
	},
};

MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");

static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 port_state;
	u8 is_er_type;
	u8 i, j;
	bool ext;
	struct media media_entry = {};

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
	    MLX5_PTYS_EN, 1);
	if (error) {
		priv->media_active_last = IFM_ETHER;
		priv->ifp->if_baudrate = 1;
		if_printf(priv->ifp, "%s: query port ptys failed: "
		    "0x%x\n", __func__, error);
		return;
	}

	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
	    eth_proto_oper);

	i = ilog2(eth_proto_oper);

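	/*
	 * The operational protocol mask is expected to have a single
	 * bit set, so "i" selects the speed row. Scan the module type
	 * columns for the first entry with a non-zero baudrate:
	 */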
	for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) {
		media_entry = ext ? mlx5e_ext_mode_table[i][j] :
		    mlx5e_mode_table[i][j];
		if (media_entry.baudrate != 0)
			break;
	}

	if (media_entry.subtype == 0) {
		if_printf(priv->ifp, "%s: Could not find operational "
		    "media subtype\n", __func__);
		return;
	}

	switch (media_entry.subtype) {
	case IFM_10G_ER:
		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
		if (error != 0) {
			if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
			    __func__, error);
		}
		if (error != 0 || is_er_type == 0)
			media_entry.subtype = IFM_10G_LR;
		break;
	case IFM_40G_LR4:
		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
		if (error != 0) {
			if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
			    __func__, error);
		}
		if (error == 0 && is_er_type != 0)
			media_entry.subtype = IFM_40G_ER4;
		break;
	}
	priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
	priv->ifp->if_baudrate = media_entry.baudrate;

	if_link_state_change(priv->ifp, LINK_STATE_UP);
}

static void
mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx5e_priv *priv = dev->if_softc;

	ifmr->ifm_status = priv->media_status_last;
	ifmr->ifm_active = priv->media_active_last |
	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
}

static u32
mlx5e_find_link_mode(u32 subtype, bool ext)
{
	u32 i;
	u32 j;
	u32 link_mode = 0;
	u32 speeds_num = 0;
	struct media media_entry = {};

	switch (subtype) {
	case IFM_10G_LR:
		subtype = IFM_10G_ER;
		break;
	case IFM_40G_ER4:
		subtype = IFM_40G_LR4;
		break;
	}

	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER :
	    MLX5E_LINK_SPEEDS_NUMBER;

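	/*
	 * Reverse lookup: every speed row whose entry matches the
	 * requested subtype contributes its MLX5E_PROT_MASK() bit to
	 * the returned PTYS protocol bitmask:
	 */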
	for (i = 0; i != speeds_num; i++) {
		for (j = 0; j < MLX5E_LINK_MODES_NUMBER; ++j) {
			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
			    mlx5e_mode_table[i][j];
			if (media_entry.baudrate == 0)
				continue;
			if (media_entry.subtype == subtype) {
				link_mode |= MLX5E_PROT_MASK(i);
			}
		}
	}

	return (link_mode);
}

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	int error;

	if (priv->gone != 0) {
		error = -ENXIO;
	} else if (priv->params.rx_pauseframe_control ||
	    priv->params.tx_pauseframe_control) {
		if_printf(priv->ifp,
		    "Global pauseframes must be disabled before "
		    "enabling PFC.\n");
		error = -EINVAL;
	} else {
		error = mlx5e_set_port_pause_and_pfc(priv);
	}
	return (error);
}

static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	int was_opened;
	int locked;
	int error;
	bool ext;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
	    MLX5_PTYS_EN, 1);
	if (error != 0) {
		if_printf(dev, "Query port media capability failed\n");
		goto done;
	}

	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext);

	/* query supported capabilities */
	eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
	    eth_proto_capability);

	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}

static void
mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_carrier_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	PRIV_UNLOCK(priv);
}

#define	MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f) \
	s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);

#define	MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f) \
	s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c);

static void
mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;
	void *in;
	int err;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;

	MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64)
	MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;

	MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;

	MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}

/*
 * This function reads the physical port counters from the firmware
 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host endian ones and stored in the "priv->stats.pport" structure.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;
	unsigned y;
	unsigned z;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/* read IEEE802_3 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	    x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/* read RFC2819 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Ethernet counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Statistical Group */
	if (MLX5_CAP_GEN(mdev, pcam_reg) &&
	    MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) &&
	    MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) {
		/* read Extended Statistical counter group using predefined counter layout */
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++)
			s_debug->arg[y] = be64toh(ptr[x]);
	}

	/* read PCIE counters */
	mlx5e_update_pcie_counters(priv);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

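	/*
	 * The per-priority counters fill the first
	 * MLX5E_PPORT_PER_PRIO_STATS_NUM slots of "s->arg[]", which is
	 * why the IEEE802_3 group above started storing at that offset.
	 */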
	/* iterate all the priorities */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
		    MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}

free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}

static void
mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, 0);

	if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0)
		return;

	priv->stats.vport.rx_steer_missed_packets =
	    MLX5_GET64(query_vnic_env_out, out,
	    vport_env.nic_receive_steering_discard);
}

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_locked(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u64 rx_packets = 0;
	u64 rx_bytes = 0;
	u32 rx_out_of_buffer = 0;
	int i;
	int j;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;

	/* Collect first the SW counters and then HW for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *pch = priv->channel + i;
		struct mlx5e_rq *rq = &pch->rq;
		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;
		rx_packets += rq_stats->packets;
		rx_bytes += rq_stats->bytes;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &pch->sq[j].stats;
			sq_br = pch->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;
	s->rx_packets = rx_packets;
	s->rx_bytes = rx_bytes;

	mlx5e_grp_vnic_env_update_stats(priv);

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
	    mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer) == 0) {
		s->rx_out_of_buffer = rx_out_of_buffer;
	}

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
#define	MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

		s->rx_error_packets =
		    MLX5_GET_CTR(out, received_errors.packets);
		s->rx_error_bytes =
		    MLX5_GET_CTR(out, received_errors.octets);
		s->tx_error_packets =
		    MLX5_GET_CTR(out, transmit_errors.packets);
		s->tx_error_bytes =
		    MLX5_GET_CTR(out, transmit_errors.octets);

		s->rx_unicast_packets =
		    MLX5_GET_CTR(out, received_eth_unicast.packets);
		s->rx_unicast_bytes =
		    MLX5_GET_CTR(out, received_eth_unicast.octets);
		s->tx_unicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
		s->tx_unicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

		s->rx_multicast_packets =
		    MLX5_GET_CTR(out, received_eth_multicast.packets);
		s->rx_multicast_bytes =
		    MLX5_GET_CTR(out, received_eth_multicast.octets);
		s->tx_multicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
		s->tx_multicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

		s->rx_broadcast_packets =
		    MLX5_GET_CTR(out, received_eth_broadcast.packets);
		s->rx_broadcast_bytes =
		    MLX5_GET_CTR(out, received_eth_broadcast.octets);
		s->tx_broadcast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
		s->tx_broadcast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

		s->tx_packets = s->tx_unicast_packets +
		    s->tx_multicast_packets + s->tx_broadcast_packets;
		s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
		    s->tx_broadcast_bytes;

		/* Update calculated offload counters */
		s->tx_csum_offload = s->tx_packets - tx_offload_none;
		s->rx_csum_good = s->rx_packets - s->rx_csum_none;
	}

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

	s->tx_jumbo_packets =
	    priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
	    priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
	    priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
	    priv->stats.port_stats_debug.tx_stat_p8192to10239octets;

#if (__FreeBSD_version < 1100000)
	/* no get_counters interface in fbsd 10 */
	ifp->if_ipackets = s->rx_packets;
	ifp->if_ierrors = priv->stats.pport.in_range_len_errors +
	    priv->stats.pport.out_of_range_len +
	    priv->stats.pport.too_long_errors +
	    priv->stats.pport.check_seq_err +
	    priv->stats.pport.alignment_err;
	ifp->if_iqdrops = s->rx_out_of_buffer;
	ifp->if_opackets = s->tx_packets;
	ifp->if_oerrors = priv->stats.port_stats_debug.out_discards;
	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
	ifp->if_ibytes = s->rx_bytes;
	ifp->if_obytes = s->tx_bytes;
	ifp->if_collisions =
	    priv->stats.pport.collisions;
#endif

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		int error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
	}
}

static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv;

	priv = container_of(work, struct mlx5e_priv, update_stats_work);
	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
		mlx5e_update_stats_locked(priv);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}

static void mlx5e_calibration_callout(void *arg);
static int mlx5e_calibration_duration = 20;
static int mlx5e_fast_calibration = 1;
static int mlx5e_normal_calibration = 30;

static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW, 0,
    "MLX5 timestamp calibration parameters");

SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN,
    &mlx5e_calibration_duration, 0,
    "Duration of initial calibration");
SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN,
    &mlx5e_fast_calibration, 0,
    "Recalibration interval during initial calibration");
SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN,
    &mlx5e_normal_calibration, 0,
    "Recalibration interval during normal operations");

/*
 * Ignites the calibration process.
 */
static void
mlx5e_reset_calibration_callout(struct mlx5e_priv *priv)
{

	if (priv->clbr_done == 0)
		mlx5e_calibration_callout(priv);
	else
		callout_reset_curcpu(&priv->tstmp_clbr, (priv->clbr_done <
		    mlx5e_calibration_duration ? mlx5e_fast_calibration :
		    mlx5e_normal_calibration) * hz, mlx5e_calibration_callout,
		    priv);
}
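
/*
 * NOTE: Despite the "usec" in its name, this helper converts the
 * timespec to nanoseconds.
 */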
static uint64_t
mlx5e_timespec2usec(const struct timespec *ts)
{

	return ((uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec);
}

static uint64_t
mlx5e_hw_clock(struct mlx5e_priv *priv)
{
	struct mlx5_init_seg *iseg;
	uint32_t hw_h, hw_h1, hw_l;

	iseg = priv->mdev->iseg;
	do {
		hw_h = ioread32be(&iseg->internal_timer_h);
		hw_l = ioread32be(&iseg->internal_timer_l);
		hw_h1 = ioread32be(&iseg->internal_timer_h);
	} while (hw_h1 != hw_h);
	return (((uint64_t)hw_h << 32) | hw_l);
}

/*
 * The calibration callout, it runs either in the context of the
 * thread which enables calibration, or in callout. It takes the
 * snapshot of system and adapter clocks, then advances the pointers to
 * the calibration point to allow rx path to read the consistent data
 * lockless.
 */
static void
mlx5e_calibration_callout(void *arg)
{
	struct mlx5e_priv *priv;
	struct mlx5e_clbr_point *next, *curr;
	struct timespec ts;
	int clbr_curr_next;

	priv = arg;
	curr = &priv->clbr_points[priv->clbr_curr];
	clbr_curr_next = priv->clbr_curr + 1;
	if (clbr_curr_next >= nitems(priv->clbr_points))
		clbr_curr_next = 0;
	next = &priv->clbr_points[clbr_curr_next];

	next->base_prev = curr->base_curr;
	next->clbr_hw_prev = curr->clbr_hw_curr;

	next->clbr_hw_curr = mlx5e_hw_clock(priv);
	if (((next->clbr_hw_curr - curr->clbr_hw_curr) >> MLX5E_TSTMP_PREC) ==
	    0) {
		if (priv->clbr_done != 0) {
			if_printf(priv->ifp, "HW failed tstmp frozen %#jx %#jx, "
			    "disabling\n",
			    next->clbr_hw_curr, curr->clbr_hw_prev);
			priv->clbr_done = 0;
		}
		atomic_store_rel_int(&curr->clbr_gen, 0);
		return;
	}

	nanouptime(&ts);
	next->base_curr = mlx5e_timespec2usec(&ts);

	curr->clbr_gen = 0;
	atomic_thread_fence_rel();
	priv->clbr_curr = clbr_curr_next;
	atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen));

	if (priv->clbr_done < mlx5e_calibration_duration)
		priv->clbr_done++;
	mlx5e_reset_calibration_callout(priv);
}

static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};

static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;
	u32 nsegs, wqe_sz;

	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	if (err != 0)
		goto done;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
	    nsegs,			/* nsegments */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

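	/* the doorbell record is a pair; point "db" at the receive half */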
	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
	if (err != 0)
		goto err_rq_wq_destroy;

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->tag.m_snd_tag.ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		int j;

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}

		/* set value for constant fields */
		for (j = 0; j < rq->nsegs; j++)
			wqe->data[j].lkey = c->mkey_be;
	}

	INIT_WORK(&rq->dim.work, mlx5e_dim_work);
	if (priv->params.rx_cq_moderation_mode < 2) {
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
	} else {
		void *cqc = container_of(param,
		    struct mlx5e_channel_param, rq)->rx_cq.cqc;

		switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			break;
		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
			break;
		default:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
			break;
		}
	}

	rq->ifp = c->tag.m_snd_tag.ifp;
	rq->channel = c;
	rq->ix = c->ix;

	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int wq_sz;
	int i;

	/* destroy all sysctl nodes */
	sysctl_ctx_free(&rq->stats.ctx);

	/* free leftover LRO packets, if any */
	tcp_lro_free(&rq->lro);

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	for (i = 0; i != wq_sz; i++) {
		if (rq->mbuf[i].mbuf != NULL) {
			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
			m_freem(rq->mbuf[i].mbuf);
		}
		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
	}
	free(rq->mbuf, M_MLX5EN);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
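	/* the WQ page size is given as log2 relative to the base page size */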
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}

static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, in, inlen);

	kvfree(in);

	return (err);
}

static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}

static int
mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return (0);

		msleep(4);
	}
	return (-ETIMEDOUT);
}

static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}

static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	callout_drain(&rq->watchdog);

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}

static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{

	mlx5e_disable_rq(rq);
	mlx5e_close_cq(&rq->cq);
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
}

void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int x;

	for (x = 0; x != wq_sz; x++) {
		if (sq->mbuf[x].mbuf != NULL) {
			bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
			m_freem(sq->mbuf[x].mbuf);
		}
		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
	}
	free(sq->mbuf, M_MLX5EN);
}

int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;
	int x;

	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);

	/* Create DMA descriptor MAPs */
	for (x = 0; x != wq_sz; x++) {
		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
			free(sq->mbuf, M_MLX5EN);
			return (err);
		}
	}
	return (0);
}

static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};

void
mlx5e_update_sq_inline(struct mlx5e_sq *sq)
{
	sq->max_inline = sq->priv->params.tx_max_inline;
	sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;

	/*
	 * Check if trust state is DSCP or if inline mode is NONE which
	 * indicates CX-5 or newer hardware.
	 */
	if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
	    sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
		if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
			sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
		else
			sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
	} else {
		sq->min_insert_caps = 0;
	}
}

static void
mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	int i;

	for (i = 0; i != c->num_tc; i++) {
		mtx_lock(&c->sq[i].lock);
		mlx5e_update_sq_inline(&c->sq[i]);
		mtx_unlock(&c->sq[i].lock);
	}
}

void
mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
{
	int i;

	/* check if channels are closed */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
}

static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = c->mkey_be;
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	mlx5e_update_sq_inline(sq);

	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
}

int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}

int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

	kvfree(in);

	return (err);
}

void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{

	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}

static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->running, 1);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}

static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
}
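
/*
 * TX completion events are moderated by "cev_factor", so a send
 * queue may go idle with completion events still outstanding. This
 * callout pads the ring with NOPs until "cev_counter" drains, which
 * forces the remaining completion events to be generated.
 */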
void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}

void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
	int error;
	struct mlx5_core_dev *mdev = sq->priv->mdev;

	/*
	 * Check if already stopped.
	 *
	 * NOTE: Serialization of this function is managed by the
	 * caller ensuring the priv's state lock is locked or in case
	 * of rate limit support, a single thread manages drain and
	 * resume of SQs. The "running" variable can therefore safely
	 * be read without any locks.
	 */
	if (READ_ONCE(sq->running) == 0)
		return;

	/* don't put more packets into the SQ */
	WRITE_ONCE(sq->running, 0);

	/* serialize access to DMA rings */
	mtx_lock(&sq->lock);

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
	    &cq->wq_ctrl);
	if (err)
		return (err);

	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
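	/* CQ doorbell record: db[0] updates the consumer index, db[1] arms the CQ */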
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = eq_ix;
	mcq->comp = comp;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		/* invalid opcode plus HW ownership, so the CQE is not
		 * processed before hardware writes it */
		cqe->op_own = 0xf1;
	}

	cq->priv = priv;

	return (0);
}

static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
	    sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return (err);

	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

	return (0);
}

static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}

int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	int err;

	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
	if (err)
		return (err);

	err = mlx5e_enable_cq(cq, param, eq_ix);
	if (err)
		goto err_destroy_cq;

	return (0);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return (err);
}

void
mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		/* open completion queue */
		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
		    &mlx5e_tx_cq_comp, c->ix);
		if (err)
			goto err_close_tx_cqs;
	}
	return (0);

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return (err);
}

static void
mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int
mlx5e_open_sqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return (0);

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq_wait(&c->sq[tc]);

	return (err);
}
static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
    int tc;

    for (tc = 0; tc < c->num_tc; tc++)
        mlx5e_close_sq_wait(&c->sq[tc]);
}

static void
mlx5e_chan_mtx_init(struct mlx5e_channel *c)
{
    int tc;

    mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);

    callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);

    for (tc = 0; tc < c->num_tc; tc++) {
        struct mlx5e_sq *sq = c->sq + tc;

        mtx_init(&sq->lock, "mlx5tx",
            MTX_NETWORK_LOCK " TX", MTX_DEF);
        mtx_init(&sq->comp_lock, "mlx5comp",
            MTX_NETWORK_LOCK " TX", MTX_DEF);

        callout_init_mtx(&sq->cev_callout, &sq->lock, 0);

        sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;

        /* ensure the TX completion event factor is not zero */
        if (sq->cev_factor == 0)
            sq->cev_factor = 1;
    }
}

static void
mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
{
    int tc;

    mtx_destroy(&c->rq.mtx);

    for (tc = 0; tc < c->num_tc; tc++) {
        mtx_destroy(&c->sq[tc].lock);
        mtx_destroy(&c->sq[tc].comp_lock);
    }
}

static int
mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *c)
{
    int err;

    memset(c, 0, sizeof(*c));

    c->priv = priv;
    c->ix = ix;
    /* setup send tag */
    c->tag.m_snd_tag.ifp = priv->ifp;
    c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
    c->mkey_be = cpu_to_be32(priv->mr.key);
    c->num_tc = priv->num_tc;

    /* init mutexes */
    mlx5e_chan_mtx_init(c);

    /* open transmit completion queue */
    err = mlx5e_open_tx_cqs(c, cparam);
    if (err)
        goto err_free;

    /* open receive completion queue */
    err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
        &mlx5e_rx_cq_comp, c->ix);
    if (err)
        goto err_close_tx_cqs;

    err = mlx5e_open_sqs(c, cparam);
    if (err)
        goto err_close_rx_cq;

    err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
    if (err)
        goto err_close_sqs;

    /* poll receive queue initially */
    c->rq.cq.mcq.comp(&c->rq.cq.mcq);

    return (0);

err_close_sqs:
    mlx5e_close_sqs_wait(c);

err_close_rx_cq:
    mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
    mlx5e_close_tx_cqs(c);

err_free:
    /* destroy mutexes */
    mlx5e_chan_mtx_destroy(c);
    return (err);
}

static void
mlx5e_close_channel(struct mlx5e_channel *c)
{
    mlx5e_close_rq(&c->rq);
}

static void
mlx5e_close_channel_wait(struct mlx5e_channel *c)
{
    mlx5e_close_rq_wait(&c->rq);
    mlx5e_close_sqs_wait(c);
    mlx5e_close_tx_cqs(c);
    /* destroy mutexes */
    mlx5e_chan_mtx_destroy(c);
}

static int
mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
{
    u32 r, n;

    r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
        MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
    if (r > MJUM16BYTES)
        return (-ENOMEM);

    if (r > MJUM9BYTES)
        r = MJUM16BYTES;
    else if (r > MJUMPAGESIZE)
        r = MJUM9BYTES;
    else if (r > MCLBYTES)
        r = MJUMPAGESIZE;
    else
        r = MCLBYTES;

    /*
     * n + 1 must be a power of two, because stride size must be.
     * Stride size is 16 * (n + 1), as the first segment is
     * control.
     */
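    /*
     * Worked example, assuming MLX5E_MAX_RX_BYTES keeps its usual
     * definition of MCLBYTES (2048): for r = MJUMPAGESIZE (4096 on
     * most platforms), howmany(4096, 2048) yields n = 2; n + 1 = 3
     * is not a power of two, so n is bumped to 3, giving a stride
     * of 16 * (3 + 1) = 64 bytes: one control segment followed by
     * three data segments.
     */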
    for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
        ;

    if (n > MLX5E_MAX_BUSDMA_RX_SEGS)
        return (-ENOMEM);

    *wqe_sz = r;
    *nsegs = n;
    return (0);
}

static void
mlx5e_build_rq_param(struct mlx5e_priv *priv,
    struct mlx5e_rq_param *param)
{
    void *rqc = param->rqc;
    void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
    u32 wqe_sz, nsegs;

    mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
    MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
    MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
    MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
        nsegs * sizeof(struct mlx5_wqe_data_seg)));
    MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
    MLX5_SET(wq, wq, pd, priv->pdn);

    param->wq.buf_numa_node = 0;
    param->wq.db_numa_node = 0;
    param->wq.linear = 1;
}

static void
mlx5e_build_sq_param(struct mlx5e_priv *priv,
    struct mlx5e_sq_param *param)
{
    void *sqc = param->sqc;
    void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

    MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
    MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
    MLX5_SET(wq, wq, pd, priv->pdn);

    param->wq.buf_numa_node = 0;
    param->wq.db_numa_node = 0;
    param->wq.linear = 1;
}

static void
mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
    void *cqc = param->cqc;

    MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void
mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr)
{

    *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE);

    /* apply LRO restrictions */
    if (priv->params.hw_lro_en &&
        ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) {
        ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO;
    }
}

static void
mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
    struct net_dim_cq_moder curr;
    void *cqc = param->cqc;

    /*
     * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE
     * format is more beneficial for the FreeBSD use case.
     *
     * Adding support for MLX5_CQE_FORMAT_CSUM will require changes
     * in mlx5e_decompress_cqe.
     */
    if (priv->params.cqe_zipping_en) {
        MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH);
        MLX5_SET(cqc, cqc, cqe_compression_en, 1);
    }

    MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

    switch (priv->params.rx_cq_moderation_mode) {
    case 0:
        MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
        MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        break;
    case 1:
        MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
        MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
        if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
            MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
        else
            MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        break;
    case 2:
        mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr);
        MLX5_SET(cqc, cqc, cq_period, curr.usec);
        MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        break;
    case 3:
        mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr);
        MLX5_SET(cqc, cqc, cq_period, curr.usec);
        MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
        if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
            MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
        else
            MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        break;
    default:
        break;
    }

    mlx5e_dim_build_cq_param(priv, param);

    mlx5e_build_common_cq_param(priv, param);
}

static void
mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
    void *cqc = param->cqc;

    MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
    MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
    MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);

    switch (priv->params.tx_cq_moderation_mode) {
    case 0:
        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        break;
    default:
        if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
            MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
        else
            MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        break;
    }

    mlx5e_build_common_cq_param(priv, param);
}

static void
mlx5e_build_channel_param(struct mlx5e_priv *priv,
    struct mlx5e_channel_param *cparam)
{
    memset(cparam, 0, sizeof(*cparam));

    mlx5e_build_rq_param(priv, &cparam->rq);
    mlx5e_build_sq_param(priv, &cparam->sq);
    mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
    mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
static int
mlx5e_open_channels(struct mlx5e_priv *priv)
{
    struct mlx5e_channel_param cparam;
    int err;
    int i;
    int j;

    mlx5e_build_channel_param(priv, &cparam);
    for (i = 0; i < priv->params.num_channels; i++) {
        err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
        if (err)
            goto err_close_channels;
    }

    for (j = 0; j < priv->params.num_channels; j++) {
        err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
        if (err)
            goto err_close_channels;
    }
    return (0);

err_close_channels:
    while (i--) {
        mlx5e_close_channel(&priv->channel[i]);
        mlx5e_close_channel_wait(&priv->channel[i]);
    }
    return (err);
}

static void
mlx5e_close_channels(struct mlx5e_priv *priv)
{
    int i;

    for (i = 0; i < priv->params.num_channels; i++)
        mlx5e_close_channel(&priv->channel[i]);
    for (i = 0; i < priv->params.num_channels; i++)
        mlx5e_close_channel_wait(&priv->channel[i]);
}

static int
mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
{

    if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
        uint8_t cq_mode;

        switch (priv->params.tx_cq_moderation_mode) {
        case 0:
        case 2:
            cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
            break;
        default:
            cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
            break;
        }

        return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
            priv->params.tx_cq_moderation_usec,
            priv->params.tx_cq_moderation_pkts,
            cq_mode));
    }

    return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
        priv->params.tx_cq_moderation_usec,
        priv->params.tx_cq_moderation_pkts));
}

static int
mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
{

    if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
        uint8_t cq_mode;
        uint8_t dim_mode;
        int retval;

        switch (priv->params.rx_cq_moderation_mode) {
        case 0:
        case 2:
            cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
            dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
            break;
        default:
            cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
            dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
            break;
        }

        /* tear down dynamic interrupt moderation */
        mtx_lock(&rq->mtx);
        rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
        mtx_unlock(&rq->mtx);

        /* wait for dynamic interrupt moderation work task, if any */
        cancel_work_sync(&rq->dim.work);

        if (priv->params.rx_cq_moderation_mode >= 2) {
            struct net_dim_cq_moder curr;

            mlx5e_get_default_profile(priv, dim_mode, &curr);

            retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
                curr.usec, curr.pkts, cq_mode);

            /* set dynamic interrupt moderation mode and zero defaults */
            mtx_lock(&rq->mtx);
            rq->dim.mode = dim_mode;
            rq->dim.state = 0;
            rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE;
            mtx_unlock(&rq->mtx);
        } else {
            retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
                priv->params.rx_cq_moderation_usec,
                priv->params.rx_cq_moderation_pkts,
                cq_mode);
        }
        return (retval);
    }

    return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
        priv->params.rx_cq_moderation_usec,
        priv->params.rx_cq_moderation_pkts));
}

static int
mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
    int err;
    int i;

    err = mlx5e_refresh_rq_params(priv, &c->rq);
    if (err)
        goto done;

    for (i = 0; i != c->num_tc; i++) {
        err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
        if (err)
            goto done;
    }
done:
    return (err);
}

int
mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
{
    int i;

    /* check if channels are closed */
    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
        return (EINVAL);

    for (i = 0; i < priv->params.num_channels; i++) {
        int err;

        err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
        if (err)
            return (err);
    }
    return (0);
}

static int
mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    u32 in[MLX5_ST_SZ_DW(create_tis_in)];
    void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

    memset(in, 0, sizeof(in));

    MLX5_SET(tisc, tisc, prio, tc);
    MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

    return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
}

static void
mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
    mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int
mlx5e_open_tises(struct mlx5e_priv *priv)
{
    int num_tc = priv->num_tc;
    int err;
    int tc;

    for (tc = 0; tc < num_tc; tc++) {
        err = mlx5e_open_tis(priv, tc);
        if (err)
            goto err_close_tises;
    }

    return (0);

err_close_tises:
    for (tc--; tc >= 0; tc--)
        mlx5e_close_tis(priv, tc);

    return (err);
}

static void
mlx5e_close_tises(struct mlx5e_priv *priv)
{
    int num_tc = priv->num_tc;
    int tc;

    for (tc = 0; tc < num_tc; tc++)
        mlx5e_close_tis(priv, tc);
}

static int
mlx5e_open_rqt(struct mlx5e_priv *priv)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    u32 *in;
    u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
    void *rqtc;
    int inlen;
    int err;
    int sz;
    int i;

    sz = 1 << priv->params.rx_hash_log_tbl_sz;

    inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
    in = mlx5_vzalloc(inlen);
    if (in == NULL)
        return (-ENOMEM);
    rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

    MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
    MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

    for (i = 0; i < sz; i++) {
        int ix = i;
#ifdef RSS
        ix = rss_get_indirection_to_bucket(ix);
#endif
        /* ensure we don't overflow */
        ix %= priv->params.num_channels;

        /* apply receive side scaling stride, if any */
        ix -= ix % (int)priv->params.channels_rsss;

        MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
    }

    MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);

    err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
    if (!err)
        priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);

    kvfree(in);

    return (err);
}

static void
mlx5e_close_rqt(struct mlx5e_priv *priv)
{
    u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};

    MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
    MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);

    mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
}
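/*
 * Build a TIR (transport interface receive) context.  MLX5E_TT_ANY
 * dispatches directly to the first channel's receive queue; all
 * other traffic types are spread over the receive queue table by a
 * Toeplitz hash computed over the fields selected below.
 */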
static void
mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
{
    void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
    __be32 *hkey;

    MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define ROUGH_MAX_L2_L3_HDR_SZ 256

#define MLX5_HASH_IP    (MLX5_HASH_FIELD_SEL_SRC_IP |\
             MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_ALL   (MLX5_HASH_FIELD_SEL_SRC_IP |\
             MLX5_HASH_FIELD_SEL_DST_IP |\
             MLX5_HASH_FIELD_SEL_L4_SPORT |\
             MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP |\
                 MLX5_HASH_FIELD_SEL_DST_IP |\
                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

    if (priv->params.hw_lro_en) {
        MLX5_SET(tirc, tirc, lro_enable_mask,
            MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
            MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_msg_sz,
            (priv->params.lro_wqe_sz -
            ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        /* TODO: add the option to choose timer value dynamically */
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
            MLX5_CAP_ETH(priv->mdev,
            lro_timer_supported_periods[2]));
    }

    /* setup parameters for hashing TIR type, if any */
    switch (tt) {
    case MLX5E_TT_ANY:
        MLX5_SET(tirc, tirc, disp_type,
            MLX5_TIRC_DISP_TYPE_DIRECT);
        MLX5_SET(tirc, tirc, inline_rqn,
            priv->channel[0].rq.rqn);
        break;
    default:
        MLX5_SET(tirc, tirc, disp_type,
            MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table,
            priv->rqtn);
        MLX5_SET(tirc, tirc, rx_hash_fn,
            MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
        hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
#ifdef RSS
        /*
         * The FreeBSD RSS implementation does currently not
         * support symmetric Toeplitz hashes:
         */
        MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
        rss_getkey((uint8_t *)hkey);
#else
        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
        hkey[0] = cpu_to_be32(0xD181C62C);
        hkey[1] = cpu_to_be32(0xF7F4DB5B);
        hkey[2] = cpu_to_be32(0x1983A2FC);
        hkey[3] = cpu_to_be32(0x943E1ADB);
        hkey[4] = cpu_to_be32(0xD9389E6B);
        hkey[5] = cpu_to_be32(0xD1039C2C);
        hkey[6] = cpu_to_be32(0xA74499AD);
        hkey[7] = cpu_to_be32(0x593D56D9);
        hkey[8] = cpu_to_be32(0xF3253C06);
        hkey[9] = cpu_to_be32(0x2ADC1FFC);
#endif
        break;
    }
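    /*
     * Select which packet header fields are fed to the RX hash
     * function for each traffic type: TCP and UDP hash the full
     * 4-tuple (unless the kernel RSS configuration requests
     * 2-tuple hashing), the IPsec types hash addresses plus SPI,
     * and plain IPv4/IPv6 hash addresses only.
     */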
    switch (tt) {
    case MLX5E_TT_IPV4_TCP:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV4);
        MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
            MLX5_L4_PROT_TYPE_TCP);
#ifdef RSS
        if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
            MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                MLX5_HASH_IP);
        } else
#endif
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_ALL);
        break;

    case MLX5E_TT_IPV6_TCP:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV6);
        MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
            MLX5_L4_PROT_TYPE_TCP);
#ifdef RSS
        if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
            MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                MLX5_HASH_IP);
        } else
#endif
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_ALL);
        break;

    case MLX5E_TT_IPV4_UDP:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV4);
        MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
            MLX5_L4_PROT_TYPE_UDP);
#ifdef RSS
        if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
            MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                MLX5_HASH_IP);
        } else
#endif
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_ALL);
        break;

    case MLX5E_TT_IPV6_UDP:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV6);
        MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
            MLX5_L4_PROT_TYPE_UDP);
#ifdef RSS
        if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
            MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                MLX5_HASH_IP);
        } else
#endif
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_ALL);
        break;

    case MLX5E_TT_IPV4_IPSEC_AH:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV4);
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_IP_IPSEC_SPI);
        break;

    case MLX5E_TT_IPV6_IPSEC_AH:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV6);
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_IP_IPSEC_SPI);
        break;

    case MLX5E_TT_IPV4_IPSEC_ESP:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV4);
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_IP_IPSEC_SPI);
        break;

    case MLX5E_TT_IPV6_IPSEC_ESP:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV6);
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_IP_IPSEC_SPI);
        break;

    case MLX5E_TT_IPV4:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV4);
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_IP);
        break;

    case MLX5E_TT_IPV6:
        MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
            MLX5_L3_PROT_TYPE_IPV6);
        MLX5_SET(rx_hash_field_select, hfso, selected_fields,
            MLX5_HASH_IP);
        break;

    default:
        break;
    }
}

static int
mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    u32 *in;
    void *tirc;
    int inlen;
    int err;

    inlen = MLX5_ST_SZ_BYTES(create_tir_in);
    in = mlx5_vzalloc(inlen);
    if (in == NULL)
        return (-ENOMEM);
    tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);

    mlx5e_build_tir_ctx(priv, tirc, tt);

    err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

    kvfree(in);

    return (err);
}

static void
mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
    mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}

static int
mlx5e_open_tirs(struct mlx5e_priv *priv)
{
    int err;
    int i;

    for (i = 0; i < MLX5E_NUM_TT; i++) {
        err = mlx5e_open_tir(priv, i);
        if (err)
            goto err_close_tirs;
    }

    return (0);

err_close_tirs:
    for (i--; i >= 0; i--)
        mlx5e_close_tir(priv, i);

    return (err);
}

static void
mlx5e_close_tirs(struct mlx5e_priv *priv)
{
    int i;

    for (i = 0; i < MLX5E_NUM_TT; i++)
        mlx5e_close_tir(priv, i);
}

/*
 * SW MTU does not include headers,
 * HW MTU includes all headers and checksums.
 */
static int
mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
{
    struct mlx5e_priv *priv = ifp->if_softc;
    struct mlx5_core_dev *mdev = priv->mdev;
    int hw_mtu;
    int err;

    hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);

    err = mlx5_set_port_mtu(mdev, hw_mtu);
    if (err) {
        if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
            __func__, sw_mtu, err);
        return (err);
    }

    /* update vport context MTU */
    err = mlx5_set_vport_mtu(mdev, hw_mtu);
    if (err) {
        if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
            __func__, err);
    }

    ifp->if_mtu = sw_mtu;

    err = mlx5_query_vport_mtu(mdev, &hw_mtu);
    if (err || !hw_mtu) {
        /* fallback to port oper mtu */
        err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
    }
    if (err) {
        if_printf(ifp, "Query port MTU, after setting new "
            "MTU value, failed\n");
        return (err);
    } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
        err = -E2BIG;
        if_printf(ifp, "Port MTU %d is smaller than "
            "ifp mtu %d\n", hw_mtu, sw_mtu);
    } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
        err = -EINVAL;
        if_printf(ifp, "Port MTU %d is bigger than "
            "ifp mtu %d\n", hw_mtu, sw_mtu);
    }
    priv->params_ethtool.hw_mtu = hw_mtu;

    return (err);
}
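/*
 * Bring the interface up while holding the priv state lock.
 * Resources are created in strict dependency order: TISes, a queue
 * counter, the channels, the RQ table, the TIRs, the flow table and
 * finally the VLAN steering rules.  The error unwind below releases
 * them in the opposite order.
 */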
int
mlx5e_open_locked(struct ifnet *ifp)
{
    struct mlx5e_priv *priv = ifp->if_softc;
    int err;
    u16 set_id;

    /* check if already opened */
    if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
        return (0);

#ifdef RSS
    if (rss_getnumbuckets() > priv->params.num_channels) {
        if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
            "channels(%u) available\n", rss_getnumbuckets(),
            priv->params.num_channels);
    }
#endif
    err = mlx5e_open_tises(priv);
    if (err) {
        if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
            __func__, err);
        return (err);
    }
    err = mlx5_vport_alloc_q_counter(priv->mdev,
        MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
    if (err) {
        if_printf(priv->ifp,
            "%s: mlx5_vport_alloc_q_counter failed: %d\n",
            __func__, err);
        goto err_close_tises;
    }
    /* store counter set ID */
    priv->counter_set_id = set_id;

    err = mlx5e_open_channels(priv);
    if (err) {
        if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
            __func__, err);
        goto err_dalloc_q_counter;
    }
    err = mlx5e_open_rqt(priv);
    if (err) {
        if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
            __func__, err);
        goto err_close_channels;
    }
    err = mlx5e_open_tirs(priv);
    if (err) {
        if_printf(ifp, "%s: mlx5e_open_tirs failed, %d\n",
            __func__, err);
        goto err_close_rqls;
    }
    err = mlx5e_open_flow_table(priv);
    if (err) {
        if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
            __func__, err);
        goto err_close_tirs;
    }
    err = mlx5e_add_all_vlan_rules(priv);
    if (err) {
        if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
            __func__, err);
        goto err_close_flow_table;
    }
    set_bit(MLX5E_STATE_OPENED, &priv->state);

    mlx5e_update_carrier(priv);
    mlx5e_set_rx_mode_core(priv);

    return (0);

err_close_flow_table:
    mlx5e_close_flow_table(priv);

err_close_tirs:
    mlx5e_close_tirs(priv);

err_close_rqls:
    mlx5e_close_rqt(priv);

err_close_channels:
    mlx5e_close_channels(priv);

err_dalloc_q_counter:
    mlx5_vport_dealloc_q_counter(priv->mdev,
        MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);

err_close_tises:
    mlx5e_close_tises(priv);

    return (err);
}

static void
mlx5e_open(void *arg)
{
    struct mlx5e_priv *priv = arg;

    PRIV_LOCK(priv);
    if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
        if_printf(priv->ifp,
            "%s: Setting port status to up failed\n",
            __func__);

    mlx5e_open_locked(priv->ifp);
    priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
    PRIV_UNLOCK(priv);
}

int
mlx5e_close_locked(struct ifnet *ifp)
{
    struct mlx5e_priv *priv = ifp->if_softc;

    /* check if already closed */
    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
        return (0);

    clear_bit(MLX5E_STATE_OPENED, &priv->state);

    mlx5e_set_rx_mode_core(priv);
    mlx5e_del_all_vlan_rules(priv);
    if_link_state_change(priv->ifp, LINK_STATE_DOWN);
    mlx5e_close_flow_table(priv);
    mlx5e_close_tirs(priv);
    mlx5e_close_rqt(priv);
    mlx5e_close_channels(priv);
    mlx5_vport_dealloc_q_counter(priv->mdev,
        MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
    mlx5e_close_tises(priv);

    return (0);
}

#if (__FreeBSD_version >= 1100000)
static uint64_t
mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
{
    struct mlx5e_priv *priv = ifp->if_softc;
    u64 retval;

    /* PRIV_LOCK(priv); XXX not allowed */
    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        retval = priv->stats.vport.rx_packets;
        break;
    case IFCOUNTER_IERRORS:
        retval = priv->stats.pport.in_range_len_errors +
            priv->stats.pport.out_of_range_len +
            priv->stats.pport.too_long_errors +
            priv->stats.pport.check_seq_err +
            priv->stats.pport.alignment_err;
        break;
    case IFCOUNTER_IQDROPS:
        retval = priv->stats.vport.rx_out_of_buffer;
        break;
    case IFCOUNTER_OPACKETS:
        retval = priv->stats.vport.tx_packets;
        break;
    case IFCOUNTER_OERRORS:
        retval = priv->stats.port_stats_debug.out_discards;
        break;
    case IFCOUNTER_IBYTES:
        retval = priv->stats.vport.rx_bytes;
        break;
    case IFCOUNTER_OBYTES:
        retval = priv->stats.vport.tx_bytes;
        break;
    case IFCOUNTER_IMCASTS:
        retval = priv->stats.vport.rx_multicast_packets;
        break;
    case IFCOUNTER_OMCASTS:
        retval = priv->stats.vport.tx_multicast_packets;
        break;
    case IFCOUNTER_OQDROPS:
        retval = priv->stats.vport.tx_queue_dropped;
        break;
    case IFCOUNTER_COLLISIONS:
        retval = priv->stats.pport.collisions;
        break;
    default:
        retval = if_get_counter_default(ifp, cnt);
        break;
    }
    /* PRIV_UNLOCK(priv); XXX not allowed */
    return (retval);
}
#endif

static void
mlx5e_set_rx_mode(struct ifnet *ifp)
{
    struct mlx5e_priv *priv = ifp->if_softc;

    queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int
mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct mlx5e_priv *priv;
    struct ifreq *ifr;
    struct ifi2creq i2c;
    int error = 0;
    int mask = 0;
    int size_read = 0;
    int module_status;
    int module_num;
    int max_mtu;
    uint8_t read_addr;

    priv = ifp->if_softc;

    /* check if detaching */
    if (priv == NULL || priv->gone != 0)
        return (ENXIO);

    switch (command) {
    case SIOCSIFMTU:
        ifr = (struct ifreq *)data;

        PRIV_LOCK(priv);
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu);

        if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
            ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
            int was_opened;

            was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
            if (was_opened)
                mlx5e_close_locked(ifp);

            /* set new MTU */
            mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);

            if (was_opened)
                mlx5e_open_locked(ifp);
        } else {
            error = EINVAL;
            if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
                MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
        }
        PRIV_UNLOCK(priv);
        break;
    case SIOCSIFFLAGS:
        if ((ifp->if_flags & IFF_UP) &&
            (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
            mlx5e_set_rx_mode(ifp);
            break;
        }
        PRIV_LOCK(priv);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
                    mlx5e_open_locked(ifp);
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
            }
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mlx5_set_port_status(priv->mdev,
                    MLX5_PORT_DOWN);
                if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
                    mlx5e_close_locked(ifp);
                mlx5e_update_carrier(priv);
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            }
        }
        PRIV_UNLOCK(priv);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        mlx5e_set_rx_mode(ifp);
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
    case SIOCGIFXMEDIA:
        ifr = (struct ifreq *)data;
        error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
        break;
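    /*
     * SIOCSIFCAP toggles individual interface capabilities.  Note
     * the dependencies enforced below: TSO cannot be enabled
     * without the matching transmit checksum offload, and clearing
     * a transmit checksum capability also forces the dependent TSO
     * capability off.
     */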
    case SIOCSIFCAP:
        ifr = (struct ifreq *)data;
        PRIV_LOCK(priv);
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;

        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

            if (IFCAP_TSO4 & ifp->if_capenable &&
                !(IFCAP_TXCSUM & ifp->if_capenable)) {
                ifp->if_capenable &= ~IFCAP_TSO4;
                ifp->if_hwassist &= ~CSUM_IP_TSO;
                if_printf(ifp,
                    "tso4 disabled due to -txcsum.\n");
            }
        }
        if (mask & IFCAP_TXCSUM_IPV6) {
            ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
            ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

            if (IFCAP_TSO6 & ifp->if_capenable &&
                !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                ifp->if_capenable &= ~IFCAP_TSO6;
                ifp->if_hwassist &= ~CSUM_IP6_TSO;
                if_printf(ifp,
                    "tso6 disabled due to -txcsum6.\n");
            }
        }
        if (mask & IFCAP_RXCSUM)
            ifp->if_capenable ^= IFCAP_RXCSUM;
        if (mask & IFCAP_RXCSUM_IPV6)
            ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
        if (mask & IFCAP_TSO4) {
            if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                !(IFCAP_TXCSUM & ifp->if_capenable)) {
                if_printf(ifp, "enable txcsum first.\n");
                error = EAGAIN;
                goto out;
            }
            ifp->if_capenable ^= IFCAP_TSO4;
            ifp->if_hwassist ^= CSUM_IP_TSO;
        }
        if (mask & IFCAP_TSO6) {
            if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                if_printf(ifp, "enable txcsum6 first.\n");
                error = EAGAIN;
                goto out;
            }
            ifp->if_capenable ^= IFCAP_TSO6;
            ifp->if_hwassist ^= CSUM_IP6_TSO;
        }
        if (mask & IFCAP_VLAN_HWFILTER) {
            if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
                mlx5e_disable_vlan_filter(priv);
            else
                mlx5e_enable_vlan_filter(priv);

            ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
        }
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (mask & IFCAP_WOL_MAGIC)
            ifp->if_capenable ^= IFCAP_WOL_MAGIC;

        VLAN_CAPABILITIES(ifp);
        /* turning off LRO also means turning off HW LRO, if it is on */
        if (mask & IFCAP_LRO) {
            int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
            bool need_restart = false;

            ifp->if_capenable ^= IFCAP_LRO;

            /* figure out if updating HW LRO is needed */
            if (!(ifp->if_capenable & IFCAP_LRO)) {
                if (priv->params.hw_lro_en) {
                    priv->params.hw_lro_en = false;
                    need_restart = true;
                }
            } else {
                if (priv->params.hw_lro_en == false &&
                    priv->params_ethtool.hw_lro != 0) {
                    priv->params.hw_lro_en = true;
                    need_restart = true;
                }
            }
            if (was_opened && need_restart) {
                mlx5e_close_locked(ifp);
                mlx5e_open_locked(ifp);
            }
        }
        if (mask & IFCAP_HWRXTSTMP) {
            ifp->if_capenable ^= IFCAP_HWRXTSTMP;
            if (ifp->if_capenable & IFCAP_HWRXTSTMP) {
                if (priv->clbr_done == 0)
                    mlx5e_reset_calibration_callout(priv);
            } else {
                callout_drain(&priv->tstmp_clbr);
                priv->clbr_done = 0;
            }
        }
out:
        PRIV_UNLOCK(priv);
        break;

    case SIOCGI2C:
        ifr = (struct ifreq *)data;

        /*
         * Copy from the user-space address ifr_data to the
         * kernel-space address i2c
         */
        error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
        if (error)
            break;

        if (i2c.len > sizeof(i2c.data)) {
            error = EINVAL;
            break;
        }

        PRIV_LOCK(priv);
        /* get module_num which is required for the query_eeprom */
        error = mlx5_query_module_num(priv->mdev, &module_num);
        if (error) {
            if_printf(ifp, "Query module num failed, eeprom "
                "reading is not supported\n");
            error = EINVAL;
            goto err_i2c;
        }
        /* check if module is present before doing an access */
        module_status = mlx5_query_module_status(priv->mdev, module_num);
        if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED &&
            module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) {
            error = EINVAL;
            goto err_i2c;
        }
        /*
         * Currently 0xA0 and 0xA2 are the only addresses
         * permitted.  The internal conversion is as follows:
         */
        if (i2c.dev_addr == 0xA0)
            read_addr = MLX5E_I2C_ADDR_LOW;
        else if (i2c.dev_addr == 0xA2)
            read_addr = MLX5E_I2C_ADDR_HIGH;
        else {
            if_printf(ifp, "Query eeprom failed, "
                "Invalid Address: %X\n", i2c.dev_addr);
            error = EINVAL;
            goto err_i2c;
        }
        error = mlx5_query_eeprom(priv->mdev,
            read_addr, MLX5E_EEPROM_LOW_PAGE,
            (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
            (uint32_t *)i2c.data, &size_read);
        if (error) {
            if_printf(ifp, "Query eeprom failed, eeprom "
                "reading is not supported\n");
            error = EINVAL;
            goto err_i2c;
        }

        if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
            error = mlx5_query_eeprom(priv->mdev,
                read_addr, MLX5E_EEPROM_LOW_PAGE,
                (uint32_t)(i2c.offset + size_read),
                (uint32_t)(i2c.len - size_read), module_num,
                (uint32_t *)(i2c.data + size_read), &size_read);
        }
        if (error) {
            if_printf(ifp, "Query eeprom failed, eeprom "
                "reading is not supported\n");
            error = EINVAL;
            goto err_i2c;
        }

        error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
err_i2c:
        PRIV_UNLOCK(priv);
        break;

    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }
    return (error);
}

static int
mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
    /*
     * TODO: uncomment once FW really sets all these bits if
     * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
     * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
     * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
     * -ENOTSUPP;
     */

    /* TODO: add more must-have features */

    if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
        return (-ENODEV);

    return (0);
}

static u16
mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
    uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U;

    bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2;

    /* verify against driver hardware limit */
    if (bf_buf_size > MLX5E_MAX_TX_INLINE)
        bf_buf_size = MLX5E_MAX_TX_INLINE;

    return (bf_buf_size);
}

static int
mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
    struct mlx5e_priv *priv,
    int num_comp_vectors)
{
    int err;

    /*
     * TODO: Consider link speed for setting "log_sq_size",
     * "log_rq_size" and "cq_moderation_xxx":
     */
    priv->params.log_sq_size =
        MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
    priv->params.log_rq_size =
        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
    priv->params.rx_cq_moderation_usec =
        MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
        MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
        MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
    priv->params.rx_cq_moderation_mode =
        MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
    priv->params.rx_cq_moderation_pkts =
        MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
    priv->params.tx_cq_moderation_usec =
        MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
    priv->params.tx_cq_moderation_pkts =
        MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
    priv->params.min_rx_wqes =
        MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
    priv->params.rx_hash_log_tbl_sz =
        (order_base_2(num_comp_vectors) >
        MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
        order_base_2(num_comp_vectors) :
        MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
    priv->params.num_tc = 1;
    priv->params.default_vlan_prio = 0;
    priv->counter_set_id = -1;
    priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);

    err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
    if (err)
        return (err);

    /*
     * HW LRO is currently defaulted to off.  When it isn't
     * anymore we will consider the HW capability:
     * "!!MLX5_CAP_ETH(mdev, lro_cap)"
     */
    priv->params.hw_lro_en = false;
    priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

    /*
     * CQE zipping is currently defaulted to off.  When it isn't
     * anymore we will consider the HW capability:
     * "!!MLX5_CAP_GEN(mdev, cqe_compression)"
     */
    priv->params.cqe_zipping_en = false;

    priv->mdev = mdev;
    priv->params.num_channels = num_comp_vectors;
    priv->params.channels_rsss = 1;
    priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
    priv->queue_mapping_channel_mask =
        roundup_pow_of_two(num_comp_vectors) - 1;
    priv->num_tc = priv->params.num_tc;
    priv->default_vlan_prio = priv->params.default_vlan_prio;

    INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
    INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
    INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);

    return (0);
}

static int
mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
    struct mlx5_core_mr *mkey)
{
    struct ifnet *ifp = priv->ifp;
    struct mlx5_core_dev *mdev = priv->mdev;
    int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
    void *mkc;
    u32 *in;
    int err;

    in = mlx5_vzalloc(inlen);
    if (in == NULL) {
        if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
        return (-ENOMEM);
    }

    mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
    MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
    MLX5_SET(mkc, mkc, lw, 1);
    MLX5_SET(mkc, mkc, lr, 1);

    MLX5_SET(mkc, mkc, pd, pdn);
    MLX5_SET(mkc, mkc, length64, 1);
    MLX5_SET(mkc, mkc, qpn, 0xffffff);

    err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
    if (err)
        if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
            __func__, err);

    kvfree(in);
    return (err);
}

static const char *mlx5e_vport_stats_desc[] = {
    MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
};

static const char *mlx5e_pport_stats_desc[] = {
    MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
};

static void
mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
{
    mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
    sx_init(&priv->state_lock, "mlx5state");
    callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
    MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
}

static void
mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
{
    mtx_destroy(&priv->async_events_mtx);
    sx_destroy(&priv->state_lock);
}

static int
sysctl_firmware(SYSCTL_HANDLER_ARGS)
{
    /*
     * %d.%d.%d is the string format.
     * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
     * We need at most 5 chars to store that.
     * It also has: two "." and NULL at the end, which means we need 18
     * (5 * 3 + 3) chars at most.
     */
    char fw[18];
    struct mlx5e_priv *priv = arg1;
    int error;

    snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
        fw_rev_sub(priv->mdev));
    error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
    return (error);
}

static void
mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
{
    int i;

    for (i = 0; i < ch->num_tc; i++)
        mlx5e_drain_sq(&ch->sq[i]);
}

static void
mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
{

    sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
    sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
    mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
    sq->doorbell.d64 = 0;
}

void
mlx5e_resume_sq(struct mlx5e_sq *sq)
{
    int err;

    /* check if already enabled */
    if (READ_ONCE(sq->running) != 0)
        return;

    err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
        MLX5_SQC_STATE_RST);
    if (err != 0) {
        if_printf(sq->ifp,
            "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
    }

    sq->cc = 0;
    sq->pc = 0;

    /* reset doorbell prior to moving from RST to RDY */
    mlx5e_reset_sq_doorbell_record(sq);

    err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
        MLX5_SQC_STATE_RDY);
    if (err != 0) {
        if_printf(sq->ifp,
            "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
    }

    sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
    WRITE_ONCE(sq->running, 1);
}

static void
mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
{
    int i;

    for (i = 0; i < ch->num_tc; i++)
        mlx5e_resume_sq(&ch->sq[i]);
}

static void
mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
{
    struct mlx5e_rq *rq = &ch->rq;
    int err;

    mtx_lock(&rq->mtx);
    rq->enabled = 0;
    callout_stop(&rq->watchdog);
    mtx_unlock(&rq->mtx);

    callout_drain(&rq->watchdog);

    err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
    if (err != 0) {
        if_printf(rq->ifp,
            "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
    }

    while (!mlx5_wq_ll_is_empty(&rq->wq)) {
        msleep(1);
        rq->cq.mcq.comp(&rq->cq.mcq);
    }

    /*
     * Transitioning into RST state will allow the FW to track less ERR
     * state queues, thus reducing the receive queue flushing time.
     */
    err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
    if (err != 0) {
        if_printf(rq->ifp,
            "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
    }
}

static void
mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
{
    struct mlx5e_rq *rq = &ch->rq;
    int err;

    rq->wq.wqe_ctr = 0;
    mlx5_wq_ll_update_db_record(&rq->wq);
    err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
    if (err != 0) {
        if_printf(rq->ifp,
            "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
    }

    rq->enabled = 1;

    rq->cq.mcq.comp(&rq->cq.mcq);
}
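/*
 * Enable or disable transmit/receive DMA on all channels while the
 * interface remains open: a non-zero "value" drains and stops the
 * queues, zero brings them back to the ready state.
 */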
void
mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
{
    int i;

    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
        return;

    for (i = 0; i < priv->params.num_channels; i++) {
        if (value)
            mlx5e_disable_tx_dma(&priv->channel[i]);
        else
            mlx5e_enable_tx_dma(&priv->channel[i]);
    }
}

void
mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
{
    int i;

    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
        return;

    for (i = 0; i < priv->params.num_channels; i++) {
        if (value)
            mlx5e_disable_rx_dma(&priv->channel[i]);
        else
            mlx5e_enable_rx_dma(&priv->channel[i]);
    }
}

static void
mlx5e_add_hw_stats(struct mlx5e_priv *priv)
{
    SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
        OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
        sysctl_firmware, "A", "HCA firmware version");

    SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
        OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
        "Board ID");
}

static int
mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{
    struct mlx5e_priv *priv = arg1;
    uint8_t temp[MLX5E_MAX_PRIORITY];
    uint32_t tx_pfc;
    int err;
    int i;

    PRIV_LOCK(priv);

    tx_pfc = priv->params.tx_priority_flow_control;

    for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
        temp[i] = (tx_pfc >> i) & 1;

    err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
    if (err || !req->newptr)
        goto done;
    err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
    if (err)
        goto done;

    priv->params.tx_priority_flow_control = 0;

    /* range check input value */
    for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
        if (temp[i] > 1) {
            err = ERANGE;
            goto done;
        }
        priv->params.tx_priority_flow_control |= (temp[i] << i);
    }

    /* check if update is required */
    if (tx_pfc != priv->params.tx_priority_flow_control)
        err = -mlx5e_set_port_pfc(priv);
done:
    if (err != 0)
        priv->params.tx_priority_flow_control = tx_pfc;
    PRIV_UNLOCK(priv);

    return (err);
}

static int
mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{
    struct mlx5e_priv *priv = arg1;
    uint8_t temp[MLX5E_MAX_PRIORITY];
    uint32_t rx_pfc;
    int err;
    int i;

    PRIV_LOCK(priv);

    rx_pfc = priv->params.rx_priority_flow_control;

    for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
        temp[i] = (rx_pfc >> i) & 1;

    err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
    if (err || !req->newptr)
        goto done;
    err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
    if (err)
        goto done;

    priv->params.rx_priority_flow_control = 0;

    /* range check input value */
    for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
        if (temp[i] > 1) {
            err = ERANGE;
            goto done;
        }
        priv->params.rx_priority_flow_control |= (temp[i] << i);
    }

    /* check if update is required */
    if (rx_pfc != priv->params.rx_priority_flow_control)
        err = -mlx5e_set_port_pfc(priv);
done:
    if (err != 0)
        priv->params.rx_priority_flow_control = rx_pfc;
    PRIV_UNLOCK(priv);

    return (err);
}
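/*
 * Register the pause frame and priority flow control (PFC) sysctls
 * and push the initial configuration to the firmware.  Global pause
 * frames default to on and PFC to off; the firmware refuses to
 * enable both at once, which is handled below by falling back to
 * plain pause frames.
 */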
static void
mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
{
#if (__FreeBSD_version < 1100000)
    char path[96];
#endif
    int error;

    /* enable pauseframes by default */
    priv->params.tx_pauseframe_control = 1;
    priv->params.rx_pauseframe_control = 1;

    /* disable ports flow control, PFC, by default */
    priv->params.tx_priority_flow_control = 0;
    priv->params.rx_priority_flow_control = 0;

#if (__FreeBSD_version < 1100000)
    /* compute path for sysctl */
    snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
        device_get_unit(priv->mdev->pdev->dev.bsddev));

    /* try to fetch tunable, if any */
    TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);

    /* compute path for sysctl */
    snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
        device_get_unit(priv->mdev->pdev->dev.bsddev));

    /* try to fetch tunable, if any */
    TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
#endif

    /* register pauseframe SYSCTLs */
    SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
        OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
        &priv->params.tx_pauseframe_control, 0,
        "Set to enable TX pause frames. Clear to disable.");

    SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
        OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
        &priv->params.rx_pauseframe_control, 0,
        "Set to enable RX pause frames. Clear to disable.");

    /* register priority flow control, PFC, SYSCTLs */
    SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
        OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
        CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU",
        "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable.");

    SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
        OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
        CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU",
        "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable.");

    PRIV_LOCK(priv);

    /* range check */
    priv->params.tx_pauseframe_control =
        priv->params.tx_pauseframe_control ? 1 : 0;
    priv->params.rx_pauseframe_control =
        priv->params.rx_pauseframe_control ? 1 : 0;

    /* update firmware */
    error = mlx5e_set_port_pause_and_pfc(priv);
    if (error == -EINVAL) {
        if_printf(priv->ifp,
            "Global pauseframes must be disabled before enabling PFC.\n");
        priv->params.rx_priority_flow_control = 0;
        priv->params.tx_priority_flow_control = 0;

        /* update firmware */
        (void) mlx5e_set_port_pause_and_pfc(priv);
    }
    PRIV_UNLOCK(priv);
}

static int
mlx5e_ul_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
    struct mlx5e_priv *priv;
    struct mlx5e_channel *pch;

    priv = ifp->if_softc;

    if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) {
        return (EOPNOTSUPP);
    } else {
        /* keep this code synced with mlx5e_select_queue() */
        u32 ch = priv->params.num_channels;
#ifdef RSS
        u32 temp;

        if (rss_hash2bucket(params->hdr.flowid,
            params->hdr.flowtype, &temp) == 0)
            ch = temp % ch;
        else
#endif
            ch = (params->hdr.flowid % 128) % ch;

        /*
         * NOTE: The channels array is only freed at detach
         * and it is safe to return a pointer to the send tag
         * inside the channels structure as long as we
         * reference the priv.
         */
        pch = priv->channel + ch;

        /* check if send queue is not running */
        if (unlikely(pch->sq[0].running == 0))
            return (ENXIO);
        mlx5e_ref_channel(priv);
        *ppmt = &pch->tag.m_snd_tag;
        return (0);
    }
}

static int
mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
    struct mlx5e_channel *pch =
        container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);

    params->unlimited.max_rate = -1ULL;
    params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]);
    return (0);
}

static void
mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
{
    struct mlx5e_channel *pch =
        container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);

    mlx5e_unref_channel(pch->priv);
}

static int
mlx5e_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{

    switch (params->hdr.type) {
#ifdef RATELIMIT
    case IF_SND_TAG_TYPE_RATE_LIMIT:
        return (mlx5e_rl_snd_tag_alloc(ifp, params, ppmt));
#endif
    case IF_SND_TAG_TYPE_UNLIMITED:
        return (mlx5e_ul_snd_tag_alloc(ifp, params, ppmt));
    default:
        return (EOPNOTSUPP);
    }
}

static int
mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
    struct mlx5e_snd_tag *tag =
        container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);

    switch (tag->type) {
#ifdef RATELIMIT
    case IF_SND_TAG_TYPE_RATE_LIMIT:
        return (mlx5e_rl_snd_tag_modify(pmt, params));
#endif
    case IF_SND_TAG_TYPE_UNLIMITED:
    default:
        return (EOPNOTSUPP);
    }
}

static int
mlx5e_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
    struct mlx5e_snd_tag *tag =
        container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);

    switch (tag->type) {
#ifdef RATELIMIT
    case IF_SND_TAG_TYPE_RATE_LIMIT:
        return (mlx5e_rl_snd_tag_query(pmt, params));
#endif
    case IF_SND_TAG_TYPE_UNLIMITED:
        return (mlx5e_ul_snd_tag_query(pmt, params));
    default:
        return (EOPNOTSUPP);
    }
}

static void
mlx5e_snd_tag_free(struct m_snd_tag *pmt)
{
    struct mlx5e_snd_tag *tag =
        container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);

    switch (tag->type) {
#ifdef RATELIMIT
    case IF_SND_TAG_TYPE_RATE_LIMIT:
        mlx5e_rl_snd_tag_free(pmt);
        break;
#endif
    case IF_SND_TAG_TYPE_UNLIMITED:
        mlx5e_ul_snd_tag_free(pmt);
        break;
    default:
        break;
    }
}
static void *
mlx5e_create_ifp(struct mlx5_core_dev *mdev)
{
	struct ifnet *ifp;
	struct mlx5e_priv *priv;
	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
	u8 connector_type;
	struct sysctl_oid_list *child;
	int ncv = mdev->priv.eq_table.num_comp_vectors;
	char unit[16];
	struct pfil_head_args pa;
	int err;
	int i, j;
	u32 eth_proto_cap;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	bool ext = false;
	u32 speeds_num;
	struct media media_entry = {};

	if (mlx5e_check_required_hca_cap(mdev)) {
		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
		return (NULL);
	}
	/*
	 * Try to allocate the priv and make room for worst-case
	 * number of channel structures:
	 */
	priv = malloc(sizeof(*priv) +
	    (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
	    M_MLX5EN, M_WAITOK | M_ZERO);
	mlx5e_priv_mtx_init(priv);

	ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev);
	if (ifp == NULL) {
		mlx5_core_err(mdev, "if_alloc() failed\n");
		goto err_free_priv;
	}
	ifp->if_softc = priv;
	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = mlx5e_open;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mlx5e_ioctl;
	ifp->if_transmit = mlx5e_xmit;
	ifp->if_qflush = if_qflush;
#if (__FreeBSD_version >= 1100000)
	ifp->if_get_counter = mlx5e_get_counter;
#endif
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	/*
	 * Set driver features
	 */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
	ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP;
	ifp->if_capabilities |= IFCAP_TXRTLMT;
	ifp->if_snd_tag_alloc = mlx5e_snd_tag_alloc;
	ifp->if_snd_tag_free = mlx5e_snd_tag_free;
	ifp->if_snd_tag_modify = mlx5e_snd_tag_modify;
	ifp->if_snd_tag_query = mlx5e_snd_tag_query;

	/* set TSO limits so that we don't have to drop TX packets */
	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;

	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

	/* ifnet sysctl tree */
	sysctl_ctx_init(&priv->sysctl_ctx);
	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
	if (priv->sysctl_ifnet == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}
	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
	if (priv->sysctl_ifnet == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}

	/* HW sysctl tree */
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
	if (priv->sysctl_hw == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}

	err = mlx5e_build_ifp_priv(mdev, priv, ncv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
		goto err_free_sysctl;
	}

	/* reuse mlx5core's watchdog workqueue */
	priv->wq = mdev->priv.health.wq_watchdog;
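	/*
	 * What follows builds up the HCA objects that every queue in
	 * this driver depends on. Roughly, and only as far as this
	 * driver uses them: the UAR page supplies the doorbell area
	 * for the completion queues, the protection domain (PD) and
	 * transport domain (TD) scope the memory and transport
	 * objects, and the mkey created from the PD is what the work
	 * queues use to describe data buffers to the hardware. The
	 * error labels at the bottom of this function tear the same
	 * objects down in reverse order of creation.
	 */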
	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
		    __func__, err);
		goto err_free_wq;
	}
	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
		    __func__, err);
		goto err_unmap_free_uar;
	}
	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
	if (err) {
		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
		    __func__, err);
		goto err_dealloc_pd;
	}
	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
		    __func__, err);
		goto err_dealloc_transport_domain;
	}
	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);

	/* check if we should generate a random MAC address */
	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
	    is_zero_ether_addr(dev_addr)) {
		random_ether_addr(dev_addr);
		if_printf(ifp, "Assigned random MAC address\n");
	}
#ifdef RATELIMIT
	err = mlx5e_rl_init(priv);
	if (err) {
		if_printf(ifp, "%s: mlx5e_rl_init failed, %d\n",
		    __func__, err);
		goto err_create_mkey;
	}
#endif

	/* set default MTU */
	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);

	/* Set default media status */
	priv->media_status_last = IFM_AVALID;
	priv->media_active_last = IFM_ETHER | IFM_AUTO |
	    IFM_ETH_RXPAUSE | IFM_FDX;

	/* setup default pauseframes configuration */
	mlx5e_setup_pauseframes(priv);

	/* Setup supported medias */
	/* TODO: If the PTYS query fails, is it OK to proceed? */
	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (err == 0) {
		ext = MLX5_CAP_PCAM_FEATURE(mdev,
		    ptys_extended_ethernet);
		eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
		    eth_proto_capability);
		if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
			connector_type = MLX5_GET(ptys_reg, out,
			    connector_type);
	} else {
		eth_proto_cap = 0;
		if_printf(ifp, "%s: Query port media capability failed, %d\n",
		    __func__, err);
	}

	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
	    mlx5e_media_change, mlx5e_media_status);
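	/*
	 * Populate the list of supported media. The mode tables are
	 * indexed by [link speed][module type]; entries with a zero
	 * baudrate are holes in the table and get skipped. Each
	 * capability bit the firmware reports yields two ifmedia
	 * entries: a bare one and one with full duplex plus both
	 * pause directions. For example, a port reporting
	 * 100GBase-CR4 support would be advertised both as
	 * IFM_ETHER | IFM_100G_CR4 and as IFM_ETHER | IFM_100G_CR4 |
	 * IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE.
	 */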
	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER;
	for (i = 0; i != speeds_num; i++) {
		for (j = 0; j < MLX5E_LINK_MODES_NUMBER; ++j) {
			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
			    mlx5e_mode_table[i][j];
			if (media_entry.baudrate == 0)
				continue;
			if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
				ifmedia_add(&priv->media,
				    media_entry.subtype |
				    IFM_ETHER, 0, NULL);
				ifmedia_add(&priv->media,
				    media_entry.subtype |
				    IFM_ETHER | IFM_FDX |
				    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
			}
		}
	}

	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);

	/* Set autoselect by default */
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
	ether_ifattach(ifp, dev_addr);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	/* Link is down by default */
	if_link_state_change(ifp, LINK_STATE_DOWN);

	mlx5e_enable_async_events(priv);

	mlx5e_add_hw_stats(priv);

	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
	    priv->stats.vport.arg);

	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
	    priv->stats.pport.arg);

	mlx5e_create_ethtool(priv);

	mtx_lock(&priv->async_events_mtx);
	mlx5e_update_stats(priv);
	mtx_unlock(&priv->async_events_mtx);

	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_clbr_done", CTLFLAG_RD,
	    &priv->clbr_done, 0,
	    "RX timestamps calibration state");
	callout_init(&priv->tstmp_clbr, CALLOUT_DIRECT);
	mlx5e_reset_calibration_callout(priv);

	pa.pa_version = PFIL_VERSION;
	pa.pa_flags = PFIL_IN;
	pa.pa_type = PFIL_TYPE_ETHERNET;
	pa.pa_headname = ifp->if_xname;
	priv->pfil = pfil_head_register(&pa);

	return (priv);

#ifdef RATELIMIT
err_create_mkey:
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
#endif
err_dealloc_transport_domain:
	mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_wq:
	flush_workqueue(priv->wq);

err_free_sysctl:
	sysctl_ctx_free(&priv->sysctl_ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	if_free(ifp);

err_free_priv:
	mlx5e_priv_mtx_destroy(priv);
	free(priv, M_MLX5EN);
	return (NULL);
}

static void
mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct ifnet *ifp = priv->ifp;

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

#ifdef RATELIMIT
	/*
	 * The kernel may still hold references to the ratelimit
	 * channels through outstanding m_snd_tags, and these must go
	 * away before detaching:
	 */
	while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) {
		if_printf(priv->ifp, "Waiting for all ratelimit connections "
		    "to terminate\n");
		pause("W", hz);
	}
#endif
	/* stop watchdog timer */
	callout_drain(&priv->watchdog);

	callout_drain(&priv->tstmp_clbr);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	/* make sure device gets closed */
	PRIV_LOCK(priv);
	mlx5e_close_locked(ifp);
	PRIV_UNLOCK(priv);

	/* wait for all unlimited send tags to go away */
	while (priv->channel_refs != 0) {
		if_printf(priv->ifp, "Waiting for all unlimited connections "
		    "to terminate\n");
		pause("W", hz);
	}

	/* deregister pfil */
	if (priv->pfil != NULL) {
		pfil_head_unregister(priv->pfil);
		priv->pfil = NULL;
	}

	/* unregister device */
	ifmedia_removeall(&priv->media);
	ether_ifdetach(ifp);
	if_free(ifp);

#ifdef RATELIMIT
	mlx5e_rl_cleanup(priv);
#endif
	/* destroy all remaining sysctl nodes */
	sysctl_ctx_free(&priv->stats.vport.ctx);
	sysctl_ctx_free(&priv->stats.pport.ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	sysctl_ctx_free(&priv->sysctl_ctx);

	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	mlx5e_disable_async_events(priv);
	flush_workqueue(priv->wq);
	mlx5e_priv_mtx_destroy(priv);
	free(priv, M_MLX5EN);
}

static void *
mlx5e_get_ifp(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return (priv->ifp);
}

static struct mlx5_interface mlx5e_interface = {
	.add = mlx5e_create_ifp,
	.remove = mlx5e_destroy_ifp,
	.event = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev = mlx5e_get_ifp,
};

void
mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void
mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}

static void
mlx5e_show_version(void __unused *arg)
{

	printf("%s", mlx5e_version);
}
SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);

module_init_order(mlx5e_init, SI_ORDER_THIRD);
module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);

#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
#endif
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);