/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <uapi/rdma/mlx4-abi.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV = 2,
	MLX4_COMMAND_INTERFACE_MAX_REV = 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");

#define MLX4_GET(dest, source, offset) \
	do { \
		void *__p = (char *) (source) + (offset); \
		__be64 val; \
		switch (sizeof(dest)) { \
		case 1: (dest) = *(u8 *) __p; break; \
		case 2: (dest) = be16_to_cpup(__p); break; \
		case 4: (dest) = be32_to_cpup(__p); break; \
		case 8: val = get_unaligned((__be64 *)__p); \
			(dest) = be64_to_cpu(val); break; \
		default: __buggy_use_of_MLX4_GET(); \
		} \
	} while (0)

#define MLX4_PUT(dest, source, offset) \
	do { \
		void *__d = ((char *) (dest) + (offset)); \
		switch (sizeof(source)) { \
		case 1: *(u8 *) __d = (source); break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT(); \
		} \
	} while (0)

static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
support", 99 [18] = "Atomic ops support", 100 [19] = "Raw multicast support", 101 [20] = "Address vector port checking support", 102 [21] = "UD multicast support", 103 [30] = "IBoE support", 104 [32] = "Unicast loopback support", 105 [34] = "FCS header control", 106 [37] = "Wake On LAN (port1) support", 107 [38] = "Wake On LAN (port2) support", 108 [40] = "UDP RSS support", 109 [41] = "Unicast VEP steering support", 110 [42] = "Multicast VEP steering support", 111 [48] = "Counters support", 112 [52] = "RSS IP fragments support", 113 [53] = "Port ETS Scheduler support", 114 [55] = "Port link type sensing support", 115 [59] = "Port management change event support", 116 [61] = "64 byte EQE support", 117 [62] = "64 byte CQE support", 118 }; 119 int i; 120 121 mlx4_dbg(dev, "DEV_CAP flags:\n"); 122 for (i = 0; i < ARRAY_SIZE(fname); ++i) 123 if (fname[i] && (flags & (1LL << i))) 124 mlx4_dbg(dev, " %s\n", fname[i]); 125 } 126 127 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) 128 { 129 static const char * const fname[] = { 130 [0] = "RSS support", 131 [1] = "RSS Toeplitz Hash Function support", 132 [2] = "RSS XOR Hash Function support", 133 [3] = "Device managed flow steering support", 134 [4] = "Automatic MAC reassignment support", 135 [5] = "Time stamping support", 136 [6] = "VST (control vlan insertion/stripping) support", 137 [7] = "FSM (MAC anti-spoofing) support", 138 [8] = "Dynamic QP updates support", 139 [9] = "Device managed flow steering IPoIB support", 140 [10] = "TCP/IP offloads/flow-steering for VXLAN support", 141 [11] = "MAD DEMUX (Secure-Host) support", 142 [12] = "Large cache line (>64B) CQE stride support", 143 [13] = "Large cache line (>64B) EQE stride support", 144 [14] = "Ethernet protocol control support", 145 [15] = "Ethernet Backplane autoneg support", 146 [16] = "CONFIG DEV support", 147 [17] = "Asymmetric EQs support", 148 [18] = "More than 80 VFs support", 149 [19] = "Performance optimized for limited rule configuration flow steering support", 150 [20] = "Recoverable error events support", 151 [21] = "Port Remap support", 152 [22] = "QCN support", 153 [23] = "QP rate limiting support", 154 [24] = "Ethernet Flow control statistics support", 155 [25] = "Granular QoS per VF support", 156 [26] = "Port ETS Scheduler support", 157 [27] = "Port beacon support", 158 [28] = "RX-ALL support", 159 [29] = "802.1ad offload support", 160 [31] = "Modifying loopback source checks using UPDATE_QP support", 161 [32] = "Loopback source checks support", 162 [33] = "RoCEv2 support", 163 [34] = "DMFS Sniffer support (UC & MC)", 164 [35] = "Diag counters per port", 165 [36] = "QinQ VST mode support", 166 [37] = "sl to vl mapping table change event support", 167 [38] = "user MAC support", 168 [39] = "Report driver version to FW support", 169 }; 170 int i; 171 172 for (i = 0; i < ARRAY_SIZE(fname); ++i) 173 if (fname[i] && (flags & (1LL << i))) 174 mlx4_dbg(dev, " %s\n", fname[i]); 175 } 176 177 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) 178 { 179 struct mlx4_cmd_mailbox *mailbox; 180 u32 *inbox; 181 int err = 0; 182 183 #define MOD_STAT_CFG_IN_SIZE 0x100 184 185 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 186 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 187 188 mailbox = mlx4_alloc_cmd_mailbox(dev); 189 if (IS_ERR(mailbox)) 190 return PTR_ERR(mailbox); 191 inbox = mailbox->buf; 192 193 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); 194 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); 195 196 err = mlx4_cmd(dev, 
	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET 0x00
#define QUERY_FUNC_DEVICE_OFFSET 0x01
#define QUERY_FUNC_FUNCTION_OFFSET 0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04
#define QUERY_FUNC_MAX_EQ_OFFSET 0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
		err = __mlx4_register_vlan(&priv->dev, port,
					   vp_admin->default_vlan,
					   &vp_oper->vlan_idx);
		if (err) {
			vp_oper->vlan_idx = NO_INDX;
			mlx4_warn(&priv->dev,
				  "No vlan resources slave %d, port %d\n",
				  slave, port);
			return err;
		}
		mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_oper->state.default_vlan),
			 vp_oper->vlan_idx, slave, port);
	}
	vp_oper->state.vlan_proto = vp_admin->vlan_proto;
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;

	return 0;
}

static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	slave_state = &priv->mfunc.master.slave_state[slave];

	if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
	    (!slave_state->active))
		return 0;

	if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
	    vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos)
		return 0;

	if (!slave_state->vst_qinq_supported) {
		/* Warn and revert the request to set vst QinQ mode */
		vp_admin->vlan_proto = vp_oper->state.vlan_proto;
		vp_admin->default_vlan = vp_oper->state.default_vlan;
		vp_admin->default_qos = vp_oper->state.default_qos;

		mlx4_warn(&priv->dev,
			  "Slave %d does not support VST QinQ mode\n", slave);
		return 0;
	}

	err = mlx4_activate_vst_qinq(priv, slave, port);
	return err;
}

int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field, port;
	u32 size, proxy_qp, qkey;
	int err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c

#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
#define QUERY_FUNC_CAP_QP0_PROXY 0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
#define QUERY_FUNC_CAP_QP1_PROXY 0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_PHV_BIT 0x40
#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20

#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30)
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
			dev, slave, vhcr->in_modifier);
		struct mlx4_vport_oper_state *vp_oper;

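		/* mlx4_slave_convert_port() returns a negative value when the
		 * VF named a port outside its active-port range.
		 */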
		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		err = mlx4_handle_vst_qinq(priv, slave, port);
		if (err)
			return err;

		field = 0;
		if (dev->caps.phv_bit[port])
			field |= QUERY_FUNC_CAP_PHV_BIT;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		struct mlx4_slave_state *slave_state =
			&priv->mfunc.master.slave_state[slave];

		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
			       QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
			       QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);

		if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
			slave_state->vst_qinq_supported = true;

	} else
		err = -EINVAL;

	return err;
}

int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;
	u32 in_modifier;
	u32 slave_caps;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
	in_modifier = op_modifier ? gen_or_port : slave_caps;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;

		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
			func_cap->reserved_lkey = size;
		} else {
			func_cap->reserved_lkey = 0;
		}

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -EINVAL;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->spec_qps.qp0_qkey = qkey;
	} else {
		func_cap->spec_qps.qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->spec_qps.qp0_tunnel = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->spec_qps.qp0_proxy = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->spec_qps.qp1_tunnel = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->spec_qps.qp1_proxy = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

static void disable_unsupported_roce_caps(void *buf);

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE 0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
#define QUERY_DEV_CAP_RSS_OFFSET 0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
#define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
#define QUERY_DEV_CAP_WOL_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
#define QUERY_DEV_CAP_BF_OFFSET 0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
#define QUERY_DEV_CAP_USER_MAC_EN_OFFSET 0x5C
#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET 0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2

	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	if (mlx4_is_mfunc(dev))
		disable_unsupported_roce_caps(outbox);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
	dev_cap->wol_port[1] = !!(field & 0x20);
	dev_cap->wol_port[2] = !!(field & 0x40);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_USER_MAC_EN_OFFSET);
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 4))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2;
	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
	if (field & 0x40)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;

	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	if (field32 & (1 << 8))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
	if (field32 & (1 << 17))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1<<6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1<<3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
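	/* Like the base above, the high-rate steering QPN range is a
	 * 24-bit value, so it is masked with MGM_QPN_MASK as well.
	 */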
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
	dev_cap->rl_caps.num_rates = size;
	if (dev_cap->rl_caps.num_rates) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
		dev_cap->rl_caps.max_val = size & 0xfff;
		dev_cap->rl_caps.max_unit = size >> 14;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
		dev_cap->rl_caps.min_val = size & 0xfff;
		dev_cap->rl_caps.min_unit = size >> 14;
	}

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 18))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", 1151 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu, 1152 dev_cap->port_cap[1].max_port_width); 1153 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 1154 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 1155 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", 1156 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); 1157 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); 1158 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); 1159 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); 1160 mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n", 1161 dev_cap->dmfs_high_rate_qpn_base); 1162 mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", 1163 dev_cap->dmfs_high_rate_qpn_range); 1164 1165 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) { 1166 struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps; 1167 1168 mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n", 1169 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val, 1170 rl_caps->min_unit, rl_caps->min_val); 1171 } 1172 1173 dump_dev_cap_flags(dev, dev_cap->flags); 1174 dump_dev_cap_flags2(dev, dev_cap->flags2); 1175 } 1176 1177 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) 1178 { 1179 struct mlx4_cmd_mailbox *mailbox; 1180 u32 *outbox; 1181 u8 field; 1182 u32 field32; 1183 int err; 1184 1185 mailbox = mlx4_alloc_cmd_mailbox(dev); 1186 if (IS_ERR(mailbox)) 1187 return PTR_ERR(mailbox); 1188 outbox = mailbox->buf; 1189 1190 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 1191 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 1192 MLX4_CMD_TIME_CLASS_A, 1193 MLX4_CMD_NATIVE); 1194 1195 if (err) 1196 goto out; 1197 1198 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 1199 port_cap->max_vl = field >> 4; 1200 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 1201 port_cap->ib_mtu = field >> 4; 1202 port_cap->max_port_width = field & 0xf; 1203 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 1204 port_cap->max_gids = 1 << (field & 0xf); 1205 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); 1206 port_cap->max_pkeys = 1 << (field & 0xf); 1207 } else { 1208 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 1209 #define QUERY_PORT_MTU_OFFSET 0x01 1210 #define QUERY_PORT_ETH_MTU_OFFSET 0x02 1211 #define QUERY_PORT_WIDTH_OFFSET 0x06 1212 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 1213 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a 1214 #define QUERY_PORT_MAX_VL_OFFSET 0x0b 1215 #define QUERY_PORT_MAC_OFFSET 0x10 1216 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 1217 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c 1218 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20 1219 1220 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, 1221 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1222 if (err) 1223 goto out; 1224 1225 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); 1226 port_cap->link_state = (field & 0x80) >> 7; 1227 port_cap->supported_port_types = field & 3; 1228 port_cap->suggested_type = (field >> 3) & 1; 1229 port_cap->default_sense = (field >> 4) & 1; 1230 port_cap->dmfs_optimized_state = (field >> 5) & 1; 1231 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 1232 port_cap->ib_mtu = field & 0xf; 1233 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 1234 port_cap->max_port_width = field & 0xf; 1235 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 1236 port_cap->max_gids = 1 << (field >> 4); 1237 
		port_cap->max_pkeys = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl = field & 0xf;
		port_cap->max_tc_eth = field >> 4;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28)
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM (1 << 20)

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64 flags;
	int err = 0;
	u8 field;
	u16 field16;
	u32 bmme_flags, field32;
	int real_port;
	int slave_port;
	int first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	disable_unsupported_roce_caps(outbox->buf);
	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);

	/* Not exposing RSS IP fragments to guests */
	flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling and QoS support */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xd7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, disable port BEACON */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 and port remap*/
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	/* turn off QCN for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	field &= 0xfe;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);

	/* turn off QP max-rate limiting for guests */
	field16 = 0;
	MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);

	/* turn off QoS per VF support for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	field &= 0xef;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);

	/* turn off ignore FCS feature for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	field &= 0xfb;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);

	return 0;
}

static void disable_unsupported_roce_caps(void *buf)
{
	u32 flags;

	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags &= ~(1UL << 31);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	flags &= ~(1UL << 24);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	flags &= ~(MLX4_FLAG_ROCE_V1_V2);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
}

int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
#define MLX4_PORT_LINK_UP_MASK 0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
			int other_port = (port == 1) ? 2 : 1;
			struct mlx4_port_cap port_cap;

			err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
			if (err)
				goto out;
			port_type |= (port_cap.link_state << 7);
		}

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}
out:
	return err;
}

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u16 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
1533 */ 1534 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1535 if (lg < MLX4_ICM_PAGE_SHIFT) { 1536 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n", 1537 MLX4_ICM_PAGE_SIZE, 1538 (unsigned long long) mlx4_icm_addr(&iter), 1539 mlx4_icm_size(&iter)); 1540 err = -EINVAL; 1541 goto out; 1542 } 1543 1544 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { 1545 if (virt != -1) { 1546 pages[nent * 2] = cpu_to_be64(virt); 1547 virt += 1ULL << lg; 1548 } 1549 1550 pages[nent * 2 + 1] = 1551 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | 1552 (lg - MLX4_ICM_PAGE_SHIFT)); 1553 ts += 1 << (lg - 10); 1554 ++tc; 1555 1556 if (++nent == MLX4_MAILBOX_SIZE / 16) { 1557 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1558 MLX4_CMD_TIME_CLASS_B, 1559 MLX4_CMD_NATIVE); 1560 if (err) 1561 goto out; 1562 nent = 0; 1563 } 1564 } 1565 } 1566 1567 if (nent) 1568 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1569 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1570 if (err) 1571 goto out; 1572 1573 switch (op) { 1574 case MLX4_CMD_MAP_FA: 1575 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts); 1576 break; 1577 case MLX4_CMD_MAP_ICM_AUX: 1578 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts); 1579 break; 1580 case MLX4_CMD_MAP_ICM: 1581 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n", 1582 tc, ts, (unsigned long long) virt - (ts << 10)); 1583 break; 1584 } 1585 1586 out: 1587 mlx4_free_cmd_mailbox(dev, mailbox); 1588 return err; 1589 } 1590 1591 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) 1592 { 1593 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); 1594 } 1595 1596 int mlx4_UNMAP_FA(struct mlx4_dev *dev) 1597 { 1598 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, 1599 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1600 } 1601 1602 1603 int mlx4_RUN_FW(struct mlx4_dev *dev) 1604 { 1605 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, 1606 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1607 } 1608 1609 int mlx4_QUERY_FW(struct mlx4_dev *dev) 1610 { 1611 struct mlx4_fw *fw = &mlx4_priv(dev)->fw; 1612 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 1613 struct mlx4_cmd_mailbox *mailbox; 1614 u32 *outbox; 1615 int err = 0; 1616 u64 fw_ver; 1617 u16 cmd_if_rev; 1618 u8 lg; 1619 1620 #define QUERY_FW_OUT_SIZE 0x100 1621 #define QUERY_FW_VER_OFFSET 0x00 1622 #define QUERY_FW_PPF_ID 0x09 1623 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a 1624 #define QUERY_FW_MAX_CMD_OFFSET 0x0f 1625 #define QUERY_FW_ERR_START_OFFSET 0x30 1626 #define QUERY_FW_ERR_SIZE_OFFSET 0x38 1627 #define QUERY_FW_ERR_BAR_OFFSET 0x3c 1628 1629 #define QUERY_FW_SIZE_OFFSET 0x00 1630 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 1631 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 1632 1633 #define QUERY_FW_COMM_BASE_OFFSET 0x40 1634 #define QUERY_FW_COMM_BAR_OFFSET 0x48 1635 1636 #define QUERY_FW_CLOCK_OFFSET 0x50 1637 #define QUERY_FW_CLOCK_BAR 0x58 1638 1639 mailbox = mlx4_alloc_cmd_mailbox(dev); 1640 if (IS_ERR(mailbox)) 1641 return PTR_ERR(mailbox); 1642 outbox = mailbox->buf; 1643 1644 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1645 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1646 if (err) 1647 goto out; 1648 1649 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); 1650 /* 1651 * FW subminor version is at more significant bits than minor 1652 * version, so swap here. 
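	 *
	 * Illustrative example: a raw value of 0x0002_0156_0009
	 * (major 2, subminor 0x156, minor 9) becomes 0x0002_0009_0156,
	 * which the debug print below reports as "2.9.342".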
1653 */ 1654 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | 1655 ((fw_ver & 0xffff0000ull) >> 16) | 1656 ((fw_ver & 0x0000ffffull) << 16); 1657 1658 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 1659 dev->caps.function = lg; 1660 1661 if (mlx4_is_slave(dev)) 1662 goto out; 1663 1664 1665 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1666 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1667 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1668 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n", 1669 cmd_if_rev); 1670 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1671 (int) (dev->caps.fw_ver >> 32), 1672 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1673 (int) dev->caps.fw_ver & 0xffff); 1674 mlx4_err(dev, "This driver version supports only revisions %d to %d\n", 1675 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1676 err = -ENODEV; 1677 goto out; 1678 } 1679 1680 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) 1681 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; 1682 1683 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); 1684 cmd->max_cmds = 1 << lg; 1685 1686 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", 1687 (int) (dev->caps.fw_ver >> 32), 1688 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1689 (int) dev->caps.fw_ver & 0xffff, 1690 cmd_if_rev, cmd->max_cmds); 1691 1692 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); 1693 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 1694 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); 1695 fw->catas_bar = (fw->catas_bar >> 6) * 2; 1696 1697 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", 1698 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); 1699 1700 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); 1701 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); 1702 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); 1703 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; 1704 1705 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); 1706 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); 1707 fw->comm_bar = (fw->comm_bar >> 6) * 2; 1708 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", 1709 fw->comm_bar, fw->comm_base); 1710 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); 1711 1712 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); 1713 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); 1714 fw->clock_bar = (fw->clock_bar >> 6) * 2; 1715 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", 1716 fw->clock_bar, fw->clock_offset); 1717 1718 /* 1719 * Round up number of system pages needed in case 1720 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 
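	 *
	 * E.g. on a hypothetical kernel with 64 KB PAGE_SIZE and 4 KB ICM
	 * pages, 33 ICM pages round up to ALIGN(33, 16) >> 4 = 3 system
	 * pages.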
1721 */ 1722 fw->fw_pages = 1723 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 1724 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 1725 1726 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", 1727 (unsigned long long) fw->clr_int_base, fw->clr_int_bar); 1728 1729 out: 1730 mlx4_free_cmd_mailbox(dev, mailbox); 1731 return err; 1732 } 1733 1734 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 1735 struct mlx4_vhcr *vhcr, 1736 struct mlx4_cmd_mailbox *inbox, 1737 struct mlx4_cmd_mailbox *outbox, 1738 struct mlx4_cmd_info *cmd) 1739 { 1740 u8 *outbuf; 1741 int err; 1742 1743 outbuf = outbox->buf; 1744 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1745 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1746 if (err) 1747 return err; 1748 1749 /* for slaves, set pci PPF ID to invalid and zero out everything 1750 * else except FW version */ 1751 outbuf[0] = outbuf[1] = 0; 1752 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); 1753 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; 1754 1755 return 0; 1756 } 1757 1758 static void get_board_id(void *vsd, char *board_id) 1759 { 1760 int i; 1761 1762 #define VSD_OFFSET_SIG1 0x00 1763 #define VSD_OFFSET_SIG2 0xde 1764 #define VSD_OFFSET_MLX_BOARD_ID 0xd0 1765 #define VSD_OFFSET_TS_BOARD_ID 0x20 1766 1767 #define VSD_SIGNATURE_TOPSPIN 0x5ad 1768 1769 memset(board_id, 0, MLX4_BOARD_ID_LEN); 1770 1771 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && 1772 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { 1773 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); 1774 } else { 1775 /* 1776 * The board ID is a string but the firmware byte 1777 * swaps each 4-byte word before passing it back to 1778 * us. Therefore we need to swab it before printing. 1779 */ 1780 u32 *bid_u32 = (u32 *)board_id; 1781 1782 for (i = 0; i < 4; ++i) { 1783 u32 *addr; 1784 u32 val; 1785 1786 addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); 1787 val = get_unaligned(addr); 1788 val = swab32(val); 1789 put_unaligned(val, &bid_u32[i]); 1790 } 1791 } 1792 } 1793 1794 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) 1795 { 1796 struct mlx4_cmd_mailbox *mailbox; 1797 u32 *outbox; 1798 int err; 1799 1800 #define QUERY_ADAPTER_OUT_SIZE 0x100 1801 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1802 #define QUERY_ADAPTER_VSD_OFFSET 0x20 1803 1804 mailbox = mlx4_alloc_cmd_mailbox(dev); 1805 if (IS_ERR(mailbox)) 1806 return PTR_ERR(mailbox); 1807 outbox = mailbox->buf; 1808 1809 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, 1810 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1811 if (err) 1812 goto out; 1813 1814 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1815 1816 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, 1817 adapter->board_id); 1818 1819 out: 1820 mlx4_free_cmd_mailbox(dev, mailbox); 1821 return err; 1822 } 1823 1824 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) 1825 { 1826 struct mlx4_cmd_mailbox *mailbox; 1827 __be32 *inbox; 1828 int err; 1829 static const u8 a0_dmfs_hw_steering[] = { 1830 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, 1831 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, 1832 [MLX4_STEERING_DMFS_A0_STATIC] = 2, 1833 [MLX4_STEERING_DMFS_A0_DISABLE] = 3 1834 }; 1835 1836 #define INIT_HCA_IN_SIZE 0x200 1837 #define INIT_HCA_VERSION_OFFSET 0x000 1838 #define INIT_HCA_VERSION 2 1839 #define INIT_HCA_VXLAN_OFFSET 0x0c 1840 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1841 #define INIT_HCA_FLAGS_OFFSET 0x014 1842 #define 
INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 1843 #define INIT_HCA_QPC_OFFSET 0x020 1844 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1845 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1846 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) 1847 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) 1848 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) 1849 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) 1850 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) 1851 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) 1852 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) 1853 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) 1854 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) 1855 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) 1856 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) 1857 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) 1858 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) 1859 #define INIT_HCA_MCAST_OFFSET 0x0c0 1860 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 1861 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 1862 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 1863 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 1864 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 1865 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 1866 #define INIT_HCA_DRIVER_VERSION_OFFSET 0x140 1867 #define INIT_HCA_DRIVER_VERSION_SZ 0x40 1868 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 1869 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) 1870 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) 1871 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) 1872 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) 1873 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) 1874 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) 1875 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) 1876 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) 1877 #define INIT_HCA_TPT_OFFSET 0x0f0 1878 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 1879 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) 1880 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) 1881 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) 1882 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) 1883 #define INIT_HCA_UAR_OFFSET 0x120 1884 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) 1885 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) 1886 1887 mailbox = mlx4_alloc_cmd_mailbox(dev); 1888 if (IS_ERR(mailbox)) 1889 return PTR_ERR(mailbox); 1890 inbox = mailbox->buf; 1891 1892 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 1893 1894 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = 1895 ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); 1896 1897 #if defined(__LITTLE_ENDIAN) 1898 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 1899 #elif defined(__BIG_ENDIAN) 1900 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); 1901 #else 1902 #error Host endianness not defined 1903 #endif 1904 /* Check port for UD address vector: */ 1905 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= 
cpu_to_be32(1); 1906 1907 /* Enable IPoIB checksumming if we can: */ 1908 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 1909 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); 1910 1911 /* Enable QoS support if module parameter set */ 1912 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) 1913 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); 1914 1915 /* enable counters */ 1916 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1917 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); 1918 1919 /* Enable RSS spread to fragmented IP packets when supported */ 1920 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) 1921 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); 1922 1923 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 1924 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { 1925 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); 1926 dev->caps.eqe_size = 64; 1927 dev->caps.eqe_factor = 1; 1928 } else { 1929 dev->caps.eqe_size = 32; 1930 dev->caps.eqe_factor = 0; 1931 } 1932 1933 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { 1934 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); 1935 dev->caps.cqe_size = 64; 1936 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1937 } else { 1938 dev->caps.cqe_size = 32; 1939 } 1940 1941 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ 1942 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && 1943 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { 1944 dev->caps.eqe_size = cache_line_size(); 1945 dev->caps.cqe_size = cache_line_size(); 1946 dev->caps.eqe_factor = 0; 1947 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | 1948 (ilog2(dev->caps.eqe_size) - 5)), 1949 INIT_HCA_EQE_CQE_STRIDE_OFFSET); 1950 1951 /* User still need to know to support CQE > 32B */ 1952 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1953 } 1954 1955 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 1956 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); 1957 1958 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) { 1959 u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4); 1960 1961 strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1); 1962 mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst); 1963 } 1964 1965 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1966 1967 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1968 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); 1969 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); 1970 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); 1971 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); 1972 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); 1973 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); 1974 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); 1975 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); 1976 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); 1977 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); 1978 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); 1979 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1980 1981 /* steering attributes */ 1982 if (dev->caps.steering_mode == 1983 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1984 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= 1985 cpu_to_be32(1 << 1986 
INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); 1987 1988 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); 1989 MLX4_PUT(inbox, param->log_mc_entry_sz, 1990 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 1991 MLX4_PUT(inbox, param->log_mc_table_sz, 1992 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 1993 /* Enable Ethernet flow steering 1994 * with udp unicast and tcp unicast 1995 */ 1996 if (dev->caps.dmfs_high_steer_mode != 1997 MLX4_STEERING_DMFS_A0_STATIC) 1998 MLX4_PUT(inbox, 1999 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 2000 INIT_HCA_FS_ETH_BITS_OFFSET); 2001 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 2002 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); 2003 /* Enable IPoIB flow steering 2004 * with udp unicast and tcp unicast 2005 */ 2006 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 2007 INIT_HCA_FS_IB_BITS_OFFSET); 2008 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 2009 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); 2010 2011 if (dev->caps.dmfs_high_steer_mode != 2012 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2013 MLX4_PUT(inbox, 2014 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] 2015 << 6)), 2016 INIT_HCA_FS_A0_OFFSET); 2017 } else { 2018 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 2019 MLX4_PUT(inbox, param->log_mc_entry_sz, 2020 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2021 MLX4_PUT(inbox, param->log_mc_hash_sz, 2022 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2023 MLX4_PUT(inbox, param->log_mc_table_sz, 2024 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2025 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) 2026 MLX4_PUT(inbox, (u8) (1 << 3), 2027 INIT_HCA_UC_STEERING_OFFSET); 2028 } 2029 2030 /* TPT attributes */ 2031 2032 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); 2033 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); 2034 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); 2035 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); 2036 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); 2037 2038 /* UAR attributes */ 2039 2040 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2041 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); 2042 2043 /* set parser VXLAN attributes */ 2044 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { 2045 u8 parser_params = 0; 2046 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 2047 } 2048 2049 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 2050 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2051 2052 if (err) 2053 mlx4_err(dev, "INIT_HCA returns %d\n", err); 2054 2055 mlx4_free_cmd_mailbox(dev, mailbox); 2056 return err; 2057 } 2058 2059 int mlx4_QUERY_HCA(struct mlx4_dev *dev, 2060 struct mlx4_init_hca_param *param) 2061 { 2062 struct mlx4_cmd_mailbox *mailbox; 2063 __be32 *outbox; 2064 u32 dword_field; 2065 int err; 2066 u8 byte_field; 2067 static const u8 a0_dmfs_query_hw_steering[] = { 2068 [0] = MLX4_STEERING_DMFS_A0_DEFAULT, 2069 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, 2070 [2] = MLX4_STEERING_DMFS_A0_STATIC, 2071 [3] = MLX4_STEERING_DMFS_A0_DISABLE 2072 }; 2073 2074 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 2075 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c 2076 2077 mailbox = mlx4_alloc_cmd_mailbox(dev); 2078 if (IS_ERR(mailbox)) 2079 return PTR_ERR(mailbox); 2080 outbox = mailbox->buf; 2081 2082 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2083 MLX4_CMD_QUERY_HCA, 2084 MLX4_CMD_TIME_CLASS_B, 2085 !mlx4_is_slave(dev)); 2086 if (err) 2087 goto out; 2088 2089 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); 2090 MLX4_GET(param->hca_core_clock, 
outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2091 2092 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 2093 2094 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); 2095 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); 2096 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); 2097 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); 2098 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); 2099 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); 2100 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); 2101 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 2102 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 2103 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 2104 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 2105 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 2106 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 2107 2108 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 2109 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 2110 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2111 } else { 2112 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); 2113 if (byte_field & 0x8) 2114 param->steering_mode = MLX4_STEERING_MODE_B0; 2115 else 2116 param->steering_mode = MLX4_STEERING_MODE_A0; 2117 } 2118 2119 if (dword_field & (1 << 13)) 2120 param->rss_ip_frags = 1; 2121 2122 /* steering attributes */ 2123 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2124 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 2125 MLX4_GET(param->log_mc_entry_sz, outbox, 2126 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 2127 MLX4_GET(param->log_mc_table_sz, outbox, 2128 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 2129 MLX4_GET(byte_field, outbox, 2130 INIT_HCA_FS_A0_OFFSET); 2131 param->dmfs_high_steer_mode = 2132 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; 2133 } else { 2134 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 2135 MLX4_GET(param->log_mc_entry_sz, outbox, 2136 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2137 MLX4_GET(param->log_mc_hash_sz, outbox, 2138 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2139 MLX4_GET(param->log_mc_table_sz, outbox, 2140 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2141 } 2142 2143 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 2144 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); 2145 if (byte_field & 0x20) /* 64-bytes eqe enabled */ 2146 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; 2147 if (byte_field & 0x40) /* 64-bytes cqe enabled */ 2148 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; 2149 2150 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ 2151 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); 2152 if (byte_field) { 2153 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; 2154 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; 2155 param->cqe_size = 1 << ((byte_field & 2156 MLX4_CQE_SIZE_MASK_STRIDE) + 5); 2157 param->eqe_size = 1 << (((byte_field & 2158 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); 2159 } 2160 2161 /* TPT attributes */ 2162 2163 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); 2164 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); 2165 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); 2166 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); 2167 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); 2168 2169 /* UAR attributes */ 2170 
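	/* As elsewhere in QUERY_HCA, the INIT_HCA_* offsets are reused because
	 * the output mailbox mirrors the INIT_HCA layout: uar_page_sz is the
	 * UAR page size as a log2 multiple of 4 KB (typically PAGE_SHIFT - 12)
	 * and log_uar_sz the log2 of the number of UARs.
	 */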
2171 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2172 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); 2173 2174 /* phv_check enable */ 2175 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); 2176 if (byte_field & 0x2) 2177 param->phv_check_en = 1; 2178 out: 2179 mlx4_free_cmd_mailbox(dev, mailbox); 2180 2181 return err; 2182 } 2183 2184 static int mlx4_hca_core_clock_update(struct mlx4_dev *dev) 2185 { 2186 struct mlx4_cmd_mailbox *mailbox; 2187 __be32 *outbox; 2188 int err; 2189 2190 mailbox = mlx4_alloc_cmd_mailbox(dev); 2191 if (IS_ERR(mailbox)) { 2192 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); 2193 return PTR_ERR(mailbox); 2194 } 2195 outbox = mailbox->buf; 2196 2197 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2198 MLX4_CMD_QUERY_HCA, 2199 MLX4_CMD_TIME_CLASS_B, 2200 !mlx4_is_slave(dev)); 2201 if (err) { 2202 mlx4_warn(dev, "hca_core_clock update failed\n"); 2203 goto out; 2204 } 2205 2206 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2207 2208 out: 2209 mlx4_free_cmd_mailbox(dev, mailbox); 2210 2211 return err; 2212 } 2213 2214 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 2215 * and real QP0 are active, so that the paravirtualized QP0 is ready 2216 * to operate */ 2217 static int check_qp0_state(struct mlx4_dev *dev, int function, int port) 2218 { 2219 struct mlx4_priv *priv = mlx4_priv(dev); 2220 /* irrelevant if not infiniband */ 2221 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && 2222 priv->mfunc.master.qp0_state[port].qp0_active) 2223 return 1; 2224 return 0; 2225 } 2226 2227 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, 2228 struct mlx4_vhcr *vhcr, 2229 struct mlx4_cmd_mailbox *inbox, 2230 struct mlx4_cmd_mailbox *outbox, 2231 struct mlx4_cmd_info *cmd) 2232 { 2233 struct mlx4_priv *priv = mlx4_priv(dev); 2234 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2235 int err; 2236 2237 if (port < 0) 2238 return -EINVAL; 2239 2240 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 2241 return 0; 2242 2243 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2244 /* Enable port only if it was previously disabled */ 2245 if (!priv->mfunc.master.init_port_ref[port]) { 2246 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2247 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2248 if (err) 2249 return err; 2250 } 2251 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2252 } else { 2253 if (slave == mlx4_master_func_num(dev)) { 2254 if (check_qp0_state(dev, slave, port) && 2255 !priv->mfunc.master.qp0_state[port].port_active) { 2256 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2257 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2258 if (err) 2259 return err; 2260 priv->mfunc.master.qp0_state[port].port_active = 1; 2261 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2262 } 2263 } else 2264 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2265 } 2266 ++priv->mfunc.master.init_port_ref[port]; 2267 return 0; 2268 } 2269 2270 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) 2271 { 2272 struct mlx4_cmd_mailbox *mailbox; 2273 u32 *inbox; 2274 int err; 2275 u32 flags; 2276 u16 field; 2277 2278 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 2279 #define INIT_PORT_IN_SIZE 256 2280 #define INIT_PORT_FLAGS_OFFSET 0x00 2281 #define INIT_PORT_FLAG_SIG (1 << 18) 2282 #define INIT_PORT_FLAG_NG (1 << 17) 2283 #define INIT_PORT_FLAG_G0 (1 << 16) 2284 #define 
INIT_PORT_VL_SHIFT 4 2285 #define INIT_PORT_PORT_WIDTH_SHIFT 8 2286 #define INIT_PORT_MTU_OFFSET 0x04 2287 #define INIT_PORT_MAX_GID_OFFSET 0x06 2288 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a 2289 #define INIT_PORT_GUID0_OFFSET 0x10 2290 #define INIT_PORT_NODE_GUID_OFFSET 0x18 2291 #define INIT_PORT_SI_GUID_OFFSET 0x20 2292 2293 mailbox = mlx4_alloc_cmd_mailbox(dev); 2294 if (IS_ERR(mailbox)) 2295 return PTR_ERR(mailbox); 2296 inbox = mailbox->buf; 2297 2298 flags = 0; 2299 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; 2300 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 2301 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 2302 2303 field = 128 << dev->caps.ib_mtu_cap[port]; 2304 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 2305 field = dev->caps.gid_table_len[port]; 2306 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 2307 field = dev->caps.pkey_table_len[port]; 2308 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); 2309 2310 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, 2311 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2312 2313 mlx4_free_cmd_mailbox(dev, mailbox); 2314 } else 2315 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2316 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2317 2318 if (!err) 2319 mlx4_hca_core_clock_update(dev); 2320 2321 return err; 2322 } 2323 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2324 2325 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, 2326 struct mlx4_vhcr *vhcr, 2327 struct mlx4_cmd_mailbox *inbox, 2328 struct mlx4_cmd_mailbox *outbox, 2329 struct mlx4_cmd_info *cmd) 2330 { 2331 struct mlx4_priv *priv = mlx4_priv(dev); 2332 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2333 int err; 2334 2335 if (port < 0) 2336 return -EINVAL; 2337 2338 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 2339 (1 << port))) 2340 return 0; 2341 2342 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2343 if (priv->mfunc.master.init_port_ref[port] == 1) { 2344 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2345 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2346 if (err) 2347 return err; 2348 } 2349 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2350 } else { 2351 /* infiniband port */ 2352 if (slave == mlx4_master_func_num(dev)) { 2353 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2354 priv->mfunc.master.qp0_state[port].port_active) { 2355 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2356 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2357 if (err) 2358 return err; 2359 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2360 priv->mfunc.master.qp0_state[port].port_active = 0; 2361 } 2362 } else 2363 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2364 } 2365 --priv->mfunc.master.init_port_ref[port]; 2366 return 0; 2367 } 2368 2369 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2370 { 2371 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2372 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2373 } 2374 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2375 2376 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2377 { 2378 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 2379 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2380 } 2381 2382 struct mlx4_config_dev { 2383 __be32 update_flags; 2384 __be32 rsvd1[3]; 2385 __be16 vxlan_udp_dport; 2386 __be16 rsvd2; 2387 __be16 roce_v2_entropy; 2388 __be16 roce_v2_udp_dport; 2389 __be32 roce_flags; 2390 __be32 rsvd4[25]; 2391 __be16 rsvd5; 2392 u8 rsvd6; 2393 u8 
rx_checksum_val; 2394 }; 2395 2396 #define MLX4_VXLAN_UDP_DPORT (1 << 0) 2397 #define MLX4_ROCE_V2_UDP_DPORT BIT(3) 2398 #define MLX4_DISABLE_RX_PORT BIT(18) 2399 2400 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2401 { 2402 int err; 2403 struct mlx4_cmd_mailbox *mailbox; 2404 2405 mailbox = mlx4_alloc_cmd_mailbox(dev); 2406 if (IS_ERR(mailbox)) 2407 return PTR_ERR(mailbox); 2408 2409 memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); 2410 2411 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, 2412 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2413 2414 mlx4_free_cmd_mailbox(dev, mailbox); 2415 return err; 2416 } 2417 2418 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2419 { 2420 int err; 2421 struct mlx4_cmd_mailbox *mailbox; 2422 2423 mailbox = mlx4_alloc_cmd_mailbox(dev); 2424 if (IS_ERR(mailbox)) 2425 return PTR_ERR(mailbox); 2426 2427 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, 2428 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2429 2430 if (!err) 2431 memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); 2432 2433 mlx4_free_cmd_mailbox(dev, mailbox); 2434 return err; 2435 } 2436 2437 /* Conversion between the HW values and the actual functionality. 2438 * The value represented by the array index, 2439 * and the functionality determined by the flags. 2440 */ 2441 static const u8 config_dev_csum_flags[] = { 2442 [0] = 0, 2443 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, 2444 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | 2445 MLX4_RX_CSUM_MODE_L4, 2446 [3] = MLX4_RX_CSUM_MODE_L4 | 2447 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | 2448 MLX4_RX_CSUM_MODE_MULTI_VLAN 2449 }; 2450 2451 int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 2452 struct mlx4_config_dev_params *params) 2453 { 2454 struct mlx4_config_dev config_dev = {0}; 2455 int err; 2456 u8 csum_mask; 2457 2458 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 2459 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 2460 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 2461 2462 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) 2463 return -EOPNOTSUPP; 2464 2465 err = mlx4_CONFIG_DEV_get(dev, &config_dev); 2466 if (err) 2467 return err; 2468 2469 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & 2470 CONFIG_DEV_RX_CSUM_MODE_MASK; 2471 2472 if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags)) 2473 return -EINVAL; 2474 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; 2475 2476 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & 2477 CONFIG_DEV_RX_CSUM_MODE_MASK; 2478 2479 if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags)) 2480 return -EINVAL; 2481 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; 2482 2483 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); 2484 2485 return 0; 2486 } 2487 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); 2488 2489 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) 2490 { 2491 struct mlx4_config_dev config_dev; 2492 2493 memset(&config_dev, 0, sizeof(config_dev)); 2494 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); 2495 config_dev.vxlan_udp_dport = udp_port; 2496 2497 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2498 } 2499 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); 2500 2501 #define CONFIG_DISABLE_RX_PORT BIT(15) 2502 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) 2503 { 2504 struct mlx4_config_dev config_dev; 2505 2506 
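	/* Program CONFIG_DEV to disable (dis = true) or restore the RX port
	 * check on RoCE traffic; callers such as the port remap/bonding flow
	 * use this so packets are accepted on either physical port.
	 */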
memset(&config_dev, 0, sizeof(config_dev)); 2507 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); 2508 if (dis) 2509 config_dev.roce_flags = 2510 cpu_to_be32(CONFIG_DISABLE_RX_PORT); 2511 2512 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2513 } 2514 2515 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port) 2516 { 2517 struct mlx4_config_dev config_dev; 2518 2519 memset(&config_dev, 0, sizeof(config_dev)); 2520 config_dev.update_flags = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT); 2521 config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port); 2522 2523 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2524 } 2525 EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port); 2526 2527 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) 2528 { 2529 struct mlx4_cmd_mailbox *mailbox; 2530 struct { 2531 __be32 v_port1; 2532 __be32 v_port2; 2533 } *v2p; 2534 int err; 2535 2536 mailbox = mlx4_alloc_cmd_mailbox(dev); 2537 if (IS_ERR(mailbox)) 2538 return -ENOMEM; 2539 2540 v2p = mailbox->buf; 2541 v2p->v_port1 = cpu_to_be32(port1); 2542 v2p->v_port2 = cpu_to_be32(port2); 2543 2544 err = mlx4_cmd(dev, mailbox->dma, 0, 2545 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, 2546 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2547 2548 mlx4_free_cmd_mailbox(dev, mailbox); 2549 return err; 2550 } 2551 2552 2553 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 2554 { 2555 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, 2556 MLX4_CMD_SET_ICM_SIZE, 2557 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2558 if (ret) 2559 return ret; 2560 2561 /* 2562 * Round up number of system pages needed in case 2563 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 2564 */ 2565 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 2566 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 2567 2568 return 0; 2569 } 2570 2571 int mlx4_NOP(struct mlx4_dev *dev) 2572 { 2573 /* Input modifier of 0x1f means "finish as soon as possible." 
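	 * NOP is a harmless round trip through the command interface; the
	 * driver uses it, for example, to verify that command-interface
	 * interrupts are actually being delivered.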
*/ 2574 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, 2575 MLX4_CMD_NATIVE); 2576 } 2577 2578 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, 2579 const u32 offset[], 2580 u32 value[], size_t array_len, u8 port) 2581 { 2582 struct mlx4_cmd_mailbox *mailbox; 2583 u32 *outbox; 2584 size_t i; 2585 int ret; 2586 2587 mailbox = mlx4_alloc_cmd_mailbox(dev); 2588 if (IS_ERR(mailbox)) 2589 return PTR_ERR(mailbox); 2590 2591 outbox = mailbox->buf; 2592 2593 ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier, 2594 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, 2595 MLX4_CMD_NATIVE); 2596 if (ret) 2597 goto out; 2598 2599 for (i = 0; i < array_len; i++) { 2600 if (offset[i] > MLX4_MAILBOX_SIZE) { 2601 ret = -EINVAL; 2602 goto out; 2603 } 2604 2605 MLX4_GET(value[i], outbox, offset[i]); 2606 } 2607 2608 out: 2609 mlx4_free_cmd_mailbox(dev, mailbox); 2610 return ret; 2611 } 2612 EXPORT_SYMBOL(mlx4_query_diag_counters); 2613 2614 int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2615 { 2616 u8 port; 2617 u32 *outbox; 2618 struct mlx4_cmd_mailbox *mailbox; 2619 u32 in_mod; 2620 u32 guid_hi, guid_lo; 2621 int err, ret = 0; 2622 #define MOD_STAT_CFG_PORT_OFFSET 8 2623 #define MOD_STAT_CFG_GUID_H 0X14 2624 #define MOD_STAT_CFG_GUID_L 0X1c 2625 2626 mailbox = mlx4_alloc_cmd_mailbox(dev); 2627 if (IS_ERR(mailbox)) 2628 return PTR_ERR(mailbox); 2629 outbox = mailbox->buf; 2630 2631 for (port = 1; port <= dev->caps.num_ports; port++) { 2632 in_mod = port << MOD_STAT_CFG_PORT_OFFSET; 2633 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, 2634 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2635 MLX4_CMD_NATIVE); 2636 if (err) { 2637 mlx4_err(dev, "Fail to get port %d uplink guid\n", 2638 port); 2639 ret = err; 2640 } else { 2641 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); 2642 MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L); 2643 dev->caps.phys_port_id[port] = (u64)guid_lo | 2644 (u64)guid_hi << 32; 2645 } 2646 } 2647 mlx4_free_cmd_mailbox(dev, mailbox); 2648 return ret; 2649 } 2650 2651 #define MLX4_WOL_SETUP_MODE (5 << 28) 2652 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) 2653 { 2654 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2655 2656 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, 2657 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2658 MLX4_CMD_NATIVE); 2659 } 2660 EXPORT_SYMBOL_GPL(mlx4_wol_read); 2661 2662 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) 2663 { 2664 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2665 2666 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, 2667 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2668 } 2669 EXPORT_SYMBOL_GPL(mlx4_wol_write); 2670 2671 enum { 2672 ADD_TO_MCG = 0x26, 2673 }; 2674 2675 2676 void mlx4_opreq_action(struct work_struct *work) 2677 { 2678 struct mlx4_priv *priv = container_of(work, struct mlx4_priv, 2679 opreq_task); 2680 struct mlx4_dev *dev = &priv->dev; 2681 int num_tasks = atomic_read(&priv->opreq_count); 2682 struct mlx4_cmd_mailbox *mailbox; 2683 struct mlx4_mgm *mgm; 2684 u32 *outbox; 2685 u32 modifier; 2686 u16 token; 2687 u16 type; 2688 int err; 2689 u32 num_qps; 2690 struct mlx4_qp qp; 2691 int i; 2692 u8 rem_mcg; 2693 u8 prot; 2694 2695 #define GET_OP_REQ_MODIFIER_OFFSET 0x08 2696 #define GET_OP_REQ_TOKEN_OFFSET 0x14 2697 #define GET_OP_REQ_TYPE_OFFSET 0x1a 2698 #define GET_OP_REQ_DATA_OFFSET 0x20 2699 2700 mailbox = mlx4_alloc_cmd_mailbox(dev); 2701 if (IS_ERR(mailbox)) { 2702 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); 2703 return; 
2704 } 2705 outbox = mailbox->buf; 2706 2707 while (num_tasks) { 2708 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2709 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2710 MLX4_CMD_NATIVE); 2711 if (err) { 2712 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2713 err); 2714 return; 2715 } 2716 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2717 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); 2718 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); 2719 type &= 0xfff; 2720 2721 switch (type) { 2722 case ADD_TO_MCG: 2723 if (dev->caps.steering_mode == 2724 MLX4_STEERING_MODE_DEVICE_MANAGED) { 2725 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); 2726 err = EPERM; 2727 break; 2728 } 2729 mgm = (struct mlx4_mgm *)((u8 *)(outbox) + 2730 GET_OP_REQ_DATA_OFFSET); 2731 num_qps = be32_to_cpu(mgm->members_count) & 2732 MGM_QPN_MASK; 2733 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; 2734 prot = ((u8 *)(&mgm->members_count))[0] >> 6; 2735 2736 for (i = 0; i < num_qps; i++) { 2737 qp.qpn = be32_to_cpu(mgm->qp[i]); 2738 if (rem_mcg) 2739 err = mlx4_multicast_detach(dev, &qp, 2740 mgm->gid, 2741 prot, 0); 2742 else 2743 err = mlx4_multicast_attach(dev, &qp, 2744 mgm->gid, 2745 mgm->gid[5] 2746 , 0, prot, 2747 NULL); 2748 if (err) 2749 break; 2750 } 2751 break; 2752 default: 2753 mlx4_warn(dev, "Bad type for required operation\n"); 2754 err = EINVAL; 2755 break; 2756 } 2757 err = mlx4_cmd(dev, 0, ((u32) err | 2758 (__force u32)cpu_to_be32(token) << 16), 2759 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2760 MLX4_CMD_NATIVE); 2761 if (err) { 2762 mlx4_err(dev, "Failed to acknowledge required request: %d\n", 2763 err); 2764 goto out; 2765 } 2766 memset(outbox, 0, 0xffc); 2767 num_tasks = atomic_dec_return(&priv->opreq_count); 2768 } 2769 2770 out: 2771 mlx4_free_cmd_mailbox(dev, mailbox); 2772 } 2773 2774 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, 2775 struct mlx4_cmd_mailbox *mailbox) 2776 { 2777 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 2778 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 2779 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 2780 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 2781 2782 u32 set_attr_mask, getresp_attr_mask; 2783 u32 trap_attr_mask, traprepress_attr_mask; 2784 2785 MLX4_GET(set_attr_mask, mailbox->buf, 2786 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); 2787 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", 2788 set_attr_mask); 2789 2790 MLX4_GET(getresp_attr_mask, mailbox->buf, 2791 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); 2792 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", 2793 getresp_attr_mask); 2794 2795 MLX4_GET(trap_attr_mask, mailbox->buf, 2796 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); 2797 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", 2798 trap_attr_mask); 2799 2800 MLX4_GET(traprepress_attr_mask, mailbox->buf, 2801 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); 2802 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", 2803 traprepress_attr_mask); 2804 2805 if (set_attr_mask && getresp_attr_mask && trap_attr_mask && 2806 traprepress_attr_mask) 2807 return 1; 2808 2809 return 0; 2810 } 2811 2812 int mlx4_config_mad_demux(struct mlx4_dev *dev) 2813 { 2814 struct mlx4_cmd_mailbox *mailbox; 2815 int err; 2816 2817 /* Check if mad_demux is supported */ 2818 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) 2819 return 0; 2820 2821 mailbox = mlx4_alloc_cmd_mailbox(dev); 2822 if (IS_ERR(mailbox)) { 2823 mlx4_warn(dev, "Failed to 
allocate mailbox for cmd MAD_DEMUX"); 2824 return -ENOMEM; 2825 } 2826 2827 /* Query mad_demux to find out which MADs are handled by internal sma */ 2828 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, 2829 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, 2830 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2831 if (err) { 2832 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", 2833 err); 2834 goto out; 2835 } 2836 2837 if (mlx4_check_smp_firewall_active(dev, mailbox)) 2838 dev->flags |= MLX4_FLAG_SECURE_HOST; 2839 2840 /* Config mad_demux to handle all MADs returned by the query above */ 2841 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, 2842 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, 2843 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2844 if (err) { 2845 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); 2846 goto out; 2847 } 2848 2849 if (dev->flags & MLX4_FLAG_SECURE_HOST) 2850 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n"); 2851 out: 2852 mlx4_free_cmd_mailbox(dev, mailbox); 2853 return err; 2854 } 2855 2856 /* Access Reg commands */ 2857 enum mlx4_access_reg_masks { 2858 MLX4_ACCESS_REG_STATUS_MASK = 0x7f, 2859 MLX4_ACCESS_REG_METHOD_MASK = 0x7f, 2860 MLX4_ACCESS_REG_LEN_MASK = 0x7ff 2861 }; 2862 2863 struct mlx4_access_reg { 2864 __be16 constant1; 2865 u8 status; 2866 u8 resrvd1; 2867 __be16 reg_id; 2868 u8 method; 2869 u8 constant2; 2870 __be32 resrvd2[2]; 2871 __be16 len_const; 2872 __be16 resrvd3; 2873 #define MLX4_ACCESS_REG_HEADER_SIZE (20) 2874 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; 2875 } __attribute__((__packed__)); 2876 2877 /** 2878 * mlx4_ACCESS_REG - Generic access reg command. 2879 * @dev: mlx4_dev. 2880 * @reg_id: register ID to access. 2881 * @method: Access method Read/Write. 2882 * @reg_len: register length to Read/Write in bytes. 2883 * @reg_data: reg_data pointer to Read/Write From/To. 2884 * 2885 * Access ConnectX registers FW command. 2886 * Returns 0 on success and copies outbox mlx4_access_reg data 2887 * field into reg_data or a negative error code. 
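 *
 * Note: a non-zero status reported by the firmware in the outbox header is
 * currently propagated back as a positive status value rather than a
 * negative errno.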
2888 */ 2889 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, 2890 enum mlx4_access_reg_method method, 2891 u16 reg_len, void *reg_data) 2892 { 2893 struct mlx4_cmd_mailbox *inbox, *outbox; 2894 struct mlx4_access_reg *inbuf, *outbuf; 2895 int err; 2896 2897 inbox = mlx4_alloc_cmd_mailbox(dev); 2898 if (IS_ERR(inbox)) 2899 return PTR_ERR(inbox); 2900 2901 outbox = mlx4_alloc_cmd_mailbox(dev); 2902 if (IS_ERR(outbox)) { 2903 mlx4_free_cmd_mailbox(dev, inbox); 2904 return PTR_ERR(outbox); 2905 } 2906 2907 inbuf = inbox->buf; 2908 outbuf = outbox->buf; 2909 2910 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); 2911 inbuf->constant2 = 0x1; 2912 inbuf->reg_id = cpu_to_be16(reg_id); 2913 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; 2914 2915 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); 2916 inbuf->len_const = 2917 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | 2918 ((0x3) << 12)); 2919 2920 memcpy(inbuf->reg_data, reg_data, reg_len); 2921 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, 2922 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2923 MLX4_CMD_WRAPPED); 2924 if (err) 2925 goto out; 2926 2927 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { 2928 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; 2929 mlx4_err(dev, 2930 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", 2931 reg_id, err); 2932 goto out; 2933 } 2934 2935 memcpy(reg_data, outbuf->reg_data, reg_len); 2936 out: 2937 mlx4_free_cmd_mailbox(dev, inbox); 2938 mlx4_free_cmd_mailbox(dev, outbox); 2939 return err; 2940 } 2941 2942 /* ConnectX registers IDs */ 2943 enum mlx4_reg_id { 2944 MLX4_REG_ID_PTYS = 0x5004, 2945 }; 2946 2947 /** 2948 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) 2949 * register 2950 * @dev: mlx4_dev. 2951 * @method: Access method Read/Write. 2952 * @ptys_reg: PTYS register data pointer. 2953 * 2954 * Access ConnectX PTYS register, to Read/Write Port Type/Speed 2955 * configuration 2956 * Returns 0 on success or a negative error code. 
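 *
 * Minimal usage sketch of a read-modify-write of the admin speed mask
 * (caller context, "port" and "proto_admin" are illustrative only):
 *
 *	struct mlx4_ptys_reg ptys_reg = { .local_port = port };
 *	int err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
 *	if (!err) {
 *		ptys_reg.eth_proto_admin = cpu_to_be32(proto_admin);
 *		err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_WRITE,
 *					   &ptys_reg);
 *	}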
2957 */ 2958 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, 2959 enum mlx4_access_reg_method method, 2960 struct mlx4_ptys_reg *ptys_reg) 2961 { 2962 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, 2963 method, sizeof(*ptys_reg), ptys_reg); 2964 } 2965 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); 2966 2967 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, 2968 struct mlx4_vhcr *vhcr, 2969 struct mlx4_cmd_mailbox *inbox, 2970 struct mlx4_cmd_mailbox *outbox, 2971 struct mlx4_cmd_info *cmd) 2972 { 2973 struct mlx4_access_reg *inbuf = inbox->buf; 2974 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; 2975 u16 reg_id = be16_to_cpu(inbuf->reg_id); 2976 2977 if (slave != mlx4_master_func_num(dev) && 2978 method == MLX4_ACCESS_REG_WRITE) 2979 return -EPERM; 2980 2981 if (reg_id == MLX4_REG_ID_PTYS) { 2982 struct mlx4_ptys_reg *ptys_reg = 2983 (struct mlx4_ptys_reg *)inbuf->reg_data; 2984 2985 ptys_reg->local_port = 2986 mlx4_slave_convert_port(dev, slave, 2987 ptys_reg->local_port); 2988 } 2989 2990 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, 2991 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2992 MLX4_CMD_NATIVE); 2993 } 2994 2995 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) 2996 { 2997 #define SET_PORT_GEN_PHV_VALID 0x10 2998 #define SET_PORT_GEN_PHV_EN 0x80 2999 3000 struct mlx4_cmd_mailbox *mailbox; 3001 struct mlx4_set_port_general_context *context; 3002 u32 in_mod; 3003 int err; 3004 3005 mailbox = mlx4_alloc_cmd_mailbox(dev); 3006 if (IS_ERR(mailbox)) 3007 return PTR_ERR(mailbox); 3008 context = mailbox->buf; 3009 3010 context->flags2 |= SET_PORT_GEN_PHV_VALID; 3011 if (phv_bit) 3012 context->phv_en |= SET_PORT_GEN_PHV_EN; 3013 3014 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 3015 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 3016 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 3017 MLX4_CMD_NATIVE); 3018 3019 mlx4_free_cmd_mailbox(dev, mailbox); 3020 return err; 3021 } 3022 3023 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) 3024 { 3025 int err; 3026 struct mlx4_func_cap func_cap; 3027 3028 memset(&func_cap, 0, sizeof(func_cap)); 3029 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3030 if (!err) 3031 *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT; 3032 return err; 3033 } 3034 EXPORT_SYMBOL(get_phv_bit); 3035 3036 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) 3037 { 3038 int ret; 3039 3040 if (mlx4_is_slave(dev)) 3041 return -EPERM; 3042 3043 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3044 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3045 ret = mlx4_SET_PORT_phv_bit(dev, port, new_val); 3046 if (!ret) 3047 dev->caps.phv_bit[port] = new_val; 3048 return ret; 3049 } 3050 3051 return -EOPNOTSUPP; 3052 } 3053 EXPORT_SYMBOL(set_phv_bit); 3054 3055 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port, 3056 bool *vlan_offload_disabled) 3057 { 3058 struct mlx4_func_cap func_cap; 3059 int err; 3060 3061 memset(&func_cap, 0, sizeof(func_cap)); 3062 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3063 if (!err) 3064 *vlan_offload_disabled = 3065 !!(func_cap.flags0 & 3066 QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE); 3067 return err; 3068 } 3069 EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled); 3070 3071 void mlx4_replace_zero_macs(struct mlx4_dev *dev) 3072 { 3073 int i; 3074 u8 mac_addr[ETH_ALEN]; 3075 3076 dev->port_random_macs = 0; 3077 for (i = 1; i <= dev->caps.num_ports; ++i) 3078 if (!dev->caps.def_mac[i] && 3079 dev->caps.port_type[i] == 
MLX4_PORT_TYPE_ETH) { 3080 eth_random_addr(mac_addr); 3081 dev->port_random_macs |= 1 << i; 3082 dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); 3083 } 3084 } 3085 EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); 3086
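/*
 * mlx4_replace_zero_macs() is meant to run once during HCA bring-up, after
 * the per-port default MACs have been queried from FW and before they are
 * handed out, so Ethernet ports never start with an all-zero address.
 * dev->port_random_macs records, as a 1-based port bitmask, which addresses
 * were synthesized, so later code can tell they are random rather than
 * FW-provisioned.  A minimal sketch of the intended call site in the init
 * path (the surrounding code and the slave check are illustrative only):
 *
 *	if (!mlx4_is_slave(dev))
 *		mlx4_replace_zero_macs(dev);
 */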