/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include <dev/mlx5/mlx5_ib/mlx5_ib.h>

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	u8 op_modifier = 0;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		       const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

	/* PortXmitData/PortRcvData are reported in units of 32-bit words,
	 * so the octet counters are divided by four.
	 */
	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}

static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters will be reported in
	 * their 64bit form via ib_pma_portcounters_ext by default.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) {		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}

static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err;
	void *out_cnt;

	/* Declaring support of extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		/* PMA attribute data starts 40 bytes into the MAD data area
		 * (byte 64 of the MAD).
		 */
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

		out_cnt = mlx5_vzalloc(sz);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
						    port_num, out_cnt, sz);
		if (!err)
			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = mlx5_vzalloc(sz);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
					       out_cnt, sz);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}

	kvfree(out_cnt);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	memset(out_mad->data, 0, sizeof(out_mad->data));

	if (MLX5_CAP_GEN(mdev, vport_counters) &&
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
		return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
	} else {
		return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
				   in_mad, out_mad);
	}
}

int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len = out_mad->data[50];
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
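
/*
 * Usage sketch, assuming the wiring done outside this file (e.g. in
 * mlx5_ib_main.c): mlx5_ib_process_mad() is installed as the ib_device's
 * MAD handler at registration time, and the mad_ifc based query helpers
 * back the corresponding verbs query paths when the port link layer is
 * InfiniBand. Roughly:
 *
 *	dev->ib_dev.process_mad = mlx5_ib_process_mad;
 *
 *	// from the query_port verb path:
 *	err = mlx5_query_mad_ifc_port(ibdev, port, props);
 */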