/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <dev/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	u8 op_modifier = 0;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		       const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}

static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters will be reported in
	 * their 64bit form via ib_pma_portcounters_ext by default.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name)	{		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}

static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err;
	void *out_cnt;

	/* Declaring support of extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

		out_cnt = mlx5_vzalloc(sz);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
						    port_num, out_cnt, sz);
		if (!err)
			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = mlx5_vzalloc(sz);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
					       out_cnt, sz);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}

	kvfree(out_cnt);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	memset(out_mad->data, 0, sizeof(out_mad->data));

	if (MLX5_CAP_GEN(mdev, vport_counters) &&
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
		return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
	} else {
		return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
				   in_mad, out_mad);
	}
}

int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int
mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len = out_mad->data[50];
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}