/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include <dev/mlx5/vport.h>

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	u8 op_modifier = 0;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

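/*
 * Dispatch an incoming SMP/GSI MAD: validate the management class, method
 * and attribute, forward supported requests to firmware through MAD_IFC,
 * and fix up directed-route and trap-repress responses.
 */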
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		       struct ib_wc *in_wc, struct ib_grh *in_grh,
		       struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them. */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

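/*
 * Translate the mlx5 vport counters into the PMA PortCountersExt and
 * PortCounters layouts.  The PMA data counters are expressed in units of
 * 4-byte words, hence the octet totals are shifted right by two.
 */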
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       struct mlx5_vport_counters *vc)
{
	pma_cnt_ext->port_xmit_data = cpu_to_be64((vc->transmitted_ib_unicast.octets +
						   vc->transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data = cpu_to_be64((vc->received_ib_unicast.octets +
						  vc->received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets = cpu_to_be64(vc->transmitted_ib_unicast.packets +
						     vc->transmitted_ib_multicast.packets);
	pma_cnt_ext->port_rcv_packets = cpu_to_be64(vc->received_ib_unicast.packets +
						    vc->received_ib_multicast.packets);
	pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(vc->transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(vc->received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets = cpu_to_be64(vc->transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets = cpu_to_be64(vc->received_ib_multicast.packets);
}

static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   struct mlx5_vport_counters *vc)
{
	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
			     (vc->transmitted_ib_unicast.octets +
			      vc->transmitted_ib_multicast.octets) >> 2);
	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
			     (vc->received_ib_unicast.octets +
			      vc->received_ib_multicast.octets) >> 2);
	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
			     vc->transmitted_ib_unicast.packets +
			     vc->transmitted_ib_multicast.packets);
	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
			     vc->received_ib_unicast.packets +
			     vc->received_ib_multicast.packets);
}

static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
			   struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_vport_counters *vc;
	int err;
	int ext;

	vc = kzalloc(sizeof(*vc), GFP_KERNEL);
	if (!vc)
		return -ENOMEM;

	ext = in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT;

	err = mlx5_get_vport_counters(dev->mdev, port_num, vc);
	if (!err) {
		if (ext) {
			struct ib_pma_portcounters_ext *pma_cnt_ext =
				(struct ib_pma_portcounters_ext *)(out_mad->data + 40);

			pma_cnt_ext_assign(pma_cnt_ext, vc);
		} else {
			struct ib_pma_portcounters *pma_cnt =
				(struct ib_pma_portcounters *)(out_mad->data + 40);

			ASSIGN_16BIT_COUNTER(pma_cnt->port_rcv_errors,
					     (u16)vc->received_errors.packets);

			pma_cnt_assign(pma_cnt, vc);
		}
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
	}

	kfree(vc);
	return err;
}

int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	memset(out_mad->data, 0, sizeof(out_mad->data));

	if (MLX5_CAP_GEN(mdev, vport_counters) &&
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
		/* TBD: read error counters from the PPCNT */
		return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
	} else {
		return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
				   in_mad, out_mad);
	}
}

int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_smp_attr_node_info_mad_ifc(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_node_desc_mad_ifc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, 64);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_node_guid_mad_ifc(struct mlx5_ib_dev *dev, u64 *node_guid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_pkey_mad_ifc(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_gids_mad_ifc(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	/* The upper 8 bytes of the GID are the subnet prefix from PortInfo. */
	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	/* The lower 8 bytes are the port GUID from the GUIDInfo table. */
	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

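/*
 * Query PortInfo through the SMA and decode it into ib_port_attr,
 * including the extended-speed (FDR/EDR) and FDR-10 adjustments.
 */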
int mlx5_query_port_mad_ifc(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}