/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/cmd.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include "mlx5_ib.h"

enum {
        MLX5_IB_VENDOR_CLASS1 = 0x9,
        MLX5_IB_VENDOR_CLASS2 = 0xa
};

int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
                 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad)
{
        u8 op_modifier = 0;

        /* Key check traps can't be generated unless we have in_wc to
         * tell us where to send the trap.
         */
        if (ignore_mkey || !in_wc)
                op_modifier |= 0x1;
        if (ignore_bkey || !in_wc)
                op_modifier |= 0x2;

        return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

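/* Dispatch a MAD received by the IB core: filter by management class
 * and method, forward supported requests to firmware through
 * mlx5_MAD_IFC(), and report whether a reply MAD was produced.
 */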
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad_hdr *in, size_t in_mad_size,
                        struct ib_mad_hdr *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index)
{
        u16 slid;
        int err;
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;

        BUG_ON(in_mad_size != sizeof(*in_mad) ||
               *out_mad_size != sizeof(*out_mad));

        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
                        return IB_MAD_RESULT_SUCCESS;

                /* Don't process SMInfo queries -- the SMA can't handle them.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1   ||
                   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2   ||
                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
                        return IB_MAD_RESULT_SUCCESS;
        } else {
                return IB_MAD_RESULT_SUCCESS;
        }

        err = mlx5_MAD_IFC(to_mdev(ibdev),
                           mad_flags & IB_MAD_IGNORE_MKEY,
                           mad_flags & IB_MAD_IGNORE_BKEY,
                           port_num, in_wc, in_grh, in_mad, out_mad);
        if (err)
                return IB_MAD_RESULT_FAILURE;

        /* set return bit in status of directed route responses */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
                /* no response for trap repress */
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

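/* Query whether the port implements the ExtendedPortInfo attribute and
 * cache the result in port_caps; it is consulted later when deciding
 * whether FDR-10 detection is possible.
 */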
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u16 packet_error;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

        packet_error = be16_to_cpu(out_mad->status);

        dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
                MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
                                          struct ib_smp *out_mad)
{
        struct ib_smp *in_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        if (!in_mad)
                return -ENOMEM;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
                           out_mad);

        kfree(in_mad);
        return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
                                         __be64 *sys_image_guid)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
        kfree(out_mad);

        return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
                                 u16 *max_pkeys)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
        kfree(out_mad);

        return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
                                 u32 *vendor_id)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
        kfree(out_mad);

        return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(node_desc, out_mad->data, 64);
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(node_guid, out_mad->data + 12, 8);
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

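/* Read one P_Key table entry through an SM P_KeyTable query; each MAD
 * carries a block of 32 entries, so the index is split into a block
 * number (attr_mod) and an offset within the returned block.
 */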
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
                            u16 *pkey)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
                            union ib_gid *gid)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

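/* Fill struct ib_port_attr from the PortInfo attribute, then refine the
 * reported speed: extended speeds (FDR/EDR) come from PortInfo, and
 * FDR-10 is detected through the ExtendedPortInfo attribute.
 */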
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
                            struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int err = -ENOMEM;

        if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
                mlx5_ib_warn(dev, "invalid port number %d\n", port);
                return -EINVAL;
        }

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof(*props));

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err) {
                mlx5_ib_warn(dev, "err %d\n", err);
                goto out;
        }

        props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
        props->lmc = out_mad->data[34] & 0x7;
        props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
        props->sm_sl = out_mad->data[36] & 0xf;
        props->state = out_mad->data[32] & 0xf;
        props->phys_state = out_mad->data[33] >> 4;
        props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
        props->gid_tbl_len = out_mad->data[50];
        props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
        props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
        props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
        props->active_width = out_mad->data[31] & 0xf;
        props->active_speed = out_mad->data[35] >> 4;
        props->max_mtu = out_mad->data[41] & 0xf;
        props->active_mtu = out_mad->data[36] >> 4;
        props->subnet_timeout = out_mad->data[51] & 0x1f;
        props->max_vl_num = out_mad->data[37] >> 4;
        props->init_type_reply = out_mad->data[41] >> 4;

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = 16; /* FDR */
                        break;
                case 2:
                        props->active_speed = 32; /* EDR */
                        break;
                }
        }

        /* If the reported active speed is QDR, check whether it is FDR-10 */
        if (props->active_speed == 4) {
                if (mdev->port_caps[port - 1].ext_port_cap &
                    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
                        init_query_mad(in_mad);
                        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
                        in_mad->attr_mod = cpu_to_be32(port);

                        err = mlx5_MAD_IFC(dev, 1, 1, port,
                                           NULL, NULL, in_mad, out_mad);
                        if (err)
                                goto out;

                        /* Checking LinkSpeedActive for FDR-10 */
                        if (out_mad->data[15] & 0x1)
                                props->active_speed = 8;
                }
        }

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}