/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE "Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	return !dev->mdev->issi;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}
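
/*
 * The query helpers below dispatch on the access method selected
 * above: MAD commands for pre-ISSI firmware (issi == 0), NIC vport
 * commands when the link layer is Ethernet, and HCA vport commands
 * otherwise.
 */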

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		if (!err)
			*sys_image_guid = cpu_to_be64(tmp);
		return err;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		if (!err)
			*node_guid = cpu_to_be64(tmp);
		return err;

	default:
		return -EINVAL;
	}
}

struct mlx5_reg_node_desc {
	u8 desc[64];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
		     sizeof(struct mlx5_wqe_ctrl_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	return 0;
}
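
/*
 * One-hot link width values as returned by
 * mlx5_query_port_link_width_oper(). translate_active_width() below
 * maps them to the IB_WIDTH_* enum; 2X has no IB spec equivalent at
 * this point, hence the -EINVAL for it.
 */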
enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X = 1 << 0,
	MLX5_IB_WIDTH_2X = 1 << 1,
	MLX5_IB_WIDTH_4X = 1 << 2,
	MLX5_IB_WIDTH_8X = 1 << 3,
	MLX5_IB_WIDTH_12X = 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0 = 1,
	__IB_MAX_VL_0_1 = 2,
	__IB_MAX_VL_0_3 = 3,
	__IB_MAX_VL_0_7 = 4,
	__IB_MAX_VL_0_14 = 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0 = 1,
	MLX5_VL_HW_0_1 = 2,
	MLX5_VL_HW_0_2 = 3,
	MLX5_VL_HW_0_3 = 4,
	MLX5_VL_HW_0_4 = 5,
	MLX5_VL_HW_0_5 = 6,
	MLX5_VL_HW_0_6 = 7,
	MLX5_VL_HW_0_7 = 8,
	MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
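
/*
 * Port attributes are assembled from several firmware queries: the
 * HCA vport context (LID, SM LID, state, violation counters), the
 * operational link width and protocol, the max/operational MTU, and
 * the VL capability.
 */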
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	int max_mtu;
	int oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	memset(props, 0, sizeof(*props));

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid = rep->lid;
	props->lmc = rep->lmc;
	props->sm_lid = rep->sm_lid;
	props->sm_sl = rep->sm_sl;
	props->state = rep->vport_state;
	props->phys_state = rep->port_physical_state;
	props->port_cap_flags = rep->cap_mask1;
	props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr = rep->pkey_violation_counter;
	props->qkey_viol_cntr = rep->qkey_violation_counter;
	props->subnet_timeout = rep->subnet_timeout;
	props->init_type_reply = rep->init_type_reply;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
					 port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}
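
/*
 * NodeDescription is the free-form name that subnet managers and
 * diagnostic tools display for a node. mlx5_ib_modify_device() below
 * supports only this modification and updates both the firmware copy
 * and the cached ibdev->node_desc.
 */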
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * Pass the new node descriptor to FW so that it can generate
	 * an SM trap 144 for the change. If the command fails, return
	 * the error without touching the cached copy.
	 */
	memcpy(&in, props->node_desc, 64);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, 64);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}
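
/*
 * UAR/uuar accounting used by mlx5_ib_alloc_ucontext(): each UAR page
 * holds MLX5_BF_REGS_PER_PAGE blue-flame registers, of which only
 * MLX5_NON_FP_BF_REGS_PER_PAGE are available for regular allocation;
 * the rest of each page is reserved for the fast path and pre-marked
 * busy in the uuar bitmap. For example, assuming 4 registers per page
 * with 2 of them fast-path: req.total_num_uuars = 8 gives
 * num_uars = 4 pages and gross_uuars = 16, with bits 2, 3, 6, 7, 10,
 * 11, 14 and 15 pre-set in the bitmap.
 */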
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
	int num_uars;
	int ver;
	int uuarn;
	int err;
	int i;
	size_t reqlen;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	memset(&req, 0, sizeof(req));
	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, reqlen);
	if (err)
		return ERR_PTR(err);

	if (req.flags || req.reserved)
		return ERR_PTR(-EINVAL);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	req.total_num_uuars = ALIGN(req.total_num_uuars,
				    MLX5_NON_FP_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = L1_CACHE_BYTES;
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * Mark all fast path uuars (registers 2 and 3 of each UAR
	 * page) as taken so the regular allocator never hands them
	 * out.
	 */
	for (i = 0; i < gross_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}

	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
		if (err)
			goto out_count;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
		goto out_uars;

	uuari->ver = ver;
	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;
	return &context->ibucontext;

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	int i;

	for (i = 0; i < uuari->num_uars; i++) {
		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
	}

	kfree(uuari->count);
	kfree(uuari->bitmap);
	kfree(uuari->uars);
	kfree(context);

	return 0;
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
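
/*
 * The mmap offset (vma->vm_pgoff) encodes a command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT and a command argument (for REGULAR_PAGE:
 * the index into the context's UAR list) in the bits below it:
 *
 *   command = (vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *   index   =  vm_pgoff & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 */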
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	unsigned long command;
	unsigned long idx;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_REGULAR_PAGE:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		idx = get_index(vma->vm_pgoff);
		if (idx >= uuari->num_uars)
			return -EINVAL;

		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
			    (unsigned long long)pfn);

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	default:
		return -EINVAL;
	}

	return 0;
}

static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
{
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_core_mr mr;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	seg = &in->seg;
	seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
		goto err_in;
	}

	kfree(in);
	*key = mr.key;

	return 0;

err_in:
	kfree(in);

	return err;
}

static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
{
	struct mlx5_core_mr mr;
	int err;

	memset(&mr, 0, sizeof(mr));
	mr.key = key;
	err = mlx5_core_destroy_mkey(dev->mdev, &mr);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
}
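
/*
 * PD allocation below: user PDs report the pdn back through udata;
 * kernel PDs instead get a physical-address mkey from alloc_pa_mkey()
 * above (created with MLX5_MKEY_LEN64, i.e. meant to span the whole
 * address space) cached as the PD's pa_lkey.
 */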
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	} else {
		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
		if (err) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(err);
		}
	}

	return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	if (!pd->uobject)
		free_pa_mkey(mdev, mpd->pa_lkey);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}

static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->mdev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
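
/*
 * Event demultiplexer registered with mlx5_core: translates core
 * device events into ib_event types. For port-scoped events 'param'
 * carries the 1-based port number, and events are dispatched only
 * while the IB device is marked active.
 */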
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;

	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		/* not used by ULPs */
		return;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	}

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
		mlx5_query_ext_port_caps(dev, port);
}

static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
			dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
			pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}

static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_destroy_cq(dev->umrc.cq);
	ib_dereg_mr(dev->umrc.mr);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};
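
/*
 * The UMR (user-mode memory registration) resources are a PD, a DMA
 * MR, a CQ and a dedicated internal QP of type MLX5_IB_QPT_REG_UMR.
 * The QP is driven through the usual INIT -> RTR -> RTS transitions
 * below before it can post registration work requests; the semaphore
 * bounds outstanding work requests to MAX_UMR_WR.
 */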
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_mr *mr;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mr)) {
		mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
		ret = PTR_ERR(mr);
		goto error_1;
	}

	cq_attr.cqe = 128;
	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
			  &cq_attr);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.mr = mr;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_destroy_cq(cq);

error_2:
	ib_dereg_mr(mr);

error_1:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
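
/*
 * Device-internal verbs objects (created with no uobject/udata): p0
 * is a kernel PD, c0 a CQ, x0/x1 XRC domains, s0 an XRC SRQ bound to
 * x0/c0 and s1 a basic SRQ. Since these bypass the uverbs layer,
 * their back-pointers and reference counts are wired up by hand
 * below.
 */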
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	u32 rsvd_lkey;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
	if (ret) {
		pr_err("Failed to query special context %d\n", ret);
		return ret;
	}
	dev->ib_dev.local_dma_lkey = rsvd_lkey;

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device = &dev->ib_dev;
	devr->c0->uobject = NULL;
	devr->c0->comp_handler = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device = &dev->ib_dev;
	devr->s0->pd = devr->p0;
	devr->s0->uobject = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context = NULL;
	devr->s0->srq_type = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd = devr->x0;
	devr->s0->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device = &dev->ib_dev;
	devr->s1->pd = devr->p0;
	devr->s1->uobject = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context = NULL;
	devr->s1->srq_type = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}

static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
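
/*
 * Bring-up path for one IB device instance. The error labels unwind
 * in exact reverse order of construction: sysfs files, UMR resources,
 * device registration, ODP, device resources, then the ib_device
 * allocation itself.
 */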
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	int err;
	int i;

	/* don't create IB instance over Eth ports, no RoCE yet! */
	if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		return NULL;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	err = get_port_caps(dev);
	if (err)
		goto err_dealloc;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device = &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
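	/*
	 * Advertise the uverbs commands this driver implements, one
	 * IB_USER_VERBS_CMD_* bit per command; the XRCD open/close
	 * bits are added further down only when the device reports
	 * XRC support.
	 */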
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	dev->ib_dev.query_device = mlx5_ib_query_device;
	dev->ib_dev.query_port = mlx5_ib_query_port;
	dev->ib_dev.query_gid = mlx5_ib_query_gid;
	dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
	dev->ib_dev.modify_device = mlx5_ib_modify_device;
	dev->ib_dev.modify_port = mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap = mlx5_ib_mmap;
	dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah = mlx5_ib_create_ah;
	dev->ib_dev.query_ah = mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq = mlx5_ib_create_srq;
	dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
	dev->ib_dev.query_srq = mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp = mlx5_ib_create_qp;
	dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
	dev->ib_dev.query_qp = mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
	dev->ib_dev.post_send = mlx5_ib_post_send;
	dev->ib_dev.post_recv = mlx5_ib_post_recv;
	dev->ib_dev.create_cq = mlx5_ib_create_cq;
	dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
	dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
	dev->ib_dev.destroy_mr = mlx5_ib_destroy_mr;
	dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad = mlx5_ib_process_mad;
	dev->ib_dev.create_mr = mlx5_ib_create_mr;
	dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
	dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
	dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable = mlx5_port_immutable;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	err = init_node_data(dev);
	if (err)
		goto err_dealloc;

	mutex_init(&dev->cap_mask_mutex);

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_dealloc;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_odp;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;

	ib_unregister_device(&dev->ib_dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	ib_dealloc_device(&dev->ib_dev);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add = mlx5_ib_add,
	.remove = mlx5_ib_remove,
	.event = mlx5_ib_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};

static int __init mlx5_ib_init(void)
{
	int err;

	if (deprecated_prof_sel != 2)
		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");

	err = mlx5_ib_odp_init();
	if (err)
		return err;

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err)
		goto clean_odp;

	return err;

clean_odp:
	mlx5_ib_odp_cleanup();
	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	mlx5_ib_odp_cleanup();
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);