/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

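/* Push the cached GID table to firmware. Devices that support RoCE v1/v2
 * use the extended ROCE_ADDR table format (which carries GID type and
 * version); older devices take the plain GID-only table. On a bonded
 * device the same table is also programmed on port 2.
 */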
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type)) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

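/* Query an InfiniBand link-layer port through a PortInfo MAD. Besides the
 * basic attributes this decodes the extended-speed field (FDR/EDR), checks
 * ExtendedPortInfo for FDR-10 when the reported speed is QDR, and reports
 * SDR while the link is down to avoid a stale speed value from firmware.
 */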
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
		IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

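/* Only IB_DEVICE_MODIFY_NODE_DESC is supported here, and only on the PF;
 * slave functions cannot change the node description. The new description
 * is also handed to firmware (best effort) so that it can generate a
 * trap 144 towards the SM.
 */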
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
	 * vma, as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original vma size is exactly a single page, and that there will
	 * be no "splitting" operations on it.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow (e.g. mlx4_ib_dealloc_ucontext). However, we do need
	 * to sync with mlx4_ib_disassociate_ucontext, which also accesses the
	 * vma. The close operation is usually called under mm->mmap_sem,
	 * except when the process is exiting; the exiting case is handled
	 * explicitly as part of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in
	 * mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that task is dead before returning, it may
			 * prevent a rare case of module down in parallel to a
			 * call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			msleep(1);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_read(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_read(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}

static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

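/* Translate a single ib_flow_spec into the corresponding mlx4 hardware
 * steering rule segment. Returns the number of bytes written to mlx4_spec
 * (so the caller can advance through the rule buffer), or a negative errno
 * for unsupported spec types.
 */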
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}

struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};

static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};

static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];

		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}

static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}

static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}

static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}

static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev *dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}

static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}

	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;

		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type != IB_FLOW_SPEC_ETH)
			return -EINVAL;

		/* if all is zero then MC and UC */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};

			/* The XOR above touched only the multicast bit; a
			 * non-empty mask is valid only if that bit is set
			 * and the rest are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;

			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}

	return err;
}

static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
		return ERR_PTR(-EINVAL);

	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		/* If the dont-trap flag (continue match) is set, then under
		 * specific conditions traffic is replicated to the given qp
		 * without being stolen from its original destination.
		 */
		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
			err = mlx4_ib_add_dont_trap_rule(dev,
							 flow_attr,
							 type);
			if (err)
				goto err_free;
		} else {
			type[0] = MLX4_FS_REGULAR;
		}
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_MIRROR_RX_PORT;
		type[1] = MLX4_FS_MIRROR_SX_PORT;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			/* Application always sees one port so the mirror rule
			 * must be on port #2
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
						    domain, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}

		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
					       &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;

		if (is_bonded) {
			flow_attr->port = 2;
			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
						       &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		/* function to create mirror rule */
		i++;
	}

	return &mflow->ibflow;

err_create_flow:
	while (i) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
		i--;
	}

	while (j) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
		j--;
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}

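/* Detach every rule that mlx4_ib_create_flow() registered for this flow,
 * including the port-2 mirror copies created when the device is bonded.
 * The last error seen (if any) is returned once all IDs have been processed.
 */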
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}

	kfree(mflow);
	return ret;
}

static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id reg_id;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}

	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);

	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
Steering rule is left attached\n"); 1944 return -EINVAL; 1945 } 1946 reg_id = ib_steering->reg_id; 1947 kfree(ib_steering); 1948 } 1949 1950 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 1951 prot, reg_id.id); 1952 if (err) 1953 return err; 1954 1955 if (mlx4_is_bonded(dev)) { 1956 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 1957 prot, reg_id.mirror); 1958 if (err) 1959 return err; 1960 } 1961 1962 mutex_lock(&mqp->mutex); 1963 ge = find_gid_entry(mqp, gid->raw); 1964 if (ge) { 1965 spin_lock_bh(&mdev->iboe.lock); 1966 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; 1967 if (ndev) 1968 dev_hold(ndev); 1969 spin_unlock_bh(&mdev->iboe.lock); 1970 if (ndev) 1971 dev_put(ndev); 1972 list_del(&ge->list); 1973 kfree(ge); 1974 } else 1975 pr_warn("could not find mgid entry\n"); 1976 1977 mutex_unlock(&mqp->mutex); 1978 1979 return 0; 1980 } 1981 1982 static int init_node_data(struct mlx4_ib_dev *dev) 1983 { 1984 struct ib_smp *in_mad = NULL; 1985 struct ib_smp *out_mad = NULL; 1986 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; 1987 int err = -ENOMEM; 1988 1989 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 1990 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 1991 if (!in_mad || !out_mad) 1992 goto out; 1993 1994 init_query_mad(in_mad); 1995 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; 1996 if (mlx4_is_master(dev->dev)) 1997 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; 1998 1999 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad); 2000 if (err) 2001 goto out; 2002 2003 memcpy(dev->ib_dev.node_desc, out_mad->data, 64); 2004 2005 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 2006 2007 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad); 2008 if (err) 2009 goto out; 2010 2011 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); 2012 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 2013 2014 out: 2015 kfree(in_mad); 2016 kfree(out_mad); 2017 return err; 2018 } 2019 2020 static ssize_t show_hca(struct device *device, struct device_attribute *attr, 2021 char *buf) 2022 { 2023 struct mlx4_ib_dev *dev = 2024 container_of(device, struct mlx4_ib_dev, ib_dev.dev); 2025 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device); 2026 } 2027 2028 static ssize_t show_rev(struct device *device, struct device_attribute *attr, 2029 char *buf) 2030 { 2031 struct mlx4_ib_dev *dev = 2032 container_of(device, struct mlx4_ib_dev, ib_dev.dev); 2033 return sprintf(buf, "%x\n", dev->dev->rev_id); 2034 } 2035 2036 static ssize_t show_board(struct device *device, struct device_attribute *attr, 2037 char *buf) 2038 { 2039 struct mlx4_ib_dev *dev = 2040 container_of(device, struct mlx4_ib_dev, ib_dev.dev); 2041 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN, 2042 dev->dev->board_id); 2043 } 2044 2045 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2046 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2047 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2048 2049 static struct device_attribute *mlx4_class_attributes[] = { 2050 &dev_attr_hw_rev, 2051 &dev_attr_hca_type, 2052 &dev_attr_board_id 2053 }; 2054 2055 struct diag_counter { 2056 const char *name; 2057 u32 offset; 2058 }; 2059 2060 #define DIAG_COUNTER(_name, _offset) \ 2061 { .name = #_name, .offset = _offset } 2062 2063 static const struct diag_counter diag_basic[] = { 2064 DIAG_COUNTER(rq_num_lle, 0x00), 2065 DIAG_COUNTER(sq_num_lle, 0x04), 2066 DIAG_COUNTER(rq_num_lqpoe, 0x08), 2067 DIAG_COUNTER(sq_num_lqpoe, 0x0C), 2068 DIAG_COUNTER(rq_num_lpe, 0x18), 2069 
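        /* Each entry pairs a stat name exported through the rdma_hw_stats
         * interface with the offset of the matching counter in the
         * QUERY_DIAG_COUNTERS output; e.g. DIAG_COUNTER(rq_num_lle, 0x00)
         * expands to { .name = "rq_num_lle", .offset = 0x00 }.
         */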
DIAG_COUNTER(sq_num_lpe, 0x1C), 2070 DIAG_COUNTER(rq_num_wrfe, 0x20), 2071 DIAG_COUNTER(sq_num_wrfe, 0x24), 2072 DIAG_COUNTER(sq_num_mwbe, 0x2C), 2073 DIAG_COUNTER(sq_num_bre, 0x34), 2074 DIAG_COUNTER(sq_num_rire, 0x44), 2075 DIAG_COUNTER(rq_num_rire, 0x48), 2076 DIAG_COUNTER(sq_num_rae, 0x4C), 2077 DIAG_COUNTER(rq_num_rae, 0x50), 2078 DIAG_COUNTER(sq_num_roe, 0x54), 2079 DIAG_COUNTER(sq_num_tree, 0x5C), 2080 DIAG_COUNTER(sq_num_rree, 0x64), 2081 DIAG_COUNTER(rq_num_rnr, 0x68), 2082 DIAG_COUNTER(sq_num_rnr, 0x6C), 2083 DIAG_COUNTER(rq_num_oos, 0x100), 2084 DIAG_COUNTER(sq_num_oos, 0x104), 2085 }; 2086 2087 static const struct diag_counter diag_ext[] = { 2088 DIAG_COUNTER(rq_num_dup, 0x130), 2089 DIAG_COUNTER(sq_num_to, 0x134), 2090 }; 2091 2092 static const struct diag_counter diag_device_only[] = { 2093 DIAG_COUNTER(num_cqovf, 0x1A0), 2094 DIAG_COUNTER(rq_num_udsdprd, 0x118), 2095 }; 2096 2097 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev, 2098 u8 port_num) 2099 { 2100 struct mlx4_ib_dev *dev = to_mdev(ibdev); 2101 struct mlx4_ib_diag_counters *diag = dev->diag_counters; 2102 2103 if (!diag[!!port_num].name) 2104 return NULL; 2105 2106 return rdma_alloc_hw_stats_struct(diag[!!port_num].name, 2107 diag[!!port_num].num_counters, 2108 RDMA_HW_STATS_DEFAULT_LIFESPAN); 2109 } 2110 2111 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev, 2112 struct rdma_hw_stats *stats, 2113 u8 port, int index) 2114 { 2115 struct mlx4_ib_dev *dev = to_mdev(ibdev); 2116 struct mlx4_ib_diag_counters *diag = dev->diag_counters; 2117 u32 hw_value[ARRAY_SIZE(diag_device_only) + 2118 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {}; 2119 int ret; 2120 int i; 2121 2122 ret = mlx4_query_diag_counters(dev->dev, 2123 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS, 2124 diag[!!port].offset, hw_value, 2125 diag[!!port].num_counters, port); 2126 2127 if (ret) 2128 return ret; 2129 2130 for (i = 0; i < diag[!!port].num_counters; i++) 2131 stats->value[i] = hw_value[i]; 2132 2133 return diag[!!port].num_counters; 2134 } 2135 2136 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, 2137 const char ***name, 2138 u32 **offset, 2139 u32 *num, 2140 bool port) 2141 { 2142 u32 num_counters; 2143 2144 num_counters = ARRAY_SIZE(diag_basic); 2145 2146 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) 2147 num_counters += ARRAY_SIZE(diag_ext); 2148 2149 if (!port) 2150 num_counters += ARRAY_SIZE(diag_device_only); 2151 2152 *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL); 2153 if (!*name) 2154 return -ENOMEM; 2155 2156 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL); 2157 if (!*offset) 2158 goto err_name; 2159 2160 *num = num_counters; 2161 2162 return 0; 2163 2164 err_name: 2165 kfree(*name); 2166 return -ENOMEM; 2167 } 2168 2169 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, 2170 const char **name, 2171 u32 *offset, 2172 bool port) 2173 { 2174 int i; 2175 int j; 2176 2177 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) { 2178 name[i] = diag_basic[i].name; 2179 offset[i] = diag_basic[i].offset; 2180 } 2181 2182 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { 2183 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) { 2184 name[j] = diag_ext[i].name; 2185 offset[j] = diag_ext[i].offset; 2186 } 2187 } 2188 2189 if (!port) { 2190 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) { 2191 name[j] = diag_device_only[i].name; 2192 offset[j] = diag_device_only[i].offset; 2193 } 2194 } 2195 } 2196 2197 static int 
mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) 2198 { 2199 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; 2200 int i; 2201 int ret; 2202 bool per_port = !!(ibdev->dev->caps.flags2 & 2203 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT); 2204 2205 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { 2206 /* i == 1 means we are building port counters */ 2207 if (i && !per_port) 2208 continue; 2209 2210 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name, 2211 &diag[i].offset, 2212 &diag[i].num_counters, i); 2213 if (ret) 2214 goto err_alloc; 2215 2216 mlx4_ib_fill_diag_counters(ibdev, diag[i].name, 2217 diag[i].offset, i); 2218 } 2219 2220 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats; 2221 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats; 2222 2223 return 0; 2224 2225 err_alloc: 2226 if (i) { 2227 kfree(diag[i - 1].name); 2228 kfree(diag[i - 1].offset); 2229 } 2230 2231 return ret; 2232 } 2233 2234 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev) 2235 { 2236 int i; 2237 2238 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { 2239 kfree(ibdev->diag_counters[i].offset); 2240 kfree(ibdev->diag_counters[i].name); 2241 } 2242 } 2243 2244 #define MLX4_IB_INVALID_MAC ((u64)-1) 2245 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, 2246 struct net_device *dev, 2247 int port) 2248 { 2249 u64 new_smac = 0; 2250 u64 release_mac = MLX4_IB_INVALID_MAC; 2251 struct mlx4_ib_qp *qp; 2252 2253 read_lock(&dev_base_lock); 2254 new_smac = mlx4_mac_to_u64(dev->dev_addr); 2255 read_unlock(&dev_base_lock); 2256 2257 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); 2258 2259 /* no need for update QP1 and mac registration in non-SRIOV */ 2260 if (!mlx4_is_mfunc(ibdev->dev)) 2261 return; 2262 2263 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); 2264 qp = ibdev->qp1_proxy[port - 1]; 2265 if (qp) { 2266 int new_smac_index; 2267 u64 old_smac; 2268 struct mlx4_update_qp_params update_params; 2269 2270 mutex_lock(&qp->mutex); 2271 old_smac = qp->pri.smac; 2272 if (new_smac == old_smac) 2273 goto unlock; 2274 2275 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); 2276 2277 if (new_smac_index < 0) 2278 goto unlock; 2279 2280 update_params.smac_index = new_smac_index; 2281 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, 2282 &update_params)) { 2283 release_mac = new_smac; 2284 goto unlock; 2285 } 2286 /* if old port was zero, no mac was yet registered for this QP */ 2287 if (qp->pri.smac_port) 2288 release_mac = old_smac; 2289 qp->pri.smac = new_smac; 2290 qp->pri.smac_port = port; 2291 qp->pri.smac_index = new_smac_index; 2292 } 2293 2294 unlock: 2295 if (release_mac != MLX4_IB_INVALID_MAC) 2296 mlx4_unregister_mac(ibdev->dev, port, release_mac); 2297 if (qp) 2298 mutex_unlock(&qp->mutex); 2299 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); 2300 } 2301 2302 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, 2303 struct net_device *dev, 2304 unsigned long event) 2305 2306 { 2307 struct mlx4_ib_iboe *iboe; 2308 int update_qps_port = -1; 2309 int port; 2310 2311 ASSERT_RTNL(); 2312 2313 iboe = &ibdev->iboe; 2314 2315 spin_lock_bh(&iboe->lock); 2316 mlx4_foreach_ib_transport_port(port, ibdev->dev) { 2317 2318 iboe->netdevs[port - 1] = 2319 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 2320 2321 if (dev == iboe->netdevs[port - 1] && 2322 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || 2323 event == NETDEV_UP || event == NETDEV_CHANGE)) 2324 update_qps_port = port; 2325 2326 } 2327 spin_unlock_bh(&iboe->lock); 2328 2329 if 
(update_qps_port > 0) 2330 mlx4_ib_update_qps(ibdev, dev, update_qps_port); 2331 } 2332 2333 static int mlx4_ib_netdev_event(struct notifier_block *this, 2334 unsigned long event, void *ptr) 2335 { 2336 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2337 struct mlx4_ib_dev *ibdev; 2338 2339 if (!net_eq(dev_net(dev), &init_net)) 2340 return NOTIFY_DONE; 2341 2342 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); 2343 mlx4_ib_scan_netdevs(ibdev, dev, event); 2344 2345 return NOTIFY_DONE; 2346 } 2347 2348 static void init_pkeys(struct mlx4_ib_dev *ibdev) 2349 { 2350 int port; 2351 int slave; 2352 int i; 2353 2354 if (mlx4_is_master(ibdev->dev)) { 2355 for (slave = 0; slave <= ibdev->dev->persist->num_vfs; 2356 ++slave) { 2357 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { 2358 for (i = 0; 2359 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; 2360 ++i) { 2361 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] = 2362 /* master has the identity virt2phys pkey mapping */ 2363 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i : 2364 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1; 2365 mlx4_sync_pkey_table(ibdev->dev, slave, port, i, 2366 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]); 2367 } 2368 } 2369 } 2370 /* initialize pkey cache */ 2371 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { 2372 for (i = 0; 2373 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; 2374 ++i) 2375 ibdev->pkeys.phys_pkey_cache[port-1][i] = 2376 (i) ? 0 : 0xFFFF; 2377 } 2378 } 2379 } 2380 2381 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) 2382 { 2383 int i, j, eq = 0, total_eqs = 0; 2384 2385 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, 2386 sizeof(ibdev->eq_table[0]), GFP_KERNEL); 2387 if (!ibdev->eq_table) 2388 return; 2389 2390 for (i = 1; i <= dev->caps.num_ports; i++) { 2391 for (j = 0; j < mlx4_get_eqs_per_port(dev, i); 2392 j++, total_eqs++) { 2393 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs)) 2394 continue; 2395 ibdev->eq_table[eq] = total_eqs; 2396 if (!mlx4_assign_eq(dev, i, 2397 &ibdev->eq_table[eq])) 2398 eq++; 2399 else 2400 ibdev->eq_table[eq] = -1; 2401 } 2402 } 2403 2404 for (i = eq; i < dev->caps.num_comp_vectors; 2405 ibdev->eq_table[i++] = -1) 2406 ; 2407 2408 /* Advertise the new number of EQs to clients */ 2409 ibdev->ib_dev.num_comp_vectors = eq; 2410 } 2411 2412 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) 2413 { 2414 int i; 2415 int total_eqs = ibdev->ib_dev.num_comp_vectors; 2416 2417 /* no eqs were allocated */ 2418 if (!ibdev->eq_table) 2419 return; 2420 2421 /* Reset the advertised EQ number */ 2422 ibdev->ib_dev.num_comp_vectors = 0; 2423 2424 for (i = 0; i < total_eqs; i++) 2425 mlx4_release_eq(dev, ibdev->eq_table[i]); 2426 2427 kfree(ibdev->eq_table); 2428 ibdev->eq_table = NULL; 2429 } 2430 2431 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, 2432 struct ib_port_immutable *immutable) 2433 { 2434 struct ib_port_attr attr; 2435 struct mlx4_ib_dev *mdev = to_mdev(ibdev); 2436 int err; 2437 2438 err = mlx4_ib_query_port(ibdev, port_num, &attr); 2439 if (err) 2440 return err; 2441 2442 immutable->pkey_tbl_len = attr.pkey_tbl_len; 2443 immutable->gid_tbl_len = attr.gid_tbl_len; 2444 2445 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) { 2446 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; 2447 } else { 2448 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) 2449 immutable->core_cap_flags = 
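                        /* Ethernet link layer: the IBOE capability gives plain
                         * RoCE (v1); devices with the ROCE_V1_V2 capability
                         * also advertise RoCE v2 (UDP encapsulation) below.
                         */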
RDMA_CORE_PORT_IBA_ROCE; 2450 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) 2451 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | 2452 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 2453 } 2454 2455 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 2456 2457 return 0; 2458 } 2459 2460 static void get_fw_ver_str(struct ib_device *device, char *str, 2461 size_t str_len) 2462 { 2463 struct mlx4_ib_dev *dev = 2464 container_of(device, struct mlx4_ib_dev, ib_dev); 2465 snprintf(str, str_len, "%d.%d.%d", 2466 (int) (dev->dev->caps.fw_ver >> 32), 2467 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff, 2468 (int) dev->dev->caps.fw_ver & 0xffff); 2469 } 2470 2471 static void *mlx4_ib_add(struct mlx4_dev *dev) 2472 { 2473 struct mlx4_ib_dev *ibdev; 2474 int num_ports = 0; 2475 int i, j; 2476 int err; 2477 struct mlx4_ib_iboe *iboe; 2478 int ib_num_ports = 0; 2479 int num_req_counters; 2480 int allocated; 2481 u32 counter_index; 2482 struct counter_index *new_counter_index = NULL; 2483 2484 pr_info_once("%s", mlx4_ib_version); 2485 2486 num_ports = 0; 2487 mlx4_foreach_ib_transport_port(i, dev) 2488 num_ports++; 2489 2490 /* No point in registering a device with no ports... */ 2491 if (num_ports == 0) 2492 return NULL; 2493 2494 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); 2495 if (!ibdev) { 2496 dev_err(&dev->persist->pdev->dev, 2497 "Device struct alloc failed\n"); 2498 return NULL; 2499 } 2500 2501 iboe = &ibdev->iboe; 2502 2503 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) 2504 goto err_dealloc; 2505 2506 if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) 2507 goto err_pd; 2508 2509 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, 2510 PAGE_SIZE); 2511 if (!ibdev->uar_map) 2512 goto err_uar; 2513 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); 2514 2515 ibdev->dev = dev; 2516 ibdev->bond_next_port = 0; 2517 2518 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); 2519 ibdev->ib_dev.owner = THIS_MODULE; 2520 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; 2521 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; 2522 ibdev->num_ports = num_ports; 2523 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? 
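        /* When the two physical ports are bonded (active-backup LAG) the
         * device is exposed to the IB core as a single port; port 2 is only
         * used internally for mirror steering rules and counters.
         */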
2524 1 : ibdev->num_ports; 2525 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; 2526 ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; 2527 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev; 2528 ibdev->ib_dev.add_gid = mlx4_ib_add_gid; 2529 ibdev->ib_dev.del_gid = mlx4_ib_del_gid; 2530 2531 if (dev->caps.userspace_caps) 2532 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; 2533 else 2534 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION; 2535 2536 ibdev->ib_dev.uverbs_cmd_mask = 2537 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 2538 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 2539 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 2540 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 2541 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 2542 (1ull << IB_USER_VERBS_CMD_REG_MR) | 2543 (1ull << IB_USER_VERBS_CMD_REREG_MR) | 2544 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 2545 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 2546 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 2547 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 2548 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 2549 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 2550 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 2551 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 2552 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 2553 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 2554 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 2555 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 2556 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 2557 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 2558 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 2559 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 2560 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 2561 2562 ibdev->ib_dev.query_device = mlx4_ib_query_device; 2563 ibdev->ib_dev.query_port = mlx4_ib_query_port; 2564 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer; 2565 ibdev->ib_dev.query_gid = mlx4_ib_query_gid; 2566 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; 2567 ibdev->ib_dev.modify_device = mlx4_ib_modify_device; 2568 ibdev->ib_dev.modify_port = mlx4_ib_modify_port; 2569 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext; 2570 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext; 2571 ibdev->ib_dev.mmap = mlx4_ib_mmap; 2572 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd; 2573 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd; 2574 ibdev->ib_dev.create_ah = mlx4_ib_create_ah; 2575 ibdev->ib_dev.query_ah = mlx4_ib_query_ah; 2576 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah; 2577 ibdev->ib_dev.create_srq = mlx4_ib_create_srq; 2578 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq; 2579 ibdev->ib_dev.query_srq = mlx4_ib_query_srq; 2580 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq; 2581 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv; 2582 ibdev->ib_dev.create_qp = mlx4_ib_create_qp; 2583 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp; 2584 ibdev->ib_dev.query_qp = mlx4_ib_query_qp; 2585 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp; 2586 ibdev->ib_dev.post_send = mlx4_ib_post_send; 2587 ibdev->ib_dev.post_recv = mlx4_ib_post_recv; 2588 ibdev->ib_dev.create_cq = mlx4_ib_create_cq; 2589 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq; 2590 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq; 2591 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq; 2592 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq; 2593 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; 2594 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; 2595 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; 2596 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr; 2597 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; 2598 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr; 
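        /* alloc_mr + map_mr_sg implement the kernel fast-registration (FRWR)
         * path: a ULP allocates an MR, maps a scatterlist into it and posts
         * an IB_WR_REG_MR work request before using the resulting rkey.
         */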
2599 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg; 2600 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; 2601 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; 2602 ibdev->ib_dev.process_mad = mlx4_ib_process_mad; 2603 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; 2604 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str; 2605 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext; 2606 2607 if (!mlx4_is_slave(ibdev->dev)) { 2608 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc; 2609 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr; 2610 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; 2611 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; 2612 } 2613 2614 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2615 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { 2616 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw; 2617 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw; 2618 2619 ibdev->ib_dev.uverbs_cmd_mask |= 2620 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 2621 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 2622 } 2623 2624 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) { 2625 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd; 2626 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd; 2627 ibdev->ib_dev.uverbs_cmd_mask |= 2628 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | 2629 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); 2630 } 2631 2632 if (check_flow_steering_support(dev)) { 2633 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED; 2634 ibdev->ib_dev.create_flow = mlx4_ib_create_flow; 2635 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; 2636 2637 ibdev->ib_dev.uverbs_ex_cmd_mask |= 2638 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 2639 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); 2640 } 2641 2642 ibdev->ib_dev.uverbs_ex_cmd_mask |= 2643 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | 2644 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | 2645 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP); 2646 2647 mlx4_ib_alloc_eqs(dev, ibdev); 2648 2649 spin_lock_init(&iboe->lock); 2650 2651 if (init_node_data(ibdev)) 2652 goto err_map; 2653 2654 for (i = 0; i < ibdev->num_ports; ++i) { 2655 mutex_init(&ibdev->counters_table[i].mutex); 2656 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list); 2657 } 2658 2659 num_req_counters = mlx4_is_bonded(dev) ? 
1 : ibdev->num_ports; 2660 for (i = 0; i < num_req_counters; ++i) { 2661 mutex_init(&ibdev->qp1_proxy_lock[i]); 2662 allocated = 0; 2663 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == 2664 IB_LINK_LAYER_ETHERNET) { 2665 err = mlx4_counter_alloc(ibdev->dev, &counter_index); 2666 /* if failed to allocate a new counter, use default */ 2667 if (err) 2668 counter_index = 2669 mlx4_get_default_counter_index(dev, 2670 i + 1); 2671 else 2672 allocated = 1; 2673 } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */ 2674 counter_index = mlx4_get_default_counter_index(dev, 2675 i + 1); 2676 } 2677 new_counter_index = kmalloc(sizeof(*new_counter_index), 2678 GFP_KERNEL); 2679 if (!new_counter_index) { 2680 if (allocated) 2681 mlx4_counter_free(ibdev->dev, counter_index); 2682 goto err_counter; 2683 } 2684 new_counter_index->index = counter_index; 2685 new_counter_index->allocated = allocated; 2686 list_add_tail(&new_counter_index->list, 2687 &ibdev->counters_table[i].counters_list); 2688 ibdev->counters_table[i].default_counter = counter_index; 2689 pr_info("counter index %d for port %d allocated %d\n", 2690 counter_index, i + 1, allocated); 2691 } 2692 if (mlx4_is_bonded(dev)) 2693 for (i = 1; i < ibdev->num_ports ; ++i) { 2694 new_counter_index = 2695 kmalloc(sizeof(struct counter_index), 2696 GFP_KERNEL); 2697 if (!new_counter_index) 2698 goto err_counter; 2699 new_counter_index->index = counter_index; 2700 new_counter_index->allocated = 0; 2701 list_add_tail(&new_counter_index->list, 2702 &ibdev->counters_table[i].counters_list); 2703 ibdev->counters_table[i].default_counter = 2704 counter_index; 2705 } 2706 2707 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2708 ib_num_ports++; 2709 2710 spin_lock_init(&ibdev->sm_lock); 2711 mutex_init(&ibdev->cap_mask_mutex); 2712 INIT_LIST_HEAD(&ibdev->qp_list); 2713 spin_lock_init(&ibdev->reset_flow_resource_lock); 2714 2715 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && 2716 ib_num_ports) { 2717 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; 2718 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, 2719 MLX4_IB_UC_STEER_QPN_ALIGN, 2720 &ibdev->steer_qpn_base, 0); 2721 if (err) 2722 goto err_counter; 2723 2724 ibdev->ib_uc_qpns_bitmap = 2725 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) * 2726 sizeof(long), 2727 GFP_KERNEL); 2728 if (!ibdev->ib_uc_qpns_bitmap) { 2729 dev_err(&dev->persist->pdev->dev, 2730 "bit map alloc failed\n"); 2731 goto err_steer_qp_release; 2732 } 2733 2734 bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count); 2735 2736 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE( 2737 dev, ibdev->steer_qpn_base, 2738 ibdev->steer_qpn_base + 2739 ibdev->steer_qpn_count - 1); 2740 if (err) 2741 goto err_steer_free_bitmap; 2742 } 2743 2744 for (j = 1; j <= ibdev->dev->caps.num_ports; j++) 2745 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); 2746 2747 if (mlx4_ib_alloc_diag_counters(ibdev)) 2748 goto err_steer_free_bitmap; 2749 2750 if (ib_register_device(&ibdev->ib_dev, NULL)) 2751 goto err_diag_counters; 2752 2753 if (mlx4_ib_mad_init(ibdev)) 2754 goto err_reg; 2755 2756 if (mlx4_ib_init_sriov(ibdev)) 2757 goto err_mad; 2758 2759 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE || 2760 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { 2761 if (!iboe->nb.notifier_call) { 2762 iboe->nb.notifier_call = mlx4_ib_netdev_event; 2763 err = register_netdevice_notifier(&iboe->nb); 2764 if (err) { 2765 iboe->nb.notifier_call = NULL; 2766 goto err_notif; 2767 } 2768 } 2769 if (dev->caps.flags2 & 
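                /* RoCE v2 capable HW also needs the well-known RoCE v2 UDP
                 * destination port (4791) programmed into the device.
                 */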
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { 2770 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT); 2771 if (err) { 2772 goto err_notif; 2773 } 2774 } 2775 } 2776 2777 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { 2778 if (device_create_file(&ibdev->ib_dev.dev, 2779 mlx4_class_attributes[j])) 2780 goto err_notif; 2781 } 2782 2783 ibdev->ib_active = true; 2784 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2785 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i), 2786 &ibdev->ib_dev); 2787 2788 if (mlx4_is_mfunc(ibdev->dev)) 2789 init_pkeys(ibdev); 2790 2791 /* create paravirt contexts for any VFs which are active */ 2792 if (mlx4_is_master(ibdev->dev)) { 2793 for (j = 0; j < MLX4_MFUNC_MAX; j++) { 2794 if (j == mlx4_master_func_num(ibdev->dev)) 2795 continue; 2796 if (mlx4_is_slave_active(ibdev->dev, j)) 2797 do_slave_init(ibdev, j, 1); 2798 } 2799 } 2800 return ibdev; 2801 2802 err_notif: 2803 if (ibdev->iboe.nb.notifier_call) { 2804 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) 2805 pr_warn("failure unregistering notifier\n"); 2806 ibdev->iboe.nb.notifier_call = NULL; 2807 } 2808 flush_workqueue(wq); 2809 2810 mlx4_ib_close_sriov(ibdev); 2811 2812 err_mad: 2813 mlx4_ib_mad_cleanup(ibdev); 2814 2815 err_reg: 2816 ib_unregister_device(&ibdev->ib_dev); 2817 2818 err_diag_counters: 2819 mlx4_ib_diag_cleanup(ibdev); 2820 2821 err_steer_free_bitmap: 2822 kfree(ibdev->ib_uc_qpns_bitmap); 2823 2824 err_steer_qp_release: 2825 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) 2826 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, 2827 ibdev->steer_qpn_count); 2828 err_counter: 2829 for (i = 0; i < ibdev->num_ports; ++i) 2830 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); 2831 2832 err_map: 2833 iounmap(ibdev->uar_map); 2834 2835 err_uar: 2836 mlx4_uar_free(dev, &ibdev->priv_uar); 2837 2838 err_pd: 2839 mlx4_pd_free(dev, ibdev->priv_pdn); 2840 2841 err_dealloc: 2842 ib_dealloc_device(&ibdev->ib_dev); 2843 2844 return NULL; 2845 } 2846 2847 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) 2848 { 2849 int offset; 2850 2851 WARN_ON(!dev->ib_uc_qpns_bitmap); 2852 2853 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap, 2854 dev->steer_qpn_count, 2855 get_count_order(count)); 2856 if (offset < 0) 2857 return offset; 2858 2859 *qpn = dev->steer_qpn_base + offset; 2860 return 0; 2861 } 2862 2863 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count) 2864 { 2865 if (!qpn || 2866 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED) 2867 return; 2868 2869 BUG_ON(qpn < dev->steer_qpn_base); 2870 2871 bitmap_release_region(dev->ib_uc_qpns_bitmap, 2872 qpn - dev->steer_qpn_base, 2873 get_count_order(count)); 2874 } 2875 2876 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, 2877 int is_attach) 2878 { 2879 int err; 2880 size_t flow_size; 2881 struct ib_flow_attr *flow = NULL; 2882 struct ib_flow_spec_ib *ib_spec; 2883 2884 if (is_attach) { 2885 flow_size = sizeof(struct ib_flow_attr) + 2886 sizeof(struct ib_flow_spec_ib); 2887 flow = kzalloc(flow_size, GFP_KERNEL); 2888 if (!flow) 2889 return -ENOMEM; 2890 flow->port = mqp->port; 2891 flow->num_of_specs = 1; 2892 flow->size = flow_size; 2893 ib_spec = (struct ib_flow_spec_ib *)(flow + 1); 2894 ib_spec->type = IB_FLOW_SPEC_IB; 2895 ib_spec->size = sizeof(struct ib_flow_spec_ib); 2896 /* Add an empty rule for IB L2 */ 2897 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask)); 2898 2899 err = __mlx4_ib_create_flow(&mqp->ibqp, flow, 2900 
IB_FLOW_DOMAIN_NIC, 2901 MLX4_FS_REGULAR, 2902 &mqp->reg_id); 2903 } else { 2904 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); 2905 } 2906 kfree(flow); 2907 return err; 2908 } 2909 2910 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) 2911 { 2912 struct mlx4_ib_dev *ibdev = ibdev_ptr; 2913 int p; 2914 int i; 2915 2916 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2917 devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); 2918 ibdev->ib_active = false; 2919 flush_workqueue(wq); 2920 2921 mlx4_ib_close_sriov(ibdev); 2922 mlx4_ib_mad_cleanup(ibdev); 2923 ib_unregister_device(&ibdev->ib_dev); 2924 mlx4_ib_diag_cleanup(ibdev); 2925 if (ibdev->iboe.nb.notifier_call) { 2926 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) 2927 pr_warn("failure unregistering notifier\n"); 2928 ibdev->iboe.nb.notifier_call = NULL; 2929 } 2930 2931 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2932 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, 2933 ibdev->steer_qpn_count); 2934 kfree(ibdev->ib_uc_qpns_bitmap); 2935 } 2936 2937 iounmap(ibdev->uar_map); 2938 for (p = 0; p < ibdev->num_ports; ++p) 2939 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]); 2940 2941 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB) 2942 mlx4_CLOSE_PORT(dev, p); 2943 2944 mlx4_ib_free_eqs(dev, ibdev); 2945 2946 mlx4_uar_free(dev, &ibdev->priv_uar); 2947 mlx4_pd_free(dev, ibdev->priv_pdn); 2948 ib_dealloc_device(&ibdev->ib_dev); 2949 } 2950 2951 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) 2952 { 2953 struct mlx4_ib_demux_work **dm = NULL; 2954 struct mlx4_dev *dev = ibdev->dev; 2955 int i; 2956 unsigned long flags; 2957 struct mlx4_active_ports actv_ports; 2958 unsigned int ports; 2959 unsigned int first_port; 2960 2961 if (!mlx4_is_master(dev)) 2962 return; 2963 2964 actv_ports = mlx4_get_active_ports(dev, slave); 2965 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); 2966 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); 2967 2968 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); 2969 if (!dm) { 2970 pr_err("failed to allocate memory for tunneling qp update\n"); 2971 return; 2972 } 2973 2974 for (i = 0; i < ports; i++) { 2975 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); 2976 if (!dm[i]) { 2977 pr_err("failed to allocate memory for tunneling qp update work struct\n"); 2978 while (--i >= 0) 2979 kfree(dm[i]); 2980 goto out; 2981 } 2982 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); 2983 dm[i]->port = first_port + i + 1; 2984 dm[i]->slave = slave; 2985 dm[i]->do_init = do_init; 2986 dm[i]->dev = ibdev; 2987 } 2988 /* initialize or tear down tunnel QPs for the slave */ 2989 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); 2990 if (!ibdev->sriov.is_going_down) { 2991 for (i = 0; i < ports; i++) 2992 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); 2993 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); 2994 } else { 2995 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); 2996 for (i = 0; i < ports; i++) 2997 kfree(dm[i]); 2998 } 2999 out: 3000 kfree(dm); 3001 return; 3002 } 3003 3004 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev) 3005 { 3006 struct mlx4_ib_qp *mqp; 3007 unsigned long flags_qp; 3008 unsigned long flags_cq; 3009 struct mlx4_ib_cq *send_mcq, *recv_mcq; 3010 struct list_head cq_notify_list; 3011 struct mlx4_cq *mcq; 3012 unsigned long flags; 3013 3014 pr_warn("mlx4_ib_handle_catas_error was started\n"); 3015 
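        /* After a catastrophic error the HW generates no further completions.
         * Walk every QP on this device and, for any send or receive queue
         * with outstanding work (head != tail), collect its CQ on
         * cq_notify_list; the reset_notify_added flag keeps a CQ from being
         * queued twice.  Invoking mcq->comp() below lets ULPs poll out flush
         * errors and release their resources.
         */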
INIT_LIST_HEAD(&cq_notify_list); 3016 3017 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 3018 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 3019 3020 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 3021 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 3022 if (mqp->sq.tail != mqp->sq.head) { 3023 send_mcq = to_mcq(mqp->ibqp.send_cq); 3024 spin_lock_irqsave(&send_mcq->lock, flags_cq); 3025 if (send_mcq->mcq.comp && 3026 mqp->ibqp.send_cq->comp_handler) { 3027 if (!send_mcq->mcq.reset_notify_added) { 3028 send_mcq->mcq.reset_notify_added = 1; 3029 list_add_tail(&send_mcq->mcq.reset_notify, 3030 &cq_notify_list); 3031 } 3032 } 3033 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 3034 } 3035 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 3036 /* Now, handle the QP's receive queue */ 3037 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 3038 /* no handling is needed for SRQ */ 3039 if (!mqp->ibqp.srq) { 3040 if (mqp->rq.tail != mqp->rq.head) { 3041 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 3042 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 3043 if (recv_mcq->mcq.comp && 3044 mqp->ibqp.recv_cq->comp_handler) { 3045 if (!recv_mcq->mcq.reset_notify_added) { 3046 recv_mcq->mcq.reset_notify_added = 1; 3047 list_add_tail(&recv_mcq->mcq.reset_notify, 3048 &cq_notify_list); 3049 } 3050 } 3051 spin_unlock_irqrestore(&recv_mcq->lock, 3052 flags_cq); 3053 } 3054 } 3055 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 3056 } 3057 3058 list_for_each_entry(mcq, &cq_notify_list, reset_notify) { 3059 mcq->comp(mcq); 3060 } 3061 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 3062 pr_warn("mlx4_ib_handle_catas_error ended\n"); 3063 } 3064 3065 static void handle_bonded_port_state_event(struct work_struct *work) 3066 { 3067 struct ib_event_work *ew = 3068 container_of(work, struct ib_event_work, work); 3069 struct mlx4_ib_dev *ibdev = ew->ib_dev; 3070 enum ib_port_state bonded_port_state = IB_PORT_NOP; 3071 int i; 3072 struct ib_event ibev; 3073 3074 kfree(ew); 3075 spin_lock_bh(&ibdev->iboe.lock); 3076 for (i = 0; i < MLX4_MAX_PORTS; ++i) { 3077 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; 3078 enum ib_port_state curr_port_state; 3079 3080 if (!curr_netdev) 3081 continue; 3082 3083 curr_port_state = 3084 (netif_running(curr_netdev) && 3085 netif_carrier_ok(curr_netdev)) ? 3086 IB_PORT_ACTIVE : IB_PORT_DOWN; 3087 3088 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ? 3089 curr_port_state : IB_PORT_ACTIVE; 3090 } 3091 spin_unlock_bh(&ibdev->iboe.lock); 3092 3093 ibev.device = &ibdev->ib_dev; 3094 ibev.element.port_num = 1; 3095 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ? 
3096 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 3097 3098 ib_dispatch_event(&ibev); 3099 } 3100 3101 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, 3102 enum mlx4_dev_event event, unsigned long param) 3103 { 3104 struct ib_event ibev; 3105 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); 3106 struct mlx4_eqe *eqe = NULL; 3107 struct ib_event_work *ew; 3108 int p = 0; 3109 3110 if (mlx4_is_bonded(dev) && 3111 ((event == MLX4_DEV_EVENT_PORT_UP) || 3112 (event == MLX4_DEV_EVENT_PORT_DOWN))) { 3113 ew = kmalloc(sizeof(*ew), GFP_ATOMIC); 3114 if (!ew) 3115 return; 3116 INIT_WORK(&ew->work, handle_bonded_port_state_event); 3117 ew->ib_dev = ibdev; 3118 queue_work(wq, &ew->work); 3119 return; 3120 } 3121 3122 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) 3123 eqe = (struct mlx4_eqe *)param; 3124 else 3125 p = (int) param; 3126 3127 switch (event) { 3128 case MLX4_DEV_EVENT_PORT_UP: 3129 if (p > ibdev->num_ports) 3130 return; 3131 if (mlx4_is_master(dev) && 3132 rdma_port_get_link_layer(&ibdev->ib_dev, p) == 3133 IB_LINK_LAYER_INFINIBAND) { 3134 mlx4_ib_invalidate_all_guid_record(ibdev, p); 3135 } 3136 ibev.event = IB_EVENT_PORT_ACTIVE; 3137 break; 3138 3139 case MLX4_DEV_EVENT_PORT_DOWN: 3140 if (p > ibdev->num_ports) 3141 return; 3142 ibev.event = IB_EVENT_PORT_ERR; 3143 break; 3144 3145 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: 3146 ibdev->ib_active = false; 3147 ibev.event = IB_EVENT_DEVICE_FATAL; 3148 mlx4_ib_handle_catas_error(ibdev); 3149 break; 3150 3151 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: 3152 ew = kmalloc(sizeof *ew, GFP_ATOMIC); 3153 if (!ew) { 3154 pr_err("failed to allocate memory for events work\n"); 3155 break; 3156 } 3157 3158 INIT_WORK(&ew->work, handle_port_mgmt_change_event); 3159 memcpy(&ew->ib_eqe, eqe, sizeof *eqe); 3160 ew->ib_dev = ibdev; 3161 /* need to queue only for port owner, which uses GEN_EQE */ 3162 if (mlx4_is_master(dev)) 3163 queue_work(wq, &ew->work); 3164 else 3165 handle_port_mgmt_change_event(&ew->work); 3166 return; 3167 3168 case MLX4_DEV_EVENT_SLAVE_INIT: 3169 /* here, p is the slave id */ 3170 do_slave_init(ibdev, p, 1); 3171 if (mlx4_is_master(dev)) { 3172 int i; 3173 3174 for (i = 1; i <= ibdev->num_ports; i++) { 3175 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) 3176 == IB_LINK_LAYER_INFINIBAND) 3177 mlx4_ib_slave_alias_guid_event(ibdev, 3178 p, i, 3179 1); 3180 } 3181 } 3182 return; 3183 3184 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: 3185 if (mlx4_is_master(dev)) { 3186 int i; 3187 3188 for (i = 1; i <= ibdev->num_ports; i++) { 3189 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) 3190 == IB_LINK_LAYER_INFINIBAND) 3191 mlx4_ib_slave_alias_guid_event(ibdev, 3192 p, i, 3193 0); 3194 } 3195 } 3196 /* here, p is the slave id */ 3197 do_slave_init(ibdev, p, 0); 3198 return; 3199 3200 default: 3201 return; 3202 } 3203 3204 ibev.device = ibdev_ptr; 3205 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 
1 : (u8)p; 3206 3207 ib_dispatch_event(&ibev); 3208 } 3209 3210 static struct mlx4_interface mlx4_ib_interface = { 3211 .add = mlx4_ib_add, 3212 .remove = mlx4_ib_remove, 3213 .event = mlx4_ib_event, 3214 .protocol = MLX4_PROT_IB_IPV6, 3215 .flags = MLX4_INTFF_BONDING 3216 }; 3217 3218 static int __init mlx4_ib_init(void) 3219 { 3220 int err; 3221 3222 wq = create_singlethread_workqueue("mlx4_ib"); 3223 if (!wq) 3224 return -ENOMEM; 3225 3226 err = mlx4_ib_mcg_init(); 3227 if (err) 3228 goto clean_wq; 3229 3230 err = mlx4_register_interface(&mlx4_ib_interface); 3231 if (err) 3232 goto clean_mcg; 3233 3234 return 0; 3235 3236 clean_mcg: 3237 mlx4_ib_mcg_destroy(); 3238 3239 clean_wq: 3240 destroy_workqueue(wq); 3241 return err; 3242 } 3243 3244 static void __exit mlx4_ib_cleanup(void) 3245 { 3246 mlx4_unregister_interface(&mlx4_ib_interface); 3247 mlx4_ib_mcg_destroy(); 3248 destroy_workqueue(wq); 3249 } 3250 3251 module_init(mlx4_ib_init); 3252 module_exit(mlx4_ib_cleanup); 3253
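/*
 * Illustrative sketch (not part of the driver): how a user-space consumer
 * might exercise the flow-steering verbs implemented above through
 * libibverbs.  The attribute layout mirrors the ib_flow_attr handled in
 * mlx4_ib_create_flow(); IBV_FLOW_ATTR_SNIFFER maps to the
 * MLX4_FS_MIRROR_RX_PORT/MLX4_FS_MIRROR_SX_PORT rule pair.  QP setup,
 * privileges and error handling are omitted.
 *
 *	struct ibv_flow_attr attr = {
 *		.type         = IBV_FLOW_ATTR_SNIFFER,
 *		.size         = sizeof(attr),
 *		.priority     = 0,
 *		.num_of_specs = 0,
 *		.port         = 1,
 *		.flags        = 0,
 *	};
 *	struct ibv_flow *flow = ibv_create_flow(qp, &attr);
 *
 *	// ... receive mirrored traffic on the QP ...
 *
 *	ibv_destroy_flow(flow);
 */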