Lines Matching +full:diag +full:- +full:version
7 * General Public License (GPL) Version 2, available from the file
15 * - Redistributions of source code must retain the above
19 * - Redistributions in binary form must reproduce the above
61 #include <rdma/mlx4-abi.h>
64 #define DRV_VERSION "4.0-0"
95 int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED; in check_flow_steering_support()
104 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) && in check_flow_steering_support()
106 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)); in check_flow_steering_support()
134 if (dev->dev.parent != ibdev->ib_dev.dev.parent || in mlx4_ib_get_netdev()
135 dev->dev_port + 1 != port_num) in mlx4_ib_get_netdev()
138 if (mlx4_is_bonded(ibdev->dev)) { in mlx4_ib_get_netdev()
166 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1()
172 return -ENOMEM; in mlx4_ib_update_gids_v1()
174 gid_tbl = mailbox->buf; in mlx4_ib_update_gids_v1()
179 err = mlx4_cmd(dev, mailbox->dma, in mlx4_ib_update_gids_v1()
184 err += mlx4_cmd(dev, mailbox->dma, in mlx4_ib_update_gids_v1()
199 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1_v2()
206 u8 version; in mlx4_ib_update_gids_v1_v2() member
212 return -ENOMEM; in mlx4_ib_update_gids_v1_v2()
214 gid_tbl = mailbox->buf; in mlx4_ib_update_gids_v1_v2()
218 gid_tbl[i].version = 2; in mlx4_ib_update_gids_v1_v2()
224 err = mlx4_cmd(dev, mailbox->dma, in mlx4_ib_update_gids_v1_v2()
229 err += mlx4_cmd(dev, mailbox->dma, in mlx4_ib_update_gids_v1_v2()
242 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) in mlx4_ib_update_gids()
250 memset(&entry->gid, 0, sizeof(entry->gid)); in free_gid_entry()
251 kfree(entry->ctx); in free_gid_entry()
252 entry->ctx = NULL; in free_gid_entry()
257 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_add_gid()
258 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_add_gid()
260 int free = -1, found = -1; in mlx4_ib_add_gid()
268 if (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) in mlx4_ib_add_gid()
269 return -EINVAL; in mlx4_ib_add_gid()
271 if (attr->port_num > MLX4_MAX_PORTS) in mlx4_ib_add_gid()
272 return -EINVAL; in mlx4_ib_add_gid()
275 return -EINVAL; in mlx4_ib_add_gid()
280 port_gid_table = &iboe->gids[attr->port_num - 1]; in mlx4_ib_add_gid()
281 spin_lock_bh(&iboe->lock); in mlx4_ib_add_gid()
283 if (!memcmp(&port_gid_table->gids[i].gid, in mlx4_ib_add_gid()
284 &attr->gid, sizeof(attr->gid)) && in mlx4_ib_add_gid()
285 port_gid_table->gids[i].gid_type == attr->gid_type && in mlx4_ib_add_gid()
286 port_gid_table->gids[i].vlan_id == vlan_id) { in mlx4_ib_add_gid()
290 if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid)) in mlx4_ib_add_gid()
296 ret = -ENOSPC; in mlx4_ib_add_gid()
298 port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC); in mlx4_ib_add_gid()
299 if (!port_gid_table->gids[free].ctx) { in mlx4_ib_add_gid()
300 ret = -ENOMEM; in mlx4_ib_add_gid()
302 *context = port_gid_table->gids[free].ctx; in mlx4_ib_add_gid()
303 port_gid_table->gids[free].gid = attr->gid; in mlx4_ib_add_gid()
304 port_gid_table->gids[free].gid_type = attr->gid_type; in mlx4_ib_add_gid()
305 port_gid_table->gids[free].vlan_id = vlan_id; in mlx4_ib_add_gid()
306 port_gid_table->gids[free].ctx->real_index = free; in mlx4_ib_add_gid()
307 port_gid_table->gids[free].ctx->refcount = 1; in mlx4_ib_add_gid()
312 struct gid_cache_context *ctx = port_gid_table->gids[found].ctx; in mlx4_ib_add_gid()
314 ctx->refcount++; in mlx4_ib_add_gid()
320 ret = -ENOMEM; in mlx4_ib_add_gid()
322 free_gid_entry(&port_gid_table->gids[free]); in mlx4_ib_add_gid()
325 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); in mlx4_ib_add_gid()
326 gids[i].gid_type = port_gid_table->gids[i].gid_type; in mlx4_ib_add_gid()
330 spin_unlock_bh(&iboe->lock); in mlx4_ib_add_gid()
333 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); in mlx4_ib_add_gid()
335 spin_lock_bh(&iboe->lock); in mlx4_ib_add_gid()
337 free_gid_entry(&port_gid_table->gids[free]); in mlx4_ib_add_gid()
338 spin_unlock_bh(&iboe->lock); in mlx4_ib_add_gid()
349 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_del_gid()
350 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_del_gid()
356 if (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) in mlx4_ib_del_gid()
357 return -EINVAL; in mlx4_ib_del_gid()
359 if (attr->port_num > MLX4_MAX_PORTS) in mlx4_ib_del_gid()
360 return -EINVAL; in mlx4_ib_del_gid()
362 port_gid_table = &iboe->gids[attr->port_num - 1]; in mlx4_ib_del_gid()
363 spin_lock_bh(&iboe->lock); in mlx4_ib_del_gid()
365 ctx->refcount--; in mlx4_ib_del_gid()
366 if (!ctx->refcount) { in mlx4_ib_del_gid()
367 unsigned int real_index = ctx->real_index; in mlx4_ib_del_gid()
369 free_gid_entry(&port_gid_table->gids[real_index]); in mlx4_ib_del_gid()
379 ret = -ENOMEM; in mlx4_ib_del_gid()
383 &port_gid_table->gids[i].gid, in mlx4_ib_del_gid()
386 port_gid_table->gids[i].gid_type; in mlx4_ib_del_gid()
390 spin_unlock_bh(&iboe->lock); in mlx4_ib_del_gid()
393 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); in mlx4_ib_del_gid()
402 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_gid_index_to_real_index()
405 int real_index = -EINVAL; in mlx4_ib_gid_index_to_real_index()
408 u32 port_num = attr->port_num; in mlx4_ib_gid_index_to_real_index()
411 return -EINVAL; in mlx4_ib_gid_index_to_real_index()
413 if (mlx4_is_bonded(ibdev->dev)) in mlx4_ib_gid_index_to_real_index()
416 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num)) in mlx4_ib_gid_index_to_real_index()
417 return attr->index; in mlx4_ib_gid_index_to_real_index()
419 spin_lock_irqsave(&iboe->lock, flags); in mlx4_ib_gid_index_to_real_index()
420 port_gid_table = &iboe->gids[port_num - 1]; in mlx4_ib_gid_index_to_real_index()
423 if (!memcmp(&port_gid_table->gids[i].gid, in mlx4_ib_gid_index_to_real_index()
424 &attr->gid, sizeof(attr->gid)) && in mlx4_ib_gid_index_to_real_index()
425 attr->gid_type == port_gid_table->gids[i].gid_type) { in mlx4_ib_gid_index_to_real_index()
426 ctx = port_gid_table->gids[i].ctx; in mlx4_ib_gid_index_to_real_index()
430 real_index = ctx->real_index; in mlx4_ib_gid_index_to_real_index()
431 spin_unlock_irqrestore(&iboe->lock, flags); in mlx4_ib_gid_index_to_real_index()
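The add/del/real-index fragments above share one structure: a fixed-size per-port table whose slots each hold a GID plus a refcounted context, so adding an already-cached GID just takes another reference and the hardware table is rewritten only when a slot actually changes. A minimal userspace sketch of that slot-reuse idea (table size, types and names are illustrative, not the driver's):

#include <string.h>

#define TBL_SIZE 128

struct gid_entry {
	unsigned char raw[16];   /* the cached GID */
	int refcount;            /* 0 means the slot is free */
};

/* Return the slot index used for `gid`, taking a new reference when the
 * same GID is already cached and claiming the first free slot otherwise.
 * Returns -1 when the table is full (the driver returns -ENOSPC). */
static int cache_add(struct gid_entry tbl[TBL_SIZE], const unsigned char gid[16])
{
	int free_idx = -1;

	for (int i = 0; i < TBL_SIZE; i++) {
		if (tbl[i].refcount && !memcmp(tbl[i].raw, gid, 16)) {
			tbl[i].refcount++;              /* reuse the existing entry */
			return i;
		}
		if (free_idx < 0 && !tbl[i].refcount)
			free_idx = i;
	}
	if (free_idx < 0)
		return -1;
	memcpy(tbl[free_idx].raw, gid, 16);
	tbl[free_idx].refcount = 1;
	return free_idx;
}

/* Drop one reference; the slot is cleared only when the last user is gone. */
static void cache_del(struct gid_entry tbl[TBL_SIZE], int idx)
{
	if (idx < 0 || idx >= TBL_SIZE || !tbl[idx].refcount)
		return;
	if (--tbl[idx].refcount == 0)
		memset(&tbl[idx], 0, sizeof(tbl[idx]));
}

Adding the same GID twice returns the same index with refcount 2; only the second cache_del() actually frees the slot.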
448 if (uhw->inlen) { in mlx4_ib_query_device()
449 if (uhw->inlen < sizeof(cmd)) in mlx4_ib_query_device()
450 return -EINVAL; in mlx4_ib_query_device()
457 return -EINVAL; in mlx4_ib_query_device()
460 return -EINVAL; in mlx4_ib_query_device()
467 err = -ENOMEM; in mlx4_ib_query_device()
472 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; in mlx4_ib_query_device()
481 have_ib_ports = num_ib_ports(dev->dev); in mlx4_ib_query_device()
483 props->fw_ver = dev->dev->caps.fw_ver; in mlx4_ib_query_device()
484 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | in mlx4_ib_query_device()
488 props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; in mlx4_ib_query_device()
489 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR) in mlx4_ib_query_device()
490 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; in mlx4_ib_query_device()
491 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) in mlx4_ib_query_device()
492 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; in mlx4_ib_query_device()
493 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports) in mlx4_ib_query_device()
494 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; in mlx4_ib_query_device()
495 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) in mlx4_ib_query_device()
496 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; in mlx4_ib_query_device()
497 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) in mlx4_ib_query_device()
498 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; in mlx4_ib_query_device()
499 if (dev->dev->caps.max_gso_sz && in mlx4_ib_query_device()
500 (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && in mlx4_ib_query_device()
501 (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) in mlx4_ib_query_device()
502 props->kernel_cap_flags |= IBK_UD_TSO; in mlx4_ib_query_device()
503 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) in mlx4_ib_query_device()
504 props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY; in mlx4_ib_query_device()
505 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) && in mlx4_ib_query_device()
506 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && in mlx4_ib_query_device()
507 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) in mlx4_ib_query_device()
508 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; in mlx4_ib_query_device()
509 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) in mlx4_ib_query_device()
510 props->device_cap_flags |= IB_DEVICE_XRC; in mlx4_ib_query_device()
511 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW) in mlx4_ib_query_device()
512 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW; in mlx4_ib_query_device()
513 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { in mlx4_ib_query_device()
514 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B) in mlx4_ib_query_device()
515 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; in mlx4_ib_query_device()
517 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; in mlx4_ib_query_device()
519 if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) in mlx4_ib_query_device()
520 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; in mlx4_ib_query_device()
522 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; in mlx4_ib_query_device()
524 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & in mlx4_ib_query_device()
526 props->vendor_part_id = dev->dev->persist->pdev->device; in mlx4_ib_query_device()
527 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); in mlx4_ib_query_device()
528 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); in mlx4_ib_query_device()
530 props->max_mr_size = ~0ull; in mlx4_ib_query_device()
531 props->page_size_cap = dev->dev->caps.page_size_cap; in mlx4_ib_query_device()
532 props->max_qp = dev->dev->quotas.qp; in mlx4_ib_query_device()
533 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; in mlx4_ib_query_device()
534 props->max_send_sge = in mlx4_ib_query_device()
535 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); in mlx4_ib_query_device()
536 props->max_recv_sge = in mlx4_ib_query_device()
537 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); in mlx4_ib_query_device()
538 props->max_sge_rd = MLX4_MAX_SGE_RD; in mlx4_ib_query_device()
539 props->max_cq = dev->dev->quotas.cq; in mlx4_ib_query_device()
540 props->max_cqe = dev->dev->caps.max_cqes; in mlx4_ib_query_device()
541 props->max_mr = dev->dev->quotas.mpt; in mlx4_ib_query_device()
542 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds; in mlx4_ib_query_device()
543 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma; in mlx4_ib_query_device()
544 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma; in mlx4_ib_query_device()
545 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; in mlx4_ib_query_device()
546 props->max_srq = dev->dev->quotas.srq; in mlx4_ib_query_device()
547 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1; in mlx4_ib_query_device()
548 props->max_srq_sge = dev->dev->caps.max_srq_sge; in mlx4_ib_query_device()
549 props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES; in mlx4_ib_query_device()
550 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; in mlx4_ib_query_device()
551 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? in mlx4_ib_query_device()
553 props->masked_atomic_cap = props->atomic_cap; in mlx4_ib_query_device()
554 props->max_pkeys = dev->dev->caps.pkey_table_len[1]; in mlx4_ib_query_device()
555 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; in mlx4_ib_query_device()
556 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; in mlx4_ib_query_device()
557 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * in mlx4_ib_query_device()
558 props->max_mcast_grp; in mlx4_ib_query_device()
559 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; in mlx4_ib_query_device()
560 props->timestamp_mask = 0xFFFFFFFFFFFFULL; in mlx4_ib_query_device()
561 props->max_ah = INT_MAX; in mlx4_ib_query_device()
565 if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) { in mlx4_ib_query_device()
566 props->rss_caps.max_rwq_indirection_tables = in mlx4_ib_query_device()
567 props->max_qp; in mlx4_ib_query_device()
568 props->rss_caps.max_rwq_indirection_table_size = in mlx4_ib_query_device()
569 dev->dev->caps.max_rss_tbl_sz; in mlx4_ib_query_device()
570 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; in mlx4_ib_query_device()
571 props->max_wq_type_rq = props->max_qp; in mlx4_ib_query_device()
574 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) in mlx4_ib_query_device()
575 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS; in mlx4_ib_query_device()
578 props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT; in mlx4_ib_query_device()
579 props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD; in mlx4_ib_query_device()
581 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { in mlx4_ib_query_device()
583 if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) { in mlx4_ib_query_device()
589 if (uhw->outlen >= resp.response_length + in mlx4_ib_query_device()
592 resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg * in mlx4_ib_query_device()
596 if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) { in mlx4_ib_query_device()
597 if (props->rss_caps.supported_qpts) { in mlx4_ib_query_device()
611 if (dev->dev->caps.tunnel_offload_mode == in mlx4_ib_query_device()
620 if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) { in mlx4_ib_query_device()
621 if (dev->dev->caps.max_gso_sz && in mlx4_ib_query_device()
626 resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz; in mlx4_ib_query_device()
634 if (uhw->outlen) { in mlx4_ib_query_device()
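mlx4_ib_query_device extends its uverbs response one optional field at a time, guarded by uhw->outlen and offsetofend() checks, so older userspace that passes a shorter buffer still works. A stripped-down sketch of that extensible-response rule, with a made-up response layout:

#include <stddef.h>
#include <string.h>

/* Made-up response layout; newer fields are appended over time, so they
 * must stay optional for old callers that pass a shorter buffer. */
struct query_resp {
	unsigned int response_length;        /* bytes actually filled in */
	unsigned long long clock_offset;     /* optional field #1 */
	unsigned int max_inl_recv_sz;        /* optional field #2 */
};

static void fill_resp(struct query_resp *resp, size_t user_outlen)
{
	memset(resp, 0, sizeof(*resp));
	resp->response_length = offsetof(struct query_resp, clock_offset);

	/* Copy an optional field only if it fits entirely inside the
	 * caller's buffer, and grow response_length to cover it. */
	if (user_outlen >= resp->response_length + sizeof(resp->clock_offset)) {
		resp->clock_offset = 0x1000;                 /* illustrative value */
		resp->response_length += sizeof(resp->clock_offset);
	}
	if (user_outlen >= resp->response_length + sizeof(resp->max_inl_recv_sz)) {
		resp->max_inl_recv_sz = 64;                  /* illustrative value */
		resp->response_length += sizeof(resp->max_inl_recv_sz);
	}
}

response_length then tells userspace how much of the struct was actually populated.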
649 struct mlx4_dev *dev = to_mdev(device)->dev; in mlx4_ib_port_link_layer()
651 return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? in mlx4_ib_port_link_layer()
662 int err = -ENOMEM; in ib_link_query_port()
670 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in ib_link_query_port()
671 in_mad->attr_mod = cpu_to_be32(port); in ib_link_query_port()
673 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) in ib_link_query_port()
682 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); in ib_link_query_port()
683 props->lmc = out_mad->data[34] & 0x7; in ib_link_query_port()
684 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); in ib_link_query_port()
685 props->sm_sl = out_mad->data[36] & 0xf; in ib_link_query_port()
686 props->state = out_mad->data[32] & 0xf; in ib_link_query_port()
687 props->phys_state = out_mad->data[33] >> 4; in ib_link_query_port()
688 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); in ib_link_query_port()
690 props->gid_tbl_len = out_mad->data[50]; in ib_link_query_port()
692 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; in ib_link_query_port()
693 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; in ib_link_query_port()
694 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; in ib_link_query_port()
695 props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); in ib_link_query_port()
696 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); in ib_link_query_port()
697 props->active_width = out_mad->data[31] & 0xf; in ib_link_query_port()
698 props->active_speed = out_mad->data[35] >> 4; in ib_link_query_port()
699 props->max_mtu = out_mad->data[41] & 0xf; in ib_link_query_port()
700 props->active_mtu = out_mad->data[36] >> 4; in ib_link_query_port()
701 props->subnet_timeout = out_mad->data[51] & 0x1f; in ib_link_query_port()
702 props->max_vl_num = out_mad->data[37] >> 4; in ib_link_query_port()
703 props->init_type_reply = out_mad->data[41] >> 4; in ib_link_query_port()
706 if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { in ib_link_query_port()
707 ext_active_speed = out_mad->data[62] >> 4; in ib_link_query_port()
711 props->active_speed = IB_SPEED_FDR; in ib_link_query_port()
714 props->active_speed = IB_SPEED_EDR; in ib_link_query_port()

719 /* If reported active speed is QDR, check if it is FDR-10 */ in ib_link_query_port()
720 if (props->active_speed == IB_SPEED_QDR) { in ib_link_query_port()
722 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; in ib_link_query_port()
723 in_mad->attr_mod = cpu_to_be32(port); in ib_link_query_port()
730 /* Checking LinkSpeedActive for FDR-10 */ in ib_link_query_port()
731 if (out_mad->data[15] & 0x1) in ib_link_query_port()
732 props->active_speed = IB_SPEED_FDR10; in ib_link_query_port()
736 if (props->state == IB_PORT_DOWN) in ib_link_query_port()
737 props->active_speed = IB_SPEED_SDR; in ib_link_query_port()
756 struct mlx4_ib_iboe *iboe = &mdev->iboe; in eth_link_query_port()
761 int is_bonded = mlx4_is_bonded(mdev->dev); in eth_link_query_port()
763 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); in eth_link_query_port()
767 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, in eth_link_query_port()
773 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) || in eth_link_query_port()
774 (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? in eth_link_query_port()
776 props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? in eth_link_query_port()
778 props->port_cap_flags = IB_PORT_CM_SUP; in eth_link_query_port()
779 props->ip_gids = true; in eth_link_query_port()
780 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; in eth_link_query_port()
781 props->max_msg_sz = mdev->dev->caps.max_msg_sz; in eth_link_query_port()
782 if (mdev->dev->caps.pkey_table_len[port]) in eth_link_query_port()
783 props->pkey_tbl_len = 1; in eth_link_query_port()
784 props->max_mtu = IB_MTU_4096; in eth_link_query_port()
785 props->max_vl_num = 2; in eth_link_query_port()
786 props->state = IB_PORT_DOWN; in eth_link_query_port()
787 props->phys_state = state_to_phys_state(props->state); in eth_link_query_port()
788 props->active_mtu = IB_MTU_256; in eth_link_query_port()
789 spin_lock_bh(&iboe->lock); in eth_link_query_port()
790 ndev = iboe->netdevs[port - 1]; in eth_link_query_port()
799 tmp = iboe_get_mtu(ndev->mtu); in eth_link_query_port()
800 props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; in eth_link_query_port()
802 props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ? in eth_link_query_port()
804 props->phys_state = state_to_phys_state(props->state); in eth_link_query_port()
806 spin_unlock_bh(&iboe->lock); in eth_link_query_port()
808 mlx4_free_cmd_mailbox(mdev->dev, mailbox); in eth_link_query_port()
838 int err = -ENOMEM; in __mlx4_ib_query_gid()
849 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in __mlx4_ib_query_gid()
850 in_mad->attr_mod = cpu_to_be32(port); in __mlx4_ib_query_gid()
852 if (mlx4_is_mfunc(dev->dev) && netw_view) in __mlx4_ib_query_gid()
859 memcpy(gid->raw, out_mad->data + 8, 8); in __mlx4_ib_query_gid()
861 if (mlx4_is_mfunc(dev->dev) && !netw_view) { in __mlx4_ib_query_gid()
871 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in __mlx4_ib_query_gid()
872 in_mad->attr_mod = cpu_to_be32(index / 8); in __mlx4_ib_query_gid()
879 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); in __mlx4_ib_query_gid()
883 memset(gid->raw + 8, 0, 8); in __mlx4_ib_query_gid()
904 int err = -ENOMEM; in mlx4_ib_query_sl2vl()
907 if (mlx4_is_slave(to_mdev(ibdev)->dev)) { in mlx4_ib_query_sl2vl()
918 in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE; in mlx4_ib_query_sl2vl()
919 in_mad->attr_mod = 0; in mlx4_ib_query_sl2vl()
921 if (mlx4_is_mfunc(to_mdev(ibdev)->dev)) in mlx4_ib_query_sl2vl()
930 sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj]; in mlx4_ib_query_sl2vl()
945 for (i = 1; i <= mdev->dev->caps.num_ports; i++) { in mlx4_init_sl2vl_tbl()
946 if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) in mlx4_init_sl2vl_tbl()
948 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl); in mlx4_init_sl2vl_tbl()
954 atomic64_set(&mdev->sl2vl[i - 1], sl2vl); in mlx4_init_sl2vl_tbl()
964 int err = -ENOMEM; in __mlx4_ib_query_pkey()
972 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in __mlx4_ib_query_pkey()
973 in_mad->attr_mod = cpu_to_be32(index / 32); in __mlx4_ib_query_pkey()
975 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) in __mlx4_ib_query_pkey()
983 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); in __mlx4_ib_query_pkey()
1004 return -EOPNOTSUPP; in mlx4_ib_modify_device()
1009 if (mlx4_is_slave(to_mdev(ibdev)->dev)) in mlx4_ib_modify_device()
1010 return -EOPNOTSUPP; in mlx4_ib_modify_device()
1012 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); in mlx4_ib_modify_device()
1013 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); in mlx4_ib_modify_device()
1014 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); in mlx4_ib_modify_device()
1020 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev); in mlx4_ib_modify_device()
1024 memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX); in mlx4_ib_modify_device()
1025 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0, in mlx4_ib_modify_device()
1028 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox); in mlx4_ib_modify_device()
1039 mailbox = mlx4_alloc_cmd_mailbox(dev->dev); in mlx4_ib_SET_PORT()
1043 if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { in mlx4_ib_SET_PORT()
1044 *(u8 *) mailbox->buf = !!reset_qkey_viols << 6; in mlx4_ib_SET_PORT()
1045 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask); in mlx4_ib_SET_PORT()
1047 ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols; in mlx4_ib_SET_PORT()
1048 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); in mlx4_ib_SET_PORT()
1051 err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE, in mlx4_ib_SET_PORT()
1055 mlx4_free_cmd_mailbox(dev->dev, mailbox); in mlx4_ib_SET_PORT()
1063 u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; in mlx4_ib_modify_port()
1075 mutex_lock(&mdev->cap_mask_mutex); in mlx4_ib_modify_port()
1081 cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & in mlx4_ib_modify_port()
1082 ~props->clr_port_cap_mask; in mlx4_ib_modify_port()
1089 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); in mlx4_ib_modify_port()
1096 struct ib_device *ibdev = uctx->device; in mlx4_ib_alloc_ucontext()
1103 if (!dev->ib_active) in mlx4_ib_alloc_ucontext()
1104 return -EAGAIN; in mlx4_ib_alloc_ucontext()
1106 if (ibdev->ops.uverbs_abi_ver == in mlx4_ib_alloc_ucontext()
1108 resp_v3.qp_tab_size = dev->dev->caps.num_qps; in mlx4_ib_alloc_ucontext()
1109 resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size; in mlx4_ib_alloc_ucontext()
1110 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; in mlx4_ib_alloc_ucontext()
1112 resp.dev_caps = dev->dev->caps.userspace_caps; in mlx4_ib_alloc_ucontext()
1113 resp.qp_tab_size = dev->dev->caps.num_qps; in mlx4_ib_alloc_ucontext()
1114 resp.bf_reg_size = dev->dev->caps.bf_reg_size; in mlx4_ib_alloc_ucontext()
1115 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; in mlx4_ib_alloc_ucontext()
1116 resp.cqe_size = dev->dev->caps.cqe_size; in mlx4_ib_alloc_ucontext()
1119 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar); in mlx4_ib_alloc_ucontext()
1123 INIT_LIST_HEAD(&context->db_page_list); in mlx4_ib_alloc_ucontext()
1124 mutex_init(&context->db_page_mutex); in mlx4_ib_alloc_ucontext()
1126 INIT_LIST_HEAD(&context->wqn_ranges_list); in mlx4_ib_alloc_ucontext()
1127 mutex_init(&context->wqn_ranges_mutex); in mlx4_ib_alloc_ucontext()
1129 if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) in mlx4_ib_alloc_ucontext()
1135 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar); in mlx4_ib_alloc_ucontext()
1136 return -EFAULT; in mlx4_ib_alloc_ucontext()
1146 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar); in mlx4_ib_dealloc_ucontext()
1155 struct mlx4_ib_dev *dev = to_mdev(context->device); in mlx4_ib_mmap()
1157 switch (vma->vm_pgoff) { in mlx4_ib_mmap()
1160 to_mucontext(context)->uar.pfn, in mlx4_ib_mmap()
1162 pgprot_noncached(vma->vm_page_prot), in mlx4_ib_mmap()
1166 if (dev->dev->caps.bf_reg_size == 0) in mlx4_ib_mmap()
1167 return -EINVAL; in mlx4_ib_mmap()
1170 to_mucontext(context)->uar.pfn + in mlx4_ib_mmap()
1171 dev->dev->caps.num_uars, in mlx4_ib_mmap()
1172 PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot), in mlx4_ib_mmap()
1179 ret = mlx4_get_internal_clock_params(dev->dev, &params); in mlx4_ib_mmap()
1185 (pci_resource_start(dev->dev->persist->pdev, in mlx4_ib_mmap()
1189 PAGE_SIZE, pgprot_noncached(vma->vm_page_prot), in mlx4_ib_mmap()
1194 return -EINVAL; in mlx4_ib_mmap()
1201 struct ib_device *ibdev = ibpd->device; in mlx4_ib_alloc_pd()
1204 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); in mlx4_ib_alloc_pd()
1208 if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { in mlx4_ib_alloc_pd()
1209 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); in mlx4_ib_alloc_pd()
1210 return -EFAULT; in mlx4_ib_alloc_pd()
1217 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); in mlx4_ib_dealloc_pd()
1223 struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device); in mlx4_ib_alloc_xrcd()
1228 if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) in mlx4_ib_alloc_xrcd()
1229 return -EOPNOTSUPP; in mlx4_ib_alloc_xrcd()
1231 err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn); in mlx4_ib_alloc_xrcd()
1235 xrcd->pd = ib_alloc_pd(ibxrcd->device, 0); in mlx4_ib_alloc_xrcd()
1236 if (IS_ERR(xrcd->pd)) { in mlx4_ib_alloc_xrcd()
1237 err = PTR_ERR(xrcd->pd); in mlx4_ib_alloc_xrcd()
1242 xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr); in mlx4_ib_alloc_xrcd()
1243 if (IS_ERR(xrcd->cq)) { in mlx4_ib_alloc_xrcd()
1244 err = PTR_ERR(xrcd->cq); in mlx4_ib_alloc_xrcd()
1251 ib_dealloc_pd(xrcd->pd); in mlx4_ib_alloc_xrcd()
1253 mlx4_xrcd_free(dev->dev, xrcd->xrcdn); in mlx4_ib_alloc_xrcd()
1259 ib_destroy_cq(to_mxrcd(xrcd)->cq); in mlx4_ib_dealloc_xrcd()
1260 ib_dealloc_pd(to_mxrcd(xrcd)->pd); in mlx4_ib_dealloc_xrcd()
1261 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn); in mlx4_ib_dealloc_xrcd()
1268 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); in add_gid_entry()
1273 return -ENOMEM; in add_gid_entry()
1275 ge->gid = *gid; in add_gid_entry()
1277 ge->port = mqp->port; in add_gid_entry()
1278 ge->added = 1; in add_gid_entry()
1281 mutex_lock(&mqp->mutex); in add_gid_entry()
1282 list_add_tail(&ge->list, &mqp->gid_list); in add_gid_entry()
1283 mutex_unlock(&mqp->mutex); in add_gid_entry()
1293 mutex_lock(&ctr_table->mutex); in mlx4_ib_delete_counters_table()
1294 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list, in mlx4_ib_delete_counters_table()
1296 if (counter->allocated) in mlx4_ib_delete_counters_table()
1297 mlx4_counter_free(ibdev->dev, counter->index); in mlx4_ib_delete_counters_table()
1298 list_del(&counter->list); in mlx4_ib_delete_counters_table()
1301 mutex_unlock(&ctr_table->mutex); in mlx4_ib_delete_counters_table()
1310 if (!mqp->port) in mlx4_ib_add_mc()
1313 spin_lock_bh(&mdev->iboe.lock); in mlx4_ib_add_mc()
1314 ndev = mdev->iboe.netdevs[mqp->port - 1]; in mlx4_ib_add_mc()
1316 spin_unlock_bh(&mdev->iboe.lock); in mlx4_ib_add_mc()
1341 sizeof(filter) -\
1342 offsetof(typeof(filter), field) -\
1352 switch (ib_spec->type) { in parse_flow_attr()
1354 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) in parse_flow_attr()
1355 return -ENOTSUPP; in parse_flow_attr()
1358 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac, in parse_flow_attr()
1360 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac, in parse_flow_attr()
1362 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag; in parse_flow_attr()
1363 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag; in parse_flow_attr()
1366 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD)) in parse_flow_attr()
1367 return -ENOTSUPP; in parse_flow_attr()
1370 mlx4_spec->ib.l3_qpn = in parse_flow_attr()
1372 mlx4_spec->ib.qpn_mask = in parse_flow_attr()
1378 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) in parse_flow_attr()
1379 return -ENOTSUPP; in parse_flow_attr()
1382 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip; in parse_flow_attr()
1383 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip; in parse_flow_attr()
1384 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip; in parse_flow_attr()
1385 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip; in parse_flow_attr()
1390 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD)) in parse_flow_attr()
1391 return -ENOTSUPP; in parse_flow_attr()
1393 type = ib_spec->type == IB_FLOW_SPEC_TCP ? in parse_flow_attr()
1396 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port; in parse_flow_attr()
1397 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port; in parse_flow_attr()
1398 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port; in parse_flow_attr()
1399 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port; in parse_flow_attr()
1403 return -EINVAL; in parse_flow_attr()
1407 return -EINVAL; in parse_flow_attr()
1408 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type)); in parse_flow_attr()
1409 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2; in parse_flow_attr()
1434 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); in __mlx4_ib_default_rules_match()
1440 if (link_layer != pdefault_rules->link_layer) in __mlx4_ib_default_rules_match()
1446 j < flow_attr->num_of_specs; k++) { in __mlx4_ib_default_rules_match()
1451 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) == in __mlx4_ib_default_rules_match()
1452 (pdefault_rules->mandatory_fields[k] & in __mlx4_ib_default_rules_match()
1454 (current_flow->type != in __mlx4_ib_default_rules_match()
1455 pdefault_rules->mandatory_fields[k])) in __mlx4_ib_default_rules_match()
1459 if (current_flow->type == in __mlx4_ib_default_rules_match()
1460 pdefault_rules->mandatory_fields[k]) { in __mlx4_ib_default_rules_match()
1463 ((union ib_flow_spec *)ib_flow)->size; in __mlx4_ib_default_rules_match()
1468 for (j = 0; j < flow_attr->num_of_specs; in __mlx4_ib_default_rules_match()
1469 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size) in __mlx4_ib_default_rules_match()
1472 if (((union ib_flow_spec *)ib_flow)->type == in __mlx4_ib_default_rules_match()
1473 pdefault_rules->mandatory_not_fields[k]) in __mlx4_ib_default_rules_match()
1479 return -1; in __mlx4_ib_default_rules_match()
1490 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { in __mlx4_ib_create_default_rules()
1494 switch (pdefault_rules->rules_create_list[i]) { in __mlx4_ib_create_default_rules()
1505 return -EINVAL; in __mlx4_ib_create_default_rules()
1508 ret = parse_flow_attr(mdev->dev, 0, &ib_spec, in __mlx4_ib_create_default_rules()
1512 return -EINVAL; in __mlx4_ib_create_default_rules()
1529 struct mlx4_ib_dev *mdev = to_mdev(qp->device); in __mlx4_ib_create_flow()
1534 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) { in __mlx4_ib_create_flow()
1535 pr_err("Invalid priority value %d\n", flow_attr->priority); in __mlx4_ib_create_flow()
1536 return -EINVAL; in __mlx4_ib_create_flow()
1539 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0) in __mlx4_ib_create_flow()
1540 return -EINVAL; in __mlx4_ib_create_flow()
1542 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); in __mlx4_ib_create_flow()
1545 ctrl = mailbox->buf; in __mlx4_ib_create_flow()
1547 ctrl->prio = cpu_to_be16(domain | flow_attr->priority); in __mlx4_ib_create_flow()
1548 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type); in __mlx4_ib_create_flow()
1549 ctrl->port = flow_attr->port; in __mlx4_ib_create_flow()
1550 ctrl->qpn = cpu_to_be32(qp->qp_num); in __mlx4_ib_create_flow()
1559 mailbox->buf + size); in __mlx4_ib_create_flow()
1561 mlx4_free_cmd_mailbox(mdev->dev, mailbox); in __mlx4_ib_create_flow()
1562 return -EINVAL; in __mlx4_ib_create_flow()
1566 for (i = 0; i < flow_attr->num_of_specs; i++) { in __mlx4_ib_create_flow()
1567 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow, in __mlx4_ib_create_flow()
1568 mailbox->buf + size); in __mlx4_ib_create_flow()
1570 mlx4_free_cmd_mailbox(mdev->dev, mailbox); in __mlx4_ib_create_flow()
1571 return -EINVAL; in __mlx4_ib_create_flow()
1573 ib_flow += ((union ib_flow_spec *) ib_flow)->size; in __mlx4_ib_create_flow()
1577 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR && in __mlx4_ib_create_flow()
1578 flow_attr->num_of_specs == 1) { in __mlx4_ib_create_flow()
1581 ((union ib_flow_spec *)(flow_attr + 1))->type; in __mlx4_ib_create_flow()
1587 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0, in __mlx4_ib_create_flow()
1590 if (ret == -ENOMEM) in __mlx4_ib_create_flow()
1592 else if (ret == -ENXIO) in __mlx4_ib_create_flow()
1597 mlx4_free_cmd_mailbox(mdev->dev, mailbox); in __mlx4_ib_create_flow()
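Both the default-rules matcher and __mlx4_ib_create_flow walk the specs that follow struct ib_flow_attr as a packed, self-sized list: each entry begins with a type and a size, and the cursor advances by that size (the `ib_flow += ((union ib_flow_spec *)ib_flow)->size` lines above). A compact sketch of that traversal using a simplified header, not the real uapi layout:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the common prefix of union ib_flow_spec:
 * every spec starts with a type and its own total size in bytes. */
struct spec_hdr {
	uint32_t type;
	uint16_t size;
};

/* Visit each spec in a packed buffer of num_specs entries. */
static void walk_specs(const void *specs, int num_specs)
{
	const unsigned char *cur = specs;

	for (int i = 0; i < num_specs; i++) {
		const struct spec_hdr *hdr = (const struct spec_hdr *)cur;

		printf("spec %d: type=%u size=%u\n",
		       i, (unsigned)hdr->type, (unsigned)hdr->size);
		cur += hdr->size;        /* each spec says where the next one starts */
	}
}

int main(void)
{
	/* Two back-to-back specs; the size field covers header plus payload. */
	struct { struct spec_hdr h; uint32_t payload; } buf[2] = {
		{ { 0x20, sizeof(buf[0]) }, 0 },
		{ { 0x30, sizeof(buf[1]) }, 0 },
	};

	walk_specs(buf, 2);
	return 0;
}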
1618 struct mlx4_dev *dev = to_mdev(qp->device)->dev; in mlx4_ib_tunnel_steer_add()
1621 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || in mlx4_ib_tunnel_steer_add()
1622 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) in mlx4_ib_tunnel_steer_add()
1628 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1) in mlx4_ib_tunnel_steer_add()
1631 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac, in mlx4_ib_tunnel_steer_add()
1632 flow_attr->port, qp->qp_num, in mlx4_ib_tunnel_steer_add()
1633 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff), in mlx4_ib_tunnel_steer_add()
1644 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) || in mlx4_ib_add_dont_trap_rule()
1645 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) || in mlx4_ib_add_dont_trap_rule()
1646 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) { in mlx4_ib_add_dont_trap_rule()
1647 return -EOPNOTSUPP; in mlx4_ib_add_dont_trap_rule()
1650 if (flow_attr->num_of_specs == 0) { in mlx4_ib_add_dont_trap_rule()
1657 if (ib_spec->type != IB_FLOW_SPEC_ETH) in mlx4_ib_add_dont_trap_rule()
1658 return -EINVAL; in mlx4_ib_add_dont_trap_rule()
1661 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) { in mlx4_ib_add_dont_trap_rule()
1665 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01, in mlx4_ib_add_dont_trap_rule()
1666 ib_spec->eth.mask.dst_mac[1], in mlx4_ib_add_dont_trap_rule()
1667 ib_spec->eth.mask.dst_mac[2], in mlx4_ib_add_dont_trap_rule()
1668 ib_spec->eth.mask.dst_mac[3], in mlx4_ib_add_dont_trap_rule()
1669 ib_spec->eth.mask.dst_mac[4], in mlx4_ib_add_dont_trap_rule()
1670 ib_spec->eth.mask.dst_mac[5]}; in mlx4_ib_add_dont_trap_rule()
1676 return -EINVAL; in mlx4_ib_add_dont_trap_rule()
1678 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac)) in mlx4_ib_add_dont_trap_rule()
1695 struct mlx4_dev *dev = (to_mdev(qp->device))->dev; in mlx4_ib_create_flow()
1698 if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP) in mlx4_ib_create_flow()
1699 return ERR_PTR(-EOPNOTSUPP); in mlx4_ib_create_flow()
1701 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && in mlx4_ib_create_flow()
1702 (flow_attr->type != IB_FLOW_ATTR_NORMAL)) in mlx4_ib_create_flow()
1703 return ERR_PTR(-EOPNOTSUPP); in mlx4_ib_create_flow()
1706 udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) in mlx4_ib_create_flow()
1707 return ERR_PTR(-EOPNOTSUPP); in mlx4_ib_create_flow()
1713 err = -ENOMEM; in mlx4_ib_create_flow()
1717 switch (flow_attr->type) { in mlx4_ib_create_flow()
1723 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) { in mlx4_ib_create_flow()
1748 err = -EINVAL; in mlx4_ib_create_flow()
1754 type[i], &mflow->reg_id[i].id); in mlx4_ib_create_flow()
1761 flow_attr->port = 2; in mlx4_ib_create_flow()
1764 &mflow->reg_id[j].mirror); in mlx4_ib_create_flow()
1765 flow_attr->port = 1; in mlx4_ib_create_flow()
1774 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { in mlx4_ib_create_flow()
1776 &mflow->reg_id[i].id); in mlx4_ib_create_flow()
1781 flow_attr->port = 2; in mlx4_ib_create_flow()
1783 &mflow->reg_id[j].mirror); in mlx4_ib_create_flow()
1784 flow_attr->port = 1; in mlx4_ib_create_flow()
1793 return &mflow->ibflow; in mlx4_ib_create_flow()
1797 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, in mlx4_ib_create_flow()
1798 mflow->reg_id[i].id); in mlx4_ib_create_flow()
1799 i--; in mlx4_ib_create_flow()
1803 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, in mlx4_ib_create_flow()
1804 mflow->reg_id[j].mirror); in mlx4_ib_create_flow()
1805 j--; in mlx4_ib_create_flow()
1816 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); in mlx4_ib_destroy_flow()
1819 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) { in mlx4_ib_destroy_flow()
1820 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id); in mlx4_ib_destroy_flow()
1823 if (mflow->reg_id[i].mirror) { in mlx4_ib_destroy_flow()
1824 err = __mlx4_ib_destroy_flow(mdev->dev, in mlx4_ib_destroy_flow()
1825 mflow->reg_id[i].mirror); in mlx4_ib_destroy_flow()
1839 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); in mlx4_ib_mcg_attach()
1840 struct mlx4_dev *dev = mdev->dev; in mlx4_ib_mcg_attach()
1846 if (mdev->dev->caps.steering_mode == in mlx4_ib_mcg_attach()
1850 return -ENOMEM; in mlx4_ib_mcg_attach()
1853 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, in mlx4_ib_mcg_attach()
1854 !!(mqp->flags & in mlx4_ib_mcg_attach()
1864 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, in mlx4_ib_mcg_attach()
1865 (mqp->port == 1) ? 2 : 1, in mlx4_ib_mcg_attach()
1866 !!(mqp->flags & in mlx4_ib_mcg_attach()
1878 memcpy(ib_steering->gid.raw, gid->raw, 16); in mlx4_ib_mcg_attach()
1879 ib_steering->reg_id = reg_id; in mlx4_ib_mcg_attach()
1880 mutex_lock(&mqp->mutex); in mlx4_ib_mcg_attach()
1881 list_add(&ib_steering->list, &mqp->steering_rules); in mlx4_ib_mcg_attach()
1882 mutex_unlock(&mqp->mutex); in mlx4_ib_mcg_attach()
1887 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, in mlx4_ib_mcg_attach()
1890 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, in mlx4_ib_mcg_attach()
1904 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in find_gid_entry()
1905 if (!memcmp(raw, ge->gid.raw, 16)) { in find_gid_entry()
1917 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); in mlx4_ib_mcg_detach()
1918 struct mlx4_dev *dev = mdev->dev; in mlx4_ib_mcg_detach()
1925 if (mdev->dev->caps.steering_mode == in mlx4_ib_mcg_detach()
1929 mutex_lock(&mqp->mutex); in mlx4_ib_mcg_detach()
1930 list_for_each_entry(ib_steering, &mqp->steering_rules, list) { in mlx4_ib_mcg_detach()
1931 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) { in mlx4_ib_mcg_detach()
1932 list_del(&ib_steering->list); in mlx4_ib_mcg_detach()
1936 mutex_unlock(&mqp->mutex); in mlx4_ib_mcg_detach()
1937 if (&ib_steering->list == &mqp->steering_rules) { in mlx4_ib_mcg_detach()
1939 return -EINVAL; in mlx4_ib_mcg_detach()
1941 reg_id = ib_steering->reg_id; in mlx4_ib_mcg_detach()
1945 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, in mlx4_ib_mcg_detach()
1951 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, in mlx4_ib_mcg_detach()
1957 mutex_lock(&mqp->mutex); in mlx4_ib_mcg_detach()
1958 ge = find_gid_entry(mqp, gid->raw); in mlx4_ib_mcg_detach()
1960 spin_lock_bh(&mdev->iboe.lock); in mlx4_ib_mcg_detach()
1961 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; in mlx4_ib_mcg_detach()
1963 spin_unlock_bh(&mdev->iboe.lock); in mlx4_ib_mcg_detach()
1965 list_del(&ge->list); in mlx4_ib_mcg_detach()
1970 mutex_unlock(&mqp->mutex); in mlx4_ib_mcg_detach()
1980 int err = -ENOMEM; in init_node_data()
1988 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; in init_node_data()
1989 if (mlx4_is_master(dev->dev)) in init_node_data()
1996 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX); in init_node_data()
1998 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; in init_node_data()
2004 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); in init_node_data()
2005 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); in init_node_data()
2019 return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device); in hca_type_show()
2029 return sysfs_emit(buf, "%x\n", dev->dev->rev_id); in hw_rev_show()
2039 return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id); in board_id_show()
2100 struct mlx4_ib_diag_counters *diag = dev->diag_counters; in mlx4_ib_alloc_hw_device_stats() local
2102 if (!diag[0].descs) in mlx4_ib_alloc_hw_device_stats()
2105 return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters, in mlx4_ib_alloc_hw_device_stats()
2113 struct mlx4_ib_diag_counters *diag = dev->diag_counters; in mlx4_ib_alloc_hw_port_stats() local
2115 if (!diag[1].descs) in mlx4_ib_alloc_hw_port_stats()
2118 return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters, in mlx4_ib_alloc_hw_port_stats()
2127 struct mlx4_ib_diag_counters *diag = dev->diag_counters; in mlx4_ib_get_hw_stats() local
2133 ret = mlx4_query_diag_counters(dev->dev, in mlx4_ib_get_hw_stats()
2135 diag[!!port].offset, hw_value, in mlx4_ib_get_hw_stats()
2136 diag[!!port].num_counters, port); in mlx4_ib_get_hw_stats()
2141 for (i = 0; i < diag[!!port].num_counters; i++) in mlx4_ib_get_hw_stats()
2142 stats->value[i] = hw_value[i]; in mlx4_ib_get_hw_stats()
2144 return diag[!!port].num_counters; in mlx4_ib_get_hw_stats()
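The hw-stats fragments index diag_counters with !!port: entry 0 describes the device-wide counter set (queried with port 0) and entry 1 the per-port set used for every non-zero port. A small sketch of that selection, with invented counter names:

#include <stdio.h>

struct counter_set {
	const char *const *names;    /* counter descriptions */
	int num;                     /* how many counters in this set */
};

static const char *const dev_names[]  = { "dev_counter_a", "dev_counter_b" };   /* invented */
static const char *const port_names[] = { "port_counter_a", "port_counter_b" }; /* invented */

int main(void)
{
	struct counter_set diag[2] = {
		{ dev_names,  2 },   /* [0]: device-wide set, queried with port 0 */
		{ port_names, 2 },   /* [1]: per-port set, used for any port > 0 */
	};

	for (unsigned int port = 0; port <= 2; port++) {
		const struct counter_set *set = &diag[!!port];   /* 0 -> dev, 1/2 -> port */

		printf("port %u: %d counters, first is %s\n",
		       port, set->num, set->names[0]);
	}
	return 0;
}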
2155 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) in __mlx4_ib_alloc_diag_counters()
2164 return -ENOMEM; in __mlx4_ib_alloc_diag_counters()
2176 return -ENOMEM; in __mlx4_ib_alloc_diag_counters()
2191 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { in mlx4_ib_fill_diag_counters()
2219 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; in mlx4_ib_alloc_diag_counters() local
2222 bool per_port = !!(ibdev->dev->caps.flags2 & in mlx4_ib_alloc_diag_counters()
2225 if (mlx4_is_slave(ibdev->dev)) in mlx4_ib_alloc_diag_counters()
2234 ib_set_device_ops(&ibdev->ib_dev, in mlx4_ib_alloc_diag_counters()
2240 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs, in mlx4_ib_alloc_diag_counters()
2241 &diag[i].offset, in mlx4_ib_alloc_diag_counters()
2242 &diag[i].num_counters, i); in mlx4_ib_alloc_diag_counters()
2246 mlx4_ib_fill_diag_counters(ibdev, diag[i].descs, in mlx4_ib_alloc_diag_counters()
2247 diag[i].offset, i); in mlx4_ib_alloc_diag_counters()
2250 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops); in mlx4_ib_alloc_diag_counters()
2256 kfree(diag[i - 1].descs); in mlx4_ib_alloc_diag_counters()
2257 kfree(diag[i - 1].offset); in mlx4_ib_alloc_diag_counters()
2268 kfree(ibdev->diag_counters[i].offset); in mlx4_ib_diag_cleanup()
2269 kfree(ibdev->diag_counters[i].descs); in mlx4_ib_diag_cleanup()
2273 #define MLX4_IB_INVALID_MAC ((u64)-1)
2282 new_smac = ether_addr_to_u64(dev->dev_addr); in mlx4_ib_update_qps()
2283 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); in mlx4_ib_update_qps()
2285 /* no need to update QP1 or register the mac in non-SRIOV */ in mlx4_ib_update_qps()
2286 if (!mlx4_is_mfunc(ibdev->dev)) in mlx4_ib_update_qps()
2289 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); in mlx4_ib_update_qps()
2290 qp = ibdev->qp1_proxy[port - 1]; in mlx4_ib_update_qps()
2296 mutex_lock(&qp->mutex); in mlx4_ib_update_qps()
2297 old_smac = qp->pri.smac; in mlx4_ib_update_qps()
2301 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); in mlx4_ib_update_qps()
2307 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, in mlx4_ib_update_qps()
2313 if (qp->pri.smac_port) in mlx4_ib_update_qps()
2315 qp->pri.smac = new_smac; in mlx4_ib_update_qps()
2316 qp->pri.smac_port = port; in mlx4_ib_update_qps()
2317 qp->pri.smac_index = new_smac_index; in mlx4_ib_update_qps()
2322 mlx4_unregister_mac(ibdev->dev, port, release_mac); in mlx4_ib_update_qps()
2324 mutex_unlock(&qp->mutex); in mlx4_ib_update_qps()
2325 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); in mlx4_ib_update_qps()
2333 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_scan_netdev()
2337 if (dev->dev.parent != ibdev->ib_dev.dev.parent) in mlx4_ib_scan_netdev()
2340 spin_lock_bh(&iboe->lock); in mlx4_ib_scan_netdev()
2342 iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL; in mlx4_ib_scan_netdev()
2348 if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1, in mlx4_ib_scan_netdev()
2354 iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN)) in mlx4_ib_scan_netdev()
2358 iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE)) in mlx4_ib_scan_netdev()
2360 iboe->last_port_state[dev->dev_port] = port_state; in mlx4_ib_scan_netdev()
2362 ibev.device = &ibdev->ib_dev; in mlx4_ib_scan_netdev()
2363 ibev.element.port_num = dev->dev_port + 1; in mlx4_ib_scan_netdev()
2370 spin_unlock_bh(&iboe->lock); in mlx4_ib_scan_netdev()
2374 mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1); in mlx4_ib_scan_netdev()
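mlx4_ib_scan_netdev dispatches IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR only when the new state differs from the cached last_port_state, so repeated netdev notifications do not produce duplicate IB events. A tiny sketch of that debouncing (enum and function names are made up):

#include <stdio.h>

enum port_state { PORT_DOWN, PORT_ACTIVE };

/* Dispatch an event only when the state really changed since last time. */
static void port_state_changed(enum port_state *last, enum port_state now)
{
	if (now == *last)
		return;                       /* duplicate notification: ignore */
	printf("dispatch %s\n", now == PORT_ACTIVE ? "PORT_ACTIVE" : "PORT_ERR");
	*last = now;
}

int main(void)
{
	enum port_state last = PORT_DOWN;

	port_state_changed(&last, PORT_ACTIVE);   /* reported */
	port_state_changed(&last, PORT_ACTIVE);   /* suppressed */
	port_state_changed(&last, PORT_DOWN);     /* reported */
	return 0;
}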
2398 if (mlx4_is_master(ibdev->dev)) { in init_pkeys()
2399 for (slave = 0; slave <= ibdev->dev->persist->num_vfs; in init_pkeys()
2401 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { in init_pkeys()
2403 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; in init_pkeys()
2405 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] = in init_pkeys()
2407 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i : in init_pkeys()
2408 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1; in init_pkeys()
2409 mlx4_sync_pkey_table(ibdev->dev, slave, port, i, in init_pkeys()
2410 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]); in init_pkeys()
2415 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { in init_pkeys()
2417 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; in init_pkeys()
2419 ibdev->pkeys.phys_pkey_cache[port-1][i] = in init_pkeys()
2429 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, in mlx4_ib_alloc_eqs()
2430 sizeof(ibdev->eq_table[0]), GFP_KERNEL); in mlx4_ib_alloc_eqs()
2431 if (!ibdev->eq_table) in mlx4_ib_alloc_eqs()
2434 for (i = 1; i <= dev->caps.num_ports; i++) { in mlx4_ib_alloc_eqs()
2439 ibdev->eq_table[eq] = total_eqs; in mlx4_ib_alloc_eqs()
2441 &ibdev->eq_table[eq])) in mlx4_ib_alloc_eqs()
2444 ibdev->eq_table[eq] = -1; in mlx4_ib_alloc_eqs()
2448 for (i = eq; i < dev->caps.num_comp_vectors; in mlx4_ib_alloc_eqs()
2449 ibdev->eq_table[i++] = -1) in mlx4_ib_alloc_eqs()
2453 ibdev->ib_dev.num_comp_vectors = eq; in mlx4_ib_alloc_eqs()
2459 int total_eqs = ibdev->ib_dev.num_comp_vectors; in mlx4_ib_free_eqs()
2462 if (!ibdev->eq_table) in mlx4_ib_free_eqs()
2466 ibdev->ib_dev.num_comp_vectors = 0; in mlx4_ib_free_eqs()
2469 mlx4_release_eq(dev, ibdev->eq_table[i]); in mlx4_ib_free_eqs()
2471 kfree(ibdev->eq_table); in mlx4_ib_free_eqs()
2472 ibdev->eq_table = NULL; in mlx4_ib_free_eqs()
2483 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; in mlx4_port_immutable()
2484 immutable->max_mad_size = IB_MGMT_MAD_SIZE; in mlx4_port_immutable()
2486 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) in mlx4_port_immutable()
2487 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; in mlx4_port_immutable()
2488 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) in mlx4_port_immutable()
2489 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | in mlx4_port_immutable()
2491 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET; in mlx4_port_immutable()
2492 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE | in mlx4_port_immutable()
2494 immutable->max_mad_size = IB_MGMT_MAD_SIZE; in mlx4_port_immutable()
2501 immutable->pkey_tbl_len = attr.pkey_tbl_len; in mlx4_port_immutable()
2502 immutable->gid_tbl_len = attr.gid_tbl_len; in mlx4_port_immutable()
2512 (int) (dev->dev->caps.fw_ver >> 32), in get_fw_ver_str()
2513 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff, in get_fw_ver_str()
2514 (int) dev->dev->caps.fw_ver & 0xffff); in get_fw_ver_str()
2615 struct mlx4_dev *dev = madev->mdev; in mlx4_ib_probe()
2635 return -ENODEV; in mlx4_ib_probe()
2639 dev_err(&dev->persist->pdev->dev, in mlx4_ib_probe()
2641 return -ENOMEM; in mlx4_ib_probe()
2644 iboe = &ibdev->iboe; in mlx4_ib_probe()
2646 err = mlx4_pd_alloc(dev, &ibdev->priv_pdn); in mlx4_ib_probe()
2650 err = mlx4_uar_alloc(dev, &ibdev->priv_uar); in mlx4_ib_probe()
2654 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, in mlx4_ib_probe()
2656 if (!ibdev->uar_map) { in mlx4_ib_probe()
2657 err = -ENOMEM; in mlx4_ib_probe()
2660 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); in mlx4_ib_probe()
2662 ibdev->dev = dev; in mlx4_ib_probe()
2663 ibdev->bond_next_port = 0; in mlx4_ib_probe()
2665 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; in mlx4_ib_probe()
2666 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; in mlx4_ib_probe()
2667 ibdev->num_ports = num_ports; in mlx4_ib_probe()
2668 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? in mlx4_ib_probe()
2669 1 : ibdev->num_ports; in mlx4_ib_probe()
2670 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; in mlx4_ib_probe()
2671 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; in mlx4_ib_probe()
2673 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops); in mlx4_ib_probe()
2675 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) && in mlx4_ib_probe()
2676 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) == in mlx4_ib_probe()
2678 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) == in mlx4_ib_probe()
2680 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops); in mlx4_ib_probe()
2682 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || in mlx4_ib_probe()
2683 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) in mlx4_ib_probe()
2684 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops); in mlx4_ib_probe()
2686 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) { in mlx4_ib_probe()
2687 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops); in mlx4_ib_probe()
2691 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED; in mlx4_ib_probe()
2692 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops); in mlx4_ib_probe()
2695 if (!dev->caps.userspace_caps) in mlx4_ib_probe()
2696 ibdev->ib_dev.ops.uverbs_abi_ver = in mlx4_ib_probe()
2701 spin_lock_init(&iboe->lock); in mlx4_ib_probe()
2708 for (i = 0; i < ibdev->num_ports; ++i) { in mlx4_ib_probe()
2709 mutex_init(&ibdev->counters_table[i].mutex); in mlx4_ib_probe()
2710 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list); in mlx4_ib_probe()
2711 iboe->last_port_state[i] = IB_PORT_DOWN; in mlx4_ib_probe()
2714 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; in mlx4_ib_probe()
2716 mutex_init(&ibdev->qp1_proxy_lock[i]); in mlx4_ib_probe()
2718 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == in mlx4_ib_probe()
2720 err = mlx4_counter_alloc(ibdev->dev, &counter_index, in mlx4_ib_probe()
2736 err = -ENOMEM; in mlx4_ib_probe()
2738 mlx4_counter_free(ibdev->dev, counter_index); in mlx4_ib_probe()
2741 new_counter_index->index = counter_index; in mlx4_ib_probe()
2742 new_counter_index->allocated = allocated; in mlx4_ib_probe()
2743 list_add_tail(&new_counter_index->list, in mlx4_ib_probe()
2744 &ibdev->counters_table[i].counters_list); in mlx4_ib_probe()
2745 ibdev->counters_table[i].default_counter = counter_index; in mlx4_ib_probe()
2750 for (i = 1; i < ibdev->num_ports ; ++i) { in mlx4_ib_probe()
2755 err = -ENOMEM; in mlx4_ib_probe()
2758 new_counter_index->index = counter_index; in mlx4_ib_probe()
2759 new_counter_index->allocated = 0; in mlx4_ib_probe()
2760 list_add_tail(&new_counter_index->list, in mlx4_ib_probe()
2761 &ibdev->counters_table[i].counters_list); in mlx4_ib_probe()
2762 ibdev->counters_table[i].default_counter = in mlx4_ib_probe()
2769 spin_lock_init(&ibdev->sm_lock); in mlx4_ib_probe()
2770 mutex_init(&ibdev->cap_mask_mutex); in mlx4_ib_probe()
2771 INIT_LIST_HEAD(&ibdev->qp_list); in mlx4_ib_probe()
2772 spin_lock_init(&ibdev->reset_flow_resource_lock); in mlx4_ib_probe()
2774 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && in mlx4_ib_probe()
2776 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; in mlx4_ib_probe()
2777 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, in mlx4_ib_probe()
2779 &ibdev->steer_qpn_base, 0, in mlx4_ib_probe()
2784 ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, in mlx4_ib_probe()
2786 if (!ibdev->ib_uc_qpns_bitmap) { in mlx4_ib_probe()
2787 err = -ENOMEM; in mlx4_ib_probe()
2791 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) { in mlx4_ib_probe()
2792 bitmap_zero(ibdev->ib_uc_qpns_bitmap, in mlx4_ib_probe()
2793 ibdev->steer_qpn_count); in mlx4_ib_probe()
2795 dev, ibdev->steer_qpn_base, in mlx4_ib_probe()
2796 ibdev->steer_qpn_base + in mlx4_ib_probe()
2797 ibdev->steer_qpn_count - 1); in mlx4_ib_probe()
2801 bitmap_fill(ibdev->ib_uc_qpns_bitmap, in mlx4_ib_probe()
2802 ibdev->steer_qpn_count); in mlx4_ib_probe()
2806 for (j = 1; j <= ibdev->dev->caps.num_ports; j++) in mlx4_ib_probe()
2807 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); in mlx4_ib_probe()
2813 err = ib_register_device(&ibdev->ib_dev, "mlx4_%d", in mlx4_ib_probe()
2814 &dev->persist->pdev->dev); in mlx4_ib_probe()
2826 if (!iboe->nb.notifier_call) { in mlx4_ib_probe()
2827 iboe->nb.notifier_call = mlx4_ib_netdev_event; in mlx4_ib_probe()
2828 err = register_netdevice_notifier(&iboe->nb); in mlx4_ib_probe()
2830 iboe->nb.notifier_call = NULL; in mlx4_ib_probe()
2834 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { in mlx4_ib_probe()
2840 ibdev->ib_active = true; in mlx4_ib_probe()
2843 &ibdev->ib_dev); in mlx4_ib_probe()
2845 if (mlx4_is_mfunc(ibdev->dev)) in mlx4_ib_probe()
2849 if (mlx4_is_master(ibdev->dev)) { in mlx4_ib_probe()
2851 if (j == mlx4_master_func_num(ibdev->dev)) in mlx4_ib_probe()
2853 if (mlx4_is_slave_active(ibdev->dev, j)) in mlx4_ib_probe()
2859 ibdev->mlx_nb.notifier_call = mlx4_ib_event; in mlx4_ib_probe()
2860 err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb); in mlx4_ib_probe()
2867 if (ibdev->iboe.nb.notifier_call) { in mlx4_ib_probe()
2868 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) in mlx4_ib_probe()
2870 ibdev->iboe.nb.notifier_call = NULL; in mlx4_ib_probe()
2880 ib_unregister_device(&ibdev->ib_dev); in mlx4_ib_probe()
2886 bitmap_free(ibdev->ib_uc_qpns_bitmap); in mlx4_ib_probe()
2889 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, in mlx4_ib_probe()
2890 ibdev->steer_qpn_count); in mlx4_ib_probe()
2892 for (i = 0; i < ibdev->num_ports; ++i) in mlx4_ib_probe()
2893 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); in mlx4_ib_probe()
2897 iounmap(ibdev->uar_map); in mlx4_ib_probe()
2900 mlx4_uar_free(dev, &ibdev->priv_uar); in mlx4_ib_probe()
2903 mlx4_pd_free(dev, ibdev->priv_pdn); in mlx4_ib_probe()
2906 ib_dealloc_device(&ibdev->ib_dev); in mlx4_ib_probe()
2915 WARN_ON(!dev->ib_uc_qpns_bitmap); in mlx4_ib_steer_qp_alloc()
2917 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap, in mlx4_ib_steer_qp_alloc()
2918 dev->steer_qpn_count, in mlx4_ib_steer_qp_alloc()
2923 *qpn = dev->steer_qpn_base + offset; in mlx4_ib_steer_qp_alloc()
2930 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED) in mlx4_ib_steer_qp_free()
2933 if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n", in mlx4_ib_steer_qp_free()
2934 qpn, dev->steer_qpn_base)) in mlx4_ib_steer_qp_free()
2938 bitmap_release_region(dev->ib_uc_qpns_bitmap, in mlx4_ib_steer_qp_free()
2939 qpn - dev->steer_qpn_base, in mlx4_ib_steer_qp_free()
2956 return -ENOMEM; in mlx4_ib_steer_qp_reg()
2957 flow->port = mqp->port; in mlx4_ib_steer_qp_reg()
2958 flow->num_of_specs = 1; in mlx4_ib_steer_qp_reg()
2959 flow->size = flow_size; in mlx4_ib_steer_qp_reg()
2961 ib_spec->type = IB_FLOW_SPEC_IB; in mlx4_ib_steer_qp_reg()
2962 ib_spec->size = sizeof(struct ib_flow_spec_ib); in mlx4_ib_steer_qp_reg()
2964 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask)); in mlx4_ib_steer_qp_reg()
2966 err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC, in mlx4_ib_steer_qp_reg()
2967 MLX4_FS_REGULAR, &mqp->reg_id); in mlx4_ib_steer_qp_reg()
2972 return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); in mlx4_ib_steer_qp_reg()
2978 struct mlx4_dev *dev = madev->mdev; in mlx4_ib_remove()
2983 mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb); in mlx4_ib_remove()
2987 ibdev->ib_active = false; in mlx4_ib_remove()
2990 if (ibdev->iboe.nb.notifier_call) { in mlx4_ib_remove()
2991 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) in mlx4_ib_remove()
2993 ibdev->iboe.nb.notifier_call = NULL; in mlx4_ib_remove()
2998 ib_unregister_device(&ibdev->ib_dev); in mlx4_ib_remove()
3001 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, in mlx4_ib_remove()
3002 ibdev->steer_qpn_count); in mlx4_ib_remove()
3003 bitmap_free(ibdev->ib_uc_qpns_bitmap); in mlx4_ib_remove()
3005 iounmap(ibdev->uar_map); in mlx4_ib_remove()
3006 for (p = 0; p < ibdev->num_ports; ++p) in mlx4_ib_remove()
3007 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]); in mlx4_ib_remove()
3014 mlx4_uar_free(dev, &ibdev->priv_uar); in mlx4_ib_remove()
3015 mlx4_pd_free(dev, ibdev->priv_pdn); in mlx4_ib_remove()
3016 ib_dealloc_device(&ibdev->ib_dev); in mlx4_ib_remove()
3022 struct mlx4_dev *dev = ibdev->dev; in do_slave_init()
3033 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); in do_slave_init()
3034 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); in do_slave_init()
3043 while (--i >= 0) in do_slave_init()
3047 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); in do_slave_init()
3048 dm[i]->port = first_port + i + 1; in do_slave_init()
3049 dm[i]->slave = slave; in do_slave_init()
3050 dm[i]->do_init = do_init; in do_slave_init()
3051 dm[i]->dev = ibdev; in do_slave_init()
3054 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3055 if (!ibdev->sriov.is_going_down) { in do_slave_init()
3057 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); in do_slave_init()
3058 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3060 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3083 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); in mlx4_ib_handle_catas_error()
3085 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { in mlx4_ib_handle_catas_error()
3086 spin_lock_irqsave(&mqp->sq.lock, flags_qp); in mlx4_ib_handle_catas_error()
3087 if (mqp->sq.tail != mqp->sq.head) { in mlx4_ib_handle_catas_error()
3088 send_mcq = to_mcq(mqp->ibqp.send_cq); in mlx4_ib_handle_catas_error()
3089 spin_lock_irqsave(&send_mcq->lock, flags_cq); in mlx4_ib_handle_catas_error()
3090 if (send_mcq->mcq.comp && in mlx4_ib_handle_catas_error()
3091 mqp->ibqp.send_cq->comp_handler) { in mlx4_ib_handle_catas_error()
3092 if (!send_mcq->mcq.reset_notify_added) { in mlx4_ib_handle_catas_error()
3093 send_mcq->mcq.reset_notify_added = 1; in mlx4_ib_handle_catas_error()
3094 list_add_tail(&send_mcq->mcq.reset_notify, in mlx4_ib_handle_catas_error()
3098 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); in mlx4_ib_handle_catas_error()
3100 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); in mlx4_ib_handle_catas_error()
3102 spin_lock_irqsave(&mqp->rq.lock, flags_qp); in mlx4_ib_handle_catas_error()
3104 if (!mqp->ibqp.srq) { in mlx4_ib_handle_catas_error()
3105 if (mqp->rq.tail != mqp->rq.head) { in mlx4_ib_handle_catas_error()
3106 recv_mcq = to_mcq(mqp->ibqp.recv_cq); in mlx4_ib_handle_catas_error()
3107 spin_lock_irqsave(&recv_mcq->lock, flags_cq); in mlx4_ib_handle_catas_error()
3108 if (recv_mcq->mcq.comp && in mlx4_ib_handle_catas_error()
3109 mqp->ibqp.recv_cq->comp_handler) { in mlx4_ib_handle_catas_error()
3110 if (!recv_mcq->mcq.reset_notify_added) { in mlx4_ib_handle_catas_error()
3111 recv_mcq->mcq.reset_notify_added = 1; in mlx4_ib_handle_catas_error()
3112 list_add_tail(&recv_mcq->mcq.reset_notify, in mlx4_ib_handle_catas_error()
3116 spin_unlock_irqrestore(&recv_mcq->lock, in mlx4_ib_handle_catas_error()
3120 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); in mlx4_ib_handle_catas_error()
3124 mcq->comp(mcq); in mlx4_ib_handle_catas_error()
3126 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); in mlx4_ib_handle_catas_error()
3134 struct mlx4_ib_dev *ibdev = ew->ib_dev; in handle_bonded_port_state_event()
3140 spin_lock_bh(&ibdev->iboe.lock); in handle_bonded_port_state_event()
3142 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; in handle_bonded_port_state_event()
3156 spin_unlock_bh(&ibdev->iboe.lock); in handle_bonded_port_state_event()
3158 ibev.device = &ibdev->ib_dev; in handle_bonded_port_state_event()
3171 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl); in mlx4_ib_sl2vl_update()
3177 atomic64_set(&mdev->sl2vl[port - 1], sl2vl); in mlx4_ib_sl2vl_update()
3183 struct mlx4_ib_dev *mdev = ew->ib_dev; in ib_sl2vl_update_work()
3184 int port = ew->port; in ib_sl2vl_update_work()
3198 INIT_WORK(&ew->work, ib_sl2vl_update_work); in mlx4_sched_ib_sl2vl_update_work()
3199 ew->port = port; in mlx4_sched_ib_sl2vl_update_work()
3200 ew->ib_dev = ibdev; in mlx4_sched_ib_sl2vl_update_work()
3201 queue_work(wq, &ew->work); in mlx4_sched_ib_sl2vl_update_work()
3210 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_event()
3222 INIT_WORK(&ew->work, handle_bonded_port_state_event); in mlx4_ib_event()
3223 ew->ib_dev = ibdev; in mlx4_ib_event()
3224 queue_work(wq, &ew->work); in mlx4_ib_event()
3241 if (p > ibdev->num_ports) in mlx4_ib_event()
3244 rdma_port_get_link_layer(&ibdev->ib_dev, p) == in mlx4_ib_event()
3248 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST && in mlx4_ib_event()
3249 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) in mlx4_ib_event()
3256 if (p > ibdev->num_ports) in mlx4_ib_event()
3262 ibdev->ib_active = false; in mlx4_ib_event()
3272 INIT_WORK(&ew->work, handle_port_mgmt_change_event); in mlx4_ib_event()
3273 memcpy(&ew->ib_eqe, eqe, sizeof *eqe); in mlx4_ib_event()
3274 ew->ib_dev = ibdev; in mlx4_ib_event()
3277 queue_work(wq, &ew->work); in mlx4_ib_event()
3279 handle_port_mgmt_change_event(&ew->work); in mlx4_ib_event()
3288 for (i = 1; i <= ibdev->num_ports; i++) { in mlx4_ib_event()
3289 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) in mlx4_ib_event()
3302 for (i = 1; i <= ibdev->num_ports; i++) { in mlx4_ib_event()
3303 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) in mlx4_ib_event()
3318 ibev.device = &ibdev->ib_dev; in mlx4_ib_event()
3319 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; in mlx4_ib_event()
3349 return -ENOMEM; in mlx4_ib_init()