/linux/drivers/target/

target_core_iblock.c
      60  struct iblock_dev *ib_dev = NULL;  in iblock_alloc_device() local
      62  ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);  in iblock_alloc_device()
      63  if (!ib_dev) {  in iblock_alloc_device()
      67  ib_dev->ibd_exclusive = true;  in iblock_alloc_device()
      69  ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),  in iblock_alloc_device()
      71  if (!ib_dev->ibd_plug)  in iblock_alloc_device()
      76  return &ib_dev->dev;  in iblock_alloc_device()
      79  kfree(ib_dev);  in iblock_alloc_device()
      85  struct iblock_dev *ib_dev = IBLOCK_DEV(dev);  in iblock_configure_unmap() local
      88  ib_dev->ibd_bd);  in iblock_configure_unmap()
    [all …]
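
The hits above trace a two-stage allocation in iblock_alloc_device(): zero-allocate the wrapper struct, then a per-CPU plug array sized by nr_cpu_ids, unwinding in reverse order on failure. A minimal, self-contained sketch of that shape, using stand-in types rather than the in-tree struct iblock_dev layout:

    /* Sketch of the kzalloc + kcalloc(nr_cpu_ids, ...) pattern above.
     * struct example_dev/example_plug are stand-ins, not the iblock types. */
    #include <linux/slab.h>
    #include <linux/cpumask.h>      /* nr_cpu_ids */

    struct example_plug { int in_use; };

    struct example_dev {
            struct example_plug *plugs;     /* one slot per possible CPU */
    };

    static struct example_dev *example_alloc_device(void)
    {
            struct example_dev *dev;

            dev = kzalloc(sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return NULL;

            dev->plugs = kcalloc(nr_cpu_ids, sizeof(*dev->plugs), GFP_KERNEL);
            if (!dev->plugs) {
                    kfree(dev);             /* unwind in reverse order */
                    return NULL;
            }
            return dev;
    }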
|
/linux/drivers/infiniband/hw/usnic/

usnic_ib_main.c
      79  return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));  in usnic_ib_dump_vf_hdr()
     147  usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
     150  ib_event.device = &us_ibdev->ib_dev;  in usnic_ib_handle_usdev_event()
     158  dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
     161  dev_name(&us_ibdev->ib_dev.dev),  in usnic_ib_handle_usdev_event()
     167  ib_event.device = &us_ibdev->ib_dev;  in usnic_ib_handle_usdev_event()
     176  dev_name(&us_ibdev->ib_dev.dev),  in usnic_ib_handle_usdev_event()
     182  dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
     188  dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
     198  container_of(ibdev, struct usnic_ib_dev, ib_dev);  in usnic_ib_handle_port_event()
    [all …]
|
usnic_ib_sysfs.c
      52  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in board_id_show()
      70  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in config_show()
      87  dev_name(&us_ibdev->ib_dev.dev),  in config_show()
     109  dev_name(&us_ibdev->ib_dev.dev));  in config_show()
     122  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in iface_show()
     132  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in max_vf_show()
     142  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in qp_per_vf_show()
     156  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in cq_per_vf_show()
     255  kobject_get(&us_ibdev->ib_dev.dev.kobj);  in usnic_ib_sysfs_register_usdev()
     257  &us_ibdev->ib_dev.dev.kobj);  in usnic_ib_sysfs_register_usdev()
    [all …]
|
usnic_ib.h
      74  struct ib_device ib_dev;  member
     103  return container_of(ibdev, struct usnic_ib_dev, ib_dev);  in to_usdev()
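
usnic_ib.h shows the embedding idiom behind most hits in this listing: the driver embeds struct ib_device by value in its private struct and recovers the outer struct with container_of(). A sketch with a stand-in wrapper type in place of struct usnic_ib_dev:

    /* container_of() recovers the driver struct from the embedded core
     * struct; the member must be embedded by value, never a pointer. */
    #include <linux/container_of.h>
    #include <rdma/ib_verbs.h>

    struct example_ib_dev {
            struct ib_device ib_dev;        /* embedded core device */
            int private_state;
    };

    static inline struct example_ib_dev *to_example_dev(struct ib_device *ibdev)
    {
            return container_of(ibdev, struct example_ib_dev, ib_dev);
    }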
|
/linux/drivers/infiniband/core/

cache.c
     124  static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)  in dispatch_gid_change_event() argument
     128  event.device = ib_dev;  in dispatch_gid_change_event()
     382  static void del_gid(struct ib_device *ib_dev, u32 port,  in del_gid() argument
     390  dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,  in del_gid()
     399  if (!rdma_protocol_roce(ib_dev, port))  in del_gid()
     403  if (rdma_cap_roce_gid_table(ib_dev, port))  in del_gid()
     404  ib_dev->ops.del_gid(&entry->attr, &entry->context);  in del_gid()
     546  static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,  in __ib_cache_gid_add() argument
     562  table = rdma_gid_table(ib_dev, port);  in __ib_cache_gid_add()
     574  attr->device = ib_dev;  in __ib_cache_gid_add()
    [all …]
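
dispatch_gid_change_event() at line 124 follows the generic core notification pattern: fill a stack-allocated struct ib_event and hand it to ib_dispatch_event(), which fans it out to registered handlers. A minimal sketch of that shape (the helper name is hypothetical):

    /* Fill a stack ib_event and let the core dispatch it, as
     * dispatch_gid_change_event() does for GID table changes. */
    #include <rdma/ib_verbs.h>

    static void example_notify_gid_change(struct ib_device *ib_dev, u32 port)
    {
            struct ib_event event = {
                    .device           = ib_dev,
                    .element.port_num = port,
                    .event            = IB_EVENT_GID_CHANGE,
            };

            ib_dispatch_event(&event);
    }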
|
uverbs_std_types_cq.c
      68  struct ib_device *ib_dev = attrs->context->device;  in UVERBS_HANDLER() local
      81  if ((!ib_dev->ops.create_cq && !ib_dev->ops.create_cq_umem) || !ib_dev->ops.destroy_cq)  in UVERBS_HANDLER()
     133  !ib_dev->ops.create_cq_umem) {  in UVERBS_HANDLER()
     138  umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE);  in UVERBS_HANDLER()
     158  !ib_dev->ops.create_cq_umem) {  in UVERBS_HANDLER()
     163  umem_dmabuf = ib_umem_dmabuf_get_pinned(ib_dev, buffer_offset, buffer_length,  in UVERBS_HANDLER()
     172  !ib_dev->ops.create_cq) {  in UVERBS_HANDLER()
     177  cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);  in UVERBS_HANDLER()
     184  cq->device = ib_dev;  in UVERBS_HANDLER()
     194  ret = umem ? ib_dev->ops.create_cq_umem(cq, &attr, umem, attrs) :  in UVERBS_HANDLER()
    [all …]
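
These handler hits sketch the uverbs object-creation flow: verify the driver implements the required ops, allocate the driver-sized object with rdma_zalloc_drv_obj(), fill the core fields, then dispatch to the driver op. A simplified sketch under those assumptions; uobject bookkeeping and the umem/dmabuf paths seen above are omitted:

    /* Simplified create-CQ flow; the real handler also manages the uobject
     * and chooses between ops.create_cq and ops.create_cq_umem. */
    #include <linux/err.h>
    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    static struct ib_cq *example_create_cq(struct ib_device *ib_dev,
                                           const struct ib_cq_init_attr *attr,
                                           struct uverbs_attr_bundle *attrs)
    {
            struct ib_cq *cq;
            int ret;

            /* Refuse early if the driver cannot create or destroy CQs. */
            if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)
                    return ERR_PTR(-EOPNOTSUPP);

            cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);  /* driver-sized alloc */
            if (!cq)
                    return ERR_PTR(-ENOMEM);

            cq->device = ib_dev;
            ret = ib_dev->ops.create_cq(cq, attr, attrs);
            if (ret) {
                    kfree(cq);
                    return ERR_PTR(ret);
            }
            return cq;
    }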
|
device.c
     213  static void free_netdevs(struct ib_device *ib_dev);
     857  pdata->ib_dev = device;  in alloc_port_data()
    1380  static void prevent_dealloc_device(struct ib_device *ib_dev)  in prevent_dealloc_device() argument
    1530  static void __ib_unregister_device(struct ib_device *ib_dev)  in __ib_unregister_device() argument
    1534  mutex_lock(&ib_dev->subdev_lock);  in __ib_unregister_device()
    1536  &ib_dev->subdev_list_head,  in __ib_unregister_device()
    1539  ib_dev->ops.del_sub_dev(sub);  in __ib_unregister_device()
    1540  ib_device_put(ib_dev);  in __ib_unregister_device()
    1542  mutex_unlock(&ib_dev->subdev_lock);  in __ib_unregister_device()
    1551  mutex_lock(&ib_dev->unregistration_lock);  in __ib_unregister_device()
    [all …]
|
uverbs_std_types_dm.c
      56  struct ib_device *ib_dev = attrs->context->device;  in UVERBS_HANDLER() local
      60  if (!ib_dev->ops.alloc_dm)  in UVERBS_HANDLER()
      73  dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs);  in UVERBS_HANDLER()
      77  dm->device = ib_dev;  in UVERBS_HANDLER()
|
uverbs_std_types_counters.c
      60  struct ib_device *ib_dev = attrs->context->device;  in UVERBS_HANDLER() local
      69  if (!ib_dev->ops.create_counters)  in UVERBS_HANDLER()
      72  counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);  in UVERBS_HANDLER()
      76  counters->device = ib_dev;  in UVERBS_HANDLER()
      81  ret = ib_dev->ops.create_counters(counters, attrs);  in UVERBS_HANDLER()
|
core_priv.h
      90  struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
      93  void ib_enum_roce_netdev(struct ib_device *ib_dev,
     129  void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
     134  int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
     137  int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
     140  int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
     146  unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);
|
user_mad.c
     103  struct ib_device *ib_dev;  member
     704  if (!file->port->ib_dev) {  in ib_umad_reg_agent()
     752  agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,  in ib_umad_reg_agent()
     805  if (!file->port->ib_dev) {  in ib_umad_reg_agent2()
     865  agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,  in ib_umad_reg_agent2()
    1007  if (!port->ib_dev) {  in ib_umad_open()
    1012  if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {  in ib_umad_open()
    1109  if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {  in ib_umad_sm_open()
    1114  ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);  in ib_umad_sm_open()
    1139  if (port->ib_dev)  in ib_umad_sm_close()
    [all …]
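
Each user_mad.c entry point first checks that port->ib_dev is still non-NULL (the underlying device may have been hot-removed) and that the caller's network namespace is allowed to touch the device. A sketch of that gate with a stand-in port struct; struct ib_umad_port is private to user_mad.c, and the error codes here are illustrative rather than the exact in-tree values:

    /* Stand-in for the private struct ib_umad_port; only the field the
     * checks need is shown. */
    #include <linux/errno.h>
    #include <linux/nsproxy.h>
    #include <linux/sched.h>
    #include <net/net_namespace.h>
    #include <rdma/ib_verbs.h>

    struct example_port {
            struct ib_device *ib_dev;       /* NULL once the device is gone */
    };

    static int example_open_checks(struct example_port *port)
    {
            if (!port->ib_dev)
                    return -EPIPE;          /* device was hot-unplugged */

            if (!rdma_dev_access_netns(port->ib_dev,
                                       current->nsproxy->net_ns))
                    return -EACCES;         /* wrong network namespace */

            return 0;
    }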
|
/linux/drivers/infiniband/hw/mana/

wq.c
      13  container_of(pd->device, struct mana_ib_dev, ib_dev);  in mana_ib_create_wq()
      23  ibdev_dbg(&mdev->ib_dev,  in mana_ib_create_wq()
      32  ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);  in mana_ib_create_wq()
      36  ibdev_dbg(&mdev->ib_dev,  in mana_ib_create_wq()
      62  struct ib_device *ib_dev = ibwq->device;  in mana_ib_destroy_wq() local
      65  mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);  in mana_ib_destroy_wq()
|
/linux/drivers/infiniband/hw/hns/

hns_roce_main.c
      99  net_dev = ib_device_get_netdev(&hr_dev->ib_dev, port_num);  in hns_roce_get_port_state()
     121  struct ib_device *ibdev = &hr_dev->ib_dev;  in handle_en_event()
     218  static int hns_roce_query_device(struct ib_device *ib_dev,  in hns_roce_query_device() argument
     222  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in hns_roce_query_device()
     274  static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,  in hns_roce_query_port() argument
     277  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in hns_roce_query_port()
     294  ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,  in hns_roce_query_port()
     297  ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);  in hns_roce_query_port()
     299  net_dev = ib_device_get_netdev(ib_dev, port_num);  in hns_roce_query_port()
     301  ibdev_err(ib_dev, "find netdev %u failed!\n", port);  in hns_roce_query_port()
    [all …]
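
hns_roce_query_port() resolves the backing netdev through ib_device_get_netdev(), which returns a referenced net_device the caller must dev_put(). A sketch of deriving a port state from that netdev under the same convention; the exact carrier logic in the driver may differ:

    /* ib_device_get_netdev() takes a reference on the returned netdev;
     * the matching dev_put() is mandatory on every path. */
    #include <linux/netdevice.h>
    #include <rdma/ib_verbs.h>

    static enum ib_port_state example_port_state(struct ib_device *ib_dev,
                                                 u32 port_num)
    {
            struct net_device *ndev;
            enum ib_port_state state;

            ndev = ib_device_get_netdev(ib_dev, port_num);
            if (!ndev)
                    return IB_PORT_DOWN;

            state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
                    IB_PORT_ACTIVE : IB_PORT_DOWN;

            dev_put(ndev);                  /* drop the acquired reference */
            return state;
    }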
|
hns_roce_pd.c
      46  struct ib_device *ib_dev = ibpd->device;  in hns_roce_alloc_pd() local
      47  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in hns_roce_alloc_pd()
      56  ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);  in hns_roce_alloc_pd()
      68  ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);  in hns_roce_alloc_pd()
      93  ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);  in hns_roce_uar_alloc()
     128  ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);  in hns_roce_xrcd_alloc()
|
hns_roce_bond.c
      22  return container_of(ibdev, struct hns_roce_dev, ib_dev);  in hns_roce_get_hrdev_by_netdev()
     103  old_dev = ib_device_get_netdev(&hr_dev->ib_dev, 1);  in hns_roce_set_bond_netdev()
     107  ret = ib_device_set_netdev(&hr_dev->ib_dev, active_dev, 1);  in hns_roce_set_bond_netdev()
     115  roce_del_all_netdev_gids(&hr_dev->ib_dev, 1, old_dev);  in hns_roce_set_bond_netdev()
     116  rdma_roce_rescan_port(&hr_dev->ib_dev, 1);  in hns_roce_set_bond_netdev()
     362  ibdev_info(&bond_grp->main_hr_dev->ib_dev,  in hns_roce_set_bond()
     405  ibdev_err(&bond_grp->main_hr_dev->ib_dev,  in hns_roce_slave_changestate()
     409  ibdev_info(&bond_grp->main_hr_dev->ib_dev,  in hns_roce_slave_changestate()
     447  ibdev_info(&bond_grp->main_hr_dev->ib_dev,  in hns_roce_slave_change_num()
     476  ib_device_put(&hr_dev->ib_dev);  in hns_roce_bond_info_update_nolock()
    [all …]
|
hns_roce_qp.c
     266  ibdev_err(&hr_dev->ib_dev,  in alloc_qpn()
     484  ibdev_err(&hr_dev->ib_dev,  in set_rq_size()
     492  ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",  in set_rq_size()
     628  ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");  in check_sq_size_with_integrity()
     633  ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",  in check_sq_size_with_integrity()
     645  struct ib_device *ibdev = &hr_dev->ib_dev;  in set_user_sq_size()
     722  struct ib_device *ibdev = &hr_dev->ib_dev;  in set_kernel_sq_size()
     771  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_qp_buf()
     851  ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");  in qp_mmap_entry()
     872  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_user_qp_db()
    [all …]
|
hns_roce_srq.c
      70  ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);  in alloc_srqn()
      87  struct ib_device *ibdev = &hr_dev->ib_dev;  in hns_roce_create_srqc()
     116  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_srqc()
     169  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_srq_idx()
     222  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_srq_wqe_buf()
     300  ibdev_err(&hr_dev->ib_dev,  in set_srq_basic_param()
     352  ibdev_err(&hr_dev->ib_dev,  in alloc_srq_buf()
|
/linux/drivers/infiniband/ulp/isert/

ib_isert.c
     105  struct ib_device *ib_dev = device->ib_device;  in isert_create_qp() local
     109  isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);  in isert_create_qp()
     149  struct ib_device *ib_dev = device->ib_device;  in isert_alloc_rx_descriptors() local
     164  dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,  in isert_alloc_rx_descriptors()
     166  if (ib_dma_mapping_error(ib_dev, dma_addr))  in isert_alloc_rx_descriptors()
     183  ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,  in isert_alloc_rx_descriptors()
     195  struct ib_device *ib_dev = isert_conn->device->ib_device;  in isert_free_rx_descriptors() local
     204  ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,  in isert_free_rx_descriptors()
     215  struct ib_device *ib_dev = device->ib_device;  in isert_create_device_ib_res() local
     219  ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);  in isert_create_device_ib_res()
    [all …]
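
isert_alloc_rx_descriptors() shows the ib_dma_* discipline: every ib_dma_map_single() is validated with ib_dma_mapping_error() and paired with an ib_dma_unmap_single() on teardown, using the same device, length, and direction. The same pairing in isolation, with hypothetical helper names:

    /* Map a receive buffer for DMA and validate the handle; the unmap
     * helper mirrors the mapping call exactly. */
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    static int example_map_rx_buf(struct ib_device *ib_dev, void *buf,
                                  size_t len, u64 *dma_addr)
    {
            *dma_addr = ib_dma_map_single(ib_dev, buf, len, DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(ib_dev, *dma_addr))
                    return -ENOMEM;
            return 0;
    }

    static void example_unmap_rx_buf(struct ib_device *ib_dev, u64 dma_addr,
                                     size_t len)
    {
            ib_dma_unmap_single(ib_dev, dma_addr, len, DMA_FROM_DEVICE);
    }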
|
/linux/drivers/infiniband/sw/rxe/

rxe_net.c
     231  ib_device_put(&rxe->ib_dev);  in rxe_udp_encap_recv()
     480  if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {  in rxe_loopback()
     540  attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);  in rxe_init_packet()
     598  ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);  in rxe_parent_name()
     612  rxe = ib_alloc_device(rxe_dev, ib_dev);  in rxe_net_add()
     616  ib_mark_name_assigned_by_user(&rxe->ib_dev);  in rxe_net_add()
     620  ib_dealloc_device(&rxe->ib_dev);  in rxe_net_add()
     632  ev.device = &rxe->ib_dev;  in rxe_port_event()
     643  dev_info(&rxe->ib_dev.dev, "set active\n");  in rxe_port_up()
     651  dev_info(&rxe->ib_dev.dev, "set down\n");  in rxe_port_down()
    [all …]
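
rxe_loopback() and rxe_udp_encap_recv() bracket packet handling with ib_device_try_get()/ib_device_put(), so a device in the middle of unregistering cannot be used or freed underneath the datapath. The guard in sketch form, with the packet-processing body elided:

    /* ib_device_try_get() fails once unregistration has started; holding
     * the reference pins the device for the duration of the handler. */
    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    static int example_handle_pkt(struct ib_device *ib_dev)
    {
            if (!ib_device_try_get(ib_dev))
                    return -ENODEV;         /* device is going away */

            /* ... process the packet while the device is pinned ... */

            ib_device_put(ib_dev);
            return 0;
    }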
|
/linux/drivers/infiniband/hw/mlx5/

main.c
     153  rep_ndev = ib_device_get_netdev(&dev->ib_dev, i + 1);  in mlx5_get_rep_roce()
     197  return ib_device_get_netdev(&ibdev->ib_dev, i + 1);  in mlx5_ib_get_rep_uplink_netdev()
     225  ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);  in mlx5_netdev_event()
     231  ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num);  in mlx5_netdev_event()
     238  ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);  in mlx5_netdev_event()
     240  ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num);  in mlx5_netdev_event()
     256  lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1);  in mlx5_netdev_event()
     273  ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);  in mlx5_netdev_event()
     279  if (get_port_state(&ibdev->ib_dev, port_num,  in mlx5_netdev_event()
     287  ibev.device = &ibdev->ib_dev;  in mlx5_netdev_event()
    [all …]
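
mlx5_netdev_event() keeps the core's port-to-netdev binding current: a netdev REGISTER event binds with ib_device_set_netdev(), and UNREGISTER clears the slot by passing NULL. A sketch of that bookkeeping with the notifier plumbing omitted; the helper name is hypothetical:

    /* Bind or clear the core's per-port netdev pointer; passing NULL
     * unbinds, as the UNREGISTER branch of mlx5_netdev_event() does. */
    #include <linux/netdevice.h>
    #include <rdma/ib_verbs.h>

    static int example_update_port_netdev(struct ib_device *ib_dev,
                                          struct net_device *ndev,
                                          u32 port_num, bool registering)
    {
            return ib_device_set_netdev(ib_dev,
                                        registering ? ndev : NULL,
                                        port_num);
    }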
|
ib_rep.c
      26  return ib_device_set_netdev(&ibdev->ib_dev, ndev, vport_index + 1);  in mlx5_ib_set_vport_rep()
     154  ibdev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,  in mlx5_ib_vport_rep_load()
     173  ibdev->ib_dev.phys_port_cnt = num_ports;  in mlx5_ib_vport_rep_load()
     174  ret = ib_device_set_netdev(&ibdev->ib_dev,  in mlx5_ib_vport_rep_load()
     194  ib_dealloc_device(&ibdev->ib_dev);  in mlx5_ib_vport_rep_load()
     237  ib_device_set_netdev(&dev->ib_dev, NULL, vport_index + 1);  in mlx5_ib_vport_rep_unload()
|
/linux/drivers/infiniband/ulp/iser/

iser_verbs.c
      63  struct ib_device *ib_dev = device->ib_device;  in iser_create_device_ib_res() local
      65  if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {  in iser_create_device_ib_res()
      70  device->pd = ib_alloc_pd(ib_dev,  in iser_create_device_ib_res()
      75  INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,  in iser_create_device_ib_res()
     104  struct ib_device *ib_dev = device->ib_device;  in iser_create_fastreg_desc() local
     112  if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)  in iser_create_fastreg_desc()
     232  struct ib_device *ib_dev;  in iser_create_ib_conn_res() local
     240  ib_dev = device->ib_device;  in iser_create_ib_conn_res()
     248  (unsigned int)ib_dev->attrs.max_qp_wr);  in iser_create_ib_conn_res()
     251  ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);  in iser_create_ib_conn_res()
    [all …]
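
iser_create_device_ib_res() first checks device_cap_flags for memory-management extensions (fast registration) before allocating a PD. The same gate in isolation; the zero flags argument is illustrative, not the driver's exact value:

    /* Refuse devices without FRWR support, then allocate a protection
     * domain; ib_alloc_pd() returns a valid pd or an ERR_PTR. */
    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static struct ib_pd *example_setup_pd(struct ib_device *ib_dev)
    {
            if (!(ib_dev->attrs.device_cap_flags &
                  IB_DEVICE_MEM_MGT_EXTENSIONS))
                    return ERR_PTR(-EOPNOTSUPP);

            return ib_alloc_pd(ib_dev, 0);  /* 0: no special pd flags */
    }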
|
/linux/include/rdma/

uverbs_std_types.h
     102  struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)  in __uobj_alloc() argument
     107  *ib_dev = attrs->context->device;  in __uobj_alloc()
     116  struct ib_device *ib_dev,  in uverbs_flow_action_fill_action() argument
     120  action->device = ib_dev;  in uverbs_flow_action_fill_action()
|
/linux/drivers/infiniband/ulp/rtrs/

rtrs-clt.c
     433  ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,  in complete_rdma_req()
     483  ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,  in rtrs_post_send_rdma()
     541  ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,  in rtrs_clt_rkey_rsp_done()
     567  ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,  in rtrs_clt_rkey_rsp_done()
    1058  ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,  in rtrs_post_rdma_write_sg()
    1102  count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,  in rtrs_clt_write_req()
    1127  ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,  in rtrs_clt_write_req()
    1163  ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,  in rtrs_clt_write_req()
    1194  count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,  in rtrs_clt_read_req()
    1213  ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,  in rtrs_clt_read_req()
    [all …]
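
The rtrs-clt hits pair every ib_dma_map_sg() with an ib_dma_unmap_sg() on both the error path (rtrs_clt_write_req) and the completion path (complete_rdma_req). A sketch of that pairing with stand-in parameters; the posting step is elided:

    /* Map a scatterlist, post the I/O, and unmap on the error path; on
     * success the completion handler performs the matching unmap. */
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    static int example_rdma_io(struct ib_device *ib_dev,
                               struct scatterlist *sgl, int sg_cnt,
                               enum dma_data_direction dir)
    {
            int count, err;

            count = ib_dma_map_sg(ib_dev, sgl, sg_cnt, dir);
            if (count == 0)
                    return -EINVAL;

            err = 0;    /* ... post work requests over the mapped entries ... */
            if (err)
                    ib_dma_unmap_sg(ib_dev, sgl, sg_cnt, dir);

            return err;
    }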
|
rtrs-srv.c
     307  ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,  in rdma_write_sg()
     320  ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,  in rdma_write_sg()
     428  ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,  in send_io_resp_imm()
     559  rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);  in unmap_cont_bufs()
     562  ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,  in unmap_cont_bufs()
     591  srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;  in map_cont_bufs()
     620  nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,  in map_cont_bufs()
     642  GFP_KERNEL, srv_path->s.dev->ib_dev,  in map_cont_bufs()
     666  ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,  in map_cont_bufs()
     712  rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);  in rtrs_srv_info_rsp_done()
    [all …]
|