/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 or the BSD 2-Clause License. This program is distributed
 * in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
 * even the implied warranty of MERCHANTABILITY or FITNESS FOR A
 * PARTICULAR PURPOSE. See the GNU General Public License version 2 at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * The BSD 2-Clause License:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 */
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>

#include "pvrdma.h"
/**
 * pvrdma_query_device - query device
 * @ibdev: the device to query
 * @props: the device properties to be filled in
 * @uhw: user data
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_query_device(struct ib_device *ibdev,
			struct ib_device_attr *props,
			struct ib_udata *uhw)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	props->fw_ver = dev->dsr->caps.fw_ver;
	props->sys_image_guid = dev->dsr->caps.sys_image_guid;
	props->max_mr_size = dev->dsr->caps.max_mr_size;
	props->page_size_cap = dev->dsr->caps.page_size_cap;
	props->vendor_id = dev->dsr->caps.vendor_id;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->dsr->caps.hw_ver;
	props->max_qp = dev->dsr->caps.max_qp;
	props->max_qp_wr = dev->dsr->caps.max_qp_wr;
	props->device_cap_flags = dev->dsr->caps.device_cap_flags;
	props->max_send_sge = dev->dsr->caps.max_sge;
	props->max_recv_sge = dev->dsr->caps.max_sge;
	props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
					   dev->dsr->caps.max_sge_rd);
	props->max_srq = dev->dsr->caps.max_srq;
	props->max_srq_wr = dev->dsr->caps.max_srq_wr;
	props->max_srq_sge = dev->dsr->caps.max_srq_sge;
	props->max_cq = dev->dsr->caps.max_cq;
	props->max_cqe = dev->dsr->caps.max_cqe;
	props->max_mr = dev->dsr->caps.max_mr;
	props->max_pd = dev->dsr->caps.max_pd;
	props->max_qp_rd_atom = dev->dsr->caps.max_qp_rd_atom;
	props->max_qp_init_rd_atom = dev->dsr->caps.max_qp_init_rd_atom;
	props->atomic_cap =
		dev->dsr->caps.atomic_ops &
		(PVRDMA_ATOMIC_OP_COMP_SWAP | PVRDMA_ATOMIC_OP_FETCH_ADD) ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_ah = dev->dsr->caps.max_ah;
	props->max_pkeys = dev->dsr->caps.max_pkeys;
	props->local_ca_ack_delay = dev->dsr->caps.local_ca_ack_delay;
	if ((dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_LOCAL_INV) &&
	    (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_REMOTE_INV) &&
	    (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_FAST_REG_WR)) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = PVRDMA_GET_CAP(dev,
				PVRDMA_MAX_FAST_REG_PAGES,
				dev->dsr->caps.max_fast_reg_page_list_len);
	}

	props->device_cap_flags |= IB_DEVICE_PORT_ACTIVE_EVENT |
				   IB_DEVICE_RC_RNR_NAK_GEN;

	return 0;
}
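
/*
 * A minimal userspace sketch (an addition, not part of this driver) showing
 * where the capability fields filled in above surface: libibverbs'
 * ibv_query_device() reports them to applications. Wrapped in #if 0 so this
 * kernel file still compiles; using the first device is an assumption.
 */
#if 0
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_device_attr attr;

	if (!list || !list[0])
		return 1;

	ctx = ibv_open_device(list[0]);	/* first device: an assumption */
	if (ctx) {
		if (!ibv_query_device(ctx, &attr))
			printf("max_qp=%d max_cqe=%d max_mr_size=%llu\n",
			       attr.max_qp, attr.max_cqe,
			       (unsigned long long)attr.max_mr_size);
		ibv_close_device(ctx);
	}
	ibv_free_device_list(list);
	return 0;
}
#endif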
/**
 * pvrdma_query_port - query device port attributes
 * @ibdev: the device to query
 * @port: the port number
 * @props: the device properties
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_query_port(struct ib_device *ibdev, u32 port,
		      struct ib_port_attr *props)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_port *cmd = &req.query_port;
	struct pvrdma_cmd_query_port_resp *resp = &rsp.query_port_resp;
	int err;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
	cmd->port_num = port;

	err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
	if (err < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query port, error: %d\n", err);
		return err;
	}
	props->state = pvrdma_port_state_to_ib(resp->attrs.state);
	props->max_mtu = pvrdma_mtu_to_ib(resp->attrs.max_mtu);
	props->active_mtu = pvrdma_mtu_to_ib(resp->attrs.active_mtu);
	props->gid_tbl_len = resp->attrs.gid_tbl_len;
	props->port_cap_flags =
		pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->max_msg_sz = resp->attrs.max_msg_sz;
	props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
	props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
	props->pkey_tbl_len = resp->attrs.pkey_tbl_len;
	props->lid = resp->attrs.lid;
	props->sm_lid = resp->attrs.sm_lid;
	props->lmc = resp->attrs.lmc;
	props->max_vl_num = resp->attrs.max_vl_num;
	props->sm_sl = resp->attrs.sm_sl;
	props->subnet_timeout = resp->attrs.subnet_timeout;
	props->init_type_reply = resp->attrs.init_type_reply;
	props->active_width = pvrdma_port_width_to_ib(resp->attrs.active_width);
	props->active_speed = pvrdma_port_speed_to_ib(resp->attrs.active_speed);
	props->phys_state = resp->attrs.phys_state;

	return 0;
}
/**
 * pvrdma_query_gid - query device gid
 * @ibdev: the device to query
 * @port: the port number
 * @index: the index
 * @gid: the device gid value
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_query_gid(struct ib_device *ibdev, u32 port, int index,
		     union ib_gid *gid)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);

	if (index >= dev->dsr->caps.gid_tbl_len)
		return -EINVAL;

	memcpy(gid, &dev->sgid_tbl[index], sizeof(union ib_gid));

	return 0;
}
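
/*
 * Hedged userspace counterpart (an addition, not part of this driver):
 * ibv_query_gid() ends up reading the same per-port GID table entry that
 * pvrdma_query_gid() serves. Port 1 and index 0 are assumptions.
 */
#if 0
#include <infiniband/verbs.h>

static int read_first_gid(struct ibv_context *ctx, union ibv_gid *gid)
{
	return ibv_query_gid(ctx, 1, 0, gid);
}
#endif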
/**
 * pvrdma_query_pkey - query device port's P_Key table
 * @ibdev: the device to query
 * @port: the port number
 * @index: the index
 * @pkey: the device P_Key value
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
		      u16 *pkey)
{
	int err = 0;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
	cmd->port_num = port;
	cmd->index = index;

	err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp,
			      PVRDMA_CMD_QUERY_PKEY_RESP);
	if (err < 0) {
		dev_warn(&to_vdev(ibdev)->pdev->dev,
			 "could not query pkey, error: %d\n", err);
		return err;
	}

	*pkey = rsp.query_pkey_resp.pkey;

	return 0;
}
/**
 * pvrdma_modify_port - modify device port attributes
 * @ibdev: the device to modify
 * @port: the port number
 * @mask: attributes to modify
 * @props: the device properties
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_modify_port(struct ib_device *ibdev, u32 port, int mask,
		       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	struct pvrdma_dev *vdev = to_vdev(ibdev);
	int ret;

	if (mask & ~IB_PORT_SHUTDOWN) {
		dev_warn(&vdev->pdev->dev,
			 "unsupported port modify mask %#x\n", mask);
		return -EOPNOTSUPP;
	}

	mutex_lock(&vdev->port_mutex);
	ret = ib_query_port(ibdev, port, &attr);
	if (ret)
		goto out;

	vdev->port_cap_mask |= props->set_port_cap_mask;
	vdev->port_cap_mask &= ~props->clr_port_cap_mask;

	if (mask & IB_PORT_SHUTDOWN)
		vdev->ib_active = false;

out:
	mutex_unlock(&vdev->port_mutex);
	return ret;
}
/**
 * pvrdma_alloc_ucontext - allocate ucontext
 * @uctx: the uverbs context
 * @udata: user data
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct pvrdma_dev *vdev = to_vdev(ibdev);
	struct pvrdma_ucontext *context = to_vucontext(uctx);
	union pvrdma_cmd_req req = {};
	union pvrdma_cmd_resp rsp = {};
	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
	struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
	struct pvrdma_alloc_ucontext_resp uresp = {};
	int ret;

	if (!vdev->ib_active)
		return -EAGAIN;

	context->dev = vdev;
	ret = pvrdma_uar_alloc(vdev, &context->uar);
	if (ret)
		return -ENOMEM;

	/* Get ctx_handle from host */
	if (vdev->dsr_version < PVRDMA_PPN64_VERSION)
		cmd->pfn = context->uar.pfn;
	else
		cmd->pfn64 = context->uar.pfn;

	cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC;
	ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
	if (ret < 0) {
		dev_warn(&vdev->pdev->dev,
			 "could not create ucontext, error: %d\n", ret);
		goto err;
	}

	context->ctx_handle = resp->ctx_handle;

	/* Copy back to user */
	uresp.qp_tab_size = vdev->dsr->caps.max_qp;
	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (ret) {
		pvrdma_uar_free(vdev, &context->uar);
		pvrdma_dealloc_ucontext(&context->ibucontext);
		return -EFAULT;
	}

	return 0;

err:
	pvrdma_uar_free(vdev, &context->uar);
	return ret;
}
/**
 * pvrdma_dealloc_ucontext - deallocate ucontext
 * @ibcontext: the ucontext
 */
void pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
	union pvrdma_cmd_req req = {};
	struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
	int ret;

	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
	cmd->ctx_handle = context->ctx_handle;

	ret = pvrdma_cmd_post(context->dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&context->dev->pdev->dev,
			 "destroy ucontext failed, error: %d\n", ret);

	/* Free the UAR even if the device command failed */
	pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
}
/**
 * pvrdma_mmap - create mmap region
 * @ibcontext: the user context
 * @vma: the VMA
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	dev_dbg(&context->dev->pdev->dev, "create mmap region\n");

	if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) {
		dev_warn(&context->dev->pdev->dev,
			 "invalid params for mmap region\n");
		return -EINVAL;
	}

	/* Map the UAR page as uncached device memory */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
			       vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
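
/*
 * Hedged sketch of the userspace side (an addition, not part of this
 * driver): a provider library obtains the single UAR doorbell page that
 * pvrdma_mmap() serves by mmap()ing one page at offset 0 on the uverbs
 * command fd. "cmd_fd" stands in for the fd the library already holds.
 */
#if 0
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_uar_page(int cmd_fd)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *uar = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd, 0);

	return uar == MAP_FAILED ? NULL : uar;
}
#endif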
/**
 * pvrdma_alloc_pd - allocate protection domain
 * @ibpd: the protection domain
 * @udata: user data or null for kernel object
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct pvrdma_pd *pd = to_vpd(ibpd);
	struct pvrdma_dev *dev = to_vdev(ibdev);
	union pvrdma_cmd_req req = {};
	union pvrdma_cmd_resp rsp = {};
	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
	struct pvrdma_alloc_pd_resp pd_resp = {0};
	int ret;
	struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
		udata, struct pvrdma_ucontext, ibucontext);

	/* Check allowed max pds */
	if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
		return -ENOMEM;

	cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
	cmd->ctx_handle = context ? context->ctx_handle : 0;
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate protection domain, error: %d\n",
			 ret);
		goto err;
	}

	pd->privileged = !udata;
	pd->pd_handle = resp->pd_handle;
	pd->pdn = resp->pd_handle;
	pd_resp.pdn = resp->pd_handle;

	if (udata) {
		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back protection domain\n");
			pvrdma_dealloc_pd(&pd->ibpd, udata);
			return -EFAULT;
		}
	}

	return 0;

err:
	atomic_dec(&dev->num_pds);
	return ret;
}
/**
 * pvrdma_dealloc_pd - deallocate protection domain
 * @pd: the protection domain to be released
 * @udata: user data or null for kernel object
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req = {};
	struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
	int ret;

	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
	cmd->pd_handle = to_vpd(pd)->pd_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret)
		dev_warn(&dev->pdev->dev,
			 "could not dealloc protection domain, error: %d\n",
			 ret);

	atomic_dec(&dev->num_pds);

	return 0;
}
/**
 * pvrdma_create_ah - create an address handle
 * @ibah: the IB address handle
 * @init_attr: the address handle attributes
 * @udata: pointer to user data
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
		     struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct pvrdma_dev *dev = to_vdev(ibah->device);
	struct pvrdma_ah *ah = to_vah(ibah);
	const struct ib_global_route *grh;
	u32 port_num = rdma_ah_get_port_num(ah_attr);

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
		return -EINVAL;

	grh = rdma_ah_read_grh(ah_attr);
	if ((ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
	    rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw))
		return -EINVAL;

	if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
		return -ENOMEM;

	ah->av.port_pd = to_vpd(ibah->pd)->pd_handle | (port_num << 24);
	ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr);
	ah->av.src_path_bits |= 0x80;
	ah->av.gid_index = grh->sgid_index;
	ah->av.hop_limit = grh->hop_limit;
	ah->av.sl_tclass_flowlabel = (grh->traffic_class << 20) |
				     grh->flow_label;
	memcpy(ah->av.dgid, grh->dgid.raw, 16);
	memcpy(ah->av.dmac, ah_attr->roce.dmac, ETH_ALEN);

	return 0;
}
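
/*
 * Hedged illustration (an addition, not from the source): the
 * av.sl_tclass_flowlabel packing above places the 8-bit traffic class in
 * bits 27:20 and the 20-bit flow label in bits 19:0. The values below are
 * made-up examples.
 */
#if 0
	u32 tclass = 0x12, flow_label = 0x00345;
	u32 sl_tclass_flowlabel = (tclass << 20) | (flow_label & 0xfffff);
	/* sl_tclass_flowlabel == 0x01200345 */
#endif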
/**
 * pvrdma_destroy_ah - destroy an address handle
 * @ah: the address handle to destroy
 * @flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
 */
int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	struct pvrdma_dev *dev = to_vdev(ah->device);

	atomic_dec(&dev->num_ahs);

	return 0;
}