Lines Matching +full:supports +full:- +full:cqe
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
22 * - Redistributions of source code must retain the above
26 * - Redistributions in binary form must reproduce the above
47 #include <linux/dma-mapping.h>
195 * This device supports a per-device lkey or stag that can be
216 * This device supports the IB "base memory management extension",
310 unsigned int cqe; member
412 default: return -1; in ib_mtu_enum_to_int()
423 IB_PORT_DUMMY = -1, /* force enum signed */
479 default: return -1; in ib_width_enum_to_int()
496 * @lock - Mutex to protect parallel write access to lifespan and values
499 * @timestamp - Used by the core code to track when the last update was
500 * @lifespan - Used by the core code to determine how old the counters
504 * @name - Array of pointers to static names used for the counters in
506 * @num_counters - How many hardware counters there are. If name is
510 * @value - Array of u64 counters that are accessed by the sysfs code and
524 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
526 * @names - Array of static const char *
527 * @num_counters - How many elements in array
528 * @lifespan - How many milliseconds between updates
540 stats->names = names; in rdma_alloc_hw_stats_struct()
541 stats->num_counters = num_counters; in rdma_alloc_hw_stats_struct()
542 stats->lifespan = msecs_to_jiffies(lifespan); in rdma_alloc_hw_stats_struct()
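The helper above only fills in the struct; a driver typically calls it from its alloc_hw_stats callback. A minimal sketch, with hypothetical foo_* names and the stock RDMA_HW_STATS_DEFAULT_LIFESPAN refresh interval:

static const char * const foo_counter_names[] = {
	"foo_rx_packets",
	"foo_tx_packets",
};

static struct rdma_hw_stats *foo_alloc_port_stats(void)
{
	/* Let the core cache counter values for the default lifespan. */
	return rdma_alloc_hw_stats_struct(foo_counter_names,
					  ARRAY_SIZE(foo_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}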
686 (_ptr)->device = _device; \
687 (_ptr)->handler = _handler; \
688 INIT_LIST_HEAD(&(_ptr)->list); \
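INIT_IB_EVENT_HANDLER only initializes the structure; the handler still has to be registered. A minimal sketch assuming the usual ib_register_event_handler() flow (the my_* names are hypothetical):

static void my_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR)
		pr_warn("port %u went down\n", event->element.port_num);
}

static struct ib_event_handler my_handler;

static void my_watch_device(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
	ib_register_event_handler(&my_handler);
}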
756 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
764 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
772 * enum ib_mr_type - memory region type
777 * the normal mr constraints - see
781 * @IB_MR_TYPE_USER: memory region that is used for the user-space
802 * struct ib_mr_status - Memory region status container
815 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
876 IB_WC_DUMMY = -1, /* force enum signed */
989 * indices into a 2-entry table.
1033 /* reserve bits 26-31 for low level drivers' internal use */
1142 IB_QPS_DUMMY = -1, /* force enum signed */
1214 IB_WR_DUMMY = -1, /* force enum signed */
1224 /* reserve bits 26-31 for low level drivers' internal use */
1346 ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
1350 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1357 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1376 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1378 /* uobj is being cleaned-up before being committed */
1408 /* FIXME, save memory: ufile->context == context */
1472 int cqe; member
1654 /* default unicast and multicast rule -
1658 /* default multicast rule -
1662 /* sniffer rule - receive all port traffic */
1935 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
1936 * This is done in order to share the same flags between user-space and
2074 !__same_type(((struct drv_struct *)NULL)->member, \
2078 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2097 return (u64)entry->start_pgoff << PAGE_SHIFT; in rdma_user_mmap_get_offset()
2130 * spinlock and the lists_rwsem read-write semaphore */
2144 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2146 * core when the device is removed. A lifespan of -1 in the return
2152 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2153 * @index - The index in the value array we wish to have updated, or
2155 * Return codes -
2156 * < 0 - Error, no counters updated
2157 * index - Updated the single counter pointed to by index
2158 * num_counters - Updated all counters (will reset the timestamp
2188 * port @port_num to be @gid. Meta-info of that gid (for example,
2275 int (*resize_cq)(struct ib_cq *cq, int cqe,
2508 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; in ib_copy_from_udata()
2513 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; in ib_copy_to_udata()
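Both wrappers return 0 or -EFAULT, so callers normally clamp the copy length to what user space actually supplied. A minimal sketch of a verb handler returning a response (struct my_create_resp and my_fill_resp are hypothetical):

struct my_create_resp {
	__u32 cqn;
	__u32 reserved;
};

static int my_fill_resp(struct ib_udata *udata, __u32 cqn)
{
	struct my_create_resp resp = { .cqn = cqn };

	/* Never copy more than the user buffer can hold. */
	return ib_copy_to_udata(udata, &resp,
				min(sizeof(resp), udata->outlen));
}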
2538 return ib_is_buffer_cleared(udata->inbuf + offset, len); in ib_is_udata_cleared()
2542 * ib_is_destroy_retryable - Check whether the uobject destruction
2548 * This function is a helper function that the IB layer and low-level drivers
2550 * retry-able.
2560 uobj->context->cleanup_retryable); in ib_is_destroy_retryable()
2564 * ib_destroy_usecnt - Called during destruction to check the usecnt
2569 * Non-zero usecnts will block destruction unless destruction was triggered by
2576 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj)) in ib_destroy_usecnt()
2577 return -EBUSY; in ib_destroy_usecnt()
2582 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2590 * This function is a helper function that a low-level driver's
2610 * rdma_cap_ib_switch - Check if the device is an IB switch
2620 return device->is_switch; in rdma_cap_ib_switch()
2624 * rdma_start_port - Return the first valid port number for the device
2637 * rdma_end_port - Return the last valid port number for the device
2646 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; in rdma_end_port()
2658 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; in rdma_protocol_ib()
2663 return device->port_immutable[port_num].core_cap_flags & in rdma_protocol_roce()
2669 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; in rdma_protocol_roce_udp_encap()
2674 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; in rdma_protocol_roce_eth_encap()
2679 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; in rdma_protocol_iwarp()
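A minimal sketch tying the port helpers together: iterate the valid port range from rdma_start_port() to rdma_end_port() and classify each link with the rdma_protocol_* predicates above.

static void show_port_protocols(struct ib_device *device)
{
	unsigned int port;

	for (port = rdma_start_port(device);
	     port <= rdma_end_port(device); port++) {
		if (rdma_protocol_ib(device, port))
			pr_info("port %u: InfiniBand\n", port);
		else if (rdma_protocol_iwarp(device, port))
			pr_info("port %u: iWARP\n", port);
		else if (rdma_protocol_roce(device, port))
			pr_info("port %u: RoCE\n", port);
	}
}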
2689 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2698 * Return: true if the port supports sending/receiving of MAD packets.
2702 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; in rdma_cap_ib_mad()
2706 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
2722 * Return: true if the port supports OPA MAD packet formats.
2726 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) in rdma_cap_opa_mad()
2731 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2752 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; in rdma_cap_ib_smi()
2756 * rdma_cap_ib_cm - Check if the port of a device has the capability InfiniBand
2761 * The InfiniBand Communication Manager is one of many pre-defined General
2767 * Return: true if the port supports an IB CM (this does not guarantee that
2772 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; in rdma_cap_ib_cm()
2776 * rdma_cap_iw_cm - Check if the port of a device has the capability iWARP
2784 * Return: true if the port supports an iWARP CM (this does not guarantee that
2789 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; in rdma_cap_iw_cm()
2793 * rdma_cap_ib_sa - Check if the port of a device has the capability InfiniBand
2798 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2809 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; in rdma_cap_ib_sa()
2813 * rdma_cap_ib_mcast - Check if the port of a device has the capability InfiniBand
2835 * rdma_cap_af_ib - Check if the port of a device has the capability
2849 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; in rdma_cap_af_ib()
2853 * rdma_cap_eth_ah - Check if the port of a device has the capability
2870 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; in rdma_cap_eth_ah()
2874 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2887 return device->port_immutable[port_num].max_mad_size; in rdma_max_mad_size()
2891 * rdma_cap_roce_gid_table - Check if the port of a device uses roce_gid_table
2907 device->add_gid && device->del_gid; in rdma_cap_roce_gid_table()
2911 * Check if the device supports READ W/ INVALIDATE.
2956 * the rkey for it into pd->unsafe_global_rkey. This can be used by
2972 * ib_dealloc_pd_user - Deallocate kernel/user PD
2979 * ib_dealloc_pd - Deallocate kernel PD
2995 * ib_create_ah - Creates an address handle for the given address vector.
3007 * ib_create_user_ah - Creates an address handle for the given address vector.
3023 * ib_init_ah_from_wc - Initializes address handle attributes from a
3038 * ib_create_ah_from_wc - Creates an address handle associated with the
3053 * ib_modify_ah - Modifies the address vector associated with an address
3062 * ib_query_ah - Queries the address vector associated with an address
3076 * ib_destroy_ah_user - Destroys an address handle.
3084 * rdma_destroy_ah - Destroys a kernel address handle.
3096 * ib_create_srq - Creates an SRQ associated with the specified protection
3103 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3112 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3116 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3128 * ib_query_srq - Returns the attribute list and current values for the
3137 * ib_destroy_srq_user - Destroys the specified SRQ.
3144 * ib_destroy_srq - Destroys the specified kernel SRQ.
3155 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3165 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); in ib_post_srq_recv()
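A minimal sketch of posting one receive buffer to an SRQ through the wrapper above; the DMA address, length and lkey are assumed to describe an already-mapped, registered buffer.

static int post_one_srq_recv(struct ib_srq *srq, u64 dma_addr, u32 len,
			     u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}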
3169 * ib_create_qp - Creates a QP associated with the specified protection
3180 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3184 * @attr_mask: A bit-mask used to specify which attributes of the QP
3196 * ib_modify_qp - Modifies the attributes for the specified QP and then
3201 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
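A minimal sketch of the most common ib_modify_qp() use: flushing a QP by moving it to the error state, with only the state attribute set in the mask.

static int flush_qp(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	return ib_modify_qp(qp, &attr, IB_QP_STATE);
}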
3209 * ib_query_qp - Returns the attribute list and current values for the
3213 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3225 * ib_destroy_qp_user - Destroys the specified QP.
3232 * ib_destroy_qp - Destroys the specified kernel QP.
3243 * ib_open_qp - Obtain a reference to an existing sharable QP.
3244 * @xrcd - XRC domain
3253 * ib_close_qp - Release an external reference to a QP.
3262 * ib_post_send - Posts a list of work requests to the send queue of
3278 return qp->device->post_send(qp, send_wr, bad_send_wr); in ib_post_send()
3282 * ib_post_recv - Posts a list of work requests to the receive queue of
3293 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); in ib_post_recv()
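A minimal sketch of posting a signaled SEND through ib_post_send(); the sge is assumed to describe a mapped, registered buffer.

static int post_one_send(struct ib_qp *qp, struct ib_sge *sge, u64 wr_id)
{
	struct ib_send_wr wr = {
		.wr_id      = wr_id,
		.sg_list    = sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}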
3304 * @private: Private data attached to the CQ
3323 * @private: Private data attached to the CQ
3339 * ib_free_cq_user - Free kernel/user CQ
3346 * ib_free_cq - Free kernel CQ
3357 * ib_create_cq - Creates a CQ on the specified device.
3359 * @comp_handler: A user-specified callback that is invoked when a
3361 * @event_handler: A user-specified callback that is invoked when an
3379 * ib_resize_cq - Modifies the capacity of the CQ.
3381 * @cqe: The minimum size of the CQ.
3385 int ib_resize_cq(struct ib_cq *cq, int cqe);
3388 * ib_modify_cq - Modifies moderation params of the CQ
3397 * ib_destroy_cq_user - Destroys the specified CQ.
3404 * ib_destroy_cq - Destroys the specified kernel CQ.
3415 * ib_poll_cq - poll a CQ for completion(s)
3424 * non-negative and < num_entries, then the CQ was emptied.
3429 return cq->device->poll_cq(cq, num_entries, wc); in ib_poll_cq()
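A minimal sketch of the usual polling loop: drain until ib_poll_cq() returns zero and check each completion's status.

static void drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int i, n;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_err("wr_id %llu: %s\n",
				       (unsigned long long)wc[i].wr_id,
				       ib_wc_status_msg(wc[i].status));
		}
	}
}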
3433 * ib_peek_cq - Returns the number of unreaped completions currently
3445 * ib_req_notify_cq - Request completion notification on a CQ.
3474 return cq->device->req_notify_cq(cq, flags); in ib_req_notify_cq()
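A minimal sketch of the rearm-then-repoll idiom built on the flags above: with IB_CQ_REPORT_MISSED_EVENTS, a positive return means completions arrived while rearming, so the caller polls again instead of sleeping (drain_cq is the hypothetical helper sketched earlier).

static void rearm_cq(struct ib_cq *cq)
{
	while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				IB_CQ_REPORT_MISSED_EVENTS) > 0)
		drain_cq(cq);
}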
3478 * ib_req_ncomp_notif - Request completion notification when there are
3486 return cq->device->req_ncomp_notif ? in ib_req_ncomp_notif()
3487 cq->device->req_ncomp_notif(cq, wc_cnt) : in ib_req_ncomp_notif()
3488 -ENOSYS; in ib_req_ncomp_notif()
3492 * ib_dma_mapping_error - check a DMA addr for error
3498 if (dev->dma_ops) in ib_dma_mapping_error()
3499 return dev->dma_ops->mapping_error(dev, dma_addr); in ib_dma_mapping_error()
3500 return dma_mapping_error(dev->dma_device, dma_addr); in ib_dma_mapping_error()
3504 * ib_dma_map_single - Map a kernel virtual address to DMA address
3514 if (dev->dma_ops) in ib_dma_map_single()
3515 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); in ib_dma_map_single()
3516 return dma_map_single(dev->dma_device, cpu_addr, size, direction); in ib_dma_map_single()
3520 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3530 if (dev->dma_ops) in ib_dma_unmap_single()
3531 dev->dma_ops->unmap_single(dev, addr, size, direction); in ib_dma_unmap_single()
3533 dma_unmap_single(dev->dma_device, addr, size, direction); in ib_dma_unmap_single()
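A minimal sketch pairing ib_dma_map_single() with the error check above; DMA_TO_DEVICE suits a buffer the HCA will only read.

static int map_tx_buf(struct ib_device *dev, void *buf, size_t len,
		      u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, *dma_addr))
		return -ENOMEM;
	return 0;
}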
3541 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, in ib_dma_map_single_attrs()
3550 return dma_unmap_single_attrs(dev->dma_device, addr, size, in ib_dma_unmap_single_attrs()
3555 * ib_dma_map_page - Map a physical page to DMA address
3568 if (dev->dma_ops) in ib_dma_map_page()
3569 return dev->dma_ops->map_page(dev, page, offset, size, direction); in ib_dma_map_page()
3570 return dma_map_page(dev->dma_device, page, offset, size, direction); in ib_dma_map_page()
3574 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3584 if (dev->dma_ops) in ib_dma_unmap_page()
3585 dev->dma_ops->unmap_page(dev, addr, size, direction); in ib_dma_unmap_page()
3587 dma_unmap_page(dev->dma_device, addr, size, direction); in ib_dma_unmap_page()
3591 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3601 if (dev->dma_ops) in ib_dma_map_sg()
3602 return dev->dma_ops->map_sg(dev, sg, nents, direction); in ib_dma_map_sg()
3603 return dma_map_sg(dev->dma_device, sg, nents, direction); in ib_dma_map_sg()
3607 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3617 if (dev->dma_ops) in ib_dma_unmap_sg()
3618 dev->dma_ops->unmap_sg(dev, sg, nents, direction); in ib_dma_unmap_sg()
3620 dma_unmap_sg(dev->dma_device, sg, nents, direction); in ib_dma_unmap_sg()
3628 if (dev->dma_ops) in ib_dma_map_sg_attrs()
3629 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, in ib_dma_map_sg_attrs()
3632 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, in ib_dma_map_sg_attrs()
3641 if (dev->dma_ops) in ib_dma_unmap_sg_attrs()
3642 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, in ib_dma_unmap_sg_attrs()
3645 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, in ib_dma_unmap_sg_attrs()
3649 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3663 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3677 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3688 if (dev->dma_ops) in ib_dma_sync_single_for_cpu()
3689 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); in ib_dma_sync_single_for_cpu()
3691 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); in ib_dma_sync_single_for_cpu()
3695 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3706 if (dev->dma_ops) in ib_dma_sync_single_for_device()
3707 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); in ib_dma_sync_single_for_device()
3709 dma_sync_single_for_device(dev->dma_device, addr, size, dir); in ib_dma_sync_single_for_device()
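A minimal sketch of the sync pairing for a streaming mapping the device writes into: hand the region to the CPU, read it, then hand it back before reposting.

static void peek_rx_buf(struct ib_device *dev, u64 addr, size_t len)
{
	ib_dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... CPU may now safely read the buffer backing @addr ... */
	ib_dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}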
3713 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3724 if (dev->dma_ops) in ib_dma_alloc_coherent()
3725 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); in ib_dma_alloc_coherent()
3730 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); in ib_dma_alloc_coherent()
3737 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3747 if (dev->dma_ops) in ib_dma_free_coherent()
3748 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); in ib_dma_free_coherent()
3750 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); in ib_dma_free_coherent()
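A minimal sketch of a coherent allocation for a descriptor ring that both CPU and HCA touch; the matching free must use the same size and handle.

static void *alloc_ring(struct ib_device *dev, size_t size, u64 *dma_handle)
{
	return ib_dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void free_ring(struct ib_device *dev, size_t size, void *ring,
		      u64 dma_handle)
{
	ib_dma_free_coherent(dev, size, ring, dma_handle);
}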
3754 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
3763 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
3790 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3792 * @mr - struct ib_mr pointer to be updated.
3793 * @newkey - new key to be used.
3797 mr->lkey = (mr->lkey & 0xffffff00) | newkey; in ib_update_fast_reg_key()
3798 mr->rkey = (mr->rkey & 0xffffff00) | newkey; in ib_update_fast_reg_key()
3802 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3804 * @rkey - the rkey to increment.
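The two helpers above are typically combined when recycling a fast-registration MR, as several upstream ULPs do: bump the key byte so stale remote references to the old rkey fail with a protection error.

static void refresh_mr_key(struct ib_mr *mr)
{
	/* ib_inc_rkey() returns a full rkey; only its low key byte is used. */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
}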
3813 * ib_alloc_fmr - Allocates an unmapped fast memory region.
3826 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3836 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); in ib_map_phys_fmr()
3840 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3846 * ib_dealloc_fmr - Deallocates a fast memory region.
3852 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3866 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3874 * ib_alloc_xrcd - Allocates an XRC domain.
3883 * ib_dealloc_xrcd - Deallocates an XRC domain.
3897 return -EINVAL; in ib_check_mr_access()
3900 return -EINVAL; in ib_check_mr_access()
3957 mr->iova = 0; in ib_map_mr_sg_zbva()
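A minimal sketch of using the zero-based mapping above and posting the fast registration; error handling is abbreviated and the access flags are illustrative.

static int reg_mr_zbva(struct ib_qp *qp, struct ib_mr *mr,
		       struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr wr = {};
	const struct ib_send_wr *bad_wr;
	int n;

	n = ib_map_mr_sg_zbva(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	wr.wr.opcode = IB_WR_REG_MR;
	wr.mr = mr;
	wr.key = mr->rkey;
	wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
	return ib_post_send(qp, &wr.wr, &bad_wr);
}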