/* Source: drivers/infiniband/hw/bnxt_re/ib_verbs.h (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21) */
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter (header)
37  */
38 
39 #ifndef __BNXT_RE_IB_VERBS_H__
40 #define __BNXT_RE_IB_VERBS_H__
41 
/* Driver-private context kept per GID table entry; refcounted so that
 * duplicate adds of the same GID can share one hardware entry
 * (NOTE(review): inferred from add_gid/del_gid usage — confirm in ib_verbs.c).
 */
struct bnxt_re_gid_ctx {
	u32			idx;	/* hardware GID table index */
	u32			refcnt;	/* number of users of this entry */
};
46 
#define BNXT_RE_FENCE_BYTES	64	/* size of the per-PD fence buffer */
/* Per-PD resources used to emit a fence (memory-bind) work request. */
struct bnxt_re_fence_data {
	u32 size;				/* registered size of the fence region */
	u8 va[BNXT_RE_FENCE_BYTES];		/* fence buffer, DMA-mapped at @dma_addr */
	dma_addr_t dma_addr;			/* DMA address of @va */
	struct bnxt_re_mr *mr;			/* MR covering the fence buffer */
	struct ib_mw *mw;			/* memory window bound over @mr */
	struct bnxt_qplib_swqe bind_wqe;	/* prebuilt bind WQE posted for fencing */
	u32 bind_rkey;				/* rkey the bind WQE targets */
};
57 
/* Protection domain: core ib_pd embedded first (for container_of), plus the
 * qplib-level PD, per-PD fence resources, and user-mmap entries for the
 * doorbell pages handed to userspace.
 */
struct bnxt_re_pd {
	struct ib_pd            ib_pd;		/* must stay first; core allocates around it */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_pd	qplib_pd;	/* low-level PD handle */
	struct bnxt_re_fence_data fence;	/* fence-WR resources for this PD */
	struct rdma_user_mmap_entry *pd_db_mmap;	/* uncached doorbell page mapping */
	struct rdma_user_mmap_entry *pd_wcdb_mmap;	/* write-combining doorbell mapping */
};
66 
/* Address handle: core ib_ah plus the qplib-level AH state. */
struct bnxt_re_ah {
	struct ib_ah		ib_ah;		/* embedded core object */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_ah	qplib_ah;	/* low-level AH handle */
};
72 
/* Shared receive queue. */
struct bnxt_re_srq {
	struct ib_srq		ib_srq;		/* embedded core object */
	struct bnxt_re_dev	*rdev;		/* owning device */
	u32			srq_limit;	/* armed low-watermark (set via modify_srq) */
	struct bnxt_qplib_srq	qplib_srq;	/* low-level SRQ state */
	struct ib_umem		*umem;		/* user memory backing the SRQ (NULL for kernel SRQs) */
	spinlock_t		lock;		/* protect srq */
	void			*uctx_srq_page;	/* page shared with userspace — presumably toggle/async info; verify */
	struct hlist_node       hash_entry;	/* node in a per-device SRQ hash table */
};
83 
/* Queue pair. */
struct bnxt_re_qp {
	struct ib_qp		ib_qp;		/* embedded core object */
	struct list_head	list;		/* entry on a per-device QP list */
	struct bnxt_re_dev	*rdev;		/* owning device */
	spinlock_t		sq_lock;	/* protect sq */
	spinlock_t		rq_lock;	/* protect rq */
	struct bnxt_qplib_qp	qplib_qp;	/* low-level QP state */
	struct ib_umem		*sumem;		/* user memory for the send queue */
	struct ib_umem		*rumem;		/* user memory for the receive queue */
	/* QP1 (GSI) software-assisted send state */
	u32			send_psn;	/* next PSN used when building QP1 headers */
	struct ib_ud_header	qp1_hdr;	/* scratch UD header for QP1 sends */
	struct bnxt_re_cq	*scq;		/* send completion queue */
	struct bnxt_re_cq	*rcq;		/* receive completion queue */
	struct dentry		*dentry;	/* debugfs entry for this QP */
};
100 
/* Completion queue. */
struct bnxt_re_cq {
	struct ib_cq		ib_cq;		/* embedded core object */
	struct bnxt_re_dev	*rdev;		/* owning device */
	spinlock_t              cq_lock;	/* protect cq */
	u16			cq_count;	/* moderation: completions per event — TODO confirm units */
	u16			cq_period;	/* moderation: coalescing period — TODO confirm units */
	struct bnxt_qplib_cq	qplib_cq;	/* low-level CQ state */
	struct bnxt_qplib_cqe	*cql;		/* scratch CQE array used while polling */
#define MAX_CQL_PER_POLL	1024		/* upper bound on @cql entries per poll */
	u32			max_cql;	/* allocated length of @cql */
	struct ib_umem		*umem;		/* user memory backing the CQ */
	struct ib_umem		*resize_umem;	/* replacement umem while a resize is in flight */
	int			resize_cqe;	/* requested CQE count for the pending resize */
	void			*uctx_cq_page;	/* page shared with userspace — presumably toggle info; verify */
	struct hlist_node	hash_entry;	/* node in a per-device CQ hash table */
};
117 
/* Memory region. */
struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mr		ib_mr;		/* embedded core object */
	struct ib_umem		*ib_umem;	/* pinned user memory (NULL for DMA/FR MRs) */
	struct bnxt_qplib_mrw	qplib_mr;	/* low-level MR/MW state */
	u32			npages;		/* number of entries filled in @pages */
	u64			*pages;		/* page list used by map_mr_sg */
	struct bnxt_qplib_frpl	qplib_frpl;	/* fast-register page list (for IB_MR_TYPE_MEM_REG) */
};
127 
/* Fast-register page list container. */
struct bnxt_re_frpl {
	struct bnxt_re_dev		*rdev;		/* owning device */
	struct bnxt_qplib_frpl		qplib_frpl;	/* low-level page-list handle */
	u64				*page_list;	/* physical page addresses */
};
133 
/* Memory window. */
struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mw		ib_mw;		/* embedded core object */
	struct bnxt_qplib_mrw	qplib_mw;	/* low-level MW state */
};
139 
/* Per-process user context. */
struct bnxt_re_ucontext {
	struct ib_ucontext      ib_uctx;	/* embedded core object */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_dpi	dpi;		/* doorbell page for this context */
	struct bnxt_qplib_dpi   wcdpi;		/* write-combining doorbell page */
	void			*shpg;		/* page shared with userspace */
	spinlock_t		sh_lock;	/* protect shpg */
	struct rdma_user_mmap_entry *shpage_mmap;	/* mmap entry exposing @shpg */
	u64 cmask;				/* capability bits (BNXT_RE_UCNTX_CAP_*) reported to userspace */
};
150 
/* Kind of region a user-mmap entry maps; stored in
 * bnxt_re_user_mmap_entry::mmap_flag and dispatched on in bnxt_re_mmap().
 */
enum bnxt_re_mmap_flag {
	BNXT_RE_MMAP_SH_PAGE,		/* shared page (ucontext shpg) */
	BNXT_RE_MMAP_UC_DB,		/* uncached doorbell page */
	BNXT_RE_MMAP_WC_DB,		/* write-combining doorbell page */
	BNXT_RE_MMAP_DBR_PAGE,		/* doorbell-recovery page — TODO confirm */
	BNXT_RE_MMAP_DBR_BAR,		/* doorbell BAR region — TODO confirm */
	BNXT_RE_MMAP_TOGGLE_PAGE,	/* CQ/SRQ toggle page */
};
159 
/* Driver wrapper around the core rdma_user_mmap_entry. */
struct bnxt_re_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* embedded core entry; must stay first */
	struct bnxt_re_ucontext *uctx;		/* owning user context */
	u64 mem_offset;				/* physical/bus offset of the mapped region */
	u8 mmap_flag;				/* one of enum bnxt_re_mmap_flag */
};
166 
/* Flow steering rule: core ib_flow plus the owning device. */
struct bnxt_re_flow {
	struct ib_flow		ib_flow;	/* embedded core object */
	struct bnxt_re_dev	*rdev;		/* owning device */
};
171 
172 static inline u16 bnxt_re_get_swqe_size(int nsge)
173 {
174 	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
175 }
176 
177 static inline u16 bnxt_re_get_rwqe_size(int nsge)
178 {
179 	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
180 }
181 
/* Capability bits carried in bnxt_re_ucontext::cmask and reported to
 * userspace at alloc_ucontext time.
 */
enum {
	BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,	/* queue depths need not be rounded to a power of two */
	BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,	/* variable-size WQE mode is enabled */
};
186 
187 static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
188 {
189 	return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ?
190 		ent : roundup_pow_of_two(ent) : ent;
191 }
192 
193 static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
194 						 struct bnxt_re_ucontext *uctx)
195 {
196 	if (uctx)
197 		return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
198 	else
199 		return rdev->chip_ctx->modes.wqe_mode;
200 }
201 
/* Device and port queries */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable);
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey);
/* GID table management */
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num);
/* Protection domains */
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
/* Address handles */
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
/* Shared receive queues */
int bnxt_re_create_srq(struct ib_srq *srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
			  const struct ib_recv_wr **bad_recv_wr);
/* Queue pairs */
int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
		      const struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
		      const struct ib_recv_wr **bad_recv_wr);
/* Completion queues */
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs);
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
/* Memory regions and windows */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_dmah *dmah,
				  struct ib_udata *udata);
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int mr_access_flags,
					 struct ib_dmah *dmah,
					 struct uverbs_attr_bundle *attrs);
/* User contexts and flow steering */
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
				    struct ib_flow_attr *attr,
				    struct ib_udata *udata);
int bnxt_re_destroy_flow(struct ib_flow *flow_id);

/* Userspace mmap support */
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);

/* MAD processing */
int bnxt_re_process_mad(struct ib_device *device, int process_mad_flags,
			u32 port_num, const struct ib_wc *in_wc,
			const struct ib_grh *in_grh,
			const struct ib_mad *in_mad, struct ib_mad *out_mad,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
289 static inline u32 __to_ib_port_num(u16 port_id)
290 {
291 	return (u32)port_id + 1;
292 }
293 
/* Take both of @qp's CQ locks (returns saved IRQ flags); release with
 * bnxt_re_unlock_cqs().
 */
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
296 #endif /* __BNXT_RE_IB_VERBS_H__ */
297