/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter (header)
 */

#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__

#include <rdma/ib_addr.h>
#include "bnxt_re-abi.h"
#include "qplib_res.h"
#include "qplib_fp.h"

struct bnxt_re_dev;

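/*
 * Fixed UDP source port the driver uses for RoCEv2 traffic (RoCEv2
 * itself fixes only the destination port, 4791) and the Q-Key value
 * handed out when a "random" Q-Key is requested; both are
 * driver-chosen constants.
 */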
#define BNXT_RE_ROCE_V2_UDP_SPORT	0x8CD1
#define BNXT_RE_QP_RANDOM_QKEY		0x81818181

#ifndef IB_MTU_8192
#define IB_MTU_8192 8192
#endif

#ifndef SPEED_1000
#define SPEED_1000		1000
#endif

#ifndef SPEED_10000
#define SPEED_10000		10000
#endif

#ifndef SPEED_20000
#define SPEED_20000		20000
#endif

#ifndef SPEED_25000
#define SPEED_25000		25000
#endif

#ifndef SPEED_40000
#define SPEED_40000		40000
#endif

#ifndef SPEED_50000
#define SPEED_50000		50000
#endif

#ifndef SPEED_100000
#define SPEED_100000		100000
#endif

#ifndef SPEED_200000
#define SPEED_200000		200000
#endif

#ifndef IB_SPEED_HDR
#define IB_SPEED_HDR		64
#endif

#define RDMA_NETWORK_IPV4	1
#define RDMA_NETWORK_IPV6	2

#define ROCE_DMAC(x) (x)->dmac

#define dma_rmb()       rmb()

#define compat_ib_alloc_device(size) ib_alloc_device(size)

#define rdev_from_cq_in(cq_in) to_bnxt_re_dev(cq_in->device, ibdev)

#define GET_UVERBS_ABI_VERSION(ibdev)	(ibdev->uverbs_abi_ver)

#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB 0x1cUL

#define IB_POLL_UNBOUND_WORKQUEUE       IB_POLL_WORKQUEUE

#define BNXT_RE_LEGACY_FENCE_BYTES	64
#define BNXT_RE_LEGACY_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_LEGACY_FENCE_BYTES, PAGE_SIZE)

static inline struct bnxt_re_cq *
__get_cq_from_cq_in(struct ib_cq *cq_in,
		    struct bnxt_re_dev *rdev);
static inline struct bnxt_re_qp *
__get_qp_from_qp_in(struct ib_pd *qp_in,
		    struct bnxt_re_dev *rdev);

static inline bool
bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, u16 vlan_id);

#define bnxt_re_compat_qfwstr(void)			\
	bnxt_re_query_fw_str(struct ib_device *ibdev,	\
			     char *str, size_t str_len)

static inline struct scatterlist *
get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap);

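/*
 * Per-GID context returned to the stack from the add_gid handler.
 * idx is the HW GID table index; refcnt tracks repeated adds of the
 * same GID (e.g. for multiple RoCE versions) so the HW entry is only
 * freed on the last delete.
 */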
struct bnxt_re_gid_ctx {
	u32			idx;
	u32			refcnt;
};

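/*
 * Resources backing the legacy fence workaround: a small DMA buffer is
 * registered as an MR and bound through an MW, and the prebuilt bind
 * WQE (bind_wqe/bind_rkey) can then be posted to emulate fence
 * semantics on adapters without native fence support (mechanism
 * presumed from the fields kept here).
 */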
struct bnxt_re_legacy_fence_data {
	u32 size;
	void *va;
	dma_addr_t dma_addr;
	struct bnxt_re_mr *mr;
	struct ib_mw *mw;
	struct bnxt_qplib_swqe bind_wqe;
	u32 bind_rkey;
};

struct bnxt_re_pd {
	struct ib_pd		ibpd;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_pd	qplib_pd;
	struct bnxt_re_legacy_fence_data fence;
};

struct bnxt_re_ah {
	struct ib_ah		ibah;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_ah	qplib_ah;
};

struct bnxt_re_srq {
	struct ib_srq		ibsrq;
	struct bnxt_re_dev	*rdev;
	u32			srq_limit;
	struct bnxt_qplib_srq	qplib_srq;
	struct ib_umem		*umem;
	spinlock_t		lock;
};

union ip_addr {
	u32 ipv4_addr;
	u8  ipv6_addr[16];
};

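/*
 * Addressing identity cached per QP for the QP-info/debug entries.
 * BNXT_RE_QP_DEST_PORT is the IANA-assigned RoCEv2 UDP destination
 * port (4791).
 */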
struct bnxt_re_qp_info_entry {
	union ib_gid		sgid;
	union ib_gid		dgid;
	union ip_addr		s_ip;
	union ip_addr		d_ip;
	u16			s_port;
#define BNXT_RE_QP_DEST_PORT	4791
	u16			d_port;
};

struct bnxt_re_qp {
	struct ib_qp		ib_qp;
	struct list_head	list;
	struct bnxt_re_dev	*rdev;
	spinlock_t		sq_lock;
	spinlock_t		rq_lock;
	struct bnxt_qplib_qp	qplib_qp;
	struct ib_umem		*sumem;
	struct ib_umem		*rumem;
	/* QP1 */
	u32			send_psn;
	struct ib_ud_header	qp1_hdr;
	struct bnxt_re_cq	*scq;
	struct bnxt_re_cq	*rcq;
	struct dentry		*qp_info_pdev_dentry;
	struct bnxt_re_qp_info_entry qp_info_entry;
	void			*qp_data;
};

struct bnxt_re_cq {
	struct ib_cq		ibcq;
	struct list_head	cq_list;
	struct bnxt_re_dev	*rdev;
	struct bnxt_re_ucontext *uctx;
	spinlock_t		cq_lock;
	u16			cq_count;
	u16			cq_period;
	struct bnxt_qplib_cq	qplib_cq;
	struct bnxt_qplib_cqe	*cql;
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;
	struct ib_umem		*umem;
	struct ib_umem		*resize_umem;
	struct ib_ucontext	*context;
	int			resize_cqe;
	/* list of cq per uctx. Used only for Thor-2 */
	void			*uctx_cq_page;
	void			*dbr_recov_cq_page;
	bool			is_dbr_soft_cq;
};

struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;
	struct ib_mr		ib_mr;
	struct ib_umem		*ib_umem;
	struct bnxt_qplib_mrw	qplib_mr;
	u32			npages;
	u64			*pages;
	struct bnxt_qplib_frpl	qplib_frpl;
	bool			is_invalcb_active;
};

struct bnxt_re_frpl {
	struct bnxt_re_dev		*rdev;
	struct bnxt_qplib_frpl		qplib_frpl;
	u64				*page_list;
};

struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;
	struct ib_mw		ib_mw;
	struct bnxt_qplib_mrw	qplib_mw;
};

struct bnxt_re_ucontext {
	struct ib_ucontext	ibucontext;
	struct bnxt_re_dev	*rdev;
	struct list_head	cq_list;
	struct bnxt_qplib_dpi	dpi;
	struct bnxt_qplib_dpi	wcdpi;
	void			*shpg;
	spinlock_t		sh_lock;
	uint64_t		cmask;
	struct mutex		cq_lock;	/* Protect cq list */
	void			*dbr_recov_cq_page;
	struct bnxt_re_cq	*dbr_recov_cq;
};

struct bnxt_re_ah_info {
	union ib_gid		sgid;
	struct ib_gid_attr	sgid_attr;
	u16			vlan_tag;
	u8			nw_type;
};

struct ifnet *bnxt_re_get_netdev(struct ib_device *ibdev,
				 u8 port_num);

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
			int port_modify_mask,
			struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable);
void bnxt_re_compat_qfwstr(void);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context);
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd_in, struct ib_udata *udata);
void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata);

int bnxt_re_create_ah(struct ib_ah *ah_in, struct ib_ah_attr *attr,
		      u32 flags, struct ib_udata *udata);

int bnxt_re_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq_in,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
void bnxt_re_destroy_srq(struct ib_srq *ib_srq,
			 struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *qp_in,
			       struct ib_qp_init_attr *qp_init_attr,
			       struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int bnxt_re_create_cq(struct ib_cq *cq_in,
		      const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int bnxt_re_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int bnxt_re_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg,
		      int sg_nents, unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg, struct ib_udata *udata);
int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);
int
bnxt_re_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
		      u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
		      struct ib_udata *udata);
int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
			   struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *wc, const struct ib_grh *grh,
			const struct ib_mad_hdr *in_mad, size_t in_mad_size,
			struct ib_mad_hdr *out_mad, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
void bnxt_re_disassociate_ucntx(struct ib_ucontext *ibcontext);
static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
					 struct vm_area_struct *vma);
void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev);
void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq);
static inline int
bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index,
		       union ib_gid *sgid, struct ib_gid_attr **sgid_attr,
		       struct ib_global_route *grh, struct ib_ah *ah);
static inline enum rdma_network_type
bnxt_re_gid_to_network_type(struct ib_gid_attr *sgid_attr,
			    union ib_gid *sgid);
static inline
struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
				   struct ib_ucontext *ucontext,
				   struct ib_udata *udata,
				   unsigned long addr,
				   size_t size, int access, int dmasync);
static inline
struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
					 struct ib_ucontext *ucontext,
					 struct ib_udata *udata,
					 unsigned long addr,
					 size_t size, int access, int dmasync);
static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem);
static inline void bnxt_re_peer_mem_release(struct ib_umem *umem);
void bnxt_re_resolve_dmac_task(struct work_struct *work);

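/*
 * Map a FW QP type (CMDQ_CREATE_QP*_TYPE_*) to the ib_verbs QP type.
 * Unknown values map to IB_QPT_MAX so callers can reject them, e.g.
 * (illustrative only, 'hw_type' stands for the caller's variable):
 *
 *	if (__from_hw_to_ib_qp_type(hw_type) == IB_QPT_MAX)
 *		return -EINVAL;
 */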
static inline enum ib_qp_type __from_hw_to_ib_qp_type(u8 type)
{
	switch (type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
	case CMDQ_CREATE_QP_TYPE_GSI:
		return IB_QPT_GSI;
	case CMDQ_CREATE_QP_TYPE_RC:
		return IB_QPT_RC;
	case CMDQ_CREATE_QP_TYPE_UD:
		return IB_QPT_UD;
	case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE:
		return IB_QPT_RAW_ETHERTYPE;
	default:
		return IB_QPT_MAX;
	}
}

static inline u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

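/*
 * __from_ib_mtu()/__to_ib_mtu() translate between the ib_verbs MTU
 * enum and the FW path-MTU encoding. Note the pair is not a strict
 * inverse: both directions fall back to the 2048-byte encoding, so an
 * unsupported input value round-trips to IB_MTU_2048.
 */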
static inline u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static inline enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_8192:
		return IB_MTU_8192;
	default:
		return IB_MTU_2048;
	}
}

static inline enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

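/*
 * Capability handshake with userspace: queue depths are normally
 * rounded up to a power of two (see bnxt_re_init_depth() below). A
 * library that sets BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT can work
 * with exact depths, so the rounding is disabled via the response
 * comp_mask; for older libraries the flag is cleared again and the
 * -EINVAL return signals that rounding stays in effect.
 */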
static inline int bnxt_re_init_pow2_flag(struct bnxt_re_uctx_req *req,
					 struct bnxt_re_uctx_resp *resp)
{
	resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
	if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT)) {
		resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
		return -EINVAL;
	}
	return 0;
}

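/*
 * Worked examples of the sizing below:
 *
 *	bnxt_re_init_depth(1000, NULL) -> 1000 (kernel consumer)
 *	bnxt_re_init_depth(1000, uctx) -> 1024 (default pow2 rounding)
 *	bnxt_re_init_depth(1000, uctx) -> 1000 when POW2_DISABLED is set
 *					  in uctx->cmask
 */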
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
	if (!uctx || (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED))
		return ent;
	return roundup_pow_of_two(ent);
}

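/*
 * Same handshake as bnxt_re_init_pow2_flag(), but for the reserved
 * send WQEs: RSVD_WQE_DISABLED stays set in the response only when the
 * library requested it and the adapter is gen P5 or newer.
 */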
static inline int bnxt_re_init_rsvd_wqe_flag(struct bnxt_re_uctx_req *req,
					     struct bnxt_re_uctx_resp *resp,
					     bool genp5)
{
	resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
	if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE)) {
		resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
		return -EINVAL;
	} else if (!genp5) {
		resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
	}
	return 0;
}

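/*
 * Number of send WQEs the driver reserves on top of the requested
 * depth: gen P4 needs BNXT_QPLIB_RESERVED_QP_WRS extra slots, while a
 * ucontext that negotiated RSVD_WQE_DISABLED (possible only on gen
 * P5/P7, see above) needs none. Kernel consumers (uctx == NULL) follow
 * the chip generation alone.
 */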
static inline u32 bnxt_re_get_diff(struct bnxt_re_ucontext *uctx,
				   struct bnxt_qplib_chip_ctx *cctx)
{
	if (!uctx) {
		/* No ucontext: reserved WQEs apply only to gen P4 */
		return _is_chip_gen_p5_p7(cctx) ? 0 : BNXT_QPLIB_RESERVED_QP_WRS;
	} else if (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED) {
		return 0;
	}
	/* old lib */
	return BNXT_QPLIB_RESERVED_QP_WRS;
}

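/*
 * Clamp the requested path MTU to the interface MTU. For example, on a
 * 1500-byte Ethernet link iboe_get_mtu() yields IB_MTU_1024, so a user
 * request for IB_MTU_4096 via IB_QP_PATH_MTU is trimmed to IB_MTU_1024
 * and *is_qpmtu_high is set so the caller can flag the downgrade;
 * qplib_qp.mtu then carries the integer MTU used for PSN arithmetic.
 */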
static inline void bnxt_re_init_qpmtu(struct bnxt_re_qp *qp, int mtu,
				      int mask, struct ib_qp_attr *qp_attr,
				      bool *is_qpmtu_high)
{
	int qpmtu, qpmtu_int;
	int ifmtu, ifmtu_int;

	ifmtu = iboe_get_mtu(mtu);
	ifmtu_int = ib_mtu_enum_to_int(ifmtu);
	qpmtu = ifmtu;
	qpmtu_int = ifmtu_int;
	if (mask & IB_QP_PATH_MTU) {
		qpmtu = qp_attr->path_mtu;
		qpmtu_int = ib_mtu_enum_to_int(qpmtu);
		if (qpmtu_int > ifmtu_int) {
			/* Trim the QP path MTU to the interface MTU and
			 * report the new MTU to the user QP for
			 * retransmission PSN calculations.
			 */
			qpmtu = ifmtu;
			qpmtu_int = ifmtu_int;
			*is_qpmtu_high = true;
		}
	}
	qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
	qp->qplib_qp.mtu = qpmtu_int;
	qp->qplib_qp.modify_flags |=
		CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
}

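/*
 * Compare two 14-byte Ethernet headers without a byte loop: the first
 * two bytes are XORed as a u16 and the remaining twelve as three u32
 * loads, so the result is 0 if and only if the headers are identical
 * (memcmp() semantics for the equality case). Example, with
 * illustrative operands:
 *
 *	if (!compare_ether_header(eth_a, eth_b))
 *		...	headers match
 */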
static inline unsigned long compare_ether_header(void *a, void *b)
{
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
}

struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

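/*
 * Table-driven CRC-16 over the reflected 0x8005 polynomial, i.e.
 * CRC-16/ARC when seeded with 0. Typical use, with buf/len describing
 * the bytes to cover:
 *
 *	u16 csum = crc16(0, buf, len);
 */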
static inline uint16_t
crc16(uint16_t crc, const void *buffer, unsigned int len)
{
	const unsigned char *cp = buffer;
	/* CRC table for the CRC-16. The poly is 0x8005 (x16 + x15 + x2 + 1). */
	static uint16_t const crc16_table[256] = {
		0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
		0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
		0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
		0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
		0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
		0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
		0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
		0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
		0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
		0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
		0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
		0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
		0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
		0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
		0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
		0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
		0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
		0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
		0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
		0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
		0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
		0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
		0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
		0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
		0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
		0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
		0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
		0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
		0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
		0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
		0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
		0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
	};

	while (len--)
		crc = (((crc >> 8) & 0xffU) ^
		    crc16_table[(crc ^ *cp++) & 0xffU]) & 0x0000ffffU;
	return crc;
}

static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
					 struct vm_area_struct *vma)
{
	return 0;
}

static inline bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
					       u16 vlan_id)
{
	bool ret = true;
	/*
	 * Check if the VLAN is configured in the host.
	 * If not configured, it can be a transparent
	 * VLAN, so don't report the VLAN id.
	 */
	return ret;
}

#endif