/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter (header)
 */

#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__

#include <rdma/ib_addr.h>
#include "bnxt_re-abi.h"
#include "qplib_res.h"
#include "qplib_fp.h"

struct bnxt_re_dev;

#define BNXT_RE_ROCE_V2_UDP_SPORT	0x8CD1
#define BNXT_RE_QP_RANDOM_QKEY		0x81818181

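/*
 * Compat shims: older kernel/OS headers may not provide all of the
 * link-speed, MTU and RDMA constants used below, so fallback
 * definitions (under #ifndef guards) and small compat macros are
 * supplied here.
 */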
#ifndef IB_MTU_8192
#define IB_MTU_8192	8192
#endif

#ifndef SPEED_1000
#define SPEED_1000	1000
#endif

#ifndef SPEED_2500
#define SPEED_2500	2500
#endif

#ifndef SPEED_5000
#define SPEED_5000	5000
#endif

#ifndef SPEED_10000
#define SPEED_10000	10000
#endif

#ifndef SPEED_14000
#define SPEED_14000	14000
#endif

#ifndef SPEED_20000
#define SPEED_20000	20000
#endif

#ifndef SPEED_25000
#define SPEED_25000	25000
#endif

#ifndef SPEED_40000
#define SPEED_40000	40000
#endif

#ifndef SPEED_50000
#define SPEED_50000	50000
#endif

#ifndef SPEED_100000
#define SPEED_100000	100000
#endif

#ifndef SPEED_200000
#define SPEED_200000	200000
#endif

#ifndef SPEED_400000
#define SPEED_400000	400000
#endif

#ifndef IB_SPEED_HDR
#define IB_SPEED_HDR	64
#endif

#ifndef IB_SPEED_NDR
#define IB_SPEED_NDR	128
#endif

#define RDMA_NETWORK_IPV4	1
#define RDMA_NETWORK_IPV6	2

#define ROCE_DMAC(x)	(x)->dmac

#define dma_rmb()	rmb()

#define compat_ib_alloc_device(size)	ib_alloc_device(size)

#define rdev_from_cq_in(cq_in)	to_bnxt_re_dev(cq_in->device, ibdev)

#define GET_UVERBS_ABI_VERSION(ibdev)	(ibdev->uverbs_abi_ver)

#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB	0x1cUL

#define IB_POLL_UNBOUND_WORKQUEUE	IB_POLL_WORKQUEUE

#define BNXT_RE_LEGACY_FENCE_BYTES	64
#define BNXT_RE_LEGACY_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_LEGACY_FENCE_BYTES, PAGE_SIZE)

static inline struct bnxt_re_cq *__get_cq_from_cq_in(struct ib_cq *cq_in,
						     struct bnxt_re_dev *rdev);
static inline struct bnxt_re_qp *__get_qp_from_qp_in(struct ib_pd *qp_in,
						     struct bnxt_re_dev *rdev);

static inline bool
bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, u16 vlan_id);

#define bnxt_re_compat_qfwstr(void)			\
	bnxt_re_query_fw_str(struct ib_device *ibdev,	\
			     char *str, size_t str_len)

static inline
struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap);

struct bnxt_re_gid_ctx {
	u32			idx;
	u32			refcnt;
};

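/*
 * Legacy fence bookkeeping: a driver-owned MR/MW pair plus a prebuilt
 * bind WQE. On older (gen P4) chips this is presumably posted on the
 * send queue to emulate fence semantics; newer chips do not need it.
 */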
struct bnxt_re_legacy_fence_data {
	u32			size;
	void			*va;
	dma_addr_t		dma_addr;
	struct bnxt_re_mr	*mr;
	struct ib_mw		*mw;
	struct bnxt_qplib_swqe	bind_wqe;
	u32			bind_rkey;
};

struct bnxt_re_pd {
	struct ib_pd		ibpd;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_pd	qplib_pd;
	struct bnxt_re_legacy_fence_data fence;
};

struct bnxt_re_ah {
	struct ib_ah		ibah;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_ah	qplib_ah;
};

struct bnxt_re_srq {
	struct ib_srq		ibsrq;
	struct bnxt_re_dev	*rdev;
	u32			srq_limit;
	struct bnxt_qplib_srq	qplib_srq;
	struct ib_umem		*umem;
	spinlock_t		lock;
};

union ip_addr {
	u32	ipv4_addr;
	u8	ipv6_addr[16];
};

struct bnxt_re_qp_info_entry {
	union ib_gid		sgid;
	union ib_gid		dgid;
	union ip_addr		s_ip;
	union ip_addr		d_ip;
	u16			s_port;
#define BNXT_RE_QP_DEST_PORT	4791
	u16			d_port;
};

struct bnxt_re_qp {
	struct ib_qp		ib_qp;
	struct list_head	list;
	struct bnxt_re_dev	*rdev;
	spinlock_t		sq_lock;
	spinlock_t		rq_lock;
	struct bnxt_qplib_qp	qplib_qp;
	struct ib_umem		*sumem;
	struct ib_umem		*rumem;
	/* QP1 */
	u32			send_psn;
	struct ib_ud_header	qp1_hdr;
	struct bnxt_re_cq	*scq;
	struct bnxt_re_cq	*rcq;
	struct dentry		*qp_info_pdev_dentry;
	struct bnxt_re_qp_info_entry qp_info_entry;
	void			*qp_data;
};

struct bnxt_re_cq {
	struct ib_cq		ibcq;
	struct list_head	cq_list;
	struct bnxt_re_dev	*rdev;
	struct bnxt_re_ucontext	*uctx;
	spinlock_t		cq_lock;
	u16			cq_count;
	u16			cq_period;
	struct bnxt_qplib_cq	qplib_cq;
	struct bnxt_qplib_cqe	*cql;
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;
	struct ib_umem		*umem;
	struct ib_umem		*resize_umem;
	struct ib_ucontext	*context;
	int			resize_cqe;
	/* list of cq per uctx. Used only for Thor-2 */
	void			*uctx_cq_page;
	void			*dbr_recov_cq_page;
	bool			is_dbr_soft_cq;
};

struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;
	struct ib_mr		ib_mr;
	struct ib_umem		*ib_umem;
	struct bnxt_qplib_mrw	qplib_mr;
	u32			npages;
	u64			*pages;
	struct bnxt_qplib_frpl	qplib_frpl;
	bool			is_invalcb_active;
};

struct bnxt_re_frpl {
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_frpl	qplib_frpl;
	u64			*page_list;
};

struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;
	struct ib_mw		ib_mw;
	struct bnxt_qplib_mrw	qplib_mw;
};

struct bnxt_re_ucontext {
	struct ib_ucontext	ibucontext;
	struct bnxt_re_dev	*rdev;
	struct list_head	cq_list;
	struct bnxt_qplib_dpi	dpi;
	struct bnxt_qplib_dpi	wcdpi;
	void			*shpg;
	spinlock_t		sh_lock;
	uint64_t		cmask;
	struct mutex		cq_lock;	/* Protect cq list */
	void			*dbr_recov_cq_page;
	struct bnxt_re_cq	*dbr_recov_cq;
};

struct bnxt_re_ah_info {
	union ib_gid		sgid;
	struct ib_gid_attr	sgid_attr;
	u16			vlan_tag;
	u8			nw_type;
};

struct ifnet *bnxt_re_get_netdev(struct ib_device *ibdev,
				 u8 port_num);

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
			int port_modify_mask,
			struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable);
void bnxt_re_compat_qfwstr(void);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context);
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd_in, struct ib_udata *udata);
void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata);

int bnxt_re_create_ah(struct ib_ah *ah_in, struct ib_ah_attr *attr,
		      u32 flags, struct ib_udata *udata);

int bnxt_re_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq_in,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
void bnxt_re_destroy_srq(struct ib_srq *ib_srq,
			 struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *qp_in,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int bnxt_re_create_cq(struct ib_cq *cq_in,
		      const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int bnxt_re_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int bnxt_re_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg,
		      int sg_nents, unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg, struct ib_udata *udata);
int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);
int bnxt_re_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			  u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata);
int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
			   struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *wc, const struct ib_grh *grh,
			const struct ib_mad_hdr *in_mad, size_t in_mad_size,
			struct ib_mad_hdr *out_mad, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
void bnxt_re_disassociate_ucntx(struct ib_ucontext *ibcontext);
static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
					 struct vm_area_struct *vma);
void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev);
void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq);
static inline int
bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index,
		       union ib_gid *sgid, struct ib_gid_attr **sgid_attr,
		       struct ib_global_route *grh, struct ib_ah *ah);
static inline enum rdma_network_type
bnxt_re_gid_to_network_type(struct ib_gid_attr *sgid_attr,
			    union ib_gid *sgid);
static inline
struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
				   struct ib_ucontext *ucontext,
				   struct ib_udata *udata,
				   unsigned long addr,
				   size_t size, int access, int dmasync);
static inline
struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
					 struct ib_ucontext *ucontext,
					 struct ib_udata *udata,
					 unsigned long addr,
					 size_t size, int access, int dmasync);
static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem);
static inline void bnxt_re_peer_mem_release(struct ib_umem *umem);
void bnxt_re_resolve_dmac_task(struct work_struct *work);

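/*
 * Map a firmware CMDQ QP type to the corresponding IB verbs QP type.
 * Unknown types map to IB_QPT_MAX, which serves as an invalid-type
 * sentinel.
 */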
static inline enum ib_qp_type __from_hw_to_ib_qp_type(u8 type)
{
	switch (type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
	case CMDQ_CREATE_QP_TYPE_GSI:
		return IB_QPT_GSI;
	case CMDQ_CREATE_QP_TYPE_RC:
		return IB_QPT_RC;
	case CMDQ_CREATE_QP_TYPE_UD:
		return IB_QPT_UD;
	case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE:
		return IB_QPT_RAW_ETHERTYPE;
	default:
		return IB_QPT_MAX;
	}
}

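/* Translate an IB verbs QP state to the firmware MODIFY_QP encoding. */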
static inline u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

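/*
 * Translate an IB path MTU enum to the firmware encoding, defaulting to
 * 2048 bytes for values the firmware interface does not enumerate.
 */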
static inline u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

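/*
 * Reverse mapping: firmware path MTU (masked out of the QP query
 * response) back to the IB enum, again defaulting to 2048 bytes.
 */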
static inline enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_8192:
		return IB_MTU_8192;
	default:
		return IB_MTU_2048;
	}
}

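/* Translate a firmware QP state back to the IB verbs enum. */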
static inline enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

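/*
 * Ring-depth power-of-two negotiation with the user library. The
 * response first advertises "pow2 disabled"; if the library did not
 * request that capability, the bit is cleared again and -EINVAL tells
 * the caller to fall back to power-of-two ring depths.
 */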
static inline int bnxt_re_init_pow2_flag(struct bnxt_re_uctx_req *req,
					 struct bnxt_re_uctx_resp *resp)
{
	resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
	if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT)) {
		resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
		return -EINVAL;
	}
	return 0;
}

enum {
	BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,
	BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,
};

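/*
 * Compute the effective ring depth: kernel consumers and user libraries
 * that negotiated the pow2-disabled capability get the requested entry
 * count as-is; everyone else is rounded up to a power of two.
 */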
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
	if (!uctx)
		return ent;
	return (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED) ?
		ent : roundup_pow_of_two(ent);
}

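/*
 * Reserved-WQE negotiation, analogous to the pow2 handshake above: the
 * "reserved WQE disabled" bit stays set only when the library requested
 * it and genp5 is true; a library that did not request it gets -EINVAL,
 * so the caller keeps the legacy reserved-WQE behaviour.
 */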
static inline int bnxt_re_init_rsvd_wqe_flag(struct bnxt_re_uctx_req *req,
					     struct bnxt_re_uctx_resp *resp,
					     bool genp5)
{
	resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
	if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE)) {
		resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
		return -EINVAL;
	} else if (!genp5) {
		resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
	}
	return 0;
}

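/*
 * Number of send WQEs the driver reserves on top of what the caller
 * asked for: zero when the chip is gen P5/P7 (kernel consumers) or when
 * the user library negotiated reserved WQEs away;
 * BNXT_QPLIB_RESERVED_QP_WRS otherwise (older user libraries and gen P4
 * kernel consumers).
 */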
static inline u32 bnxt_re_get_diff(struct bnxt_re_ucontext *uctx,
				   struct bnxt_qplib_chip_ctx *cctx)
{
	if (!uctx) {
		/* Reserve WQEs only on gen P4 chips */
		return _is_chip_gen_p5_p7(cctx) ? 0 : BNXT_QPLIB_RESERVED_QP_WRS;
	} else if (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED) {
		return 0;
	}
	/* Older user library */
	return BNXT_QPLIB_RESERVED_QP_WRS;
}

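/*
 * Pick the QP path MTU: start from the interface MTU and, if the caller
 * supplied IB_QP_PATH_MTU, clamp the requested value to the interface
 * MTU. *is_qpmtu_high flags the case where the request had to be
 * trimmed.
 */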
static inline void bnxt_re_init_qpmtu(struct bnxt_re_qp *qp, int mtu,
				      int mask, struct ib_qp_attr *qp_attr,
				      bool *is_qpmtu_high)
{
	int qpmtu, qpmtu_int;
	int ifmtu, ifmtu_int;

	ifmtu = iboe_get_mtu(mtu);
	ifmtu_int = ib_mtu_enum_to_int(ifmtu);
	qpmtu = ifmtu;
	qpmtu_int = ifmtu_int;
	if (mask & IB_QP_PATH_MTU) {
		qpmtu = qp_attr->path_mtu;
		qpmtu_int = ib_mtu_enum_to_int(qpmtu);
		if (qpmtu_int > ifmtu_int) {
			/* Trim the QP path MTU to the interface MTU and
			 * report the new MTU to the user QP for
			 * retransmission PSN calculations.
			 */
			qpmtu = ifmtu;
			qpmtu_int = ifmtu_int;
			*is_qpmtu_high = true;
		}
	}
	qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
	qp->qplib_qp.mtu = qpmtu_int;
	qp->qplib_qp.modify_flags |=
		CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
}

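/*
 * Compare two 14-byte Ethernet headers without a byte loop: the first
 * two bytes are XORed as a u16 and the remaining twelve as three u32s
 * starting at offset 2. The result is zero iff the headers are equal.
 */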
static inline unsigned long compare_ether_header(void *a, void *b)
{
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
}

struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

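/*
 * Byte-at-a-time, table-driven CRC-16 over the reflected form of the
 * 0x8005 polynomial (the same scheme as the Linux kernel's crc16()).
 * Pass in the running crc and a buffer; the updated crc is returned.
 */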
static inline uint16_t
crc16(uint16_t crc, const void *buffer, unsigned int len)
{
	const unsigned char *cp = buffer;
	/* CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1). */
	static uint16_t const crc16_table[256] = {
		0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
		0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
		0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
		0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
		0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
		0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
		0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
		0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
		0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
		0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
		0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
		0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
		0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
		0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
		0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
		0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
		0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
		0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
		0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
		0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
		0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
		0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
		0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
		0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
		0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
		0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
		0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
		0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
		0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
		0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
		0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
		0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
	};

	while (len--)
		crc = (((crc >> 8) & 0xffU) ^
		       crc16_table[(crc ^ *cp++) & 0xffU]) & 0x0000ffffU;
	return crc;
}

static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
					 struct vm_area_struct *vma)
{
	return 0;
}

static inline bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
					       u16 vlan_id)
{
	bool ret = true;
	/*
	 * Check if the VLAN is configured in the host.
	 * If not configured, it can be a transparent
	 * VLAN, so don't report the VLAN id.
	 */
	return ret;
}

#endif /* __BNXT_RE_IB_VERBS_H__ */