/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"
#include "qp.h"
#include "macsec.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                 \
        dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
                __LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                 \
        dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
                __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                 \
        dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
                 __LINE__, current->pid, ##arg)

#define mlx5_ib_log(lvl, _dev, format, arg...)                          \
        dev_printk(lvl, &(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, \
                   __func__, __LINE__, current->pid, ##arg)
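
/*
 * Usage sketch (the QP number here is hypothetical, not from this file):
 * these wrappers prepend the function name, line and pid to the usual
 * dev_*() output, e.g.
 *
 *        mlx5_ib_dbg(dev, "created QP 0x%x\n", qpn);
 *
 * logs something like "create_qp:123:(pid 4567): created QP 0x1a".
 */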

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
                               unsigned int pgsz_shift)
{
        unsigned int largest_pg_shift =
                min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
                      BITS_PER_LONG - 1);

        /*
         * Despite the command interface allowing it, the device does not
         * support page sizes smaller than 4k.
         */
        pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
        return GENMASK(largest_pg_shift, pgsz_shift);
}
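
/*
 * Worked example (illustrative values only): with log_pgsz_bits = 5 and
 * pgsz_shift = 12, largest_pg_shift = min((1 << 5) - 1 + 12, 63) = 43, and
 * since MLX5_ADAPTER_PAGE_SHIFT is 12 (4k) the result is GENMASK(43, 12):
 * a bitmap of every page size from 4KiB up to 2^43 bytes.
 */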

/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA.
 */
#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \
        ib_umem_find_best_pgsz(umem,                                        \
                               __mlx5_log_page_size_to_bitmap(              \
                                       __mlx5_bit_sz(typ, log_pgsz_fld),    \
                                       pgsz_shift),                         \
                               iova)

static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
                              unsigned int offset_shift)
{
        unsigned int largest_offset_shift =
                min_t(unsigned long, page_offset_bits - 1 + offset_shift,
                      BITS_PER_LONG - 1);

        return GENMASK(largest_offset_shift, offset_shift);
}

/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * which restricts the allowed page sizes to ones that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
        struct ib_umem *umem, unsigned long pgsz_bitmap,
        unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
        unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,       \
                                            pgsz_shift, page_offset_fld,   \
                                            scale, page_offset_quantized)  \
        __mlx5_umem_find_best_quantized_pgoff(                             \
                umem,                                                      \
                __mlx5_log_page_size_to_bitmap(                            \
                        __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),     \
                __mlx5_bit_sz(typ, page_offset_fld),                       \
                GENMASK(31, order_base_2(scale)), scale,                   \
                page_offset_quantized)
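
/*
 * Worked example of the quantization rule above (illustrative numbers): with
 * page_size = 4096 and scale = 64 the offset granularity is page_size/scale =
 * 64 bytes, so a umem that starts 128 bytes into its first page yields
 * page_offset_quantized = 2 (2 * 64 = 128).
 */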

#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,      \
                                               pgsz_shift, page_offset_fld,  \
                                               scale, page_offset_quantized) \
        __mlx5_umem_find_best_quantized_pgoff(                               \
                umem,                                                        \
                __mlx5_log_page_size_to_bitmap(                              \
                        __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),       \
                __mlx5_bit_sz(typ, page_offset_fld), 0, scale,               \
                page_offset_quantized)

static inline unsigned long
mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf)
{
        /*
         * mkeys used for dmabuf are fixed at PAGE_SIZE because we must be able
         * to hold any sgl after a move operation. Ideally the mkc page size
         * could be changed at runtime to be optimal, but right now the driver
         * cannot do that.
         */
        return ib_umem_find_best_pgsz(&umem_dmabuf->umem, PAGE_SIZE,
                                      umem_dmabuf->umem.iova);
}

enum {
        MLX5_IB_MMAP_OFFSET_START = 9,
        MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
        MLX5_IB_MMAP_CMD_SHIFT = 8,
        MLX5_IB_MMAP_CMD_MASK = 0xff,
};
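
/*
 * The mmap command is carried in the upper bits of the mmap offset. A decoder
 * along these lines (a sketch of how the constants above are consumed;
 * get_command() here is illustrative, not a declaration from this header):
 *
 *        static int get_command(unsigned long offset)
 *        {
 *                return (offset >> MLX5_IB_MMAP_CMD_SHIFT) &
 *                       MLX5_IB_MMAP_CMD_MASK;
 *        }
 */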

enum {
        MLX5_RES_SCAT_DATA32_CQE = 0x1,
        MLX5_RES_SCAT_DATA64_CQE = 0x2,
        MLX5_REQ_SCAT_DATA32_CQE = 0x11,
        MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_mad_ifc_flags {
        MLX5_MAD_IFC_IGNORE_MKEY = 1,
        MLX5_MAD_IFC_IGNORE_BKEY = 2,
        MLX5_MAD_IFC_NET_VIEW = 4,
};

enum {
        MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
        MLX5_CQE_VERSION_V0,
        MLX5_CQE_VERSION_V1,
};

enum {
        MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
        MLX5_TM_MAX_SGE = 1,
};

enum {
        MLX5_IB_INVALID_UAR_INDEX = BIT(31),
        MLX5_IB_INVALID_BFREG = BIT(31),
};

enum {
        MLX5_MAX_MEMIC_PAGES = 0x100,
        MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
        MLX5_MEMIC_BASE_ALIGN = 6,
        MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
        MLX5_IB_MMAP_TYPE_MEMIC = 1,
        MLX5_IB_MMAP_TYPE_VAR = 2,
        MLX5_IB_MMAP_TYPE_UAR_WC = 3,
        MLX5_IB_MMAP_TYPE_UAR_NC = 4,
        MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};

struct mlx5_bfreg_info {
        u32 *sys_pages;
        int num_low_latency_bfregs;
        unsigned int *count;

        /*
         * protect bfreg allocation data structs
         */
        struct mutex lock;
        u32 ver;
        u8 lib_uar_4k : 1;
        u8 lib_uar_dyn : 1;
        u32 num_sys_pages;
        u32 num_static_sys_pages;
        u32 total_num_bfregs;
        u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
        struct ib_ucontext ibucontext;
        struct list_head db_page_list;

        /* protect doorbell record alloc/free
         */
        struct mutex db_page_mutex;
        struct mlx5_bfreg_info bfregi;
        u8 cqe_version;
        /* Transport Domain number */
        u32 tdn;

        u64 lib_caps;
        u16 devx_uid;
        /* For RoCE LAG TX affinity */
        atomic_t tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
        struct ib_pd ibpd;
        u32 pdn;
        u16 uid;
};

enum {
        MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
        MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
        MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO      (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO       (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO  (MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT          (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS      2
#define MLX5_IB_NUM_EGRESS_FTS       1
#define MLX5_IB_NUM_FDB_FTS          MLX5_BY_PASS_NUM_REGULAR_PRIOS

struct mlx5_ib_anchor {
        struct mlx5_flow_table *ft;
        struct mlx5_flow_group *fg_goto_table;
        struct mlx5_flow_group *fg_drop;
        struct mlx5_flow_handle *rule_goto_table;
        struct mlx5_flow_handle *rule_drop;
        unsigned int rule_goto_table_ref;
};

struct mlx5_ib_flow_prio {
        struct mlx5_flow_table *flow_table;
        struct mlx5_ib_anchor anchor;
        unsigned int refcount;
};

struct mlx5_ib_flow_handler {
        struct list_head list;
        struct ib_flow ibflow;
        struct mlx5_ib_flow_prio *prio;
        struct mlx5_flow_handle *rule;
        struct ib_counters *ibcounters;
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_flow_matcher *flow_matcher;
};

struct mlx5_ib_flow_matcher {
        struct mlx5_ib_match_params matcher_mask;
        int mask_len;
        enum mlx5_ib_flow_type flow_type;
        enum mlx5_flow_namespace_type ns_type;
        u16 priority;
        struct mlx5_core_dev *mdev;
        atomic_t usecnt;
        u8 match_criteria_enable;
};

struct mlx5_ib_steering_anchor {
        struct mlx5_ib_flow_prio *ft_prio;
        struct mlx5_ib_dev *dev;
        atomic_t usecnt;
};

struct mlx5_ib_pp {
        u16 index;
        struct mlx5_core_dev *mdev;
};

enum mlx5_ib_optional_counter_type {
        MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
        MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
        MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,

        MLX5_IB_OPCOUNTER_MAX,
};

struct mlx5_ib_flow_db {
        struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
        struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
        struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
        struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
        struct mlx5_ib_flow_prio fdb[MLX5_IB_NUM_FDB_FTS];
        struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
        struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
        struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
        struct mlx5_flow_table *lag_demux_ft;
        /* Protects the flow steering bypass flow tables when flow rules
         * are added or deleted; only a single add/removal of a flow
         * steering rule can be done at a time.
         */
        struct mutex lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_QPT_REG_UMR     IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI      IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI         IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT         IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR          IB_WR_RESERVED1

#define MLX5_IB_UPD_XLT_ZAP         BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR        BIT(3)
#define MLX5_IB_UPD_XLT_PD          BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT    BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1      IB_QP_CREATE_RESERVED_START

struct wr_list {
        u16 opcode;
        u16 next;
};

enum mlx5_ib_rq_flags {
        MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
        MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};

struct mlx5_ib_wq {
        struct mlx5_frag_buf_ctrl fbc;
        u64 *wrid;
        u32 *wr_data;
        struct wr_list *w_list;
        unsigned *wqe_head;
        u16 unsig_count;

        /* serialize post to the work queue
         */
        spinlock_t lock;
        int wqe_cnt;
        int max_post;
        int max_gs;
        int offset;
        int wqe_shift;
        unsigned head;
        unsigned tail;
        u16 cur_post;
        u16 last_poll;
        void *cur_edge;
};

enum mlx5_ib_wq_flags {
        MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
        MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
        struct ib_wq ibwq;
        struct mlx5_core_qp core_qp;
        u32 rq_num_pas;
        u32 log_rq_stride;
        u32 log_rq_size;
        u32 rq_page_offset;
        u32 log_page_size;
        u32 log_num_strides;
        u32 two_byte_shift_en;
        u32 single_stride_log_num_of_bytes;
        struct ib_umem *umem;
        size_t buf_size;
        unsigned int page_shift;
        struct mlx5_db db;
        u32 user_index;
        u32 wqe_count;
        u32 wqe_shift;
        int wq_sig;
        u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
        struct ib_rwq_ind_table ib_rwq_ind_tbl;
        u32 rqtn;
        u16 uid;
};

struct mlx5_ib_ubuffer {
        struct ib_umem *umem;
        int buf_size;
        u64 buf_addr;
};

struct mlx5_ib_qp_base {
        struct mlx5_ib_qp *container_mibqp;
        struct mlx5_core_qp mqp;
        struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
        struct mlx5_ib_qp_base base;
        u16 xrcdn;
        u32 alt_port;
        u8 atomic_rd_en;
        u8 resp_depth;
};

struct mlx5_ib_rss_qp {
        u32 tirn;
};

struct mlx5_ib_rq {
        struct mlx5_ib_qp_base base;
        struct mlx5_ib_wq *rq;
        struct mlx5_ib_ubuffer ubuffer;
        struct mlx5_db *doorbell;
        u32 tirn;
        u8 state;
        u32 flags;
};

struct mlx5_ib_sq {
        struct mlx5_ib_qp_base base;
        struct mlx5_ib_wq *sq;
        struct mlx5_ib_ubuffer ubuffer;
        struct mlx5_db *doorbell;
        struct mlx5_flow_handle *flow_rule;
        u32 tisn;
        u8 state;
};

struct mlx5_ib_raw_packet_qp {
        struct mlx5_ib_sq sq;
        struct mlx5_ib_rq rq;
};

struct mlx5_bf {
        int buf_size;
        unsigned long offset;
        struct mlx5_sq_bfreg *bfreg;
};

struct mlx5_ib_dct {
        struct mlx5_core_dct mdct;
        u32 *in;
};

struct mlx5_ib_gsi_qp {
        struct ib_qp *rx_qp;
        u32 port_num;
        struct ib_qp_cap cap;
        struct ib_cq *cq;
        struct mlx5_ib_gsi_wr *outstanding_wrs;
        u32 outstanding_pi, outstanding_ci;
        int num_qps;
        /* Protects access to the tx_qps. Post send operations synchronize
         * with tx_qp creation in setup_qp(). Also protects the
         * outstanding_wrs array and indices.
         */
        spinlock_t lock;
        struct ib_qp **tx_qps;
};

struct mlx5_ib_qp {
        struct ib_qp ibqp;
        union {
                struct mlx5_ib_qp_trans trans_qp;
                struct mlx5_ib_raw_packet_qp raw_packet_qp;
                struct mlx5_ib_rss_qp rss_qp;
                struct mlx5_ib_dct dct;
                struct mlx5_ib_gsi_qp gsi;
        };
        struct mlx5_frag_buf buf;

        struct mlx5_db db;
        struct mlx5_ib_wq rq;

        u8 sq_signal_bits;
        u8 next_fence;
        struct mlx5_ib_wq sq;

        /* serialize qp state modifications
         */
        struct mutex mutex;
        /* cached variant of create_flags from struct ib_qp_init_attr */
        u32 flags;
        u32 port;
        u8 state;
        int max_inline_data;
        struct mlx5_bf bf;
        u8 has_rq:1;
        u8 is_rss:1;

        /* only for user space QPs. For kernel
         * we have it from the bf object
         */
        int bfregn;

        struct list_head qps_list;
        struct list_head cq_recv_list;
        struct list_head cq_send_list;
        struct mlx5_rate_limit rl;
        u32 underlay_qpn;
        u32 flags_en;
        /*
         * IB/core doesn't store low-level QP types, so
         * store both MLX and IBTA types in the field below.
         */
        enum ib_qp_type type;
        /* A flag to indicate that a new counter has been configured
         * but has not yet taken effect
         */
        u32 counter_pending;
        u16 gsi_lag_port;
};

struct mlx5_ib_cq_buf {
        struct mlx5_frag_buf_ctrl fbc;
        struct mlx5_frag_buf frag_buf;
        struct ib_umem *umem;
        int cqe_size;
        int nent;
};

enum mlx5_ib_cq_pr_flags {
        MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
        MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};

struct mlx5_ib_cq {
        struct ib_cq ibcq;
        struct mlx5_core_cq mcq;
        struct mlx5_ib_cq_buf buf;
        struct mlx5_db db;

        /* serialize access to the CQ
         */
        spinlock_t lock;

        /* protect resize cq
         */
        struct mutex resize_mutex;
        struct mlx5_ib_cq_buf *resize_buf;
        struct ib_umem *resize_umem;
        int cqe_size;
        struct list_head list_send_qp;
        struct list_head list_recv_qp;
        u32 create_flags;
        struct list_head wc_list;
        enum ib_cq_notify_flags notify_flags;
        struct work_struct notify_work;
        u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
        struct ib_wc wc;
        struct list_head list;
};

struct mlx5_ib_srq {
        struct ib_srq ibsrq;
        struct mlx5_core_srq msrq;
        struct mlx5_frag_buf buf;
        struct mlx5_db db;
        struct mlx5_frag_buf_ctrl fbc;
        u64 *wrid;
        /* protect SRQ handling
         */
        spinlock_t lock;
        int head;
        int tail;
        u16 wqe_ctr;
        struct ib_umem *umem;
        /* serialize arming a SRQ
         */
        struct mutex mutex;
        int wq_sig;
};

struct mlx5_ib_xrcd {
        struct ib_xrcd ibxrcd;
        u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
        MLX5_IB_MTT_READ = (1 << 0),
        MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry;
        u8 mmap_flag;
        u64 address;
        u32 page_idx;
};

enum mlx5_mkey_type {
        MLX5_MKEY_MR = 1,
        MLX5_MKEY_MW,
        MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5r_cache_rb_key {
        u8 ats:1;
        unsigned int access_mode;
        unsigned int access_flags;
        unsigned int ndescs;
};

struct mlx5_ib_mkey {
        u32 key;
        enum mlx5_mkey_type type;
        unsigned int ndescs;
        struct wait_queue_head wait;
        refcount_t usecount;
        /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
        struct mlx5r_cache_rb_key rb_key;
        struct mlx5_cache_ent *cache_ent;
        u8 cacheable : 1;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
                                         IB_ACCESS_REMOTE_WRITE  |\
                                         IB_ACCESS_REMOTE_READ   |\
                                         IB_ACCESS_REMOTE_ATOMIC |\
                                         IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
                                          IB_ACCESS_REMOTE_WRITE  |\
                                          IB_ACCESS_REMOTE_READ   |\
                                          IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)          \
        atomic64_add(value, &((mr)->odp_stats.counter_name))
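
/*
 * Usage sketch (the counter field name is assumed for illustration):
 * page-fault handling could bump the per-MR ODP counters with, e.g.,
 *
 *        mlx5_update_odp_stats(mr, faults, npages);
 *
 * which atomically adds npages to mr->odp_stats.faults.
 */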

struct mlx5_ib_mr {
        struct ib_mr ibmr;
        struct mlx5_ib_mkey mmkey;

        struct ib_umem *umem;

        union {
                /* Used only by kernel MRs (umem == NULL) */
                struct {
                        void *descs;
                        void *descs_alloc;
                        dma_addr_t desc_map;
                        int max_descs;
                        int desc_size;
                        int access_mode;

                        /* For Kernel IB_MR_TYPE_INTEGRITY */
                        struct mlx5_core_sig_ctx *sig;
                        struct mlx5_ib_mr *pi_mr;
                        struct mlx5_ib_mr *klm_mr;
                        struct mlx5_ib_mr *mtt_mr;
                        u64 data_iova;
                        u64 pi_iova;
                        int meta_ndescs;
                        int meta_length;
                        int data_length;
                };

                /* Used only by User MRs (umem != NULL) */
                struct {
                        unsigned int page_shift;
                        /* Current access_flags */
                        int access_flags;

                        /* For User ODP */
                        struct mlx5_ib_mr *parent;
                        struct xarray implicit_children;
                        union {
                                struct work_struct work;
                        } odp_destroy;
                        struct ib_odp_counters odp_stats;
                        bool is_odp_implicit;
                };
        };
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
        return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
               mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
        return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
               mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
        struct ib_mw ibmw;
        struct mlx5_ib_mkey mmkey;
};

struct mlx5_ib_umr_context {
        struct ib_cqe cqe;
        enum ib_wc_status status;
        struct completion done;
};

enum {
        MLX5_UMR_STATE_UNINIT,
        MLX5_UMR_STATE_ACTIVE,
        MLX5_UMR_STATE_RECOVER,
        MLX5_UMR_STATE_ERR,
};

struct umr_common {
        struct ib_pd *pd;
        struct ib_cq *cq;
        struct ib_qp *qp;
        /* Protects from UMR QP overflow
         */
        struct semaphore sem;
        /* Protects from using UMR while the UMR is not active
         */
        struct mutex lock;
        unsigned int state;
        /* Protects from repeat UMR QP creation */
        struct mutex init_lock;
};

#define NUM_MKEYS_PER_PAGE \
        ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))

struct mlx5_mkeys_page {
        u32 mkeys[NUM_MKEYS_PER_PAGE];
        struct list_head list;
};
static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
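
/*
 * On a 64-bit kernel with 4KiB pages (an illustrative configuration),
 * sizeof(struct list_head) is 16 and sizeof(u32) is 4, so NUM_MKEYS_PER_PAGE
 * is (4096 - 16) / 4 = 1020 and the static_assert above confirms the page is
 * fully packed: 1020 * 4 + 16 == 4096.
 */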

struct mlx5_mkeys_queue {
        struct list_head pages_list;
        u32 num_pages;
        unsigned long ci;
        spinlock_t lock; /* sync list ops */
};

struct mlx5_cache_ent {
        struct mlx5_mkeys_queue mkeys_queue;
        u32 pending;

        char name[4];

        struct rb_node node;
        struct mlx5r_cache_rb_key rb_key;

        u8 is_tmp:1;
        u8 disabled:1;
        u8 fill_to_high_water:1;

        /*
         * limit is the low water mark for stored mkeys; 2 * limit is the
         * high water mark.
         */
        u32 in_use;
        u32 limit;

        /* Statistics */
        u32 miss;

        struct mlx5_ib_dev *dev;
        struct delayed_work dwork;
};

struct mlx5r_async_create_mkey {
        union {
                u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
                u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
        };
        struct mlx5_async_work cb_work;
        struct mlx5_cache_ent *ent;
        u32 mkey;
};

struct mlx5_mkey_cache {
        struct workqueue_struct *wq;
        struct rb_root rb_root;
        struct mutex rb_lock;
        struct dentry *fs_root;
        unsigned long last_add;
        struct delayed_work remove_ent_dwork;
};

struct mlx5_ib_port_resources {
        struct mlx5_ib_gsi_qp *gsi;
        struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
        struct ib_cq *c0;
        struct mutex cq_lock;
        u32 xrcdn0;
        u32 xrcdn1;
        struct ib_pd *p0;
        struct ib_srq *s0;
        struct ib_srq *s1;
        struct mutex srq_lock;
        struct mlx5_ib_port_resources ports[2];
};

#define MAX_OPFC_RULES 2

struct mlx5_ib_op_fc {
        struct mlx5_fc *fc;
        struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
};

struct mlx5_ib_counters {
        struct rdma_stat_desc *descs;
        size_t *offsets;
        u32 num_q_counters;
        u32 num_cong_counters;
        u32 num_ext_ppcnt_counters;
        u32 num_op_counters;
        u16 set_id;
        struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};

int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
                         struct mlx5_ib_op_fc *opfc,
                         enum mlx5_ib_optional_counter_type type);

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
                             struct mlx5_ib_op_fc *opfc,
                             enum mlx5_ib_optional_counter_type type);

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
        struct mlx5_ib_multiport_info *mpi;
        /* To be held when accessing the multiport info */
        spinlock_t mpi_lock;
};

struct mlx5_roce {
        /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
         * netdev pointer
         */
        rwlock_t netdev_lock;
        struct net_device *netdev;
        struct notifier_block nb;
        struct netdev_net_notifier nn;
        struct notifier_block mdev_nb;
        struct net_device *tracking_netdev;
        atomic_t tx_port_affinity;
        enum ib_port_state last_port_state;
        struct mlx5_ib_dev *dev;
        u32 native_port_num;
};

struct mlx5_ib_port {
        struct mlx5_ib_counters cnts;
        struct mlx5_ib_multiport mp;
        struct mlx5_ib_dbg_cc_params *dbg_cc_params;
        struct mlx5_roce roce;
        struct mlx5_eswitch_rep *rep;
#ifdef CONFIG_MLX5_MACSEC
        struct mlx5_reserved_gids *reserved_gids;
#endif
};

struct mlx5_ib_dbg_param {
        int offset;
        struct mlx5_ib_dev *dev;
        struct dentry *dentry;
        u32 port_num;
};

enum mlx5_ib_dbg_cc_types {
        MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
        MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
        MLX5_IB_DBG_CC_RP_TIME_RESET,
        MLX5_IB_DBG_CC_RP_BYTE_RESET,
        MLX5_IB_DBG_CC_RP_THRESHOLD,
        MLX5_IB_DBG_CC_RP_AI_RATE,
        MLX5_IB_DBG_CC_RP_MAX_RATE,
        MLX5_IB_DBG_CC_RP_HAI_RATE,
        MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
        MLX5_IB_DBG_CC_RP_MIN_RATE,
        MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
        MLX5_IB_DBG_CC_RP_DCE_TCP_G,
        MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
        MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
        MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
        MLX5_IB_DBG_CC_RP_GD,
        MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
        MLX5_IB_DBG_CC_NP_CNP_DSCP,
        MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
        MLX5_IB_DBG_CC_NP_CNP_PRIO,
        MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID,
        MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP,
        MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
        struct dentry *root;
        struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};

enum {
        MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
        struct mlx5_ib_dev *dev;
        struct work_struct delay_drop_work;
        /* serialize setting of delay drop */
        struct mutex lock;
        u32 timeout;
        bool activate;
        atomic_t events_cnt;
        atomic_t rqs_cnt;
        struct dentry *dir_debugfs;
};

enum mlx5_ib_stages {
        MLX5_IB_STAGE_INIT,
        MLX5_IB_STAGE_FS,
        MLX5_IB_STAGE_CAPS,
        MLX5_IB_STAGE_NON_DEFAULT_CB,
        MLX5_IB_STAGE_ROCE,
        MLX5_IB_STAGE_QP,
        MLX5_IB_STAGE_SRQ,
        MLX5_IB_STAGE_DEVICE_RESOURCES,
        MLX5_IB_STAGE_DEVICE_NOTIFIER,
        MLX5_IB_STAGE_ODP,
        MLX5_IB_STAGE_COUNTERS,
        MLX5_IB_STAGE_CONG_DEBUGFS,
        MLX5_IB_STAGE_UAR,
        MLX5_IB_STAGE_BFREG,
        MLX5_IB_STAGE_PRE_IB_REG_UMR,
        MLX5_IB_STAGE_WHITELIST_UID,
        MLX5_IB_STAGE_IB_REG,
        MLX5_IB_STAGE_POST_IB_REG_UMR,
        MLX5_IB_STAGE_DELAY_DROP,
        MLX5_IB_STAGE_RESTRACK,
        MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
        int (*init)(struct mlx5_ib_dev *dev);
        void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
        .stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
        struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

struct mlx5_ib_multiport_info {
        struct list_head list;
        struct mlx5_ib_dev *ibdev;
        struct mlx5_core_dev *mdev;
        struct notifier_block mdev_events;
        struct completion unref_comp;
        u64 sys_image_guid;
        u32 mdev_refcnt;
        bool is_master;
        bool unaffiliate;
};

struct mlx5_ib_flow_action {
        struct ib_flow_action ib_action;
        union {
                struct {
                        u64 ib_flags;
                        struct mlx5_accel_esp_xfrm *ctx;
                } esp_aes_gcm;
                struct {
                        struct mlx5_ib_dev *dev;
                        u32 sub_type;
                        union {
                                struct mlx5_modify_hdr *modify_hdr;
                                struct mlx5_pkt_reformat *pkt_reformat;
                        };
                } flow_action_raw;
        };
};

struct mlx5_dm {
        struct mlx5_core_dev *dev;
        /* This lock is used to protect the access to the shared
         * allocation map when concurrent requests by different
         * processes are handled.
         */
        spinlock_t lock;
        DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
        struct mlx5_fc *hw_cntrs_hndl;
        u64 *out;
        u32 flags;
};

enum mlx5_ib_counters_type {
        MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
        struct ib_counters ibcntrs;
        enum mlx5_ib_counters_type type;
        /* number of counters supported for this counters type */
        u32 counters_num;
        struct mlx5_fc *hw_cntrs_hndl;
        /* read function for this counters type */
        int (*read_counters)(struct ib_device *ibdev,
                             struct mlx5_read_counters_attr *read_attr);
        /* max index set as part of create_flow */
        u32 cntrs_max_index;
        /* number of counters data entries (<description,index> pair) */
        u32 ncounters;
        /* counters data array for descriptions and indexes */
        struct mlx5_ib_flow_counters_desc *counters_data;
        /* protects access to mcounters internal data */
        struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
        return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
                           bool is_egress,
                           struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
        /* protect the user_td */
        struct mutex mutex;
        u32 user_td;
        int qps;
        bool enabled;
};

struct mlx5_ib_pf_eq {
        struct notifier_block irq_nb;
        struct mlx5_ib_dev *dev;
        struct mlx5_eq *core;
        struct work_struct work;
        spinlock_t lock; /* Pagefaults spinlock */
        struct workqueue_struct *wq;
        mempool_t *pool;
};

struct mlx5_devx_event_table {
        struct mlx5_nb devx_nb;
        /* serialize updating the event_xa */
        struct mutex event_xa_lock;
        struct xarray event_xa;
};

struct mlx5_var_table {
        /* serialize updating the bitmap */
        struct mutex bitmap_lock;
        unsigned long *bitmap;
        u64 hw_start_addr;
        u32 stride_size;
        u64 num_var_hw_entries;
};

struct mlx5_port_caps {
        bool has_smi;
        u8 ext_port_cap;
};

struct mlx5_special_mkeys {
        u32 dump_fill_mkey;
        __be32 null_mkey;
        __be32 terminate_scatter_list_mkey;
};

struct mlx5_macsec {
        struct mutex lock; /* Protects mlx5_macsec internal contexts */
        struct list_head macsec_devices_list;
        struct notifier_block blocking_events_nb;
};

struct mlx5_ib_dev {
        struct ib_device ib_dev;
        struct mlx5_core_dev *mdev;
        struct notifier_block mdev_events;
        int num_ports;
        /* serialize update of capability mask
         */
        struct mutex cap_mask_mutex;
        u8 ib_active:1;
        u8 is_rep:1;
        u8 lag_active:1;
        u8 fill_delay;
        struct umr_common umrc;
        /* sync used page count stats
         */
        struct mlx5_ib_resources devr;

        atomic_t mkey_var;
        struct mlx5_mkey_cache cache;
        struct timer_list delay_timer;
        /* Prevents soft lock on massive reg MRs */
        struct mutex slow_path_mutex;
        struct ib_odp_caps odp_caps;
        u64 odp_max_size;
        struct mutex odp_eq_mutex;
        struct mlx5_ib_pf_eq odp_pf_eq;

        struct xarray odp_mkeys;

        struct mlx5_ib_flow_db *flow_db;
        /* protect resources needed as part of reset flow */
        spinlock_t reset_flow_resource_lock;
        struct list_head qp_list;
        /* Array with num_ports elements */
        struct mlx5_ib_port *port;
        struct mlx5_sq_bfreg bfreg;
        struct mlx5_sq_bfreg fp_bfreg;
        struct mlx5_ib_delay_drop delay_drop;
        const struct mlx5_ib_profile *profile;

        struct mlx5_ib_lb_state lb;
        u8 umr_fence;
        struct list_head ib_dev_list;
        u64 sys_image_guid;
        struct mlx5_dm dm;
        u16 devx_whitelist_uid;
        struct mlx5_srq_table srq_table;
        struct mlx5_qp_table qp_table;
        struct mlx5_async_ctx async_ctx;
        struct mlx5_devx_event_table devx_event_table;
        struct mlx5_var_table var_table;

        struct xarray sig_mrs;
        struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
        u16 pkey_table_len;
        u8 lag_ports;
        struct mlx5_special_mkeys mkeys;

#ifdef CONFIG_MLX5_MACSEC
        struct mlx5_macsec macsec;
#endif

        u8 num_plane;
        struct mlx5_ib_dev *smi_dev;
        const char *sub_dev_name;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
        return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
        return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
        return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
        struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);

        return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
        return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
        return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
        return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
        return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
        return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
        return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
        return container_of(rdma_entry,
                            struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
                        struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
                      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
        return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
                       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                          const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
                      struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
                     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
                        size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
                        size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
                         size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct uverbs_attr_bundle *attrs);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
                                         u64 length, u64 virt_addr,
                                         int fd, int access_flags,
                                         struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
                      enum ib_uverbs_advise_mr_advice advice,
                      u32 flags,
                      struct ib_sge *sg_list,
                      u32 num_sge,
                      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
                                             int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                                    u64 length, u64 virt_addr, int access_flags,
                                    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                               u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
                                         u32 max_num_sg,
                                         u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
                         int data_sg_nents, unsigned int *data_sg_offset,
                         struct scatterlist *meta_sg, int meta_sg_nents,
                         unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad *in, struct ib_mad *out,
                        size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
                                         __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
                                 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
                                 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
                            u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
                            union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
                            struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
                       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
                          u64 access_flags);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
                              struct mlx5r_cache_rb_key rb_key,
                              bool persistent_entry);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
                                       int access_flags, int access_mode,
                                       int ndescs);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
                                struct ib_wq_init_attr *init_attr,
                                struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
                      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
                                 struct ib_rwq_ind_table_init_attr *init_attr,
                                 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
                                struct ib_dm_mr_attr *attr,
                                struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
                           struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
                               enum ib_uverbs_advise_mr_advice advice,
                               u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
                                      struct mlx5_ib_pf_eq *eq)
{
        return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
        return 0;
}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
                                         struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
                           enum ib_uverbs_advise_mr_advice advice, u32 flags,
                           struct ib_sge *sg_list, u32 num_sge)
{
        return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
        return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
                      const struct mlx5_ib_profile *profile,
                      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
                  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
                          u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
                              u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
                         u32 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
                        struct ifla_vf_guid *node_guid,
                        struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
                        u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
                                   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
                       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                          int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                         int qp_attr_mask,
                         struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
                          const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
                          const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
                        int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
                                                   u32 ib_port_num,
                                                   u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
                                  u32 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];
extern const struct uapi_definition mlx5_ib_create_cq_defs[];

static inline int is_qp1(enum ib_qp_type qp_type)
{
        return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

static inline u32 check_cq_create_flags(u32 flags)
{
        /*
         * Returns a non-zero value for unsupported CQ create flags;
         * returns zero otherwise.
         */
        return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
                          IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
                                     u32 *user_index)
{
        if (cqe_version) {
                if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
                    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
                        return -EINVAL;
                *user_index = cmd_uidx;
        } else {
                *user_index = MLX5_IB_DEFAULT_UIDX;
        }

        return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
                                    struct mlx5_ib_create_qp *ucmd,
                                    int inlen,
                                    u32 *user_index)
{
        u8 cqe_version = ucontext->cqe_version;

        if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
            (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
                return 0;

        if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
                return -EINVAL;

        return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
                                     struct mlx5_ib_create_srq *ucmd,
                                     int inlen,
                                     u32 *user_index)
{
        u8 cqe_version = ucontext->cqe_version;

        if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
            (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
                return 0;

        if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
                return -EINVAL;

        return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
        return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
                        MLX5_UARS_IN_PAGE : 1;
}

extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
                        struct mlx5_bfreg_info *bfregi, u32 bfregn,
                        bool dyn_bfreg);

static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
                                       struct mlx5_ib_mkey *mmkey)
{
        refcount_set(&mmkey->usecount, 1);

        return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
                               mmkey, GFP_KERNEL));
}

/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
        if (refcount_dec_and_test(&mmkey->usecount))
                wake_up(&mmkey->wait);
}

/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
        mlx5r_deref_odp_mkey(mmkey);
        wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
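
/*
 * Typical pairing of the helpers above (a sketch, not a normative sequence):
 * mlx5r_store_odp_mkey() publishes the mkey with usecount = 1; each lookup
 * that took a reference drops it with mlx5r_deref_odp_mkey(); teardown calls
 * mlx5r_deref_wait_odp_mkey(), which drops its reference and then blocks
 * until usecount reaches zero, so the mkey can be destroyed safely.
 */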

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
        /*
         * If the driver is in hash mode and the port_select_flow_table_bypass cap
         * is supported, it means that the driver no longer needs to assign the port
         * affinity by default. If a user wants to set the port affinity explicitly,
         * the user has a dedicated API to do that, so there is no need to assign
         * the port affinity by default.
         */
        if (dev->lag_active &&
            mlx5_lag_mode_is_hash(dev->mdev) &&
            MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
                return 0;

        if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
                return 0;

        return dev->lag_active ||
                (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
                 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

static inline bool rt_supported(int ts_cap)
{
        return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
               ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}

/*
 * PCI Peer to Peer is a trainwreck. If no switch is present then things
 * sometimes work, depending on the pci_distance_p2p logic for excluding broken
 * root complexes. However if a switch is present in the path, then things get
 * really ugly depending on how the switch is set up. This table assumes that
 * the root complex is strict and validates that all req/reps match perfectly -
 * so any scenario where it sees only half the transaction is a failure.
 *
 * CR/RR/DT  ATS RO P2P
 * 00X       X   X  OK
 * 010       X   X  fails (request is routed to root but root never sees comp)
 * 011       0   X  fails (request is routed to root but root never sees comp)
 * 011       1   X  OK
 * 10X       X   1  OK
 * 101       X   0  fails (completion is routed to root but root didn't see req)
 * 110       X   0  SLOW
 * 111       0   0  SLOW
 * 111       1   0  fails (completion is routed to root but root didn't see req)
 * 111       1   1  OK
 *
 * Unfortunately we cannot reliably know if a switch is present or what the
 * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
 * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
 *
 * For now assume that if the umem is a dma_buf then it is P2P.
 */
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
                                       struct ib_umem *umem, int access_flags)
{
        if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
                return false;
        return access_flags & IB_ACCESS_RELAXED_ORDERING;
}

int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
                  unsigned int index, const union ib_gid *gid,
                  const struct ib_gid_attr *attr);

static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
{
        return (port - 1) / dev->num_ports + 1;
}

#endif /* MLX5_IB_H */