Lines matching full:struct (each entry is the source line number followed by the matching line of the mlx5_ib driver header)

110 struct mlx5_bfreg_info {
118 struct mutex lock;
128 struct mlx5_ib_ucontext {
129 struct ib_ucontext ibucontext;
130 struct list_head db_page_list;
134 struct mutex db_page_mutex;
135 struct mlx5_bfreg_info bfregi;
144 static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
146 return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
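
The to_mucontext() helper above, like the rest of the to_m*() converters further down in this listing, recovers the driver-private wrapper from the embedded uverbs object with container_of(). A minimal, self-contained sketch of that embedding pattern follows; the container_of definition and the reduced struct bodies are illustrative stand-ins, not the kernel's own:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel macro: convert a pointer to an
     * embedded member back into a pointer to the enclosing structure. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_ucontext { int dummy; };          /* stand-in for the core uverbs object */

    struct mlx5_ib_ucontext {                   /* driver wrapper embeds the core object */
            struct ib_ucontext ibucontext;
            int driver_private_state;
    };

    static struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
    {
            return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
    }

    int main(void)
    {
            struct mlx5_ib_ucontext ctx = { .driver_private_state = 42 };

            /* The core layer only ever holds &ctx.ibucontext; the driver converts back. */
            printf("%d\n", to_mucontext(&ctx.ibucontext)->driver_private_state);
            return 0;
    }

The same idiom backs to_mpd(), to_mcq(), to_msrq() and the other converters listed below.
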
149 struct mlx5_ib_pd {
150 struct ib_pd ibpd;
164 struct mlx5_ib_flow_prio {
165 struct mlx5_flow_table *flow_table;
169 struct mlx5_ib_flow_handler {
170 struct list_head list;
171 struct ib_flow ibflow;
172 struct mlx5_ib_flow_prio *prio;
173 struct mlx5_flow_handle *rule;
176 struct mlx5_ib_flow_db {
177 struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
178 struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
179 struct mlx5_flow_table *lag_demux_ft;
185 struct mutex lock;
218 struct wr_list {
223 struct mlx5_ib_wq {
226 struct wr_list *w_list;
245 struct mlx5_ib_rwq {
246 struct ib_wq ibwq;
247 struct mlx5_core_qp core_qp;
253 struct ib_umem *umem;
257 struct mlx5_db db;
275 struct mlx5_ib_rwq_ind_table {
276 struct ib_rwq_ind_table ib_rwq_ind_tbl;
294 mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
299 struct mlx5_ib_pfault {
300 struct work_struct work;
301 struct mlx5_pagefault mpfault;
304 struct mlx5_ib_ubuffer {
305 struct ib_umem *umem;
310 struct mlx5_ib_qp_base {
311 struct mlx5_ib_qp *container_mibqp;
312 struct mlx5_core_qp mqp;
313 struct mlx5_ib_ubuffer ubuffer;
316 struct mlx5_ib_qp_trans {
317 struct mlx5_ib_qp_base base;
324 struct mlx5_ib_rss_qp {
328 struct mlx5_ib_rq {
329 struct mlx5_ib_qp_base base;
330 struct mlx5_ib_wq *rq;
331 struct mlx5_ib_ubuffer ubuffer;
332 struct mlx5_db *doorbell;
337 struct mlx5_ib_sq {
338 struct mlx5_ib_qp_base base;
339 struct mlx5_ib_wq *sq;
340 struct mlx5_ib_ubuffer ubuffer;
341 struct mlx5_db *doorbell;
346 struct mlx5_ib_raw_packet_qp {
347 struct mlx5_ib_sq sq;
348 struct mlx5_ib_rq rq;
351 struct mlx5_bf {
354 struct mlx5_sq_bfreg *bfreg;
358 struct mlx5_ib_dct {
359 struct mlx5_core_dct mdct;
363 struct mlx5_ib_qp {
364 struct ib_qp ibqp;
366 struct mlx5_ib_qp_trans trans_qp;
367 struct mlx5_ib_raw_packet_qp raw_packet_qp;
368 struct mlx5_ib_rss_qp rss_qp;
369 struct mlx5_ib_dct dct;
371 struct mlx5_buf buf;
373 struct mlx5_db db;
374 struct mlx5_ib_wq rq;
378 struct mlx5_ib_wq sq;
382 struct mutex mutex;
389 struct mlx5_bf bf;
414 struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
416 struct list_head qps_list;
417 struct list_head cq_recv_list;
418 struct list_head cq_send_list;
421 struct mlx5_ib_cq_buf {
422 struct mlx5_buf buf;
423 struct ib_umem *umem;
442 struct mlx5_umr_wr {
443 struct ib_send_wr wr;
448 struct ib_pd *pd;
456 static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
458 return container_of(wr, struct mlx5_umr_wr, wr);
461 struct mlx5_shared_mr_info {
463 struct ib_umem *umem;
466 struct mlx5_ib_cq {
467 struct ib_cq ibcq;
468 struct mlx5_core_cq mcq;
469 struct mlx5_ib_cq_buf buf;
470 struct mlx5_db db;
478 struct mutex resize_mutex;
479 struct mlx5_ib_cq_buf *resize_buf;
480 struct ib_umem *resize_umem;
482 struct list_head list_send_qp;
483 struct list_head list_recv_qp;
485 struct list_head wc_list;
487 struct work_struct notify_work;
490 struct mlx5_ib_wc {
491 struct ib_wc wc;
492 struct list_head list;
495 struct mlx5_ib_srq {
496 struct ib_srq ibsrq;
497 struct mlx5_core_srq msrq;
498 struct mlx5_buf buf;
499 struct mlx5_db db;
507 struct ib_umem *umem;
510 struct mutex mutex;
514 struct mlx5_ib_xrcd {
515 struct ib_xrcd ibxrcd;
524 struct mlx5_user_mmap_entry {
525 struct rdma_user_mmap_entry rdma_entry;
533 struct mlx5_ib_mr {
534 struct ib_mr ibmr;
541 struct mlx5_core_mkey mmkey;
542 struct ib_umem *umem;
543 struct mlx5_shared_mr_info *smr_info;
544 struct list_head list;
548 struct mlx5_ib_dev *dev;
550 struct mlx5_core_sig_ctx *sig;
554 struct mlx5_async_work cb_work;
557 struct mlx5_ib_mw {
558 struct ib_mw ibmw;
559 struct mlx5_core_mkey mmkey;
562 struct mlx5_ib_devx_mr {
563 struct mlx5_core_mkey mmkey;
567 struct mlx5_ib_umr_context {
568 struct ib_cqe cqe;
570 struct completion done;
573 struct umr_common {
574 struct ib_pd *pd;
575 struct ib_cq *cq;
576 struct ib_qp *qp;
579 struct semaphore sem;
588 struct mlx5_cache_ent {
589 struct list_head head;
602 struct mlx5_ib_dev *dev;
603 struct work_struct work;
604 struct delayed_work dwork;
608 struct mlx5_mr_cache {
609 struct workqueue_struct *wq;
610 struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
615 struct mlx5_ib_gsi_qp;
617 struct mlx5_ib_port_resources {
618 struct mlx5_ib_resources *devr;
619 struct mlx5_ib_gsi_qp *gsi;
620 struct work_struct pkey_change_work;
623 struct mlx5_ib_resources {
624 struct ib_cq *c0;
625 struct ib_xrcd *x0;
626 struct ib_xrcd *x1;
627 struct ib_pd *p0;
628 struct ib_srq *s0;
629 struct ib_srq *s1;
630 struct mlx5_ib_port_resources ports[2];
632 struct mutex mutex;
635 struct mlx5_ib_port {
639 struct mlx5_roce {
645 struct notifier_block nb;
728 struct mlx5_ib_congestion {
729 struct sysctl_ctx_list ctx;
730 struct sx lock;
731 struct delayed_work dwork;
734 struct {
742 struct mlx5_devx_event_table {
744 struct mutex event_xa_lock;
745 struct xarray event_xa;
748 struct mlx5_ib_dev {
749 struct ib_device ib_dev;
750 struct mlx5_core_dev *mdev;
751 struct mlx5_roce roce;
756 struct mutex cap_mask_mutex;
759 struct umr_common umrc;
762 struct mlx5_ib_resources devr;
763 struct mlx5_mr_cache cache;
764 struct timer_list delay_timer;
766 struct mutex slow_path_mutex;
769 struct ib_odp_caps odp_caps;
774 struct srcu_struct mr_srcu;
776 struct mlx5_ib_flow_db flow_db;
779 struct list_head qp_list;
781 struct mlx5_ib_port *port;
782 struct mlx5_sq_bfreg bfreg;
783 struct mlx5_sq_bfreg wc_bfreg;
784 struct mlx5_sq_bfreg fp_bfreg;
785 struct mlx5_devx_event_table devx_event_table;
786 struct mlx5_ib_congestion congestion;
788 struct mlx5_async_ctx async_ctx;
791 struct mutex lb_mutex;
795 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
797 return container_of(mcq, struct mlx5_ib_cq, mcq);
800 static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
802 return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
805 static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
807 return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
810 static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
812 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
813 udata, struct mlx5_ib_ucontext, ibucontext);
818 static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
820 return container_of(ibcq, struct mlx5_ib_cq, ibcq);
823 static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
825 return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
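
to_mibqp() above is a two-step conversion: container_of() recovers the mlx5_ib_qp_base that embeds the mlx5_core_qp, and the base's container_mibqp back-pointer then yields the owning mlx5_ib_qp. The back-pointer is needed because a QP can embed more than one base (trans_qp.base, or the SQ and RQ bases of a raw packet QP), so container_of() alone cannot tell which enclosing object it sits in. A self-contained sketch of the idiom with reduced stand-in structs (the field layout here is illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mlx5_core_qp { int qpn; };           /* stand-in for the firmware-level QP */

    struct mlx5_ib_qp;                          /* forward declaration for the back-pointer */

    struct mlx5_ib_qp_base {
            struct mlx5_ib_qp *container_mibqp; /* back-pointer to the owning QP */
            struct mlx5_core_qp mqp;
    };

    struct mlx5_ib_qp {
            struct mlx5_ib_qp_base sq_base;     /* a QP may embed several bases... */
            struct mlx5_ib_qp_base rq_base;     /* ...so container_of() alone is not enough */
            int state;
    };

    static struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
    {
            /* Step 1: up to the embedding base; step 2: follow the back-pointer. */
            return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
    }

    int main(void)
    {
            struct mlx5_ib_qp qp = { .state = 7 };

            qp.sq_base.container_mibqp = &qp;
            qp.rq_base.container_mibqp = &qp;

            printf("%d\n", to_mibqp(&qp.rq_base.mqp)->state);
            return 0;
    }
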
828 static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
830 return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
833 static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
835 return container_of(mmkey, struct mlx5_ib_mr, mmkey);
838 static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
840 return container_of(ibpd, struct mlx5_ib_pd, ibpd);
843 static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
845 return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
848 static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
850 return container_of(ibqp, struct mlx5_ib_qp, ibqp);
853 static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
855 return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
858 static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
860 return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
863 static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
865 return container_of(msrq, struct mlx5_ib_srq, msrq);
868 static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
870 return container_of(ibmr, struct mlx5_ib_mr, ibmr);
873 static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
875 return container_of(ibmw, struct mlx5_ib_mw, ibmw);
878 struct mlx5_ib_ah {
879 struct ib_ah ibah;
880 struct mlx5_av av;
883 static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
885 return container_of(ibah, struct mlx5_ib_ah, ibah);
888 static inline struct mlx5_user_mmap_entry *
889 to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
892 struct mlx5_user_mmap_entry, rdma_entry);
895 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
896 struct mlx5_db *db);
897 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
898 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
899 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
900 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
901 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
902 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
904 int mlx5_ib_create_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr, u32 flags,
905 struct ib_udata *udata);
906 int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
907 void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
908 int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
909 struct ib_udata *udata);
910 int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
911 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
912 int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
913 void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
914 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
915 const struct ib_recv_wr **bad_wr);
916 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
917 struct ib_qp_init_attr *init_attr,
918 struct ib_udata *udata);
919 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
920 int attr_mask, struct ib_udata *udata);
921 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
922 struct ib_qp_init_attr *qp_init_attr);
923 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
924 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
925 const struct ib_send_wr **bad_wr);
926 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
927 const struct ib_recv_wr **bad_wr);
928 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
929 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
931 struct mlx5_ib_qp_base *base);
932 int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
933 struct ib_udata *udata);
934 void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
935 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
936 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
937 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
938 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
939 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
940 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
942 struct ib_udata *udata);
943 struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
944 struct ib_udata *udata);
945 int mlx5_ib_dealloc_mw(struct ib_mw *mw);
946 int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
948 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
950 struct ib_pd *pd, struct ib_udata *udata);
951 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
952 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
953 u32 max_num_sg, struct ib_udata *udata);
954 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
956 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
957 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
958 const struct ib_mad_hdr *in, size_t in_mad_size,
959 struct ib_mad_hdr *out, size_t *out_mad_size,
961 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
962 struct ib_udata *udata);
963 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
965 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
966 int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
967 struct ib_smp *out_mad);
968 int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
970 int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
972 int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
974 int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
975 int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
976 int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
978 int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
980 int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
981 struct ib_port_attr *props);
982 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
983 struct ib_port_attr *props);
984 int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
985 void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
986 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
990 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
993 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
996 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
997 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
998 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
999 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
1000 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1001 struct ib_mr_status *mr_status);
1002 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
1003 struct ib_wq_init_attr *init_attr,
1004 struct ib_udata *udata);
1005 void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
1006 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
1007 u32 wq_attr_mask, struct ib_udata *udata);
1008 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
1009 struct ib_rwq_ind_table_init_attr *init_attr,
1010 struct ib_udata *udata);
1011 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
1014 extern struct workqueue_struct *mlx5_ib_page_fault_wq;
1016 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
1017 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
1018 struct mlx5_ib_pfault *pfault);
1019 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
1020 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
1021 void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
1024 void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
1025 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
1026 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
1029 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
1034 static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
1035 static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
1036 static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
1039 static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
1040 static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
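
The static inline no-ops above (mlx5_ib_odp_create_qp() and friends) are the compiled-out counterparts of the ODP declarations listed earlier; the header provides one set or the other so callers never need #ifdefs of their own. A sketch of how that idiom is conventionally laid out (the guard macro name is an assumption here, not taken from this listing):

    struct mlx5_ib_qp;                  /* incomplete types are enough for prototypes */
    struct mlx5_ib_dev;

    #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
    /* Real implementations live in the ODP source file. */
    void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
    int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
    void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
    #else
    /* Compiled-out stubs: the calls vanish, and callers stay free of #ifdefs. */
    static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
    static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
    static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
    #endif
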
1044 int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
1045 u8 port, struct ifla_vf_info *info);
1046 int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
1048 int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
1049 u8 port, struct ifla_vf_stats *stats);
1050 int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
1053 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
1055 int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
1059 struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
1060 struct ib_qp_init_attr *init_attr);
1061 int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
1062 int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1064 int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
1066 struct ib_qp_init_attr *qp_init_attr);
1067 int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
1068 const struct ib_send_wr **bad_wr);
1069 int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
1070 const struct ib_recv_wr **bad_wr);
1071 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
1073 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
1075 void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
1079 int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
1080 void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
1081 void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
1082 void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
1087 mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
1089 static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
1090 static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
1091 static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
1099 static inline void init_query_mad(struct ib_smp *mad)
1149 static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
1150 struct mlx5_ib_create_qp *ucmd,
1156 if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
1160 if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
1167 static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
1168 struct mlx5_ib_create_srq *ucmd,
1174 if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
1178 if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
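
get_qp_user_index() and get_srq_user_index() above validate the user command with field_avail(): the uidx member is only trusted when the user-supplied length inlen actually covers it, so older userspace that passes a shorter command simply does not provide the field. The macro itself is defined elsewhere in the header; the definition and the reduced command struct below are conventional stand-ins for illustration, not copied from this listing:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Conventional form (assumed here): a buffer of length sz contains fld only
     * if the field ends at or before sz. */
    #define field_avail(type, fld, sz) \
            (offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

    struct mlx5_ib_create_qp {          /* reduced stand-in for the uverbs command */
            uint64_t buf_addr;
            uint64_t db_addr;
            uint32_t uidx;              /* newer member: old userspace may not send it */
    };

    int main(void)
    {
            size_t old_inlen = offsetof(struct mlx5_ib_create_qp, uidx); /* old ABI length */
            size_t new_inlen = sizeof(struct mlx5_ib_create_qp);

            printf("old ABI carries uidx: %d\n",
                   field_avail(struct mlx5_ib_create_qp, uidx, old_inlen));
            printf("new ABI carries uidx: %d\n",
                   field_avail(struct mlx5_ib_create_qp, uidx, new_inlen));
            return 0;
    }
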
1185 void mlx5_ib_cleanup_congestion(struct mlx5_ib_dev *);
1186 int mlx5_ib_init_congestion(struct mlx5_ib_dev *);
1188 static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
1194 static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
1195 struct mlx5_bfreg_info *bfregi)
1200 int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1201 struct mlx5_bfreg_info *bfregi, u32 bfregn,