/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
/*
 * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 */

#ifndef IRDMA_ABI_H
#define IRDMA_ABI_H

#include <linux/types.h>

/* irdma must support legacy GEN_1 i40iw kernel
 * and user-space whose last ABI ver is 5
 */
#define IRDMA_ABI_VER 5

enum irdma_memreg_type {
	IRDMA_MEMREG_TYPE_MEM = 0,
	IRDMA_MEMREG_TYPE_QP  = 1,
	IRDMA_MEMREG_TYPE_CQ  = 2,
	IRDMA_MEMREG_TYPE_SRQ = 3,
};

enum {
	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
	IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
	IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA = 1 << 2,
	IRDMA_SUPPORT_WQE_FORMAT_V2 = 1 << 3,
};

struct irdma_alloc_ucontext_req {
	__u32 rsvd32;
	__u8 userspace_ver;
	__u8 rsvd8[3];
	__aligned_u64 comp_mask;
};

struct irdma_alloc_ucontext_resp {
	__u32 max_pds;
	__u32 max_qps;
	__u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
	__u8 kernel_ver;
	__u8 rsvd[3];
	__aligned_u64 feature_flags;
	__aligned_u64 db_mmap_key;
	__u32 max_hw_wq_frags;
	__u32 max_hw_read_sges;
	__u32 max_hw_inline;
	__u32 max_hw_rq_quanta;
	__u32 max_hw_wq_quanta;
	__u32 min_hw_cq_size;
	__u32 max_hw_cq_size;
	__u16 max_hw_sq_chunk;
	__u8 hw_rev;
	__u8 rsvd2;
	__aligned_u64 comp_mask;
	__u16 min_hw_wq_size;
	__u32 max_hw_srq_quanta;
	__u8 rsvd3[2];
};

struct irdma_alloc_pd_resp {
	__u32 pd_id;
	__u8 rsvd[4];
};

struct irdma_resize_cq_req {
	__aligned_u64 user_cq_buffer;
};

struct irdma_create_cq_req {
	__aligned_u64 user_cq_buf;
	__aligned_u64 user_shadow_area;
};

struct irdma_create_srq_req {
	__aligned_u64 user_srq_buf;
	__aligned_u64 user_shadow_area;
};

struct irdma_create_srq_resp {
	__u32 srq_id;
	__u32 srq_size;
};

struct irdma_create_qp_req {
	__aligned_u64 user_wqe_bufs;
	__aligned_u64 user_compl_ctx;
};

struct irdma_mem_reg_req {
	__u16 reg_type; /* enum irdma_memreg_type */
	__u16 cq_pages;
	__u16 rq_pages;
	__u16 sq_pages;
};

struct irdma_modify_qp_req {
	__u8 sq_flush;
	__u8 rq_flush;
	__u8 rsvd[6];
};

struct irdma_create_cq_resp {
	__u32 cq_id;
	__u32 cq_size;
};

struct irdma_create_qp_resp {
	__u32 qp_id;
	__u32 actual_sq_size;
	__u32 actual_rq_size;
	__u32 irdma_drv_opt;
	__u16 push_idx;
	__u8 lsmm;
	__u8 rsvd;
	__u32 qp_caps;
};

struct irdma_modify_qp_resp {
	__aligned_u64 push_wqe_mmap_key;
	__aligned_u64 push_db_mmap_key;
	__u16 push_offset;
	__u8 push_valid;
	__u8 rsvd[5];
};

struct irdma_create_ah_resp {
	__u32 ah_id;
	__u8 rsvd[4];
};
#endif /* IRDMA_ABI_H */
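
/*
 * Illustrative sketch (not part of the ABI header): one way a user-space
 * provider might interpret the comp_mask bits in irdma_alloc_ucontext_resp,
 * falling back to legacy limits when the kernel does not advertise the newer
 * trailing fields. The helper name and the LEGACY_* fallback values are
 * hypothetical placeholders, not values defined by this header.
 *
 *	#define LEGACY_MIN_HW_WQ_SIZE		8	// hypothetical fallback
 *	#define LEGACY_MAX_HW_SRQ_QUANTA	0	// hypothetical: no SRQ support
 *
 *	static void irdma_read_uctx_caps(const struct irdma_alloc_ucontext_resp *resp,
 *					 __u32 *min_wq_size, __u32 *max_srq_quanta)
 *	{
 *		// Trailing fields are only valid when the kernel reports them
 *		// via the corresponding comp_mask bit.
 *		if (resp->comp_mask & IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE)
 *			*min_wq_size = resp->min_hw_wq_size;
 *		else
 *			*min_wq_size = LEGACY_MIN_HW_WQ_SIZE;
 *
 *		if (resp->comp_mask & IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA)
 *			*max_srq_quanta = resp->max_hw_srq_quanta;
 *		else
 *			*max_srq_quanta = LEGACY_MAX_HW_SRQ_QUANTA;
 *	}
 */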