/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__

#include "erdma.h"

/* RDMA capabilities. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 8192
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
#define ERDMA_MAX_CONTEXT (128 * 1024)
#define ERDMA_MAX_SEND_SGE 6
#define ERDMA_MAX_RECV_SGE 1
#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
#define ERDMA_MAX_FRMR_PA 512

enum {
	ERDMA_MMAP_IO_NC = 0, /* no cache */
};

struct erdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

struct erdma_ext_db_info {
	bool enable;
	u16 sdb_off;
	u16 rdb_off;
	u16 cdb_off;
};

struct erdma_ucontext {
	struct ib_ucontext ibucontext;

	struct erdma_ext_db_info ext_db;

	u64 sdb;
	u64 rdb;
	u64 cdb;

	struct rdma_user_mmap_entry *sq_db_mmap_entry;
	struct rdma_user_mmap_entry *rq_db_mmap_entry;
	struct rdma_user_mmap_entry *cq_db_mmap_entry;

	/* doorbell records */
	struct list_head dbrecords_page_list;
	struct mutex dbrecords_page_mutex;
};

struct erdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

/*
 * Memory region definitions.
 */
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* each MTT entry takes 8 bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8

#define ERDMA_MR_TYPE_NORMAL 0
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2

#define ERDMA_MR_MTT_0LEVEL 0
#define ERDMA_MR_MTT_1LEVEL 1

#define ERDMA_MR_ACC_RA BIT(0)
#define ERDMA_MR_ACC_LR BIT(1)
#define ERDMA_MR_ACC_LW BIT(2)
#define ERDMA_MR_ACC_RR BIT(3)
#define ERDMA_MR_ACC_RW BIT(4)

static inline u8 to_erdma_access_flags(int access)
{
	return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
	       (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
	       (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0) |
	       (access & IB_ACCESS_REMOTE_ATOMIC ? ERDMA_MR_ACC_RA : 0);
}
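/*
 * Illustrative example (not part of the driver source): an MR registered
 * with IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ maps to the device
 * access flags ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR:
 *
 *	u8 flags = to_erdma_access_flags(IB_ACCESS_LOCAL_WRITE |
 *					 IB_ACCESS_REMOTE_READ);
 */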
/* Hierarchical storage structure for MTT entries */
struct erdma_mtt {
	u64 *buf;
	size_t size;

	bool continuous;
	union {
		dma_addr_t buf_dma;
		struct {
			struct scatterlist *sglist;
			u32 nsg;
			u32 level;
		};
	};

	struct erdma_mtt *low_level;
};

struct erdma_mem {
	struct ib_umem *umem;
	struct erdma_mtt *mtt;

	u32 page_size;
	u32 page_offset;
	u32 page_cnt;
	u32 mtt_nents;

	u64 va;
	u64 len;
};

struct erdma_mr {
	struct ib_mr ibmr;
	struct erdma_mem mem;
	u8 type;
	u8 access;
	u8 valid;
};

struct erdma_user_dbrecords_page {
	struct list_head list;
	struct ib_umem *umem;
	u64 va;
	int refcnt;
};

struct erdma_uqp {
	struct erdma_mem sq_mem;
	struct erdma_mem rq_mem;

	dma_addr_t sq_dbrec_dma;
	dma_addr_t rq_dbrec_dma;

	struct erdma_user_dbrecords_page *user_dbr_page;

	u32 rq_offset;
};

struct erdma_kqp {
	u16 sq_pi;
	u16 sq_ci;

	u16 rq_pi;
	u16 rq_ci;

	u64 *swr_tbl;
	u64 *rwr_tbl;

	void __iomem *hw_sq_db;
	void __iomem *hw_rq_db;

	void *sq_buf;
	dma_addr_t sq_buf_dma_addr;

	void *rq_buf;
	dma_addr_t rq_buf_dma_addr;

	void *sq_dbrec;
	void *rq_dbrec;

	dma_addr_t sq_dbrec_dma;
	dma_addr_t rq_dbrec_dma;

	u8 sig_all;
};

enum erdma_qp_state {
	ERDMA_QP_STATE_IDLE = 0,
	ERDMA_QP_STATE_RTR = 1,
	ERDMA_QP_STATE_RTS = 2,
	ERDMA_QP_STATE_CLOSING = 3,
	ERDMA_QP_STATE_TERMINATE = 4,
	ERDMA_QP_STATE_ERROR = 5,
	ERDMA_QP_STATE_UNDEF = 7,
	ERDMA_QP_STATE_COUNT = 8
};

enum erdma_qp_attr_mask {
	ERDMA_QP_ATTR_STATE = (1 << 0),
	ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
	ERDMA_QP_ATTR_ORD = (1 << 3),
	ERDMA_QP_ATTR_IRD = (1 << 4),
	ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
	ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
	ERDMA_QP_ATTR_MPA = (1 << 7)
};

enum erdma_qp_flags {
	ERDMA_QP_IN_FLUSHING = (1 << 0),
};

struct erdma_qp_attrs {
	enum erdma_qp_state state;
	enum erdma_cc_alg cc; /* Congestion control algorithm */
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 cookie;
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
	u8 qp_type;
	u8 pd_len;
};

struct erdma_qp {
	struct ib_qp ibqp;
	struct kref ref;
	struct completion safe_free;
	struct erdma_dev *dev;
	struct erdma_cep *cep;
	struct rw_semaphore state_lock;

	unsigned long flags;
	struct delayed_work reflush_dwork;

	union {
		struct erdma_kqp kern_qp;
		struct erdma_uqp user_qp;
	};

	struct erdma_cq *scq;
	struct erdma_cq *rcq;

	struct erdma_qp_attrs attrs;
	spinlock_t lock;
};

struct erdma_kcq_info {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;
	u32 ci;
	u32 cmdsn;
	u32 notify_cnt;

	spinlock_t lock;
	u8 __iomem *db;
	u64 *dbrec;
	dma_addr_t dbrec_dma;
};

struct erdma_ucq_info {
	struct erdma_mem qbuf_mem;
	struct erdma_user_dbrecords_page *user_dbr_page;
	dma_addr_t dbrec_dma;
};

struct erdma_cq {
	struct ib_cq ibcq;
	u32 cqn;

	u32 depth;
	u32 assoc_eqn;

	union {
		struct erdma_kcq_info kern_cq;
		struct erdma_ucq_info user_cq;
	};
};

#define QP_ID(qp) ((qp)->ibqp.qp_num)
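/*
 * QPs and CQs are indexed by their number in the device-wide XArrays
 * (dev->qp_xa / dev->cq_xa). A minimal lookup sketch (hypothetical caller,
 * with "qpn" a hypothetical variable; reference counting via
 * erdma_qp_get()/erdma_qp_put() shown only in outline):
 *
 *	struct erdma_qp *qp = find_qp_by_qpn(dev, qpn);
 *
 *	if (qp)
 *		erdma_qp_get(qp);
 */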
static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
{
	return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
}

static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
{
	return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
}

void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
			     enum erdma_qp_attr_mask mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);

static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
	return container_of(ibctx, struct erdma_ucontext, ibucontext);
}

static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{
	return container_of(pd, struct erdma_pd, ibpd);
}

static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct erdma_mr, ibmr);
}

static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
{
	return container_of(qp, struct erdma_qp, ibqp);
}

static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct erdma_cq, ibcq);
}

static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
	return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
}
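/*
 * The to_*() converters above recover the driver structure that embeds a
 * core ib_* object. A sketch of typical use in a verbs callback
 * (hypothetical body, error handling elided):
 *
 *	int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 *	{
 *		struct erdma_cq *cq = to_ecq(ibcq);
 *		...
 *	}
 */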
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
		       struct ib_udata *data);
int erdma_get_port_immutable(struct ib_device *dev, u32 port,
			     struct ib_port_immutable *ib_port_immutable);
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct uverbs_attr_bundle *attrs);
int erdma_query_port(struct ib_device *dev, u32 port,
		     struct ib_port_attr *attr);
int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
		    union ib_gid *gid);
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		    struct ib_udata *data);
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		   struct ib_qp_init_attr *init_attr);
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		    struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				u64 virt, int access, struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
		    const struct ib_send_wr **bad_send_wr);
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
		    const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
						u32 port_num);
int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		       u32 port, int index);

#endif