/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 *	   0 if psn_a == psn_b
 *	  <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}

struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_elem elem;
};

struct rxe_pd {
	struct ib_pd ibpd;
	struct rxe_pool_elem elem;
};

struct rxe_ah {
	struct ib_ah ibah;
	struct rxe_pool_elem elem;
	struct rxe_av av;
	bool is_user;
	int ah_num;
};

struct rxe_cqe {
	union {
		struct ib_wc ibwc;
		struct ib_uverbs_wc uibwc;
	};
};

struct rxe_cq {
	struct ib_cq ibcq;
	struct rxe_pool_elem elem;
	struct rxe_queue *queue;
	spinlock_t cq_lock;
	u8 notify;
	bool is_user;
	atomic_t num_wq;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int max_wr;
	int max_sge;
	int max_inline;
	spinlock_t sq_lock; /* guard queue */
	struct rxe_queue *queue;
};

struct rxe_rq {
	int max_wr;
	int max_sge;
	spinlock_t producer_lock; /* guard queue producer */
	spinlock_t consumer_lock; /* guard queue consumer */
	struct rxe_queue *queue;
};

struct rxe_srq {
	struct ib_srq ibsrq;
	struct rxe_pool_elem elem;
	struct rxe_pd *pd;
	struct rxe_rq rq;
	u32 srq_num;

	int limit;
	int error;
};

struct rxe_req_info {
	int wqe_index;
	u32 psn;
	int opcode;
	atomic_t rd_atomic;
	int wait_fence;
	int need_rd_atomic;
	int wait_psn;
	int need_retry;
	int wait_for_rnr_timer;
	int noack_pkts;
	struct rxe_task task;
};

struct rxe_comp_info {
	u32 psn;
	int opcode;
	int timeout;
	int timeout_retry;
	int started_retry;
	u32 retry_cnt;
	u32 rnr_retry;
	struct rxe_task task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

struct resp_res {
	int type;
	int replay;
	u32 first_psn;
	u32 last_psn;
	u32 cur_psn;
	enum rdatm_res_state state;

	union {
		struct {
			u64 orig_val;
		} atomic;
		struct {
			u64 va_org;
			u32 rkey;
			u32 length;
			u64 va;
			u32 resid;
		} read;
		struct {
			u32 length;
			u64 va;
			u8 type;
			u8 level;
		} flush;
	};
};
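
/* Illustrative sketch only, not part of the upstream header: the responder
 * replays a duplicate RDMA read by finding a cached resource whose PSN range
 * covers the request, roughly as below.  The helper name is hypothetical;
 * the real matching logic lives in rxe_resp.c.
 */
static inline bool resp_res_covers_psn(const struct resp_res *res, u32 psn)
{
	/* wrap-safe 24-bit PSN range check using psn_compare() above */
	return psn_compare(psn, res->first_psn) >= 0 &&
	       psn_compare(psn, res->last_psn) <= 0;
}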

struct rxe_resp_info {
	u32 msn;
	u32 psn;
	u32 ack_psn;
	int opcode;
	int drop_msg;
	int goto_error;
	int sent_psn_nak;
	enum ib_wc_status status;
	u8 aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe *wqe;

	/* RDMA read / atomic only */
	u64 va;
	u64 offset;
	struct rxe_mr *mr;
	u32 resid;
	u32 rkey;
	u32 length;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe wqe;
		struct ib_sge sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res *resources;
	unsigned int res_head;
	unsigned int res_tail;
	struct resp_res *res;
	struct rxe_task task;
};

struct rxe_qp {
	struct ib_qp ibqp;
	struct rxe_pool_elem elem;
	struct ib_qp_attr attr;
	unsigned int valid;
	unsigned int mtu;
	bool is_user;

	struct rxe_pd *pd;
	struct rxe_srq *srq;
	struct rxe_cq *scq;
	struct rxe_cq *rcq;

	enum ib_sig_type sq_sig_type;

	struct rxe_sq sq;
	struct rxe_rq rq;

	struct socket *sk;
	u32 dst_cookie;
	u16 src_port;

	struct rxe_av pri_av;
	struct rxe_av alt_av;

	atomic_t mcg_num;

	struct sk_buff_head req_pkts;
	struct sk_buff_head resp_pkts;

	struct rxe_req_info req;
	struct rxe_comp_info comp;
	struct rxe_resp_info resp;

	atomic_t ssn;
	atomic_t skb_out;
	int need_req_skb;

	/* Timer for retransmitting packet when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t state_lock; /* guard requester and completer */

	struct execute_work cleanup_work;
};

enum rxe_mr_state {
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}

struct rxe_mr {
	struct rxe_pool_elem elem;
	struct ib_mr ibmr;

	struct ib_umem *umem;

	u32 lkey;
	u32 rkey;
	enum rxe_mr_state state;
	int access;
	atomic_t num_mw;

	unsigned int page_offset;
	unsigned int page_shift;
	u64 page_mask;

	u32 num_buf;
	u32 nbuf;

	struct xarray page_list;
};

static inline unsigned int mr_page_size(struct rxe_mr *mr)
{
	return mr ? mr->ibmr.page_size : PAGE_SIZE;
}
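
/* Illustrative sketch only, not part of the upstream header: one plausible
 * way the page_shift/page_mask fields and mr_page_size() above combine when
 * translating an iova; the matching xarray slot would be indexed at the same
 * page granularity relative to ibmr.iova.  The helper name is hypothetical;
 * the real lookup logic lives in rxe_mr.c.
 */
static inline size_t rxe_mr_example_page_offset(struct rxe_mr *mr, u64 iova)
{
	/* bits below the MR page size give the byte offset within the page */
	return iova & (mr_page_size(mr) - 1);
}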

enum rxe_mw_state {
	RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw ibmw;
	struct rxe_pool_elem elem;
	spinlock_t lock;
	enum rxe_mw_state state;
	struct rxe_qp *qp; /* Type 2 only */
	struct rxe_mr *mr;
	u32 rkey;
	int access;
	u64 addr;
	u64 length;
};

struct rxe_mcg {
	struct rb_node node;
	struct kref ref_cnt;
	struct rxe_dev *rxe;
	struct list_head qp_list;
	union ib_gid mgid;
	atomic_t qp_num;
	u32 qkey;
	u16 pkey;
};

struct rxe_mca {
	struct list_head qp_list;
	struct rxe_qp *qp;
};

struct rxe_port {
	struct ib_port_attr attr;
	__be64 port_guid;
	__be64 subnet_prefix;
	spinlock_t port_lock; /* guard port */
	unsigned int mtu_cap;
	/* special QPs */
	u32 qp_gsi_index;
};

struct rxe_dev {
	struct ib_device ib_dev;
	struct ib_device_attr attr;
	int max_ucontext;
	int max_inline_data;
	struct mutex usdev_lock;

	struct net_device *ndev;

	struct rxe_pool uc_pool;
	struct rxe_pool pd_pool;
	struct rxe_pool ah_pool;
	struct rxe_pool srq_pool;
	struct rxe_pool qp_pool;
	struct rxe_pool cq_pool;
	struct rxe_pool mr_pool;
	struct rxe_pool mw_pool;

	/* multicast support */
	spinlock_t mcg_lock;
	struct rb_root mcg_tree;
	atomic_t mcg_num;
	atomic_t mcg_attach;

	spinlock_t pending_lock; /* guard pending_mmaps */
	struct list_head pending_mmaps;

	spinlock_t mmap_offset_lock; /* guard mmap_offset */
	u64 mmap_offset;

	atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port port;
	struct crypto_shash *tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
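
/* Illustrative sketch only, not part of the upstream header: verbs entry
 * points typically recover driver objects from the core objects they embed
 * via the to_rxx() helpers above, e.g. pairing to_rqp() with to_rdev() on
 * the QP's device.  The helper below is hypothetical and only shows the
 * container_of() pattern in use.
 */
static inline struct rxe_dev *rxe_example_qp_to_dev(struct rxe_qp *qp)
{
	/* ibqp.device points at the ib_device embedded in struct rxe_dev */
	return to_rdev(qp->ibqp.device);
}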

static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
	return to_rpd(ah->ibah.pd);
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

#endif /* RXE_VERBS_H */