/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF
#define IRDMA_SHADOW_PGCNT		1

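/*
 * irdma_ucontext - per-process verbs context
 *
 * Tracks the user-mapped doorbell page and the per-context lists of
 * CQ, QP and SRQ registered memory regions shared with user space.
 */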
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	struct list_head srq_reg_mem_list;
	spinlock_t srq_reg_mem_list_lock; /* protect SRQ memory list */
	int abi_ver;
	u8 legacy_mode : 1;
	u8 use_raw_attrs : 1;
};

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

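/*
 * irdma_av - address vector for an address handle
 *
 * Caches the resolved MAC address, the rdma_ah_attr used to create
 * the AH, and the source/destination GID addresses in sockaddr form.
 */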
struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

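/*
 * irdma_ah - address handle
 *
 * AHs with matching attributes can be shared: a child AH takes a
 * reference on parent_ah, the entry kept on the driver's cached list.
 */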
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};

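/*
 * irdma_hmc_pble - queue buffer reference
 *
 * Holds either an index into the PBLE (physical buffer list entry)
 * pool or the DMA address of a physically contiguous buffer.
 */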
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_srq_mr {
	struct irdma_hmc_pble srq_pbl;
	dma_addr_t shadow;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	dma_addr_t rq_pa;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
		struct irdma_srq_mr srq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

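/*
 * irdma_mr - memory region / memory window state
 *
 * ibmr and ibmw share storage since an irdma_mr backs exactly one of
 * the two object types; stag holds the HW steering tag (key).
 */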
struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	int access;
	u8 is_hwreg;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

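/*
 * irdma_srq - shared receive queue
 *
 * Kernel-mode SRQs use kmem/srq_wrid_mem for the ring and WR-ID
 * tracking; user-mode SRQs reference registered memory via iwpbl.
 */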
struct irdma_srq {
	struct ib_srq ibsrq;
	struct irdma_sc_srq sc_srq __aligned(64);
	struct irdma_dma_mem kmem;
	u64 *srq_wrid_mem;
	refcount_t refcnt;
	spinlock_t lock; /* for poll srq */
	struct irdma_pbl *iwpbl;
	struct irdma_sge *sg_list;
	u16 srq_head;
	u32 srq_num;
	u32 max_wr;
	bool user_mode:1;
};

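/*
 * irdma_cq - completion queue
 *
 * resize_list holds the old CQ buffers still being drained after a
 * resize; cmpl_generated queues software-generated flush completions.
 */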
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	refcount_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

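/*
 * irdma_qp - queue pair
 *
 * The anonymous unions hold the iWARP or RoCEv2 offload context and
 * the matching TCP or UDP transport info; only one member of each
 * union is valid, selected by the QP's protocol.
 */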
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 suspend_pending : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};

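/*
 * irdma_mmap_flag - caching attribute used when mapping device BAR
 * space into user space: non-cached or write-combined.
 */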
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};

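/*
 * The firmware version is reported in the IRDMA_FEATURE_FW_INFO word
 * of the feature table; the helpers below extract its major and minor
 * fields.
 */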
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

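/*
 * Example use (illustrative sketch; the iwdev local and surrounding
 * code are assumptions modeled on callers like irdma_get_dev_fw_str(),
 * not definitions from this header):
 *
 *	pr_info("irdma FW %u.%u\n",
 *		irdma_fw_major_ver(&iwdev->rf->sc_dev),
 *		irdma_fw_minor_ver(&iwdev->rf->sc_dev));
 */

/* Map an SQ completion's irdma op_type to the ib_wc opcode. */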
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP:
		entry->opcode = IB_WC_COMP_SWAP;
		break;
	case IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD:
		entry->opcode = IB_WC_FETCH_ADD;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

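/*
 * On GEN_3 hardware the RQ completion carries an irdma op_type, so a
 * received RDMA Write with immediate is identified directly; older
 * hardware reports the wire opcode instead (see set_ib_wc_op_rq()).
 */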
static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
					 struct ib_wc *entry)
{
	switch (info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

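/* Map an RQ completion to an ib_wc opcode on pre-GEN_3 hardware. */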
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support send with immediate, so any immediate
	 * data present must have arrived via RDMA Write with immediate.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
					IB_WC_RECV_RDMA_WITH_IMM :
					IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

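/*
 * Example use in a poll-CQ path (illustrative sketch; the locals and
 * the hw_rev/qp_caps plumbing are assumptions modeled on the driver's
 * completion-polling code, not definitions from this header):
 *
 *	if (info->q_type == IRDMA_CQE_QTYPE_SQ)
 *		set_ib_wc_op_sq(info, entry);
 *	else if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
 *		set_ib_wc_op_rq(info, entry,
 *				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
 *	else
 *		set_ib_wc_op_rq_gen_3(info, entry);
 */
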
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */