/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF

#define IRDMA_SHADOW_PGCNT		1

#define iwdev_to_idev(iwdev)	(&(iwdev)->rf->sc_dev)

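/*
 * Per-process (user context) state: doorbell mmap entry and lists of the
 * memory registered from user space for CQs and QPs on this context.
 */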
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
#if __FreeBSD_version >= 1400026
	struct rdma_user_mmap_entry *db_mmap_entry;
#else
	struct irdma_user_mmap_entry *db_mmap_entry;
	DECLARE_HASHTABLE(mmap_hash_tbl, 6);
	spinlock_t mmap_tbl_lock; /* protect mmap hash table entries */
#endif
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	/* FIXME: Move to kcompat ideally. Used < 4.20.0 for old disassociate flow */
	struct list_head vma_list;
	struct mutex vma_list_mutex; /* protect the vma_list */
	int abi_ver;
	bool legacy_mode:1;
	bool use_raw_attrs:1;
};

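/* Protection domain: wraps the shared-code PD and tracks UD QPs created on it */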
struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
	struct list_head udqp_list;
	spinlock_t udqp_list_lock;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

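/* Address vector: resolved MAC, AH attributes and source/destination GID-derived addresses */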
struct irdma_av {
	u8 macaddr[16];
	struct ib_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

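/* Address handle: pairs the ib_ah with the shared-code AH object and its address vector */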
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
};

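/* Reference to queue backing memory: either a PBLE index or a DMA address */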
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

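/* Pinned user CQ memory: CQ ring PBL and shadow area */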
struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

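/* Pinned user QP memory: SQ/RQ PBLs and shadow area */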
struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

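/* Retired kernel CQ buffer, kept on the CQ resize list until it can be freed */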
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

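/* Physical buffer list bookkeeping for a user-registered QP or CQ buffer */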
struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

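/* Memory region/window state: pinned user memory, access rights, STag and page bookkeeping */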
struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	int access;
	u8 is_hwreg;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u64 page_msk;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

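/* Completion queue: wraps the shared-code CQ and holds kernel ring memory plus software-generated completions */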
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	atomic_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct if_notify_work {
	struct work_struct work;
	struct irdma_device *iwdev;
	u32 ipaddr[4];
	u16 vlan_id;
	bool ipv4:1;
	bool ifup:1;
};

struct iw_cm_id;

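/* Kernel-mode QP work-request tracking memory */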
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	u32 *sig_trk_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

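/* Queue pair: per-QP verbs, connection-management and flush state around the shared-code QP */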
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
#if __FreeBSD_version >= 1400026
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
#else
	struct irdma_user_mmap_entry *push_wqe_mmap_entry;
	struct irdma_user_mmap_entry *push_db_mmap_entry;
#endif
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	struct list_head ud_list_elem;
	atomic_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	spinlock_t dwork_flush_lock; /* protect mod_delayed_work */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_sge *sg_list;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
	bool active_conn:1;
	bool user_mode:1;
	bool hte_added:1;
	bool flush_issued:1;
	bool sig_all:1;
	bool pau_mode:1;
	bool suspend_pending:1;
};

struct irdma_udqs_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
	u8 user_prio;
	bool qs_change:1;
};

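/* Caching attribute for user BAR mappings: non-cached or write-combining */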
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

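/* Maps a user mmap offset to a device BAR offset and its caching attribute */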
struct irdma_user_mmap_entry {
#if __FreeBSD_version >= 1400026
	struct rdma_user_mmap_entry rdma_entry;
#else
	struct irdma_ucontext *ucontext;
	struct hlist_node hlist;
	u64 pgoff_key; /* Used to compute offset (in bytes) returned to user libc's mmap */
#endif
	u64 bar_offset;
	u8 mmap_flag;
};

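/* Extract the firmware major/minor version from the FW_INFO feature word */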
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

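/* Translate the HW opcode of an SQ completion into the corresponding ib_wc opcode */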
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	struct irdma_sc_qp *qp;

	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		qp = cq_poll_info->qp_handle;
		irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
			      cq_poll_info->op_type);
		entry->status = IB_WC_GENERAL_ERR;
	}
}

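/* Translate an RQ completion into the corresponding ib_wc receive opcode */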
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/**
	 * iWARP does not support Send with Immediate, so any completion
	 * carrying immediate data must come from an RDMA Write with Immediate.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
				IB_WC_RECV_RDMA_WITH_IMM :
				IB_WC_RECV;
		return;
	}
	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

/**
 * irdma_mcast_mac_v4 - Get the multicast MAC for an IP address
 * @ip_addr: IPv4 address
 * @mac: pointer to result MAC address
 */
static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
{
	u8 *ip = (u8 *)ip_addr;
	unsigned char mac4[ETHER_ADDR_LEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F, ip[1],
					ip[0]};

	ether_addr_copy(mac, mac4);
}

/**
 * irdma_mcast_mac_v6 - Get the multicast MAC for an IP address
 * @ip_addr: IPv6 address
 * @mac: pointer to result MAC address
 */
static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
{
	u8 *ip = (u8 *)ip_addr;
	unsigned char mac6[ETHER_ADDR_LEN] = {0x33, 0x33, ip[3], ip[2], ip[1], ip[0]};

	ether_addr_copy(mac, mac6);
}

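/*
 * Illustrative sketch (not part of the driver): the helpers above apply the
 * standard multicast MAC mappings (01:00:5E plus the low 23 bits of the IPv4
 * group, 33:33 plus the low 32 bits of the IPv6 group). Assuming the group
 * address is held in host byte order on a little-endian host, a hypothetical
 * caller might do:
 *
 *	u32 grp = 0xEF010203;			// 239.1.2.3
 *	u8 dmac[ETHER_ADDR_LEN];
 *
 *	irdma_mcast_mac_v4(&grp, dmac);		// dmac = 01:00:5e:01:02:03
 */
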
#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
#else
struct irdma_user_mmap_entry *
irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
			       enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
void irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry);
#endif
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
void irdma_flush_worker(struct work_struct *work);
#endif /* IRDMA_VERBS_H */