/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF

#define IRDMA_SHADOW_PGCNT		1

#define iwdev_to_idev(iwdev)	(&(iwdev)->rf->sc_dev)

struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	/* FIXME: Move to kcompat ideally. Used < 4.20.0 for old disassociate flow */
	struct list_head vma_list;
	struct mutex vma_list_mutex; /* protect the vma_list */
	int abi_ver;
	bool legacy_mode:1;
	bool use_raw_attrs:1;
};

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
	struct list_head udqp_list;
	spinlock_t udqp_list_lock;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

struct irdma_av {
	u8 macaddr[16];
	struct ib_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
};

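/*
 * A leaf PBLE can be referenced either by its index in the PBLE pool or by
 * a physical address, so the two views share storage.
 */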
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

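/* Kernel CQ buffer retired by a CQ resize; freed later from work context */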
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	int access;
	u8 is_hwreg;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u64 page_msk;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	atomic_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

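/* Software-generated completion queued on irdma_cq.cmpl_generated */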
struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct if_notify_work {
	struct work_struct work;
	struct irdma_device *iwdev;
	u32 ipaddr[4];
	u16 vlan_id;
	bool ipv4:1;
	bool ifup:1;
};

struct iw_cm_id;

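/* Kernel-mode QP resources: WQE memory and SQ/RQ work-request ID tracking */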
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	u32 *sig_trk_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	struct list_head ud_list_elem;
	atomic_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	spinlock_t dwork_flush_lock; /* protect mod_delayed_work */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct ib_sge *sg_list;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
	bool active_conn:1;
	bool user_mode:1;
	bool hte_added:1;
	bool flush_issued:1;
	bool sig_all:1;
	bool pau_mode:1;
	bool suspend_pending:1;
};

struct irdma_udqs_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
	u8 user_prio;
	bool qs_change:1;
};

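/* Caching attribute for user-mapped BAR pages: non-cached or write-combined */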
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};

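/* FW major/minor versions are packed into the IRDMA_FEATURE_FW_INFO word */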
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

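/*
 * Map the opcode of an SQ completion onto the corresponding ib_wc opcode;
 * an unrecognized opcode marks the work completion with a general error.
 */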
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	struct irdma_sc_qp *qp;

	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		qp = cq_poll_info->qp_handle;
		irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
			      cq_poll_info->op_type);
		entry->status = IB_WC_GENERAL_ERR;
	}
}

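/* Map the opcode of an RQ completion onto the corresponding ib_wc opcode */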
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/**
	 * iWARP does not support send with immediate, so the presence of
	 * immediate data must mean an RDMA write with immediate.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
				IB_WC_RECV_RDMA_WITH_IMM :
				IB_WC_RECV;
		return;
	}
	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

/**
 * irdma_mcast_mac_v4 - Get the multicast MAC for an IP address
 * @ip_addr: IPv4 address
 * @mac: pointer to result MAC address
 */
static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
{
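	/*
	 * RFC 1112 mapping: 01:00:5E followed by the low 23 bits of the
	 * IPv4 address. The reversed indexing assumes the address is stored
	 * least-significant byte first, e.g. 239.1.2.3 -> 01:00:5E:01:02:03.
	 */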
	u8 *ip = (u8 *)ip_addr;
	unsigned char mac4[ETHER_ADDR_LEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F, ip[1],
					      ip[0]};

	ether_addr_copy(mac, mac4);
}

/**
 * irdma_mcast_mac_v6 - Get the multicast MAC for an IP address
 * @ip_addr: IPv6 address
 * @mac: pointer to result MAC address
 */
static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
{
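	/*
	 * RFC 2464 mapping: 33:33 followed by 32 bits of the IPv6 address,
	 * again assuming the word the caller passes in is stored
	 * least-significant byte first.
	 */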
	u8 *ip = (u8 *)ip_addr;
	unsigned char mac6[ETHER_ADDR_LEN] = {0x33, 0x33, ip[3], ip[2], ip[1], ip[0]};

	ether_addr_copy(mac, mac6);
}

struct rdma_user_mmap_entry *
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
void irdma_flush_worker(struct work_struct *work);
#endif /* IRDMA_VERBS_H */