/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2023, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file ice_rdma.c
 * @brief RDMA client driver interface
 *
 * Functions to interface with the RDMA client driver, for enabling RDMA
 * functionality for the ice driver.
 *
 * The RDMA client interface is based on a simple kobject interface which is
 * defined by the irdma_if.m and irdma_di_if.m interfaces.
 *
 * The ice device driver provides the irdma_di_if.m interface methods, while
 * the client RDMA driver provides the irdma_if.m interface methods as an
 * extension on top of the irdma_di_if kobject.
 *
 * The initial connection between drivers is done via the RDMA client driver
 * calling ice_rdma_register.
 */

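/*
 * Illustrative sketch (not part of this driver): a client RDMA driver would
 * typically fill out a struct ice_rdma_info with the interface version it
 * was built against and a pointer to its kobject class, then call
 * ice_rdma_register() from its module load path and ice_rdma_unregister()
 * on unload. The "irdma_class" name below is hypothetical; the real class
 * comes from the client driver (see the sketch following ice_rdma_di_class
 * below).
 *
 *	static struct ice_rdma_info irdma_info = {
 *		.major_version = ICE_RDMA_MAJOR_VERSION,
 *		.minor_version = ICE_RDMA_MINOR_VERSION,
 *		.patch_version = ICE_RDMA_PATCH_VERSION,
 *		.rdma_class = &irdma_class,
 *	};
 *
 *	error = ice_rdma_register(&irdma_info);
 *	...
 *	error = ice_rdma_unregister();
 */
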
#include "ice_iflib.h"
#include "ice_rdma_internal.h"

#include "irdma_if.h"
#include "irdma_di_if.h"

/**
 * @var ice_rdma
 * @brief global RDMA driver state
 *
 * Contains global state the driver uses to connect to a client RDMA interface
 * driver.
 */
static struct ice_rdma_state ice_rdma;

/*
 * Helper function prototypes
 */
static int ice_rdma_pf_attach_locked(struct ice_softc *sc);
static void ice_rdma_pf_detach_locked(struct ice_softc *sc);
static int ice_rdma_check_version(struct ice_rdma_info *info);
static void ice_rdma_cp_qos_info(struct ice_hw *hw,
				 struct ice_dcbx_cfg *dcbx_cfg,
				 struct ice_qos_params *qos_info);

/*
 * RDMA Device Interface prototypes
 */
static int ice_rdma_pf_reset(struct ice_rdma_peer *peer);
static int ice_rdma_pf_msix_init(struct ice_rdma_peer *peer,
				 struct ice_rdma_msix_mapping *msix_info);
static int ice_rdma_qset_register_request(struct ice_rdma_peer *peer,
			     struct ice_rdma_qset_update *res);
static int ice_rdma_update_vsi_filter(struct ice_rdma_peer *peer_dev,
				      bool enable);
static void ice_rdma_request_handler(struct ice_rdma_peer *peer,
				     struct ice_rdma_request *req);


/**
 * @var ice_rdma_di_methods
 * @brief RDMA driver interface methods
 *
 * Kobject methods implementing the driver-side interface for the RDMA peer
 * clients. This method table contains the operations which the client can
 * request from the driver.
 *
 * The client driver will then extend this kobject class with methods that the
 * driver can request from the client.
 */
static kobj_method_t ice_rdma_di_methods[] = {
	KOBJMETHOD(irdma_di_reset, ice_rdma_pf_reset),
	KOBJMETHOD(irdma_di_msix_init, ice_rdma_pf_msix_init),
	KOBJMETHOD(irdma_di_qset_register_request, ice_rdma_qset_register_request),
	KOBJMETHOD(irdma_di_vsi_filter_update, ice_rdma_update_vsi_filter),
	KOBJMETHOD(irdma_di_req_handler, ice_rdma_request_handler),
	KOBJMETHOD_END
};

/* Define ice_rdma_di class which will be extended by the iRDMA driver */
DEFINE_CLASS_0(ice_rdma_di, ice_rdma_di_class, ice_rdma_di_methods, sizeof(struct ice_rdma_peer));
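
/*
 * Illustrative sketch (not part of this driver): the iRDMA client is
 * expected to extend ice_rdma_di_class with its implementations of the
 * irdma_if.m methods, so that the driver-side KOBJMETHODs above remain
 * reachable from the same kobject. The method and handler names below are
 * hypothetical; they only assume that irdma_if.m declares methods such as
 * probe/open (suggested by the IRDMA_PROBE()/IRDMA_OPEN() calls later in
 * this file).
 *
 *	static kobj_method_t irdma_methods[] = {
 *		KOBJMETHOD(irdma_probe, irdma_probe_handler),
 *		KOBJMETHOD(irdma_open, irdma_open_handler),
 *		...
 *		KOBJMETHOD_END
 *	};
 *
 *	DEFINE_CLASS_1(irdma, irdma_class, irdma_methods,
 *		       sizeof(struct ice_rdma_peer), ice_rdma_di_class);
 */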

/**
 * ice_rdma_pf_reset - RDMA client interface requested a reset
 * @peer: the RDMA peer client structure
 *
 * Implements IRDMA_DI_RESET, called by the RDMA client driver to request
 * a reset of an ice driver device.
 */
static int
ice_rdma_pf_reset(struct ice_rdma_peer *peer)
{
	struct ice_softc *sc = ice_rdma_peer_to_sc(peer);

	/*
	 * Request that the driver re-initialize by bringing the interface
	 * down and up.
	 */
	ice_request_stack_reinit(sc);

	return (0);
}

/**
 * ice_rdma_pf_msix_init - RDMA client interface requested MSI-X initialization
 * @peer: the RDMA peer client structure
 * @msix_info: requested MSI-X mapping
 *
 * Implements IRDMA_DI_MSIX_INIT, called by the RDMA client driver to
 * initialize the MSI-X resources required for RDMA functionality.
 */
static int
ice_rdma_pf_msix_init(struct ice_rdma_peer *peer,
		      struct ice_rdma_msix_mapping __unused *msix_info)
{
	struct ice_softc *sc = ice_rdma_peer_to_sc(peer);

	MPASS(msix_info != NULL);

	device_printf(sc->dev, "%s: iRDMA MSI-X initialization request is not yet implemented\n", __func__);

	/* TODO: implement MSI-X initialization for RDMA */
	return (ENOSYS);
}

/**
 * ice_rdma_qset_register_request - RDMA client interface request qset
 *                                  registration or unregistration
 * @peer: the RDMA peer client structure
 * @res: resources to be registered or unregistered
 */
static int
ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_update *res)
{
	struct ice_softc *sc = ice_rdma_peer_to_sc(peer);
	struct ice_vsi *vsi = NULL;
	struct ice_dcbx_cfg *dcbx_cfg;
	struct ice_hw *hw = &sc->hw;
	enum ice_status status;
	int count, i, ret = 0;
	uint32_t *qset_teid;
	uint16_t *qs_handle;
	uint16_t max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	uint16_t vsi_id;
	uint8_t ena_tc = 0;

	if (!res)
		return -EINVAL;

	if (res->cnt_req > ICE_MAX_TXQ_PER_TXQG)
		return -EINVAL;

	switch(res->res_type) {
	case ICE_RDMA_QSET_ALLOC:
		count = res->cnt_req;
		vsi_id = peer->pf_vsi_num;
		break;
	case ICE_RDMA_QSET_FREE:
		count = res->res_allocated;
		vsi_id = res->qsets.vsi_id;
		break;
	default:
		return -EINVAL;
	}
	qset_teid = (uint32_t *)ice_calloc(hw, count, sizeof(*qset_teid));
	if (!qset_teid)
		return -ENOMEM;

	qs_handle = (uint16_t *)ice_calloc(hw, count, sizeof(*qs_handle));
	if (!qs_handle) {
		ice_free(hw, qset_teid);
		return -ENOMEM;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;
	for (i = 0; i < sc->num_available_vsi; i++) {
		if (sc->all_vsi[i] &&
		    ice_get_hw_vsi_num(hw, sc->all_vsi[i]->idx) == vsi_id) {
			vsi = sc->all_vsi[i];
			break;
		}
	}

	if (!vsi) {
		ice_debug(hw, ICE_DBG_RDMA, "RDMA QSet invalid VSI\n");
		ret = -EINVAL;
		goto out;
	}
	if (sc != vsi->sc) {
		ice_debug(hw, ICE_DBG_RDMA, "VSI is tied to unexpected device\n");
		ret = -EXDEV;
		goto out;
	}

	for (i = 0; i < count; i++) {
		struct ice_rdma_qset_params *qset;

		qset = &res->qsets;
		if (qset->vsi_id != peer->pf_vsi_num) {
			ice_debug(hw, ICE_DBG_RDMA, "RDMA QSet invalid VSI requested %d %d\n",
				  qset->vsi_id, peer->pf_vsi_num);
			ret = -EINVAL;
			goto out;
		}
		max_rdmaqs[qset->tc]++;
		qs_handle[i] = qset->qs_handle;
		qset_teid[i] = qset->teid;
	}

	switch(res->res_type) {
	case ICE_RDMA_QSET_ALLOC:
		dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
		ena_tc = ice_dcb_get_tc_map(dcbx_cfg);

		ice_debug(hw, ICE_DBG_RDMA, "%s:%d ena_tc=%x\n", __func__, __LINE__, ena_tc);
		status = ice_cfg_vsi_rdma(hw->port_info, vsi->idx, ena_tc,
					  max_rdmaqs);
		if (status) {
			ice_debug(hw, ICE_DBG_RDMA, "Failed VSI RDMA qset config\n");
			ret = -EINVAL;
			goto out;
		}

		for (i = 0; i < count; i++) {
			struct ice_rdma_qset_params *qset;

			qset = &res->qsets;
			status = ice_ena_vsi_rdma_qset(hw->port_info, vsi->idx,
						       qset->tc, &qs_handle[i], 1,
						       &qset_teid[i]);
			if (status) {
				ice_debug(hw, ICE_DBG_RDMA, "Failed VSI RDMA qset enable\n");
				ret = -EINVAL;
				goto out;
			}
			qset->teid = qset_teid[i];
		}
		break;
	case ICE_RDMA_QSET_FREE:
		status = ice_dis_vsi_rdma_qset(hw->port_info, count, qset_teid, qs_handle);
		if (status)
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	ice_free(hw, qs_handle);
	ice_free(hw, qset_teid);

	return ret;
}

/**
 *  ice_rdma_update_vsi_filter - configure VSI information
 *                               when opening or closing the RDMA driver
 *  @peer: the RDMA peer client structure
 *  @enable: enable or disable the RDMA filter
 */
static int
ice_rdma_update_vsi_filter(struct ice_rdma_peer *peer,
			   bool enable)
{
	struct ice_softc *sc = ice_rdma_peer_to_sc(peer);
	struct ice_vsi *vsi;
	int ret;

	vsi = &sc->pf_vsi;
	if (!vsi)
		return -EINVAL;

	ret = ice_cfg_iwarp_fltr(&sc->hw, vsi->idx, enable);
	if (ret) {
		device_printf(sc->dev, "Failed to %sable iWARP filtering\n",
				enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return ret;
}

/**
 * ice_rdma_request_handler - handle requests incoming from RDMA driver
 * @peer: the RDMA peer client structure
 * @req: structure containing request
 */
static void
ice_rdma_request_handler(struct ice_rdma_peer *peer,
			 struct ice_rdma_request *req)
{
	if (!req || !peer) {
		log(LOG_WARNING, "%s: peer or req are not valid\n", __func__);
		return;
	}

	switch(req->type) {
	case ICE_RDMA_EVENT_RESET:
		break;
	case ICE_RDMA_EVENT_QSET_REGISTER:
		ice_rdma_qset_register_request(peer, &req->res);
		break;
	case ICE_RDMA_EVENT_VSI_FILTER_UPDATE:
		ice_rdma_update_vsi_filter(peer, req->enable_filter);
		break;
	default:
		log(LOG_WARNING, "%s: Event %d not supported\n", __func__, req->type);
		break;
	}
}
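
/*
 * Illustrative sketch (not part of this driver): a client reaches the
 * handler above through the irdma_di_if.m kobject interface. Assuming the
 * generated macro is named IRDMA_DI_REQ_HANDLER() (matching the
 * irdma_di_req_handler entry in ice_rdma_di_methods above), enabling the
 * iWARP filter from the client side might look like:
 *
 *	struct ice_rdma_request req = {
 *		.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE,
 *		.enable_filter = true,
 *	};
 *
 *	IRDMA_DI_REQ_HANDLER(peer, &req);
 */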

/**
 * ice_rdma_cp_qos_info - gather current QoS/DCB settings from the LAN side
 *                        to pass to the RDMA driver
 * @hw: ice hw structure
 * @dcbx_cfg: current DCB settings in ice driver
 * @qos_info: destination of the DCB settings
 */
static void
ice_rdma_cp_qos_info(struct ice_hw *hw, struct ice_dcbx_cfg *dcbx_cfg,
		     struct ice_qos_params *qos_info)
{
	u32 up2tc;
	u8 j;
	u8 num_tc = 0;
	u8 val_tc = 0;  /* TC bitmap used for validation */
	u8 cnt_tc = 0;

	/* setup qos_info fields with defaults */
	qos_info->num_apps = 0;
	qos_info->num_tc = 1;

	for (j = 0; j < ICE_TC_MAX_USER_PRIORITY; j++)
		qos_info->up2tc[j] = 0;

	qos_info->tc_info[0].rel_bw = 100;
	for (j = 1; j < IEEE_8021QAZ_MAX_TCS; j++)
		qos_info->tc_info[j].rel_bw = 0;

	/* gather current values */
	up2tc = rd32(hw, PRTDCB_TUP2TC);
	qos_info->num_apps = dcbx_cfg->numapps;

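	/*
	 * Despite its name, num_tc is first used as a bitmap of the TCs
	 * referenced by the ETS priority table; the loops below then check
	 * that those TCs are contiguous starting at TC 0, and fall back to
	 * advertising a single TC to the RDMA driver otherwise.
	 */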
	for (j = 0; j < ICE_MAX_TRAFFIC_CLASS; j++) {
		num_tc |= BIT(dcbx_cfg->etscfg.prio_table[j]);
	}
	for (j = 0; j < ICE_MAX_TRAFFIC_CLASS; j++) {
		if (num_tc & BIT(j)) {
			cnt_tc++;
			val_tc |= BIT(j);
		} else {
			break;
		}
	}
	qos_info->num_tc = (val_tc == num_tc && num_tc != 0) ? cnt_tc : 1;
	for (j = 0; j < ICE_TC_MAX_USER_PRIORITY; j++)
		qos_info->up2tc[j] = (up2tc >> (j * 3)) & 0x7;

	for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++)
		qos_info->tc_info[j].rel_bw = dcbx_cfg->etscfg.tcbwtable[j];
	for (j = 0; j < qos_info->num_apps; j++) {
		qos_info->apps[j].priority = dcbx_cfg->app[j].priority;
		qos_info->apps[j].prot_id = dcbx_cfg->app[j].prot_id;
		qos_info->apps[j].selector = dcbx_cfg->app[j].selector;
	}

	/* Gather DSCP-to-TC mapping and QoS/PFC mode */
	memcpy(qos_info->dscp_map, dcbx_cfg->dscp_map, sizeof(qos_info->dscp_map));
	qos_info->pfc_mode = dcbx_cfg->pfc_mode;
}

/**
 * ice_rdma_check_version - Check that the provided RDMA version is compatible
 * @info: the RDMA client information structure
 *
 * Verify that the client RDMA driver provided a version that is compatible
 * with the driver interface.
 */
static int
ice_rdma_check_version(struct ice_rdma_info *info)
{
	/* Make sure the MAJOR version matches */
	if (info->major_version != ICE_RDMA_MAJOR_VERSION) {
		log(LOG_WARNING, "%s: the iRDMA driver requested version %d.%d.%d, but this driver only supports major version %d.x.x\n",
		    __func__,
		    info->major_version, info->minor_version, info->patch_version,
		    ICE_RDMA_MAJOR_VERSION);
		return (ENOTSUP);
	}

	/*
	 * Make sure that the MINOR version is compatible.
	 *
	 * This means that the RDMA client driver version MUST not be greater
	 * than the version provided by the driver, as it would indicate that
	 * the RDMA client expects features which are not supported by the
	 * main driver.
	 */
	if (info->minor_version > ICE_RDMA_MINOR_VERSION) {
		log(LOG_WARNING, "%s: the iRDMA driver requested version %d.%d.%d, but this driver only supports up to minor version %d.%d.x\n",
		    __func__,
		    info->major_version, info->minor_version, info->patch_version,
		    ICE_RDMA_MAJOR_VERSION, ICE_RDMA_MINOR_VERSION);
		return (ENOTSUP);
	}

	/*
	 * Make sure that the PATCH version is compatible.
	 *
	 * This means that the RDMA client version MUST not be greater than
	 * the version provided by the driver, as it may indicate that the
	 * RDMA client expects certain backwards compatible bug fixes which
	 * are not implemented by this version of the main driver.
	 */
	if ((info->minor_version == ICE_RDMA_MINOR_VERSION) &&
	    (info->patch_version > ICE_RDMA_PATCH_VERSION)) {
		log(LOG_WARNING, "%s: the iRDMA driver requested version %d.%d.%d, but this driver only supports up to patch version %d.%d.%d\n",
		    __func__,
		    info->major_version, info->minor_version, info->patch_version,
		    ICE_RDMA_MAJOR_VERSION, ICE_RDMA_MINOR_VERSION, ICE_RDMA_PATCH_VERSION);
		return (ENOTSUP);
	}

	/* Make sure that the kobject class is initialized */
	if (info->rdma_class == NULL) {
		log(LOG_WARNING, "%s: the iRDMA driver did not specify a kobject interface\n",
		    __func__);
		return (EINVAL);
	}

	return (0);
}

/**
 * ice_rdma_register - Register an RDMA client driver
 * @info: the RDMA client information structure
 *
 * Called by the RDMA client driver on load. Used to initialize the RDMA
 * client driver interface and enable interop between the ice driver and the
 * RDMA client driver.
 *
 * The RDMA client driver must provide the version number it expects, along
 * with a pointer to a kobject class that extends the irdma_di_if class, and
 * implements the irdma_if class interface.
 */
int
ice_rdma_register(struct ice_rdma_info *info)
{
	struct ice_rdma_entry *entry;
	struct ice_softc *sc;
	int err = 0;

	sx_xlock(&ice_rdma.mtx);

	if (!ice_enable_irdma) {
		log(LOG_INFO, "%s: The iRDMA driver interface has been disabled\n", __func__);
		err = (ECONNREFUSED);
		goto return_unlock;
	}

	if (ice_rdma.registered) {
		log(LOG_WARNING, "%s: iRDMA driver already registered\n", __func__);
		err = (EBUSY);
		goto return_unlock;
	}

	/* Make sure the iRDMA version is compatible */
	err = ice_rdma_check_version(info);
	if (err)
		goto return_unlock;

	log(LOG_INFO, "%s: iRDMA driver registered using version %d.%d.%d\n",
	    __func__, info->major_version, info->minor_version, info->patch_version);

	ice_rdma.peer_class = info->rdma_class;

	/*
	 * Initialize the kobject interface and notify the RDMA client of each
	 * existing PF interface.
	 */
	LIST_FOREACH(entry, &ice_rdma.peers, node) {
		kobj_init((kobj_t)&entry->peer, ice_rdma.peer_class);
		/* Gather DCB/QOS info into peer */
		sc = __containerof(entry, struct ice_softc, rdma_entry);
		memset(&entry->peer.initial_qos_info, 0, sizeof(entry->peer.initial_qos_info));
		ice_rdma_cp_qos_info(&sc->hw, &sc->hw.port_info->qos_cfg.local_dcbx_cfg,
				     &entry->peer.initial_qos_info);

		IRDMA_PROBE(&entry->peer);
		if (entry->initiated)
			IRDMA_OPEN(&entry->peer);
	}
	ice_rdma.registered = true;

return_unlock:
	sx_xunlock(&ice_rdma.mtx);

	return (err);
}

/**
 * ice_rdma_unregister - Unregister an RDMA client driver
 *
 * Called by the RDMA client driver on unload. Used to de-initialize the RDMA
 * client driver interface and shut down communication between the ice driver
 * and the RDMA client driver.
 */
int
ice_rdma_unregister(void)
{
	struct ice_rdma_entry *entry;

	sx_xlock(&ice_rdma.mtx);

	if (!ice_rdma.registered) {
		log(LOG_WARNING, "%s: iRDMA driver was not previously registered\n",
		       __func__);
		sx_xunlock(&ice_rdma.mtx);
		return (ENOENT);
	}

	log(LOG_INFO, "%s: iRDMA driver unregistered\n", __func__);
	ice_rdma.registered = false;
	ice_rdma.peer_class = NULL;

	/*
	 * Release the kobject interface for each of the existing PF
	 * interfaces. Note that we do not notify the client about removing
	 * each PF, as it is assumed that the client will have already cleaned
	 * up any associated resources when it is unregistered.
	 */
	LIST_FOREACH(entry, &ice_rdma.peers, node)
		kobj_delete((kobj_t)&entry->peer, NULL);

	sx_xunlock(&ice_rdma.mtx);

	return (0);
}

/**
 * ice_rdma_init - RDMA driver init routine
 *
 * Called during ice driver module initialization to set up the RDMA client
 * interface mutex and RDMA peer structure list.
 */
void
ice_rdma_init(void)
{
	LIST_INIT(&ice_rdma.peers);
	sx_init_flags(&ice_rdma.mtx, "ice rdma interface", SX_DUPOK);

	ice_rdma.registered = false;
	ice_rdma.peer_class = NULL;
}

/**
 * ice_rdma_exit - RDMA driver exit routine
 *
 * Called during ice driver module exit to destroy the RDMA client interface
 * mutex.
 */
void
ice_rdma_exit(void)
{
	MPASS(LIST_EMPTY(&ice_rdma.peers));
	sx_destroy(&ice_rdma.mtx);
}

/**
 * ice_rdma_pf_attach_locked - Prepare a PF for RDMA connections
 * @sc: the ice driver softc
 *
 * Initialize a peer entry for this PF and add it to the RDMA interface list.
 * Notify the client RDMA driver of a new PF device.
 *
 * @pre must be called while holding the ice_rdma mutex.
 */
static int
ice_rdma_pf_attach_locked(struct ice_softc *sc)
{
	struct ice_rdma_entry *entry;

	/* Do not attach the PF unless RDMA is supported */
	if (!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RDMA))
		return (0);

	entry = &sc->rdma_entry;
	if (entry->attached) {
		device_printf(sc->dev, "iRDMA peer entry already exists\n");
		return (EEXIST);
	}

	entry->attached = true;
	entry->peer.dev = sc->dev;
	entry->peer.ifp = sc->ifp;
	entry->peer.pf_id = sc->hw.pf_id;
	entry->peer.pci_mem = sc->bar0.res;
	entry->peer.pf_vsi_num = ice_get_hw_vsi_num(&sc->hw, sc->pf_vsi.idx);
	if (sc->rdma_imap && sc->rdma_imap[0] != ICE_INVALID_RES_IDX &&
	    sc->irdma_vectors > 0) {
		entry->peer.msix.base = sc->rdma_imap[0];
		entry->peer.msix.count = sc->irdma_vectors;
	}

	/* Gather DCB/QOS info into peer */
	memset(&entry->peer.initial_qos_info, 0, sizeof(entry->peer.initial_qos_info));
	ice_rdma_cp_qos_info(&sc->hw, &sc->hw.port_info->qos_cfg.local_dcbx_cfg,
			     &entry->peer.initial_qos_info);

	/*
	 * If the RDMA client driver has already registered, initialize the
	 * kobject and notify the client of a new PF
	 */
	if (ice_rdma.registered) {
		kobj_init((kobj_t)&entry->peer, ice_rdma.peer_class);
		IRDMA_PROBE(&entry->peer);
	}

	LIST_INSERT_HEAD(&ice_rdma.peers, entry, node);

	ice_set_bit(ICE_FEATURE_RDMA, sc->feat_en);

	return (0);
}

/**
 * ice_rdma_pf_attach - Notify the RDMA client of a new PF
 * @sc: the ice driver softc
 *
 * Called during PF attach to notify the RDMA client of a new PF.
 */
int
ice_rdma_pf_attach(struct ice_softc *sc)
{
	int err;

	sx_xlock(&ice_rdma.mtx);
	err = ice_rdma_pf_attach_locked(sc);
	sx_xunlock(&ice_rdma.mtx);

	return (err);
}

/**
 * ice_rdma_pf_detach_locked - Notify the RDMA client on PF detach
 * @sc: the ice driver softc
 *
 * Notify the RDMA peer client driver of removal of a PF, and release any
 * RDMA-specific resources associated with that PF. Remove the PF from the
 * list of available RDMA entries.
 *
 * @pre must be called while holding the ice_rdma mutex.
 */
static void
ice_rdma_pf_detach_locked(struct ice_softc *sc)
{
	struct ice_rdma_entry *entry;

	/* No need to detach the PF if RDMA is not enabled */
	if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RDMA))
		return;

	entry = &sc->rdma_entry;
	if (!entry->attached) {
		device_printf(sc->dev, "iRDMA peer entry was not attached\n");
		return;
	}

	/*
	 * If the RDMA client driver is registered, notify the client that
	 * a PF has been removed, and release the kobject reference.
	 */
	if (ice_rdma.registered) {
		IRDMA_REMOVE(&entry->peer);
		kobj_delete((kobj_t)&entry->peer, NULL);
	}

	LIST_REMOVE(entry, node);
	entry->attached = false;

	ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_en);
}

/**
 * ice_rdma_pf_detach - Notify the RDMA client of a PF detaching
 * @sc: the ice driver softc
 *
 * Take the ice_rdma mutex and then notify the RDMA client that a PF has been
 * removed.
 */
void
ice_rdma_pf_detach(struct ice_softc *sc)
{
	sx_xlock(&ice_rdma.mtx);
	ice_rdma_pf_detach_locked(sc);
	sx_xunlock(&ice_rdma.mtx);
}

/**
 * ice_rdma_pf_init - Notify the RDMA client that a PF has initialized
 * @sc: the ice driver softc
 *
 * Called by the ice driver when a PF has been initialized. Notifies the RDMA
 * client that a PF is up and ready to operate.
 */
int
ice_rdma_pf_init(struct ice_softc *sc)
{
	struct ice_rdma_peer *peer = &sc->rdma_entry.peer;

	sx_xlock(&ice_rdma.mtx);

	/* Update the MTU */
	peer->mtu = if_getmtu(sc->ifp);
	sc->rdma_entry.initiated = true;

	if (sc->rdma_entry.attached && ice_rdma.registered) {
		sx_xunlock(&ice_rdma.mtx);
		return IRDMA_OPEN(peer);
	}

	sx_xunlock(&ice_rdma.mtx);

	return (0);
}

/**
 * ice_rdma_pf_stop - Notify the RDMA client of a stopped PF device
 * @sc: the ice driver softc
 *
 * Called by the ice driver when a PF is stopped. Notifies the RDMA client
 * driver that the PF has stopped and is not ready to operate.
 */
int
ice_rdma_pf_stop(struct ice_softc *sc)
{
	sx_xlock(&ice_rdma.mtx);

	sc->rdma_entry.initiated = false;
	if (sc->rdma_entry.attached && ice_rdma.registered) {
		sx_xunlock(&ice_rdma.mtx);
		return IRDMA_CLOSE(&sc->rdma_entry.peer);
	}

	sx_xunlock(&ice_rdma.mtx);

	return (0);
}

/**
 * ice_rdma_link_change - Notify RDMA client of a change in link status
 * @sc: the ice driver softc
 * @linkstate: the link status
 * @baudrate: the link rate in bits per second
 *
 * Notify the RDMA client of a link status change, by sending it the new link
 * state and baudrate.
 *
 * The link state is represented the same way as in the ifnet structure. It
 * should be LINK_STATE_UNKNOWN, LINK_STATE_DOWN, or LINK_STATE_UP.
 */
void
ice_rdma_link_change(struct ice_softc *sc, int linkstate, uint64_t baudrate)
{
	struct ice_rdma_peer *peer = &sc->rdma_entry.peer;
	struct ice_rdma_event event;

	memset(&event, 0, sizeof(struct ice_rdma_event));
	event.type = ICE_RDMA_EVENT_LINK_CHANGE;
	event.linkstate = linkstate;
	event.baudrate = baudrate;

	sx_xlock(&ice_rdma.mtx);

	if (sc->rdma_entry.attached && ice_rdma.registered)
		IRDMA_EVENT_HANDLER(peer, &event);

	sx_xunlock(&ice_rdma.mtx);
}
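
/*
 * Illustrative sketch (not part of this driver): the ice link handler would
 * be expected to call this with ifnet-style link state values, for example
 * (IF_Gbps() from net/if_var.h is assumed here purely for illustration):
 *
 *	ice_rdma_link_change(sc, LINK_STATE_UP, IF_Gbps(100));
 *	...
 *	ice_rdma_link_change(sc, LINK_STATE_DOWN, 0);
 */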

/**
 *  ice_rdma_notify_dcb_qos_change - notify RDMA driver to pause traffic
 *  @sc: the ice driver softc
 *
 *  Notify the RDMA driver that QoS/DCB settings are about to change.
 *  Once this function returns, all QPs should be suspended.
 */
void
ice_rdma_notify_dcb_qos_change(struct ice_softc *sc)
{
	struct ice_rdma_peer *peer = &sc->rdma_entry.peer;
	struct ice_rdma_event event;

	memset(&event, 0, sizeof(struct ice_rdma_event));
	event.type = ICE_RDMA_EVENT_TC_CHANGE;
	/* pre-event */
	event.prep = true;

	sx_xlock(&ice_rdma.mtx);
	if (sc->rdma_entry.attached && ice_rdma.registered)
		IRDMA_EVENT_HANDLER(peer, &event);
	sx_xunlock(&ice_rdma.mtx);
}

/**
 *  ice_rdma_dcb_qos_update - pass the changed DCB settings to the RDMA driver
 *  @sc: the ice driver softc
 *  @pi: the port info structure
 *
 *  Pass the changed DCB settings to the RDMA driver. This function should be
 *  called only after ice_rdma_notify_dcb_qos_change has been called and has
 *  returned. After this function returns, all RDMA traffic should be resumed.
 */
void
ice_rdma_dcb_qos_update(struct ice_softc *sc, struct ice_port_info *pi)
{
	struct ice_rdma_peer *peer = &sc->rdma_entry.peer;
	struct ice_rdma_event event;

	memset(&event, 0, sizeof(struct ice_rdma_event));
	event.type = ICE_RDMA_EVENT_TC_CHANGE;
	/* post-event */
	event.prep = false;

	/* gather current configuration */
	ice_rdma_cp_qos_info(&sc->hw, &pi->qos_cfg.local_dcbx_cfg, &event.port_qos);
	sx_xlock(&ice_rdma.mtx);
	if (sc->rdma_entry.attached && ice_rdma.registered)
		IRDMA_EVENT_HANDLER(peer, &event);
	sx_xunlock(&ice_rdma.mtx);
}
869