xref: /freebsd/contrib/ofed/libbnxtre/main.c (revision 9207f9d206a4017001f01ca27d3d25a26c268a95)
1 /*
2  * Copyright (c) 2024, Broadcom. All rights reserved.  The term
3  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in
13  *    the documentation and/or other materials provided with the
14  *    distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  */
29 
30 #include <sys/types.h>
31 #include <sys/mman.h>
32 #include <sys/stat.h>
33 
34 #include <errno.h>
35 #include <fcntl.h>
36 #include <pthread.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <unistd.h>
41 
42 #include "abi.h"
43 #include "main.h"
44 #include "verbs.h"
45 
46 #define PCI_VENDOR_ID_BROADCOM		0x14E4
47 
/* PCI ID table of Broadcom RoCE-capable adapters this provider claims.
 * bnxt_re_driver_init() matches the sysfs vendor/device pair against
 * this table before binding to a uverbs device. */
BNXT_RE_DEFINE_CNA_TABLE(cna_table) = {
	CNA(BROADCOM, 0x1605),  /* BCM57454 Stratus NPAR */
	CNA(BROADCOM, 0x1606),	/* BCM57454 Stratus VF */
	CNA(BROADCOM, 0x1614),	/* BCM57454 Stratus */
	CNA(BROADCOM, 0x16C0),	/* BCM57417 NPAR */
	CNA(BROADCOM, 0x16C1),  /* BCM57414 VF */
	CNA(BROADCOM, 0x16CE),	/* BCM57311 */
	CNA(BROADCOM, 0x16CF),	/* BCM57312 */
	CNA(BROADCOM, 0x16D6),	/* BCM57412 */
	CNA(BROADCOM, 0x16D7),	/* BCM57414 */
	CNA(BROADCOM, 0x16D8),	/* BCM57416 Cu */
	CNA(BROADCOM, 0x16D9),	/* BCM57417 Cu */
	CNA(BROADCOM, 0x16DF),	/* BCM57314 */
	CNA(BROADCOM, 0x16E2),	/* BCM57417 */
	CNA(BROADCOM, 0x16E3),	/* BCM57416 */
	CNA(BROADCOM, 0x16E5),	/* BCM57314 VF */
	CNA(BROADCOM, 0x16EB),	/* BCM57412 NPAR */
	CNA(BROADCOM, 0x16ED),	/* BCM57414 NPAR */
	CNA(BROADCOM, 0x16EF),	/* BCM57416 NPAR */
	CNA(BROADCOM, 0x16F0),  /* BCM58730 */
	CNA(BROADCOM, 0x16F1),	/* BCM57452 Stratus Mezz */
	CNA(BROADCOM, 0x1750),	/* Chip num 57500 */
	CNA(BROADCOM, 0x1751),  /* BCM57504 Gen P5 */
	CNA(BROADCOM, 0x1752),  /* BCM57502 Gen P5 */
	CNA(BROADCOM, 0x1760),  /* BCM57608 Thor 2 */
	CNA(BROADCOM, 0xD82E),  /* BCM5760x TH2 VF */
	CNA(BROADCOM, 0x1803),  /* BCM57508 Gen P5 NPAR */
	CNA(BROADCOM, 0x1804),  /* BCM57504 Gen P5 NPAR */
	CNA(BROADCOM, 0x1805),  /* BCM57502 Gen P5 NPAR */
	CNA(BROADCOM, 0x1807),  /* BCM5750x Gen P5 VF */
	CNA(BROADCOM, 0x1809),  /* BCM5750x Gen P5 VF HV */
	CNA(BROADCOM, 0xD800),  /* BCM880xx SR VF */
	CNA(BROADCOM, 0xD802),  /* BCM58802 SR */
	CNA(BROADCOM, 0xD804),  /* BCM58804 SR */
	CNA(BROADCOM, 0xD818)   /* BCM58818 Gen P5 SR2 */
};
84 
/* libibverbs context operations table; installed on the ibv_context in
 * bnxt_re_init_context() once device-specific setup has succeeded.
 * All entry points are implemented in verbs.c (see verbs.h). */
static struct ibv_context_ops bnxt_re_cntx_ops = {
	.query_device  = bnxt_re_query_device,
	.query_port    = bnxt_re_query_port,
	.alloc_pd      = bnxt_re_alloc_pd,
	.dealloc_pd    = bnxt_re_free_pd,
	.reg_mr        = bnxt_re_reg_mr,
	.dereg_mr      = bnxt_re_dereg_mr,
	.create_cq     = bnxt_re_create_cq,
	.poll_cq       = bnxt_re_poll_cq,
	.req_notify_cq = bnxt_re_arm_cq,
	.cq_event      = bnxt_re_cq_event,
	.resize_cq     = bnxt_re_resize_cq,
	.destroy_cq    = bnxt_re_destroy_cq,
	.create_srq    = bnxt_re_create_srq,
	.modify_srq    = bnxt_re_modify_srq,
	.query_srq     = bnxt_re_query_srq,
	.destroy_srq   = bnxt_re_destroy_srq,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.create_qp     = bnxt_re_create_qp,
	.query_qp      = bnxt_re_query_qp,
	.modify_qp     = bnxt_re_modify_qp,
	.destroy_qp    = bnxt_re_destroy_qp,
	.post_send     = bnxt_re_post_send,
	.post_recv     = bnxt_re_post_recv,
	.async_event   = bnxt_re_async_event,
	.create_ah     = bnxt_re_create_ah,
	.destroy_ah    = bnxt_re_destroy_ah
};
113 
114 bool _is_chip_gen_p5(struct bnxt_re_chip_ctx *cctx)
115 {
116 	return (cctx->chip_num == CHIP_NUM_57508 ||
117 		cctx->chip_num == CHIP_NUM_57504 ||
118 		cctx->chip_num == CHIP_NUM_57502);
119 }
120 
121 bool _is_chip_a0(struct bnxt_re_chip_ctx *cctx)
122 {
123 	return !cctx->chip_rev;
124 }
125 
126 bool _is_chip_thor2(struct bnxt_re_chip_ctx *cctx)
127 {
128 	return (cctx->chip_num == CHIP_NUM_58818 ||
129 		cctx->chip_num == CHIP_NUM_57608);
130 }
131 
132 bool _is_chip_gen_p5_thor2(struct bnxt_re_chip_ctx *cctx)
133 {
134 	return(_is_chip_gen_p5(cctx) || _is_chip_thor2(cctx));
135 }
136 
137 bool _is_db_drop_recovery_enable(struct bnxt_re_context *cntx)
138 {
139 	return cntx->comp_mask & BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED;
140 }
141 
142 /* Determine the env variable */
/* Returns 1 iff the environment variable BNXT_SINGLE_THREADED is set
 * to exactly "1"; 0 otherwise (including when it is unset). */
static int single_threaded_app(void)
{
	const char *val = getenv("BNXT_SINGLE_THREADED");

	return (val && strcmp(val, "1") == 0) ? 1 : 0;
}
153 
/* Returns 1 iff the environment variable BNXT_DYN_DBG is set to
 * exactly "1"; 0 otherwise (including when it is unset). */
static int enable_dynamic_debug(void)
{
	const char *val = getenv("BNXT_DYN_DBG");

	return (val && strcmp(val, "1") == 0) ? 1 : 0;
}
164 
165 /* Static Context Init functions */
/* Static Context Init functions */
/*
 * Device-specific part of context initialization: decode the chip
 * identity from the kernel's get-context response, cache device limits
 * on @dev, and mmap the pages shared with the kernel driver.
 *
 * Returns 0 on success; on failure returns errno after printing a
 * diagnostic.  NOTE(review): if the failing call left errno at 0 this
 * returns 0 (success) despite the failure — verify callers tolerate
 * this, or consider returning a fixed non-zero code.
 */
static int _bnxt_re_init_context(struct bnxt_re_dev *dev,
				 struct bnxt_re_context *cntx,
				 struct bnxt_re_cntx_resp *resp, int cmd_fd)
{
	bnxt_single_threaded = 0;
	cntx->cctx = malloc(sizeof(struct bnxt_re_chip_ctx));
	if (!cntx->cctx)
		goto failed;

	/* chip_id0 packs chip number (low 16 bits), revision and metal
	 * stepping at the shifts defined in abi.h. */
	if (BNXT_RE_ABI_VERSION >= 4) {
		cntx->cctx->chip_num = resp->chip_id0 & 0xFFFF;
		cntx->cctx->chip_rev = (resp->chip_id0 >>
					BNXT_RE_CHIP_ID0_CHIP_REV_SFT) & 0xFF;
		cntx->cctx->chip_metal = (resp->chip_id0 >>
					  BNXT_RE_CHIP_ID0_CHIP_MET_SFT) &
					  0xFF;
		cntx->cctx->chip_is_gen_p5_thor2 = _is_chip_gen_p5_thor2(cntx->cctx);
	}
	/* ABI v4 did not carry dev_id/max_qp in the response. */
	if (BNXT_RE_ABI_VERSION != 4) {
		cntx->dev_id = resp->dev_id;
		cntx->max_qp = resp->max_qp;
	}

	if (BNXT_RE_ABI_VERSION > 5)
		cntx->modes = resp->modes;
	cntx->comp_mask = resp->comp_mask;
	dev->pg_size = resp->pg_size;
	dev->cqe_size = resp->cqe_size;
	dev->max_cq_depth = resp->max_cqd;

	/* mmap shared page. */
	cntx->shpg = mmap(NULL, dev->pg_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, cmd_fd, 0);
	if (cntx->shpg == MAP_FAILED) {
		cntx->shpg = NULL;
		goto free;
	}

	/* Doorbell pacing: map the (read-only) DBR page at the offset
	 * the kernel advertises via BNXT_RE_DBR_PAGE.  On failure the
	 * shared page mapped above is torn down as well. */
	if (cntx->comp_mask & BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED) {
		cntx->dbr_page = mmap(NULL, dev->pg_size, PROT_READ,
				      MAP_SHARED, cmd_fd, BNXT_RE_DBR_PAGE);
		if (cntx->dbr_page == MAP_FAILED) {
			munmap(cntx->shpg, dev->pg_size);
			cntx->shpg = NULL;
			cntx->dbr_page = NULL;
			goto free;
		}
	}

	/* check for ENV for single thread */
	bnxt_single_threaded = single_threaded_app();
	if (bnxt_single_threaded)
		fprintf(stderr, DEV " Running in Single threaded mode\n");
	bnxt_dyn_debug = enable_dynamic_debug();
	pthread_mutex_init(&cntx->shlock, NULL);

	return 0;

free:
	/* NOTE(review): cntx->cctx is freed but left dangling here;
	 * confirm no caller touches it after a failed init. */
	free(cntx->cctx);
failed:
	fprintf(stderr, DEV "Failed to initialize context for device\n");
	return errno;
}
230 
/*
 * Tear down everything _bnxt_re_init_context() (and first-PD setup)
 * created: shared/DBR page mappings, the per-context DPI doorbell
 * mappings, and — when doorbell-drop recovery was enabled — the
 * recovery thread, CQ, completion channel and resource-list locks.
 * Each unmap is guarded so a partially initialized context is safe
 * to pass in.
 */
static void _bnxt_re_uninit_context(struct bnxt_re_dev *dev,
				    struct bnxt_re_context *cntx)
{
	int ret;

	if (cntx->comp_mask & BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED)
		munmap(cntx->dbr_page, dev->pg_size);
	/* Unmap if anything device specific was
	 * mapped in init_context.
	 */
	pthread_mutex_destroy(&cntx->shlock);
	if (cntx->shpg)
		munmap(cntx->shpg, dev->pg_size);

	/* Un-map DPI only for the first PD that was
	 * allocated in this context.
	 */
	if (cntx->udpi.wcdbpg && cntx->udpi.wcdbpg != MAP_FAILED) {
		munmap(cntx->udpi.wcdbpg, dev->pg_size);
		cntx->udpi.wcdbpg = NULL;
		bnxt_re_destroy_pbuf_list(cntx);
	}

	if (cntx->udpi.dbpage && cntx->udpi.dbpage != MAP_FAILED) {
		munmap(cntx->udpi.dbpage, dev->pg_size);
		cntx->udpi.dbpage = NULL;
	}
	if (_is_db_drop_recovery_enable(cntx)) {
		if (cntx->dbr_cq) {
			/* Stop the recovery thread before destroying the
			 * CQ it polls. */
			ret = pthread_cancel(cntx->dbr_thread);
			if (ret)
				fprintf(stderr, DEV "pthread_cancel error %d\n", ret);

			if (cntx->db_recovery_page)
				munmap(cntx->db_recovery_page, dev->pg_size);
			ret = ibv_destroy_cq(cntx->dbr_cq);
			if (ret)
				fprintf(stderr, DEV "ibv_destroy_cq error %d\n", ret);
		}

		if (cntx->dbr_ev_chan) {
			ret = ibv_destroy_comp_channel(cntx->dbr_ev_chan);
			if (ret)
				fprintf(stderr,
					DEV "ibv_destroy_comp_channel error\n");
		}
		pthread_spin_destroy(&cntx->qp_dbr_res.lock);
		pthread_spin_destroy(&cntx->cq_dbr_res.lock);
		pthread_spin_destroy(&cntx->srq_dbr_res.lock);
	}
	free(cntx->cctx);
}
283 
284 /* Context Init functions */
285 int bnxt_re_init_context(struct verbs_device *vdev, struct ibv_context *ibvctx,
286 			 int cmd_fd)
287 {
288 	struct bnxt_re_cntx_resp resp = {};
289 	struct bnxt_re_cntx_req req = {};
290 	struct bnxt_re_context *cntx;
291 	struct bnxt_re_dev *rdev;
292 	int ret = 0;
293 
294 	rdev = to_bnxt_re_dev(&vdev->device);
295 	cntx = to_bnxt_re_context(ibvctx);
296 	ibvctx->cmd_fd = cmd_fd;
297 
298 	req.comp_mask |= BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT;
299 	req.comp_mask |= BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE;
300 	ret = ibv_cmd_get_context(ibvctx, &req.cmd, sizeof(req),
301 				  &resp.resp, sizeof(resp));
302 
303 	if (ret) {
304 		fprintf(stderr, DEV "Failed to get context for device, ret = 0x%x, errno %d\n", ret, errno);
305 		return errno;
306 	}
307 
308 	ret = _bnxt_re_init_context(rdev, cntx, &resp, cmd_fd);
309 	if (!ret)
310 		ibvctx->ops = bnxt_re_cntx_ops;
311 
312 	cntx->rdev = rdev;
313 	ret = bnxt_re_query_device_compat(&cntx->ibvctx, &rdev->devattr);
314 
315 	return ret;
316 }
317 
318 void bnxt_re_uninit_context(struct verbs_device *vdev,
319 			    struct ibv_context *ibvctx)
320 {
321 	struct bnxt_re_context *cntx;
322 	struct bnxt_re_dev *rdev;
323 
324 	cntx = to_bnxt_re_context(ibvctx);
325 	rdev = cntx->rdev;
326 	_bnxt_re_uninit_context(rdev, cntx);
327 }
328 
/* Per-device hooks handed to libibverbs via dev->vdev.ops. */
static struct verbs_device_ops bnxt_re_dev_ops = {
	.init_context = bnxt_re_init_context,
	.uninit_context = bnxt_re_uninit_context,
};
333 
334 static struct verbs_device *bnxt_re_driver_init(const char *uverbs_sys_path,
335 						int abi_version)
336 {
337 	char value[10];
338 	struct bnxt_re_dev *dev;
339 	unsigned vendor, device;
340 	int i;
341 
342 	if (ibv_read_sysfs_file(uverbs_sys_path, "device/vendor",
343 				value, sizeof(value)) < 0)
344 		return NULL;
345 	vendor = strtol(value, NULL, 16);
346 
347 	if (ibv_read_sysfs_file(uverbs_sys_path, "device/device",
348 				value, sizeof(value)) < 0)
349 		return NULL;
350 	device = strtol(value, NULL, 16);
351 
352 	for (i = 0; i < sizeof(cna_table) / sizeof(cna_table[0]); ++i)
353 		if (vendor == cna_table[i].vendor &&
354 		    device == cna_table[i].device)
355 			goto found;
356 	return NULL;
357 found:
358 	if (abi_version != BNXT_RE_ABI_VERSION) {
359 		fprintf(stderr, DEV "FATAL: Max supported ABI of %s is %d "
360 			"check for the latest version of kernel driver and"
361 			"user library\n", uverbs_sys_path, abi_version);
362 		return NULL;
363 	}
364 
365 	dev = calloc(1, sizeof(*dev));
366 	if (!dev) {
367 		fprintf(stderr, DEV "Failed to allocate device for %s\n",
368 			uverbs_sys_path);
369 		return NULL;
370 	}
371 
372 	dev->vdev.sz = sizeof(*dev);
373 	dev->vdev.size_of_context =
374 		sizeof(struct bnxt_re_context) - sizeof(struct ibv_context);
375 	dev->vdev.ops = &bnxt_re_dev_ops;
376 
377 	return &dev->vdev;
378 }
379 
/* Library constructor: registers this provider ("bnxtre") with
 * libibverbs at load time so bnxt_re_driver_init() is called for each
 * uverbs device during device discovery. */
static __attribute__((constructor)) void bnxt_re_register_driver(void)
{
	verbs_register_driver("bnxtre", bnxt_re_driver_init);
}
384