/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/ktr.h>

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <rdma/ib_verbs.h>
#include <linux/idr.h>

#ifdef TCP_OFFLOAD
#include "iw_cxgbe.h"

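/*
 * Release the qpids and cqids cached in a device ucontext back to the
 * rdev when the ucontext is destroyed.
 */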
void
c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
    struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
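		/*
		 * qids are allocated in blocks of qpmask + 1; only the
		 * block-base qid (low bits clear) came from the global
		 * qid table, so only it is returned there and the usage
		 * counter is adjusted by the full block size.
		 */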
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void
c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{

	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

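/*
 * Open the rdev: validate the user doorbell/CQ densities and the qid
 * ranges, set up the stag/PBL/RQT/qid resource pools, and allocate the
 * status page and the workqueue used to defer QP cleanup.
 */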
static int
c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	struct adapter *sc = rdev->adap;
	struct sge_params *sp = &sc->params.sge;
	int rc;
	unsigned short ucq_density = 1 << sp->iq_s_qpp; /* # of user CQs/page */
	unsigned short udb_density = 1 << sp->eq_s_qpp; /* # of user DB/page */

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density and that
	 * the cqid and qpid ranges match.  Eventually we may need to support
	 * differing densities, but for now fail the open.
	 */
	if (udb_density != ucq_density) {
		device_printf(sc->dev, "unsupported udb/ucq densities %u/%u\n",
		    udb_density, ucq_density);
		rc = -EINVAL;
		goto err1;
	}
	if (sc->vres.qp.start != sc->vres.cq.start ||
	    sc->vres.qp.size != sc->vres.cq.size) {
		device_printf(sc->dev, "%s: unsupported qp and cq id ranges "
			"qp start %u size %u cq start %u size %u\n", __func__,
			sc->vres.qp.start, sc->vres.qp.size, sc->vres.cq.start,
			sc->vres.cq.size);
		rc = -EINVAL;
		goto err1;
	}

	rdev->qpshift = PAGE_SHIFT - sp->eq_s_qpp;
	rdev->qpmask = udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - sp->iq_s_qpp;
	rdev->cqmask = ucq_density - 1;
	CTR5(KTR_IW_CXGBE, "%s dev %s stag start 0x%0x size 0x%0x num stags %d",
		__func__, device_get_nameunit(sc->dev), sc->vres.stag.start,
		sc->vres.stag.size, c4iw_num_stags(rdev));
	CTR5(KTR_IW_CXGBE, "%s pbl start 0x%0x size 0x%0x"
			" rq start 0x%0x size 0x%0x", __func__,
			sc->vres.pbl.start, sc->vres.pbl.size,
			sc->vres.rq.start, sc->vres.rq.size);
	CTR5(KTR_IW_CXGBE, "%s:qp qid start %u size %u cq qid start %u size %u",
			 __func__, sc->vres.qp.start, sc->vres.qp.size,
			 sc->vres.cq.start, sc->vres.cq.size);
	/*
	 * TODO:
	 * CTR5(KTR_IW_CXGBE, "%s udb %pR db_reg %p gts_reg %p "
	 *     "qpmask 0x%x cqmask 0x%x", __func__,
	 *     db_reg, gts_reg, rdev->qpmask, rdev->cqmask);
	 */

	if (c4iw_num_stags(rdev) == 0) {
		rc = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = sc->vres.stag.size;
	rdev->stats.pbl.total = sc->vres.pbl.size;
	rdev->stats.rqt.total = sc->vres.rq.size;
	rdev->stats.qid.total = sc->vres.qp.size;

	rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (rc) {
		device_printf(sc->dev, "error %d initializing resources\n", rc);
		goto err1;
	}
	rc = c4iw_pblpool_create(rdev);
	if (rc) {
		device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
		goto err2;
	}
	rc = c4iw_rqtpool_create(rdev);
	if (rc) {
		device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
		goto err3;
	}
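	/*
	 * The status page is shared with the user-mode verbs provider and
	 * advertises the qid ranges, doorbell recovery state, and whether
	 * write combining is supported.
	 */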
	rdev->status_page = (struct t4_dev_status_page *)
				__get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		rc = -ENOMEM;
		goto err4;
	}
	rdev->status_page->qp_start = sc->vres.qp.start;
	rdev->status_page->qp_size = sc->vres.qp.size;
	rdev->status_page->cq_start = sc->vres.cq.start;
	rdev->status_page->cq_size = sc->vres.cq.size;

	/*
	 * T5 and above devices don't need Doorbell recovery logic, so
	 * db_off is always set to '0'.
	 */
	rdev->status_page->db_off = 0;

	rdev->status_page->wc_supported = rdev->adap->iwt.wc_en;

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		rc = -ENOMEM;
		goto err5;
	}
	return (0);
err5:
	free_page((unsigned long)rdev->status_page);
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return (rc);
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	/* free_workq is created last in c4iw_rdev_open; tear it down first. */
	destroy_workqueue(rdev->free_workq);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void
c4iw_dealloc(struct c4iw_dev *iwsc)
{

	c4iw_rdev_close(&iwsc->rdev);
	idr_destroy(&iwsc->cqidr);
	idr_destroy(&iwsc->qpidr);
	idr_destroy(&iwsc->mmidr);
	ib_dealloc_device(&iwsc->ibdev);
}

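/*
 * Allocate the iWARP softc for an adapter and initialize it: derive the
 * hardware queue limits from the SGE parameters, map BAR2 for doorbells,
 * and open the rdev.  Returns an ERR_PTR on failure.
 */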
static struct c4iw_dev *
c4iw_alloc(struct adapter *sc)
{
	struct c4iw_dev *iwsc;
	int rc;

	iwsc = (struct c4iw_dev *)ib_alloc_device(sizeof(*iwsc));
	if (iwsc == NULL) {
		device_printf(sc->dev, "Cannot allocate ib device.\n");
		return (ERR_PTR(-ENOMEM));
	}
	iwsc->rdev.adap = sc;

	/* init various hw-queue params based on lld info */
	CTR3(KTR_IW_CXGBE, "%s: Ing. padding boundary is %d, "
			"egrsstatuspagesize = %d", __func__,
			sc->params.sge.pad_boundary,
			sc->params.sge.spg_len);

	iwsc->rdev.hw_queue.t4_eq_status_entries =
		sc->params.sge.spg_len / EQ_ESIZE;
	iwsc->rdev.hw_queue.t4_max_eq_size = 65520;
	iwsc->rdev.hw_queue.t4_max_iq_size = 65520;
	iwsc->rdev.hw_queue.t4_max_rq_size = 8192 -
		iwsc->rdev.hw_queue.t4_eq_status_entries - 1;
	iwsc->rdev.hw_queue.t4_max_sq_size =
		iwsc->rdev.hw_queue.t4_max_eq_size -
		iwsc->rdev.hw_queue.t4_eq_status_entries - 1;
	iwsc->rdev.hw_queue.t4_max_qp_depth =
		iwsc->rdev.hw_queue.t4_max_rq_size;
	iwsc->rdev.hw_queue.t4_max_cq_depth =
		iwsc->rdev.hw_queue.t4_max_iq_size - 2;
	iwsc->rdev.hw_queue.t4_stat_len = iwsc->rdev.adap->params.sge.spg_len;

	/*
	 * As T5 and above devices support BAR2 kernel doorbells & WC, we map
	 * all of BAR2, for both User and Kernel Doorbells-GTS.
	 */
	iwsc->rdev.bar2_kva = (void __iomem *)((u64)iwsc->rdev.adap->udbs_base);
	iwsc->rdev.bar2_pa = vtophys(iwsc->rdev.adap->udbs_base);
	iwsc->rdev.bar2_len = rman_get_size(iwsc->rdev.adap->udbs_res);

	rc = c4iw_rdev_open(&iwsc->rdev);
	if (rc != 0) {
		device_printf(sc->dev, "Unable to open CXIO rdev (%d)\n", rc);
		ib_dealloc_device(&iwsc->ibdev);
		return (ERR_PTR(rc));
	}

	idr_init(&iwsc->cqidr);
	idr_init(&iwsc->qpidr);
	idr_init(&iwsc->mmidr);
	spin_lock_init(&iwsc->lock);
	mutex_init(&iwsc->rdev.stats.lock);
	iwsc->avail_ird = iwsc->rdev.adap->params.max_ird_adapter;

	return (iwsc);
}

static int c4iw_mod_load(void);
static int c4iw_mod_unload(void);
static int c4iw_activate(struct adapter *);
static int c4iw_deactivate(struct adapter *);

static struct uld_info c4iw_uld_info = {
	.uld_id = ULD_IWARP,
	.activate = c4iw_activate,
	.deactivate = c4iw_deactivate,
};

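/*
 * ULD activate callback, invoked from within a synchronized operation.
 * iWARP is available only on T5 and newer adapters with the RDMA
 * capability enabled.
 */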
static int
c4iw_activate(struct adapter *sc)
{
	struct c4iw_dev *iwsc;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (is_t4(sc)) {
		device_printf(sc->dev, "No iWARP support for T4 devices; "
				"a T5 or newer adapter is required.\n");
		return (ENOSYS);
	}

	if (uld_active(sc, ULD_IWARP)) {
		KASSERT(0, ("%s: RDMA already enabled on sc %p", __func__, sc));
		return (0);
	}

	if (sc->rdmacaps == 0) {
		device_printf(sc->dev,
		    "RDMA not supported or RDMA cap is not enabled.\n");
		return (ENOSYS);
	}

	iwsc = c4iw_alloc(sc);
	if (IS_ERR(iwsc)) {
		rc = -PTR_ERR(iwsc);
		device_printf(sc->dev, "initialization failed: %d\n", rc);
		return (rc);
	}

	sc->iwarp_softc = iwsc;

	rc = -c4iw_register_device(iwsc);
	if (rc) {
		device_printf(sc->dev, "RDMA registration failed: %d\n", rc);
		c4iw_dealloc(iwsc);
		sc->iwarp_softc = NULL;
	}

	return (rc);
}

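/*
 * ULD deactivate callback: undo c4iw_activate by unregistering the ib
 * device and freeing the softc.
 */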
static int
c4iw_deactivate(struct adapter *sc)
{
	struct c4iw_dev *iwsc = sc->iwarp_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	c4iw_unregister_device(iwsc);
	c4iw_dealloc(iwsc);
	sc->iwarp_softc = NULL;

	return (0);
}

static void
c4iw_activate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwact") != 0)
		return;

	/* Activate iWARP if any port on this adapter has IFCAP_TOE enabled. */
	if (sc->offload_map && !uld_active(sc, ULD_IWARP))
		(void) t4_activate_uld(sc, ULD_IWARP);

	end_synchronized_op(sc, 0);
}

static void
c4iw_deactivate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwdea") != 0)
		return;

	if (uld_active(sc, ULD_IWARP))
		(void) t4_deactivate_uld(sc, ULD_IWARP);

	end_synchronized_op(sc, 0);
}

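/*
 * Module load: bring up the connection manager, register as an iWARP ULD
 * with the base driver, then activate iWARP on every adapter that already
 * has offload enabled.
 */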
static int
c4iw_mod_load(void)
{
	int rc;

	rc = -c4iw_cm_init();
	if (rc != 0)
		return (rc);

	rc = t4_register_uld(&c4iw_uld_info);
	if (rc != 0) {
		c4iw_cm_term();
		return (rc);
	}

	t4_iterate(c4iw_activate_all, NULL);

	return (rc);
}

static int
c4iw_mod_unload(void)
{

	t4_iterate(c4iw_deactivate_all, NULL);

	c4iw_cm_term();

	if (t4_unregister_uld(&c4iw_uld_info) == EBUSY)
		return (EBUSY);

	return (0);
}

#endif

/*
 * t4_tom won't load on kernels without TCP_OFFLOAD and this module's dependency
 * on t4_tom ensures that it won't either.  So we don't directly check for
 * TCP_OFFLOAD here.
 */
static int
c4iw_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = c4iw_mod_load();
		if (rc == 0)
			printf("iw_cxgbe: Chelsio T5/T6 RDMA driver loaded.\n");
		break;

	case MOD_UNLOAD:
		rc = c4iw_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("iw_cxgbe: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t c4iw_mod_data = {
	"iw_cxgbe",
	c4iw_modevent,
	0
};

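/*
 * iw_cxgbe needs the cxgbe(4) nexus (t4nex), the TOE module (t4_tom),
 * the in-kernel RDMA stack (ibcore), and the LinuxKPI compat layer.
 */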
MODULE_VERSION(iw_cxgbe, 1);
MODULE_DEPEND(iw_cxgbe, t4nex, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, t4_tom, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, ibcore, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, linuxkpi, 1, 1, 1);
DECLARE_MODULE(iw_cxgbe, c4iw_mod_data, SI_SUB_EXEC, SI_ORDER_ANY);