/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/ktr.h>

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <rdma/ib_verbs.h>
#include <linux/idr.h>

#ifdef TCP_OFFLOAD
#include "iw_cxgbe.h"

int spg_creds = 2; /* Default status page size is 2 credits = 128B */

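/*
 * Release the queue IDs cached in a user context back to the rdev.  A
 * qid at the start of a queues-per-page region represents that whole
 * page's worth of qids and is returned to the global qid table; the
 * rest were sub-allocations from such a page and are simply freed.
 */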
void
c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
    struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	/* CQ qids are not returned to the qid table; just free the entries. */
	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

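/* Set up the per-ucontext lists that cache allocated qids. */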
void
c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{

	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

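/*
 * Bring up the RDMA device state on top of the base cxgbe adapter:
 * compute the doorbell shifts/masks from the SGE queues-per-page
 * settings and create the resource tables and the PBL and RQT pools.
 */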
static int
c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	struct adapter *sc = rdev->adap;
	int rc;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/* Save the status page size set by if_cxgbe */
	spg_creds = (t4_read_reg(sc, A_SGE_CONTROL) & F_EGRSTATUSPAGESIZE) ?
	    2 : 1;

	/* XXX: we can probably make this work */
	if (sc->sge.eq_s_qpp > PAGE_SHIFT || sc->sge.iq_s_qpp > PAGE_SHIFT) {
		device_printf(sc->dev,
		    "doorbell density too high (eq %d, iq %d, pg %d).\n",
		    sc->sge.eq_s_qpp, sc->sge.iq_s_qpp, PAGE_SHIFT);
		rc = -EINVAL;
		goto err1;
	}

	rdev->qpshift = PAGE_SHIFT - sc->sge.eq_s_qpp;
	rdev->qpmask = (1 << sc->sge.eq_s_qpp) - 1;
	rdev->cqshift = PAGE_SHIFT - sc->sge.iq_s_qpp;
	rdev->cqmask = (1 << sc->sge.iq_s_qpp) - 1;

	if (c4iw_num_stags(rdev) == 0) {
		rc = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = sc->vres.stag.size;
	rdev->stats.pbl.total = sc->vres.pbl.size;
	rdev->stats.rqt.total = sc->vres.rq.size;
	rdev->stats.qid.total = sc->vres.qp.size;

	rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (rc) {
		device_printf(sc->dev, "error %d initializing resources\n", rc);
		goto err1;
	}
	rc = c4iw_pblpool_create(rdev);
	if (rc) {
		device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
		goto err2;
	}
	rc = c4iw_rqtpool_create(rdev);
	if (rc) {
		device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
		goto err3;
	}

	return (0);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return (rc);
}

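/* Undo c4iw_rdev_open. */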
static void
c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

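/* Tear down a c4iw_dev: close the rdev, release the idr tables, and
 * free the ib device. */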
static void
c4iw_dealloc(struct c4iw_dev *iwsc)
{

	c4iw_rdev_close(&iwsc->rdev);
	idr_destroy(&iwsc->cqidr);
	idr_destroy(&iwsc->qpidr);
	idr_destroy(&iwsc->mmidr);
	ib_dealloc_device(&iwsc->ibdev);
}

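/*
 * Allocate and initialize the iWARP softc for an adapter.  Returns an
 * ERR_PTR on failure, so callers must check with IS_ERR.
 */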
static struct c4iw_dev *
c4iw_alloc(struct adapter *sc)
{
	struct c4iw_dev *iwsc;
	int rc;

	iwsc = (struct c4iw_dev *)ib_alloc_device(sizeof(*iwsc));
	if (iwsc == NULL) {
		device_printf(sc->dev, "Cannot allocate ib device.\n");
		return (ERR_PTR(-ENOMEM));
	}
	iwsc->rdev.adap = sc;

	rc = c4iw_rdev_open(&iwsc->rdev);
	if (rc != 0) {
		device_printf(sc->dev, "Unable to open CXIO rdev (%d)\n", rc);
		ib_dealloc_device(&iwsc->ibdev);
		return (ERR_PTR(rc));
	}

	idr_init(&iwsc->cqidr);
	idr_init(&iwsc->qpidr);
	idr_init(&iwsc->mmidr);
	spin_lock_init(&iwsc->lock);
	mutex_init(&iwsc->rdev.stats.lock);

	return (iwsc);
}

static int c4iw_mod_load(void);
static int c4iw_mod_unload(void);
static int c4iw_activate(struct adapter *);
static int c4iw_deactivate(struct adapter *);

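/* Upper-layer driver hooks through which if_cxgbe drives this module. */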
static struct uld_info c4iw_uld_info = {
	.uld_id = ULD_IWARP,
	.activate = c4iw_activate,
	.deactivate = c4iw_deactivate,
};

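/*
 * ULD activate hook: allocate the iWARP softc for this adapter, set up
 * the CPL handlers, and register the device with ibcore.
 */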
static int
c4iw_activate(struct adapter *sc)
{
	struct c4iw_dev *iwsc;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (uld_active(sc, ULD_IWARP)) {
		KASSERT(0, ("%s: RDMA already enabled on sc %p", __func__, sc));
		return (0);
	}

	if (sc->rdmacaps == 0) {
		device_printf(sc->dev,
		    "RDMA not supported or RDMA cap is not enabled.\n");
		return (ENOSYS);
	}

	iwsc = c4iw_alloc(sc);
	if (IS_ERR(iwsc)) {
		rc = -PTR_ERR(iwsc);
		device_printf(sc->dev, "initialization failed: %d\n", rc);
		return (rc);
	}

	sc->iwarp_softc = iwsc;
	c4iw_cm_init_cpl(sc);

	rc = -c4iw_register_device(iwsc);
	if (rc) {
		device_printf(sc->dev, "RDMA registration failed: %d\n", rc);
		c4iw_dealloc(iwsc);
		sc->iwarp_softc = NULL;
	}

	return (rc);
}

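/* ULD deactivate hook: unregister from ibcore and free the softc. */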
static int
c4iw_deactivate(struct adapter *sc)
{
	struct c4iw_dev *iwsc = sc->iwarp_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	c4iw_unregister_device(iwsc);
	c4iw_dealloc(iwsc);
	sc->iwarp_softc = NULL;

	return (0);
}

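/*
 * t4_iterate() callbacks used at module load/unload to activate or
 * deactivate iWARP on every adapter in the system.
 */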
static void
c4iw_activate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwact") != 0)
		return;

	/* Activate iWARP if any port on this adapter has IFCAP_TOE enabled. */
	if (sc->offload_map && !uld_active(sc, ULD_IWARP))
		(void) t4_activate_uld(sc, ULD_IWARP);

	end_synchronized_op(sc, 0);
}

static void
c4iw_deactivate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwdea") != 0)
		return;

	if (uld_active(sc, ULD_IWARP))
		(void) t4_deactivate_uld(sc, ULD_IWARP);

	end_synchronized_op(sc, 0);
}

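/*
 * Module load: bring up the CM layer, register the ULD with the base
 * driver, then activate iWARP on any adapters that already have
 * offload enabled.
 */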
static int
c4iw_mod_load(void)
{
	int rc;

	rc = -c4iw_cm_init();
	if (rc != 0)
		return (rc);

	rc = t4_register_uld(&c4iw_uld_info);
	if (rc != 0) {
		c4iw_cm_term();
		return (rc);
	}

	t4_iterate(c4iw_activate_all, NULL);

	return (rc);
}

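/*
 * Module unload: deactivate iWARP everywhere, tear down the CM layer,
 * and unregister the ULD.  Returns EBUSY if the ULD is still in use.
 */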
static int
c4iw_mod_unload(void)
{

	t4_iterate(c4iw_deactivate_all, NULL);

	c4iw_cm_term();

	if (t4_unregister_uld(&c4iw_uld_info) == EBUSY)
		return (EBUSY);

	return (0);
}

#endif

/*
 * t4_tom won't load on kernels without TCP_OFFLOAD, and this module's
 * dependency on t4_tom ensures that it won't either.  So we don't directly
 * check for TCP_OFFLOAD here.
 */
static int
c4iw_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = c4iw_mod_load();
		if (rc == 0)
			printf("iw_cxgbe: Chelsio T4/T5 RDMA driver loaded.\n");
		break;

	case MOD_UNLOAD:
		rc = c4iw_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("iw_cxgbe: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t c4iw_mod_data = {
	"iw_cxgbe",
	c4iw_modevent,
	0
};

MODULE_VERSION(iw_cxgbe, 1);
MODULE_DEPEND(iw_cxgbe, t4nex, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, t4_tom, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, ibcore, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, linuxapi, 1, 1, 1);
DECLARE_MODULE(iw_cxgbe, c4iw_mod_data, SI_SUB_EXEC, SI_ORDER_ANY);