/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"
#include "ixl_iw.h"
#include "ixl_iw_int.h"

#ifdef	IXL_IW

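/*
 * ixl_iw.c - glue layer between the if_ixl PF driver and an external
 * iwarp (iw_ixl) client driver.  A global list of attached PFs is kept
 * in ixl_iw, protected by ixl_iw.mtx; the client's init/stop callbacks
 * are invoked asynchronously from a dedicated taskqueue rather than
 * from the caller's context.
 */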
#define IXL_IW_VEC_BASE(pf)	((pf)->msix - (pf)->iw_msix)
#define IXL_IW_VEC_COUNT(pf)	((pf)->iw_msix)
#define IXL_IW_VEC_LIMIT(pf)	((pf)->msix)

extern int ixl_enable_iwarp;

static struct ixl_iw_state ixl_iw;
static int ixl_iw_ref_cnt;

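/*
 * Restore the default (empty) interrupt linked list for every MSI-X
 * vector that had been handed to the iwarp driver, leaving the vectors
 * in a clean state after the client is stopped.
 */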
static void
ixl_iw_pf_msix_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;
	int vec;

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
		reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
		wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
	}

	return;
}

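/*
 * Taskqueue handler that reconciles the scheduled and current iwarp
 * states of a PF: it calls the registered client's init callback when
 * iwarp is scheduled on but not yet running, or its stop callback (plus
 * an MSI-X cleanup) when iwarp is scheduled off but still running.
 * The ixl_iw mutex is dropped before the callback is invoked.
 */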
static void
ixl_iw_invoke_op(void *context, int pending)
{
	struct ixl_iw_pf_entry *pf_entry = (struct ixl_iw_pf_entry *)context;
	struct ixl_iw_pf info;
	bool initialize;
	int err;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) &&
	    (pf_entry->state.iw_current == IXL_IW_PF_STATE_OFF))
		initialize = true;
	else if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_OFF) &&
	         (pf_entry->state.iw_current == IXL_IW_PF_STATE_ON))
		initialize = false;
	else {
		/* nothing to be done, so finish here */
		mtx_unlock(&ixl_iw.mtx);
		return;
	}
	info = pf_entry->pf_info;
	mtx_unlock(&ixl_iw.mtx);

	if (initialize) {
		err = ixl_iw.ops->init(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
				"%s: failed to initialize iwarp (err %d)\n",
				__func__, err);
		else
			pf_entry->state.iw_current = IXL_IW_PF_STATE_ON;
	} else {
		err = ixl_iw.ops->stop(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
				"%s: failed to stop iwarp (err %d)\n",
				__func__, err);
		else {
			ixl_iw_pf_msix_reset(pf_entry->pf);
			pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
		}
	}
	return;
}

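/* Tear down global ixl_iw state; called when the last PF detaches. */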
static void
ixl_iw_uninit(void)
{
	INIT_DEBUGOUT("begin");

	mtx_destroy(&ixl_iw.mtx);

	return;
}

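/* Set up global ixl_iw state; called when the first PF attaches. */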
static void
ixl_iw_init(void)
{
	INIT_DEBUGOUT("begin");

	LIST_INIT(&ixl_iw.pfs);
	mtx_init(&ixl_iw.mtx, "ixl_iw_pfs", NULL, MTX_DEF);
	ixl_iw.registered = false;

	return;
}

/******************************************************************************
 * if_ixl internal API
 *****************************************************************************/

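/*
 * Mark a previously attached PF as up and fill in the information
 * (ifp, device, PCI memory, PF id, MTU, MSI-X range, queue-set handles)
 * that the iwarp client needs.  If a client is already registered, its
 * init callback is scheduled via the taskqueue.  Returns ENOENT if the
 * PF was never attached.
 */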
int
ixl_iw_pf_init(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	struct ixl_iw_pf *pf_info;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to initialize a PF that has not been attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	pf_info = &pf_entry->pf_info;

	pf_info->handle	= (void *)pf;

	pf_info->ifp		= pf->vsi.ifp;
	pf_info->dev		= pf->dev;
	pf_info->pci_mem	= pf->pci_mem;
	pf_info->pf_id		= pf->hw.pf_id;
	pf_info->mtu		= pf->vsi.ifp->if_mtu;

	pf_info->iw_msix.count	= IXL_IW_VEC_COUNT(pf);
	pf_info->iw_msix.base	= IXL_IW_VEC_BASE(pf);

	for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++)
		pf_info->qs_handle[i] = le16_to_cpu(pf->vsi.info.qs_handle[0]);

	pf_entry->state.pf = IXL_IW_PF_STATE_ON;
	if (ixl_iw.registered) {
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
		taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

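/*
 * Mark a PF as down and, if iwarp was scheduled on it, schedule the
 * registered client's stop callback via the taskqueue.
 */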
void
ixl_iw_pf_stop(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to stop a PF that has not been attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		goto out;
	}

	pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
	if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
		if (ixl_iw.registered)
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return;
}

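/*
 * Add a PF to the global list of iwarp-capable PFs and initialize its
 * per-PF task.  The first attach also initializes the global ixl_iw
 * state.  Returns EEXIST if the PF is already on the list or ENOMEM if
 * the entry cannot be allocated.
 */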
int
ixl_iw_pf_attach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	if (ixl_iw_ref_cnt == 0)
		ixl_iw_init();

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf) {
			device_printf(pf->dev, "%s: PF already exists\n",
			    __func__);
			err = EEXIST;
			goto out;
		}

	pf_entry = malloc(sizeof(struct ixl_iw_pf_entry),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pf_entry == NULL) {
		device_printf(pf->dev,
		    "%s: failed to allocate memory to attach new PF\n",
		    __func__);
		err = ENOMEM;
		goto out;
	}
	pf_entry->pf = pf;
	pf_entry->state.pf		= IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_scheduled	= IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_current	= IXL_IW_PF_STATE_OFF;

	LIST_INSERT_HEAD(&ixl_iw.pfs, pf_entry, node);
	ixl_iw_ref_cnt++;

	TASK_INIT(&pf_entry->iw_task, 0, ixl_iw_invoke_op, pf_entry);
out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

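/*
 * Remove a PF from the global list.  The PF must already be stopped
 * (EBUSY otherwise); the last detach also tears down the global state.
 */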
int
ixl_iw_pf_detach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to detach a PF that has not been attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	if (pf_entry->state.pf != IXL_IW_PF_STATE_OFF) {
		/* attempt to detach a PF that has not yet been stopped - something is wrong */
		device_printf(pf->dev, "%s: failed - PF is still active\n",
		    __func__);
		err = EBUSY;
		goto out;
	}
	LIST_REMOVE(pf_entry, node);
	free(pf_entry, M_DEVBUF);
	ixl_iw_ref_cnt--;

out:
	mtx_unlock(&ixl_iw.mtx);

	if (ixl_iw_ref_cnt == 0)
		ixl_iw_uninit();

	return (err);
}


/******************************************************************************
 * API exposed to iw_ixl module
 *****************************************************************************/

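/* Reinitialize the PF on behalf of the iwarp client. */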
int
ixl_iw_pf_reset(void *pf_handle)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;

	INIT_DEBUGOUT("begin");

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}

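/*
 * Program the PF interrupt-cause registers according to the MSI-X
 * mapping supplied by the iwarp client: the AEQ cause is bound to
 * msix_info->aeq_vector, each CEQ is linked to the vector listed in
 * msix_info->ceq_vector[], and vectors with no CEQ get an empty linked
 * list.  Returns EINVAL if the AEQ vector lies outside the range
 * reserved for iwarp.
 */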
int
ixl_iw_pf_msix_init(void *pf_handle,
	struct ixl_iw_msix_mapping *msix_info)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;
	int vec, i;

	INIT_DEBUGOUT("begin");

	if ((msix_info->aeq_vector < IXL_IW_VEC_BASE(pf)) ||
	    (msix_info->aeq_vector >= IXL_IW_VEC_LIMIT(pf))) {
		printf("%s: invalid MSIX vector (%i) for AEQ\n",
		    __func__, msix_info->aeq_vector);
		return (EINVAL);
	}
	reg = I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
		(msix_info->aeq_vector << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
		(msix_info->itr_indx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_AEQCTL, reg);

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
		for (i = 0; i < msix_info->ceq_cnt; i++)
			if (msix_info->ceq_vector[i] == vec)
				break;
		if (i == msix_info->ceq_cnt) {
			/* this vector has no CEQ mapped */
			reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
		} else {
			reg = (i & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			    (I40E_QUEUE_TYPE_PE_CEQ <<
			    I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);

			reg = I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
			    (vec << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
			    (msix_info->itr_indx <<
			    I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
			    (IXL_QUEUE_EOL <<
			    I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT);
			wr32(hw, I40E_PFINT_CEQCTL(i), reg);
		}
	}

	return (0);
}

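/*
 * Called by the iwarp client driver (iw_ixl) to register its init/stop
 * callbacks.  Registration creates the "ixl_iw" taskqueue, copies the
 * ops table, and schedules initialization on every PF that is already
 * up.  Fails with EACCES when iwarp is disabled by the tunable, EINVAL
 * for an incomplete ops table, and EBUSY if a client is already
 * registered.
 *
 * A minimal registration sketch (the my_iw_* callbacks are hypothetical
 * client code, shown only for illustration):
 *
 *	static int my_iw_init(struct ixl_iw_pf *pf_info) { return (0); }
 *	static int my_iw_stop(struct ixl_iw_pf *pf_info) { return (0); }
 *
 *	static struct ixl_iw_ops my_ops = {
 *		.init = my_iw_init,
 *		.stop = my_iw_stop,
 *	};
 *
 *	error = ixl_iw_register(&my_ops);
 */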
int
ixl_iw_register(struct ixl_iw_ops *ops)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	if (ixl_enable_iwarp == 0) {
		printf("%s: enable_iwarp is off, registration dropped\n",
		    __func__);
		return (EACCES);
	}

	if ((ops->init == NULL) || (ops->stop == NULL)) {
		printf("%s: invalid iwarp driver ops\n", __func__);
		return (EINVAL);
	}

	mtx_lock(&ixl_iw.mtx);

	if (ixl_iw.registered) {
		printf("%s: iwarp driver already registered\n", __func__);
		err = EBUSY;
		goto out;
	}

	ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
		taskqueue_thread_enqueue, &ixl_iw.tq);
	if (ixl_iw.tq == NULL) {
		printf("%s: failed to create queue\n", __func__);
		err = ENOMEM;
		goto out;
	}
	taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");

	ixl_iw.ops = malloc(sizeof(struct ixl_iw_ops),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ixl_iw.ops == NULL) {
		printf("%s: failed to allocate memory\n", __func__);
		taskqueue_free(ixl_iw.tq);
		err = ENOMEM;
		goto out;
	}

	ixl_iw.ops->init = ops->init;
	ixl_iw.ops->stop = ops->stop;
	ixl_iw.registered = true;

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}

out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

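/*
 * Called by the iwarp client driver on unload: schedule a stop on every
 * PF that still has iwarp scheduled, drain the per-PF tasks, and release
 * the taskqueue and the copied ops table.
 */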
int
ixl_iw_unregister(void)
{
	struct ixl_iw_pf_entry *pf_entry;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	if (!ixl_iw.registered) {
		printf("%s: failed - iwarp driver has not been registered\n",
		    __func__);
		mtx_unlock(&ixl_iw.mtx);
		return (ENOENT);
	}

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}

	ixl_iw.registered = false;

	mtx_unlock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		taskqueue_drain(ixl_iw.tq, &pf_entry->iw_task);
	taskqueue_free(ixl_iw.tq);
	ixl_iw.tq = NULL;
	free(ixl_iw.ops, M_DEVBUF);
	ixl_iw.ops = NULL;

	return (0);
}

#endif /* IXL_IW */