/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixl.h"
#include "ixl_pf.h"
#include "ixl_iw.h"
#include "ixl_iw_int.h"

#ifdef	IXL_IW

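/*
 * iWARP MSI-X vectors occupy the tail of the PF's vector range:
 * vectors [msix - iw_msix, msix) are reserved for the iWARP client;
 * the remaining (lower) vectors belong to the LAN side.
 */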
#define IXL_IW_VEC_BASE(pf)	((pf)->msix - (pf)->iw_msix)
#define IXL_IW_VEC_COUNT(pf)	((pf)->iw_msix)
#define IXL_IW_VEC_LIMIT(pf)	((pf)->msix)

extern int ixl_enable_iwarp;

static struct ixl_iw_state ixl_iw;
static int ixl_iw_ref_cnt;

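/*
 * Clear the CEQ interrupt linked list for every iWARP vector by writing
 * the FIRSTQ_INDX mask (the "list empty" sentinel) to I40E_PFINT_LNKLSTN.
 */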
static void
ixl_iw_pf_msix_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;
	int vec;

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
		reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
		wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
	}

	return;
}

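/*
 * Deferred task: reconcile the scheduled iWARP state with the current one
 * by invoking the registered client's init or stop callback.  On a
 * successful stop, the iWARP MSI-X configuration is also reset.
 */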
static void
ixl_iw_invoke_op(void *context, int pending)
{
	struct ixl_iw_pf_entry *pf_entry = (struct ixl_iw_pf_entry *)context;
	struct ixl_iw_pf info;
	bool initialize;
	int err;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) &&
	    (pf_entry->state.iw_current == IXL_IW_PF_STATE_OFF))
		initialize = true;
	else if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_OFF) &&
	         (pf_entry->state.iw_current == IXL_IW_PF_STATE_ON))
		initialize = false;
	else {
		/* nothing to be done, so finish here */
		mtx_unlock(&ixl_iw.mtx);
		return;
	}
	info = pf_entry->pf_info;
	mtx_unlock(&ixl_iw.mtx);

	if (initialize) {
		err = ixl_iw.ops->init(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
				"%s: failed to initialize iwarp (err %d)\n",
				__func__, err);
		else
			pf_entry->state.iw_current = IXL_IW_PF_STATE_ON;
	} else {
		err = ixl_iw.ops->stop(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
				"%s: failed to stop iwarp (err %d)\n",
				__func__, err);
		else {
			ixl_iw_pf_msix_reset(pf_entry->pf);
			pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
		}
	}
	return;
}

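/* Tear down the module-global state once the last PF has detached. */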
static void
ixl_iw_uninit(void)
{
	INIT_DEBUGOUT("begin");

	mtx_destroy(&ixl_iw.mtx);

	return;
}

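/* Set up the module-global state when the first PF attaches. */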
static void
ixl_iw_init(void)
{
	INIT_DEBUGOUT("begin");

	LIST_INIT(&ixl_iw.pfs);
	mtx_init(&ixl_iw.mtx, "ixl_iw_pfs", NULL, MTX_DEF);
	ixl_iw.registered = false;

	return;
}

/******************************************************************************
 * if_ixl internal API
 *****************************************************************************/

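/*
 * Publish this PF to the iWARP machinery: snapshot everything the client
 * needs (ifnet, device, PCI resources, MSI-X range, QS handles) into
 * pf_info and, if a client is registered, schedule its initialization.
 */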
int
ixl_iw_pf_init(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	struct ixl_iw_pf *pf_info;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to initialize a PF not yet attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	pf_info = &pf_entry->pf_info;

	pf_info->handle	= (void *)pf;

	pf_info->ifp		= pf->vsi.ifp;
	pf_info->dev		= pf->dev;
	pf_info->pci_mem	= pf->pci_mem;
	pf_info->pf_id		= pf->hw.pf_id;
	pf_info->mtu		= if_getmtu(pf->vsi.ifp);

	pf_info->iw_msix.count	= IXL_IW_VEC_COUNT(pf);
	pf_info->iw_msix.base	= IXL_IW_VEC_BASE(pf);

	for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++)
		pf_info->qs_handle[i] = le16_to_cpu(pf->vsi.info.qs_handle[0]);

	pf_entry->state.pf = IXL_IW_PF_STATE_ON;
	if (ixl_iw.registered) {
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
		taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

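/*
 * Mark the PF as stopped and, if the client was scheduled to run on it,
 * schedule the client's stop callback.
 */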
void
ixl_iw_pf_stop(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to stop a PF not yet attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		goto out;
	}

	pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
	if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
		if (ixl_iw.registered)
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return;
}

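/*
 * Allocate and link a tracking entry for a newly attached PF and set up
 * its deferred-work task.  Module-global state is initialized on the
 * first attach.
 */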
int
ixl_iw_pf_attach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	if (ixl_iw_ref_cnt == 0)
		ixl_iw_init();

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf) {
			device_printf(pf->dev, "%s: PF already exists\n",
			    __func__);
			err = EEXIST;
			goto out;
		}

	pf_entry = malloc(sizeof(struct ixl_iw_pf_entry),
			M_IXL, M_NOWAIT | M_ZERO);
	if (pf_entry == NULL) {
		device_printf(pf->dev,
		    "%s: failed to allocate memory to attach new PF\n",
		    __func__);
		err = ENOMEM;
		goto out;
	}
	pf_entry->pf = pf;
	pf_entry->state.pf		= IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_scheduled	= IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_current	= IXL_IW_PF_STATE_OFF;

	LIST_INSERT_HEAD(&ixl_iw.pfs, pf_entry, node);
	ixl_iw_ref_cnt++;

	TASK_INIT(&pf_entry->iw_task, 0, ixl_iw_invoke_op, pf_entry);
out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

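/*
 * Unlink and free the tracking entry of a detaching PF; the PF must be
 * stopped first.  Module-global state is torn down on the last detach.
 */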
int
ixl_iw_pf_detach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to detach a PF not yet attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	if (pf_entry->state.pf != IXL_IW_PF_STATE_OFF) {
		/* attempt to detach a PF not yet stopped - something is wrong */
		device_printf(pf->dev, "%s: failed - PF is still active\n",
		    __func__);
		err = EBUSY;
		goto out;
	}
	LIST_REMOVE(pf_entry, node);
	free(pf_entry, M_IXL);
	ixl_iw_ref_cnt--;

out:
	mtx_unlock(&ixl_iw.mtx);

	if (ixl_iw_ref_cnt == 0)
		ixl_iw_uninit();

	return (err);
}


/******************************************************************************
 * API exposed to iw_ixl module
 *****************************************************************************/

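/* Reinitialize the PF on behalf of the iWARP client driver. */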
int
ixl_iw_pf_reset(void *pf_handle)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;

	INIT_DEBUGOUT("begin");

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}

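/*
 * Program the AEQ interrupt cause and the per-vector CEQ linked lists
 * from the client-supplied MSI-X mapping; vectors with no CEQ mapped get
 * their linked list cleared.
 */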
int
ixl_iw_pf_msix_init(void *pf_handle,
	struct ixl_iw_msix_mapping *msix_info)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;
	int vec, i;

	INIT_DEBUGOUT("begin");

	if ((msix_info->aeq_vector < IXL_IW_VEC_BASE(pf)) ||
	    (msix_info->aeq_vector >= IXL_IW_VEC_LIMIT(pf))) {
		printf("%s: invalid MSI-X vector (%i) for AEQ\n",
		    __func__, msix_info->aeq_vector);
		return (EINVAL);
	}
	reg = I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
		(msix_info->aeq_vector << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
		(msix_info->itr_indx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_AEQCTL, reg);

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
		for (i = 0; i < msix_info->ceq_cnt; i++)
			if (msix_info->ceq_vector[i] == vec)
				break;
		if (i == msix_info->ceq_cnt) {
			/* this vector has no CEQ mapped */
			reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
		} else {
			reg = (i & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			    (I40E_QUEUE_TYPE_PE_CEQ <<
			    I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);

			reg = I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
			    (vec << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
			    (msix_info->itr_indx <<
			    I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
			    (IXL_QUEUE_EOL <<
			    I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT);
			wr32(hw, I40E_PFINT_CEQCTL(i), reg);
		}
	}

	return (0);
}

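/*
 * Register the iWARP client driver's callbacks and schedule client
 * initialization on every PF that is already up.  A minimal sketch of a
 * caller, with hypothetical callback names for illustration:
 *
 *	static struct ixl_iw_ops iw_ops = {
 *		.init = my_iw_init,	(assumed client implementations)
 *		.stop = my_iw_stop,
 *	};
 *	error = ixl_iw_register(&iw_ops);
 */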
int
ixl_iw_register(struct ixl_iw_ops *ops)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;
	int iwarp_cap_on_pfs = 0;

	INIT_DEBUGOUT("begin");
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
	if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
		printf("%s: device is not iwarp-capable, dropping registration\n",
		    __func__);
		return (ENODEV);
	}
	if (ixl_enable_iwarp == 0) {
		printf("%s: enable_iwarp is off, dropping registration\n",
		    __func__);
		return (EACCES);
	}

	if ((ops->init == NULL) || (ops->stop == NULL)) {
		printf("%s: invalid iwarp driver ops\n", __func__);
		return (EINVAL);
	}

	mtx_lock(&ixl_iw.mtx);
	if (ixl_iw.registered) {
		printf("%s: iwarp driver already registered\n", __func__);
		err = EBUSY;
		goto out;
	}
	ixl_iw.registered = true;
	mtx_unlock(&ixl_iw.mtx);

	ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
		taskqueue_thread_enqueue, &ixl_iw.tq);
	if (ixl_iw.tq == NULL) {
		printf("%s: failed to create queue\n", __func__);
		ixl_iw.registered = false;
		return (ENOMEM);
	}
	taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");

	ixl_iw.ops = malloc(sizeof(struct ixl_iw_ops),
			M_IXL, M_NOWAIT | M_ZERO);
	if (ixl_iw.ops == NULL) {
		printf("%s: failed to allocate memory\n", __func__);
		taskqueue_free(ixl_iw.tq);
		ixl_iw.registered = false;
		return (ENOMEM);
	}

	ixl_iw.ops->init = ops->init;
	ixl_iw.ops->stop = ops->stop;

	mtx_lock(&ixl_iw.mtx);
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}
out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

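/*
 * Undo ixl_iw_register(): schedule a stop on every PF where the client
 * was started, drain the taskqueue, and release the callback table.
 */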
int
ixl_iw_unregister(void)
{
	struct ixl_iw_pf_entry *pf_entry;
	int iwarp_cap_on_pfs = 0;

	INIT_DEBUGOUT("begin");

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
	if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
		printf("%s: attempt to unregister driver when no iwarp-capable device present\n",
		    __func__);
		return (ENODEV);
	}

	if (ixl_enable_iwarp == 0) {
		printf("%s: attempt to unregister driver when enable_iwarp is off\n",
		    __func__);
		return (ENODEV);
	}
	mtx_lock(&ixl_iw.mtx);

	if (!ixl_iw.registered) {
		printf("%s: failed - iwarp driver has not been registered\n",
		    __func__);
		mtx_unlock(&ixl_iw.mtx);
		return (ENOENT);
	}

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}

	ixl_iw.registered = false;

	mtx_unlock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		taskqueue_drain(ixl_iw.tq, &pf_entry->iw_task);
	taskqueue_free(ixl_iw.tq);
	ixl_iw.tq = NULL;
	free(ixl_iw.ops, M_IXL);
	ixl_iw.ops = NULL;

	return (0);
}

#endif /* IXL_IW */