xref: /freebsd/sys/compat/linuxkpi/common/src/linux_netdev.c (revision 105331f658e2f14fb4daa64dcd076c7369505295)
/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 * Copyright (c) 2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/netdevice.h>

MALLOC_DEFINE(M_NETDEV, "lkpindev", "Linux KPI netdevice compat");

#define	NAPI_LOCK_INIT(_ndev)		\
    mtx_init(&(_ndev)->napi_mtx, "napi_mtx", NULL, MTX_DEF)
#define	NAPI_LOCK_DESTROY(_ndev)	mtx_destroy(&(_ndev)->napi_mtx)
#define	NAPI_LOCK_ASSERT(_ndev)		mtx_assert(&(_ndev)->napi_mtx, MA_OWNED)
#define	NAPI_LOCK(_ndev)		mtx_lock(&(_ndev)->napi_mtx)
#define	NAPI_UNLOCK(_ndev)		mtx_unlock(&(_ndev)->napi_mtx)

/* -------------------------------------------------------------------------- */

#define	LKPI_NAPI_FLAGS \
    "\20\1DISABLE_PENDING\2IS_SCHEDULED\3LOST_RACE_TRY_AGAIN"

/* #define	NAPI_DEBUG */
#ifdef NAPI_DEBUG
static int debug_napi;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug_napi, CTLFLAG_RWTUN,
    &debug_napi, 0, "NAPI debug level");

#define	DNAPI_TODO		0x01
#define	DNAPI_IMPROVE		0x02
#define	DNAPI_TRACE		0x10
#define	DNAPI_TRACE_TASK	0x20
#define	DNAPI_DIRECT_DISPATCH	0x1000

#define	NAPI_TRACE(_n)		if (debug_napi & DNAPI_TRACE)		\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b)\n", __func__, __LINE__,	\
	(unsigned int)ticks, _n, (uintmax_t)(_n)->state,		\
	(int)(_n)->state, LKPI_NAPI_FLAGS)
#define	NAPI_TRACE2D(_n, _d)	if (debug_napi & DNAPI_TRACE)		\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b) %d\n", __func__, __LINE__, \
	(unsigned int)ticks, _n, (uintmax_t)(_n)->state,		\
	(int)(_n)->state, LKPI_NAPI_FLAGS, _d)
#define	NAPI_TRACE_TASK(_n, _p, _c) if (debug_napi & DNAPI_TRACE_TASK)	\
    printf("NAPI_TRACE %s:%d %u %p (%#jx %b) pending %d count %d "	\
	"rx_count %d\n", __func__, __LINE__,				\
	(unsigned int)ticks, _n, (uintmax_t)(_n)->state,		\
	(int)(_n)->state, LKPI_NAPI_FLAGS, _p, _c, (_n)->rx_count)
#define	NAPI_TODO()		if (debug_napi & DNAPI_TODO)		\
    printf("NAPI_TODO %s:%d %d\n", __func__, __LINE__, ticks)
#define	NAPI_IMPROVE()		if (debug_napi & DNAPI_IMPROVE)		\
    printf("NAPI_IMPROVE %s:%d %d\n", __func__, __LINE__, ticks)

#define	NAPI_DIRECT_DISPATCH()	((debug_napi & DNAPI_DIRECT_DISPATCH) != 0)
#else
#define	NAPI_TRACE(_n)			do { } while(0)
#define	NAPI_TRACE2D(_n, _d)		do { } while(0)
#define	NAPI_TRACE_TASK(_n, _p, _c)	do { } while(0)
#define	NAPI_TODO()			do { } while(0)
#define	NAPI_IMPROVE()			do { } while(0)

#define	NAPI_DIRECT_DISPATCH()		(0)
#endif
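
/*
 * With NAPI_DEBUG compiled in, the tracing above can be enabled at
 * runtime or as a boot-time tunable (CTLFLAG_RWTUN), e.g.:
 *
 *	sysctl compat.linuxkpi.debug_napi=0x10		# DNAPI_TRACE
 *	sysctl compat.linuxkpi.debug_napi=0x1000	# direct dispatch
 */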

/* -------------------------------------------------------------------------- */

/*
 * Check if a poll is running or can run, and if the latter, mark us
 * as running.  That way we ensure that only one poll can ever run at
 * the same time.  Returns true if no poll was scheduled yet.
 */
bool
linuxkpi_napi_schedule_prep(struct napi_struct *napi)
{
	unsigned long old, new;

	NAPI_TRACE(napi);

	/* We can only update/return if all flags agree. */
	do {
		old = READ_ONCE(napi->state);

		/* If we are stopping, cannot run again. */
		if ((old & BIT(LKPI_NAPI_FLAG_DISABLE_PENDING)) != 0) {
			NAPI_TRACE(napi);
			return (false);
		}

		new = old;
		/* We were already scheduled. Need to try again? */
		if ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) != 0)
			new |= BIT(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN);
		new |= BIT(LKPI_NAPI_FLAG_IS_SCHEDULED);

	} while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);

	NAPI_TRACE(napi);
	return ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) == 0);
}
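
/*
 * Minimal usage sketch (sc and foo_disable_intr() are hypothetical):
 * an interrupt handler that wants to act on the prep result, as
 * iwlwifi does, would do roughly:
 *
 *	if (napi_schedule_prep(&sc->napi)) {
 *		foo_disable_intr(sc);
 *		__napi_schedule(&sc->napi);
 *	}
 *
 * A false return means a poll is already scheduled (or a disable is
 * pending) and the handler must not queue another one.
 */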

static void
lkpi___napi_schedule_dd(struct napi_struct *napi)
{
	unsigned long old, new;
	int rc;

	rc = 0;
again:
	NAPI_TRACE2D(napi, rc);
	if (napi->poll != NULL)
		rc = napi->poll(napi, napi->budget);
	napi->rx_count += rc;

	/* If interrupts are still disabled, there is more work to do. */
	/* Bandaid for now. */
	if (rc >= napi->budget)
		goto again;

	/* Bandaid for now. */
	if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state))
		goto again;

	do {
		new = old = READ_ONCE(napi->state);
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
		clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
	} while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);

	NAPI_TRACE2D(napi, rc);
}

void
linuxkpi___napi_schedule(struct napi_struct *napi)
{
	int rc;

	NAPI_TRACE(napi);
	if (test_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state)) {
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state);
		clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
		NAPI_TRACE(napi);
		return;
	}

	if (NAPI_DIRECT_DISPATCH()) {
		lkpi___napi_schedule_dd(napi);
	} else {
		rc = taskqueue_enqueue(napi->dev->napi_tq, &napi->napi_task);
		NAPI_TRACE2D(napi, rc);
		if (rc != 0) {
			/* Should we assert EPIPE? */
			return;
		}
	}
}

bool
linuxkpi_napi_schedule(struct napi_struct *napi)
{

	NAPI_TRACE(napi);

	/*
	 * iwlwifi calls this sequence instead of napi_schedule()
	 * to be able to test the prep result.
	 */
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return (true);
	}

	return (false);
}

void
linuxkpi_napi_reschedule(struct napi_struct *napi)
{

	NAPI_TRACE(napi);

	/* Not sure yet how this differs from napi_schedule. */
	if (napi_schedule_prep(napi))
		__napi_schedule(napi);
}

bool
linuxkpi_napi_complete_done(struct napi_struct *napi, int ret)
{
	unsigned long old, new;

	NAPI_TRACE(napi);
	if (NAPI_DIRECT_DISPATCH())
		return (true);

	do {
		new = old = READ_ONCE(napi->state);

		/*
		 * If we lost a race before, we need to re-schedule.
		 * Leave IS_SCHEDULED set essentially doing "_prep".
		 */
		if (!test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old))
			clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
		clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
	} while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);

	NAPI_TRACE(napi);

	/* Someone tried to schedule while poll was running. Re-sched. */
	if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) {
		__napi_schedule(napi);
		return (false);
	}

	return (true);
}
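
/*
 * Sketch of a matching poll callback (foo_* names hypothetical):
 * a driver calls napi_complete_done() only once it has consumed less
 * than its budget, i.e. the queue is drained:
 *
 *	static int
 *	foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done;
 *
 *		done = foo_rx_ring_drain(napi, budget);
 *		if (done < budget && napi_complete_done(napi, done))
 *			foo_enable_intr(napi->dev);
 *		return (done);
 *	}
 *
 * A false return from napi_complete_done() covers the lost-race case
 * above, in which the poll was already re-scheduled on our behalf.
 */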

bool
linuxkpi_napi_complete(struct napi_struct *napi)
{

	NAPI_TRACE(napi);
	return (napi_complete_done(napi, 0));
}

void
linuxkpi_napi_disable(struct napi_struct *napi)
{
	NAPI_TRACE(napi);
	set_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
	while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
		pause_sbt("napidslp", SBT_1MS, 0, C_HARDCLOCK);
	clear_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
}

void
linuxkpi_napi_enable(struct napi_struct *napi)
{

	NAPI_TRACE(napi);
	KASSERT(!test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state),
	    ("%s: enabling napi %p already scheduled\n", __func__, napi));
	mb();
	/* Let us be scheduled. */
	clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
}

void
linuxkpi_napi_synchronize(struct napi_struct *napi)
{
	NAPI_TRACE(napi);
#if defined(SMP)
	/* Check & sleep while a napi is scheduled. */
	while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
		pause_sbt("napisslp", SBT_1MS, 0, C_HARDCLOCK);
#else
	mb();
#endif
}

/* -------------------------------------------------------------------------- */

static void
lkpi_napi_task(void *ctx, int pending)
{
	struct napi_struct *napi;
	int count;

	KASSERT(ctx != NULL, ("%s: napi %p, pending %d\n",
	    __func__, ctx, pending));
	napi = ctx;
	KASSERT(napi->poll != NULL, ("%s: napi %p poll is NULL\n",
	    __func__, napi));

	NAPI_TRACE_TASK(napi, pending, napi->budget);
	count = napi->poll(napi, napi->budget);
	napi->rx_count += count;
	NAPI_TRACE_TASK(napi, pending, count);

	/*
	 * We must not check against count < pending here.  There are
	 * situations in which a driver may "poll" while we have no work to
	 * do, and that would make us re-schedule ourselves forever.
	 */
	if (count >= napi->budget) {
		/*
		 * Have to re-schedule ourselves.  napi_complete() was not run
		 * in this case which means we are still SCHEDULED.
		 * In order to queue another task we have to directly call
		 * __napi_schedule() without _prep() in the way.
		 */
		__napi_schedule(napi);
	}
}

/* -------------------------------------------------------------------------- */

void
linuxkpi_netif_napi_add(struct net_device *ndev, struct napi_struct *napi,
    int(*napi_poll)(struct napi_struct *, int))
{

	napi->dev = ndev;
	napi->poll = napi_poll;
	napi->budget = NAPI_POLL_WEIGHT;

	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;

	TASK_INIT(&napi->napi_task, 0, lkpi_napi_task, napi);

	NAPI_LOCK(ndev);
	TAILQ_INSERT_TAIL(&ndev->napi_head, napi, entry);
	NAPI_UNLOCK(ndev);

	/* Anything else to do on the ndev? */
	clear_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
}
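
/*
 * Registration sketch (sc and foo_poll are hypothetical; assumes the
 * usual netif_napi_add() mapping onto this function): a driver adds
 * and enables its napi context during attach,
 *
 *	netif_napi_add(sc->ndev, &sc->napi, foo_poll);
 *	napi_enable(&sc->napi);
 *
 * and tears it down in reverse with napi_disable() followed by
 * netif_napi_del().
 */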

static void
lkpi_netif_napi_del_locked(struct napi_struct *napi)
{
	struct net_device *ndev;

	ndev = napi->dev;
	NAPI_LOCK_ASSERT(ndev);

	set_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
	TAILQ_REMOVE(&ndev->napi_head, napi, entry);
	while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0)
		taskqueue_drain(ndev->napi_tq, &napi->napi_task);
}

void
linuxkpi_netif_napi_del(struct napi_struct *napi)
{
	struct net_device *ndev;

	ndev = napi->dev;
	NAPI_LOCK(ndev);
	lkpi_netif_napi_del_locked(napi);
	NAPI_UNLOCK(ndev);
}

/* -------------------------------------------------------------------------- */

void
linuxkpi_init_dummy_netdev(struct net_device *ndev)
{

	memset(ndev, 0, sizeof(*ndev));

	ndev->reg_state = NETREG_DUMMY;
	NAPI_LOCK_INIT(ndev);
	TAILQ_INIT(&ndev->napi_head);
	/* Anything else? */

	ndev->napi_tq = taskqueue_create("tq_ndev_napi", M_WAITOK,
	    taskqueue_thread_enqueue, &ndev->napi_tq);
	/* One thread for now. */
	(void) taskqueue_start_threads(&ndev->napi_tq, 1, PWAIT,
	    "ndev napi taskq");
}

struct net_device *
linuxkpi_alloc_netdev(size_t len, const char *name, uint32_t flags,
    void(*setup_func)(struct net_device *))
{
	struct net_device *ndev;

	ndev = malloc(sizeof(*ndev) + len, M_NETDEV, M_NOWAIT);
	if (ndev == NULL)
		return (ndev);

	/* Always first as it zeros! */
	linuxkpi_init_dummy_netdev(ndev);

	strlcpy(ndev->name, name, sizeof(ndev->name));

	/* This needs extending as we support more. */

	if (setup_func != NULL)
		setup_func(ndev);

	return (ndev);
}
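
/*
 * Lifecycle sketch (foo_priv and foo_setup are hypothetical; assumes
 * the usual alloc_netdev() macro mapping onto this function):
 *
 *	ndev = alloc_netdev(sizeof(struct foo_priv), "foo0", 0, foo_setup);
 *	if (ndev == NULL)
 *		return (ENOMEM);
 *	...
 *	free_netdev(ndev);
 *
 * The len bytes of private area follow the net_device structure in
 * the single allocation made above.
 */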

void
linuxkpi_free_netdev(struct net_device *ndev)
{
	struct napi_struct *napi, *temp;

	NAPI_LOCK(ndev);
	TAILQ_FOREACH_SAFE(napi, &ndev->napi_head, entry, temp) {
		lkpi_netif_napi_del_locked(napi);
	}
	NAPI_UNLOCK(ndev);

	taskqueue_free(ndev->napi_tq);
	ndev->napi_tq = NULL;
	NAPI_LOCK_DESTROY(ndev);

	/* This needs extending as we support more. */

	free(ndev, M_NETDEV);
}
437