xref: /freebsd/sys/net/ifq.h (revision 40a8ac8f62b535d30349faf28cf47106b7041b83)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	From: @(#)if.h	8.1 (Berkeley) 6/10/93
30  * $FreeBSD$
31  */
32 
33 #ifndef	_NET_IFQ_H_
34 #define	_NET_IFQ_H_
35 
36 #ifdef _KERNEL
37 #include <sys/mbuf.h>		/* ifqueue only? */
38 #include <sys/buf_ring.h>
39 #include <net/vnet.h>
40 #endif /* _KERNEL */
41 #include <sys/lock.h>		/* XXX */
42 #include <sys/mutex.h>		/* struct ifqueue */
43 
44 #define	IF_DUNIT_NONE	-1
45 
46 #include <altq/if_altq.h>
47 
/*
 * Structure defining a queue for a network interface: a singly-linked
 * list of mbuf packet chains (linked through m_nextpkt) with its own
 * mutex protecting all fields.
 */
struct	ifqueue {
	struct	mbuf *ifq_head;		/* first packet in the queue */
	struct	mbuf *ifq_tail;		/* last packet, for O(1) append */
	int	ifq_len;		/* current number of packets queued */
	int	ifq_maxlen;		/* drop threshold used by _IF_QFULL() */
	struct	mtx ifq_mtx;		/* protects all of the above */
};
58 
59 #ifdef _KERNEL
/*
 * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
 * are queues of messages stored on ifqueue structures
 * (defined above).  Entries are added to and deleted from these structures
 * by these macros.
 */
/* Acquire, release, and assert ownership of the queue mutex. */
#define IF_LOCK(ifq)		mtx_lock(&(ifq)->ifq_mtx)
#define IF_UNLOCK(ifq)		mtx_unlock(&(ifq)->ifq_mtx)
#define	IF_LOCK_ASSERT(ifq)	mtx_assert(&(ifq)->ifq_mtx, MA_OWNED)
/* Queue-state predicates; the leading underscore means "lock held by caller". */
#define	_IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define	_IF_QLEN(ifq)		((ifq)->ifq_len)
71 
/*
 * Append mbuf chain m to the tail of ifq.  The underscore variant
 * assumes the caller already holds the queue lock; IF_ENQUEUE is the
 * self-locking wrapper.
 */
#define	_IF_ENQUEUE(ifq, m) do { 				\
	(m)->m_nextpkt = NULL;					\
	if ((ifq)->ifq_tail == NULL) 				\
		(ifq)->ifq_head = m; 				\
	else 							\
		(ifq)->ifq_tail->m_nextpkt = m; 		\
	(ifq)->ifq_tail = m; 					\
	(ifq)->ifq_len++; 					\
} while (0)

#define IF_ENQUEUE(ifq, m) do {					\
	IF_LOCK(ifq); 						\
	_IF_ENQUEUE(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)
87 
/*
 * Insert mbuf chain m at the head of ifq (e.g. to return a packet the
 * driver could not transmit).  _IF_PREPEND requires the queue lock;
 * IF_PREPEND takes it.
 */
#define	_IF_PREPEND(ifq, m) do {				\
	(m)->m_nextpkt = (ifq)->ifq_head; 			\
	if ((ifq)->ifq_tail == NULL) 				\
		(ifq)->ifq_tail = (m); 				\
	(ifq)->ifq_head = (m); 					\
	(ifq)->ifq_len++; 					\
} while (0)

#define IF_PREPEND(ifq, m) do {		 			\
	IF_LOCK(ifq); 						\
	_IF_PREPEND(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)
101 
/*
 * Remove the packet at the head of ifq and store it in m; m is set to
 * NULL when the queue is empty.  The removed packet's m_nextpkt link is
 * cleared.  _IF_DEQUEUE requires the queue lock; IF_DEQUEUE takes it.
 */
#define	_IF_DEQUEUE(ifq, m) do { 				\
	(m) = (ifq)->ifq_head; 					\
	if (m) { 						\
		if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_tail = NULL; 		\
		(m)->m_nextpkt = NULL; 				\
		(ifq)->ifq_len--; 				\
	} 							\
} while (0)

#define IF_DEQUEUE(ifq, m) do { 				\
	IF_LOCK(ifq); 						\
	_IF_DEQUEUE(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)
117 
/*
 * Detach the entire packet list from ifq in one operation: m receives
 * the head of the chain (still linked via m_nextpkt; possibly NULL) and
 * the queue is reset to empty.
 */
#define	_IF_DEQUEUE_ALL(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	(ifq)->ifq_head = (ifq)->ifq_tail = NULL;		\
	(ifq)->ifq_len = 0;					\
} while (0)

#define	IF_DEQUEUE_ALL(ifq, m) do {				\
	IF_LOCK(ifq); 						\
	_IF_DEQUEUE_ALL(ifq, m);				\
	IF_UNLOCK(ifq); 					\
} while (0)
129 
/*
 * Non-destructive peek at the queue head.  Note that, unlike the other
 * uppercase wrappers, IF_POLL does NOT take the queue lock -- it is the
 * same racy read as _IF_POLL.
 */
#define	_IF_POLL(ifq, m)	((m) = (ifq)->ifq_head)
#define	IF_POLL(ifq, m)		_IF_POLL(ifq, m)
132 
/*
 * Free every packet queued on ifq, leaving it empty.  _IF_DRAIN
 * requires the queue lock; IF_DRAIN takes it.
 */
#define _IF_DRAIN(ifq) do { 					\
	struct mbuf *m; 					\
	for (;;) { 						\
		_IF_DEQUEUE(ifq, m); 				\
		if (m == NULL) 					\
			break; 					\
		m_freem(m); 					\
	} 							\
} while (0)

#define IF_DRAIN(ifq) do {					\
	IF_LOCK(ifq);						\
	_IF_DRAIN(ifq);						\
	IF_UNLOCK(ifq);						\
} while(0)
148 
/*
 * Hand mbuf m to interface ifp via queue ifq, adjusting the interface
 * byte counter by "adjust".  NOTE(review): implemented elsewhere (not
 * visible here); presumably returns zero when the packet is dropped --
 * confirm against the definition before relying on the return value.
 */
int	if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp,
	    int adjust);
#define	IF_HANDOFF(ifq, m, ifp)			\
	if_handoff((struct ifqueue *)ifq, m, ifp, 0)
#define	IF_HANDOFF_ADJ(ifq, m, ifp, adj)	\
	if_handoff((struct ifqueue *)ifq, m, ifp, adj)

void	if_start(struct ifnet *);
157 
/*
 * Enqueue m on ifq, honoring ALTQ when it is enabled on the queue.
 * On the default path a full queue causes the mbuf to be freed and
 * "err" set to ENOBUFS; otherwise "err" is 0 (ALTQ_ENQUEUE sets "err"
 * itself).  Takes and drops the queue lock internally.
 */
#define	IFQ_ENQUEUE(ifq, m, err)					\
do {									\
	IF_LOCK(ifq);							\
	if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_ENQUEUE(ifq, m, NULL, err);			\
	else {								\
		if (_IF_QFULL(ifq)) {					\
			m_freem(m);					\
			(err) = ENOBUFS;				\
		} else {						\
			_IF_ENQUEUE(ifq, m);				\
			(err) = 0;					\
		}							\
	}								\
	IF_UNLOCK(ifq);							\
} while (0)
174 
/*
 * Dequeue the next packet into m, consulting in order: the token-bucket
 * regulator, ALTQ, then the plain FIFO.  The _NOLOCK variant requires
 * the caller to hold the queue lock; IFQ_DEQUEUE takes it.
 */
#define	IFQ_DEQUEUE_NOLOCK(ifq, m)					\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE);		\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_DEQUEUE(ifq, m);					\
	else								\
		_IF_DEQUEUE(ifq, m);					\
} while (0)

#define	IFQ_DEQUEUE(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_DEQUEUE_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)
191 
/*
 * Peek at the next packet without removing it from the queue, using the
 * TBR/ALTQ poll operations when those disciplines are active.  The
 * _NOLOCK variant requires the queue lock; IFQ_POLL takes it.
 */
#define	IFQ_POLL_NOLOCK(ifq, m)						\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL);			\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_POLL(ifq, m);					\
	else								\
		_IF_POLL(ifq, m);					\
} while (0)

#define	IFQ_POLL(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_POLL_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)
208 
/*
 * Discard (free) every packet on the queue, delegating to ALTQ's purge
 * when it is enabled.  The _NOLOCK variant requires the queue lock;
 * IFQ_PURGE takes it.
 */
#define	IFQ_PURGE_NOLOCK(ifq)						\
do {									\
	if (ALTQ_IS_ENABLED(ifq)) {					\
		ALTQ_PURGE(ifq);					\
	} else								\
		_IF_DRAIN(ifq);						\
} while (0)

#define	IFQ_PURGE(ifq)							\
do {									\
	IF_LOCK(ifq);							\
	IFQ_PURGE_NOLOCK(ifq);						\
	IF_UNLOCK(ifq);							\
} while (0)
223 
/* Mark the queue initialized for ALTQ; ifq must be a struct ifaltq. */
#define	IFQ_SET_READY(ifq)						\
	do { ((ifq)->altq_flags |= ALTQF_READY); } while (0)

/* Aliases and simple field accessors for the embedded ifqueue. */
#define	IFQ_LOCK(ifq)			IF_LOCK(ifq)
#define	IFQ_UNLOCK(ifq)			IF_UNLOCK(ifq)
#define	IFQ_LOCK_ASSERT(ifq)		IF_LOCK_ASSERT(ifq)
#define	IFQ_IS_EMPTY(ifq)		((ifq)->ifq_len == 0)
/* Note: IFQ_DEC_LEN evaluates to the new length (pre-decrement). */
#define	IFQ_INC_LEN(ifq)		((ifq)->ifq_len++)
#define	IFQ_DEC_LEN(ifq)		(--(ifq)->ifq_len)
#define	IFQ_SET_MAXLEN(ifq, len)	((ifq)->ifq_maxlen = (len))
234 
/*
 * Enqueue m on ifp's send queue, update the output statistics, and kick
 * the interface's start routine if the driver is idle.  "adj" is added
 * to the byte count (e.g. for link-layer framing not counted in the
 * mbuf).  "err" receives 0 on success or the enqueue error; on failure
 * IFQ_ENQUEUE has already freed the mbuf and the drop is counted.
 *
 * Fix: the drop path used the macro argument as "ifp->if_oqdrops"
 * without parentheses, unlike every other use in this macro; a
 * non-trivial argument expression could parse incorrectly.
 *
 * The IFF_DRV_OACTIVE test should really occur in the device driver, not in
 * the handoff logic, as that flag is locked by the device driver.
 */
#define	IFQ_HANDOFF_ADJ(ifp, m, adj, err)				\
do {									\
	int len;							\
	short mflags;							\
									\
	/* Snapshot length/flags: the mbuf is gone if enqueue fails. */	\
	len = (m)->m_pkthdr.len;					\
	mflags = (m)->m_flags;						\
	IFQ_ENQUEUE(&(ifp)->if_snd, m, err);				\
	if ((err) == 0) {						\
		(ifp)->if_obytes += len + (adj);			\
		if (mflags & M_MCAST)					\
			(ifp)->if_omcasts++;				\
		if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0)	\
			if_start(ifp);					\
	} else								\
		(ifp)->if_oqdrops++;					\
} while (0)

#define	IFQ_HANDOFF(ifp, m, err)					\
	IFQ_HANDOFF_ADJ(ifp, m, 0, err)
259 
/*
 * Dequeue from the driver-managed cache queue (ifq_drv_*) first; when
 * it is empty, take the ifq lock once and refill the cache up to
 * ifq_drv_maxlen packets from the underlying queue.  The ifq_drv_*
 * fields themselves are accessed without the ifq lock -- the driver is
 * presumably expected to serialize its own dequeue path (TODO confirm
 * against drivers using IFQ_DRV_*).
 */
#define	IFQ_DRV_DEQUEUE(ifq, m)						\
do {									\
	(m) = (ifq)->ifq_drv_head;					\
	if (m) {							\
		if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_drv_tail = NULL;			\
		(m)->m_nextpkt = NULL;					\
		(ifq)->ifq_drv_len--;					\
	} else {							\
		IFQ_LOCK(ifq);						\
		IFQ_DEQUEUE_NOLOCK(ifq, m);				\
		while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) {	\
			struct mbuf *m0;				\
			IFQ_DEQUEUE_NOLOCK(ifq, m0);			\
			if (m0 == NULL)					\
				break;					\
			m0->m_nextpkt = NULL;				\
			if ((ifq)->ifq_drv_tail == NULL)		\
				(ifq)->ifq_drv_head = m0;		\
			else						\
				(ifq)->ifq_drv_tail->m_nextpkt = m0;	\
			(ifq)->ifq_drv_tail = m0;			\
			(ifq)->ifq_drv_len++;				\
		}							\
		IFQ_UNLOCK(ifq);					\
	}								\
} while (0)
287 
/*
 * Push m back onto the head of the driver cache queue (the inverse of
 * IFQ_DRV_DEQUEUE for a packet the driver could not send).  No lock is
 * taken: the ifq_drv_* fields belong to the driver's dequeue path.
 */
#define	IFQ_DRV_PREPEND(ifq, m)						\
do {									\
	(m)->m_nextpkt = (ifq)->ifq_drv_head;				\
	if ((ifq)->ifq_drv_tail == NULL)				\
		(ifq)->ifq_drv_tail = (m);				\
	(ifq)->ifq_drv_head = (m);					\
	(ifq)->ifq_drv_len++;						\
} while (0)

/* True only when both the driver cache and the underlying queue are empty
 * (unlocked read -- a racy snapshot). */
#define	IFQ_DRV_IS_EMPTY(ifq)						\
	(((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0))
299 
/*
 * Free every packet in the driver cache queue, then purge the
 * underlying ifq as well (IFQ_PURGE takes the queue lock itself).
 */
#define	IFQ_DRV_PURGE(ifq)						\
do {									\
	struct mbuf *m, *n = (ifq)->ifq_drv_head;			\
	while((m = n) != NULL) {					\
		n = m->m_nextpkt;					\
		m_freem(m);						\
	}								\
	(ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL;		\
	(ifq)->ifq_drv_len = 0;						\
	IFQ_PURGE(ifq);							\
} while (0)
311 
/*
 * Hand an mbuf to a driver's buf_ring transmit queue.  When ALTQ is
 * compiled in and enabled on the interface, the packet is diverted to
 * the ALTQ-managed if_snd queue instead.  Returns 0 on success or an
 * errno value; on failure the mbuf has been freed (buf_ring path) or
 * already disposed of by IFQ_ENQUEUE (ALTQ path), and the drop counted.
 */
static __inline int
drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
{
	int err = 0;

#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_ENQUEUE(&ifp->if_snd, m, err);
		if (err != 0)
			ifp->if_oqdrops++;
		return (err);
	}
#endif
	err = buf_ring_enqueue(br, m);
	if (err != 0)
		m_freem(m);

	return (err);
}
331 
/*
 * Return a previously peeked-at packet to the head of the queue so it
 * is handed out again next.  With ALTQ enabled, drbr_peek() actually
 * dequeued the packet, so it must be prepended back onto if_snd.
 *
 * Fix: the parameter was named "new", which is a C++ keyword and makes
 * this header unusable from C++ translation units; renamed to "m_new"
 * (parameter names of a static inline are not part of the ABI).
 */
static __inline void
drbr_putback(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m_new)
{
	/*
	 * The top of the list needs to be swapped
	 * for this one.
	 */
#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * Peek in altq case dequeued it
		 * so put it back.
		 */
		IFQ_DRV_PREPEND(&ifp->if_snd, m_new);
		return;
	}
#endif
	buf_ring_putback_sc(br, m_new);
}
351 
/*
 * Return the packet at the head of the queue without consuming it.
 * With ALTQ enabled there is no non-destructive peek, so the packet is
 * really dequeued; drbr_advance() then does nothing and drbr_putback()
 * re-prepends a rejected packet.
 */
static __inline struct mbuf *
drbr_peek(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		struct mbuf *m;

		/*
		 * Pull it off like a dequeue since drbr_advance() does
		 * nothing for altq and drbr_putback() will use the old
		 * prepend function.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return (buf_ring_peek(br));
}
370 
371 static __inline void
372 drbr_flush(struct ifnet *ifp, struct buf_ring *br)
373 {
374 	struct mbuf *m;
375 
376 #ifdef ALTQ
377 	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
378 		IFQ_PURGE(&ifp->if_snd);
379 #endif
380 	while ((m = buf_ring_dequeue_sc(br)) != NULL)
381 		m_freem(m);
382 }
383 
/*
 * Release a driver buf_ring: free any packets still queued (the NULL
 * ifp makes drbr_flush() skip the ALTQ path), then free the ring
 * itself back to malloc type "type".
 */
static __inline void
drbr_free(struct buf_ring *br, struct malloc_type *type)
{

	drbr_flush(NULL, br);
	buf_ring_free(br, type);
}
391 
/*
 * Remove and return the next packet to transmit, or NULL when empty.
 * The ALTQ-managed if_snd queue takes precedence when enabled.
 */
static __inline struct mbuf *
drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		struct mbuf *m;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return (buf_ring_dequeue_sc(br));
}
405 
/*
 * Consume the packet most recently returned by drbr_peek().
 *
 * Fix: the original wrote "return (buf_ring_advance_sc(br));" -- a
 * return statement with a (void) expression inside a void function,
 * which is an ISO C constraint violation (C11 6.8.6.4p1) and breaks
 * pedantic builds.  Call the function as a plain statement instead.
 */
static __inline void
drbr_advance(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	/* Nothing to do here since peek dequeues in altq case */
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		return;
#endif
	buf_ring_advance_sc(br);
}
416 
417 
418 static __inline struct mbuf *
419 drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
420     int (*func) (struct mbuf *, void *), void *arg)
421 {
422 	struct mbuf *m;
423 #ifdef ALTQ
424 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
425 		IFQ_LOCK(&ifp->if_snd);
426 		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
427 		if (m != NULL && func(m, arg) == 0) {
428 			IFQ_UNLOCK(&ifp->if_snd);
429 			return (NULL);
430 		}
431 		IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
432 		IFQ_UNLOCK(&ifp->if_snd);
433 		return (m);
434 	}
435 #endif
436 	m = buf_ring_peek(br);
437 	if (m == NULL || func(m, arg) == 0)
438 		return (NULL);
439 
440 	return (buf_ring_dequeue_sc(br));
441 }
442 
/*
 * True when nothing is queued for transmission (ALTQ queue when
 * enabled, otherwise the buf_ring).
 */
static __inline int
drbr_empty(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (IFQ_IS_EMPTY(&ifp->if_snd));
#endif
	return (!buf_ring_empty(br)) == 0 ? 1 : 0;
}

/*
 * True when the driver's start routine has pending work: always for
 * ALTQ (its queue state is not cheaply inspectable here), otherwise
 * when the buf_ring is non-empty.
 */
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (!buf_ring_empty(br));
}

/*
 * Number of packets currently queued (unlocked, advisory snapshot).
 */
static __inline int
drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (ifp->if_snd.ifq_len);
#endif
	return (buf_ring_count(br));
}
472 
/* Default maximum queue length; defined in the ifnet implementation. */
extern	int ifqmaxlen;

void	if_qflush(struct ifnet *);
void	ifq_init(struct ifaltq *, struct ifnet *ifp);
void	ifq_delete(struct ifaltq *);

#ifdef DEVICE_POLLING
/* Polling-mode commands passed to a driver's poll handler. */
enum poll_cmd {	POLL_ONLY, POLL_AND_CHECK_STATUS };

typedef	int poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count);
int    ether_poll_register(poll_handler_t *h, struct ifnet *ifp);
int    ether_poll_deregister(struct ifnet *ifp);
/* The following should be temporary, till all drivers use the driver API */
typedef	int poll_handler_drv_t(if_t ifh, enum poll_cmd cmd, int count);
int	ether_poll_register_drv(poll_handler_drv_t *h, if_t ifh);
int	ether_poll_deregister_drv(if_t ifh);
#endif /* DEVICE_POLLING */
490 
491 #endif /* _KERNEL */
492 #endif /* !_NET_IFQ_H_ */
493