/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)if.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#ifndef	_NET_IFQ_H_
#define	_NET_IFQ_H_

#ifdef _KERNEL
#include <sys/mbuf.h>		/* ifqueue only? */
#include <sys/buf_ring.h>
#include <net/vnet.h>
#endif /* _KERNEL */
#include <sys/lock.h>		/* XXX */
#include <sys/mutex.h>		/* struct ifqueue */

/*
 * A couple of ugly extra definitions that are required since ifq.h
 * was split out of if_var.h.
 */
#define	IF_DUNIT_NONE	-1

#include <net/altq/if_altq.h>

/*
 * Structure defining a queue for a network interface.
 */
struct	ifqueue {
	struct	mbuf *ifq_head;
	struct	mbuf *ifq_tail;
	int	ifq_len;
	int	ifq_maxlen;
	struct	mtx ifq_mtx;
};

#ifdef _KERNEL
/*
 * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
 * are queues of messages stored on ifqueue structures
 * (defined above).  Entries are added to and deleted from these structures
 * by these macros.
 */
#define IF_LOCK(ifq)		mtx_lock(&(ifq)->ifq_mtx)
#define IF_UNLOCK(ifq)		mtx_unlock(&(ifq)->ifq_mtx)
#define	IF_LOCK_ASSERT(ifq)	mtx_assert(&(ifq)->ifq_mtx, MA_OWNED)
#define	_IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define	_IF_QLEN(ifq)		((ifq)->ifq_len)

#define	_IF_ENQUEUE(ifq, m) do { 				\
	(m)->m_nextpkt = NULL;					\
	if ((ifq)->ifq_tail == NULL) 				\
		(ifq)->ifq_head = m; 				\
	else 							\
		(ifq)->ifq_tail->m_nextpkt = m; 		\
	(ifq)->ifq_tail = m; 					\
	(ifq)->ifq_len++; 					\
} while (0)

#define IF_ENQUEUE(ifq, m) do {					\
	IF_LOCK(ifq); 						\
	_IF_ENQUEUE(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)

#define	_IF_PREPEND(ifq, m) do {				\
	(m)->m_nextpkt = (ifq)->ifq_head; 			\
	if ((ifq)->ifq_tail == NULL) 				\
		(ifq)->ifq_tail = (m); 				\
	(ifq)->ifq_head = (m); 					\
	(ifq)->ifq_len++; 					\
} while (0)

#define IF_PREPEND(ifq, m) do {		 			\
	IF_LOCK(ifq); 						\
	_IF_PREPEND(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)

#define	_IF_DEQUEUE(ifq, m) do { 				\
	(m) = (ifq)->ifq_head; 					\
	if (m) { 						\
		if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_tail = NULL; 		\
		(m)->m_nextpkt = NULL; 				\
		(ifq)->ifq_len--; 				\
	} 							\
} while (0)

#define IF_DEQUEUE(ifq, m) do { 				\
	IF_LOCK(ifq); 						\
	_IF_DEQUEUE(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)
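
/*
 * Example (illustrative sketch only, not part of this header): a driver can
 * keep a private ifqueue and feed it with the macros above.  The names
 * "xx_softc" and "xx_inq" are hypothetical.
 *
 *	struct xx_softc {
 *		struct ifqueue	xx_inq;
 *		...
 *	};
 *
 *	// attach: initialize the queue's mutex and depth
 *	mtx_init(&sc->xx_inq.ifq_mtx, "xx_inq", NULL, MTX_DEF);
 *	sc->xx_inq.ifq_maxlen = ifqmaxlen;
 *
 *	// producer (e.g. interrupt handler): drop when the queue is full
 *	IF_LOCK(&sc->xx_inq);
 *	if (_IF_QFULL(&sc->xx_inq)) {
 *		IF_UNLOCK(&sc->xx_inq);
 *		m_freem(m);
 *	} else {
 *		_IF_ENQUEUE(&sc->xx_inq, m);
 *		IF_UNLOCK(&sc->xx_inq);
 *	}
 *
 *	// consumer (e.g. a task): drain one packet at a time
 *	for (;;) {
 *		IF_DEQUEUE(&sc->xx_inq, m);
 *		if (m == NULL)
 *			break;
 *		// process m
 *	}
 */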

#define	_IF_DEQUEUE_ALL(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	(ifq)->ifq_head = (ifq)->ifq_tail = NULL;		\
	(ifq)->ifq_len = 0;					\
} while (0)

#define	IF_DEQUEUE_ALL(ifq, m) do {				\
	IF_LOCK(ifq); 						\
	_IF_DEQUEUE_ALL(ifq, m);				\
	IF_UNLOCK(ifq); 					\
} while (0)

#define	_IF_POLL(ifq, m)	((m) = (ifq)->ifq_head)
#define	IF_POLL(ifq, m)		_IF_POLL(ifq, m)

#define _IF_DRAIN(ifq) do { 					\
	struct mbuf *m; 					\
	for (;;) { 						\
		_IF_DEQUEUE(ifq, m); 				\
		if (m == NULL) 					\
			break; 					\
		m_freem(m); 					\
	} 							\
} while (0)

#define IF_DRAIN(ifq) do {					\
	IF_LOCK(ifq);						\
	_IF_DRAIN(ifq);						\
	IF_UNLOCK(ifq);						\
} while (0)

int	if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp,
	    int adjust);
#define	IF_HANDOFF(ifq, m, ifp)			\
	if_handoff((struct ifqueue *)ifq, m, ifp, 0)
#define	IF_HANDOFF_ADJ(ifq, m, ifp, adj)	\
	if_handoff((struct ifqueue *)ifq, m, ifp, adj)

void	if_start(struct ifnet *);

#define	IFQ_ENQUEUE(ifq, m, err)					\
do {									\
	IF_LOCK(ifq);							\
	if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_ENQUEUE(ifq, m, NULL, err);			\
	else {								\
		if (_IF_QFULL(ifq)) {					\
			m_freem(m);					\
			(err) = ENOBUFS;				\
		} else {						\
			_IF_ENQUEUE(ifq, m);				\
			(err) = 0;					\
		}							\
	}								\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_DEQUEUE_NOLOCK(ifq, m)					\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE);		\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_DEQUEUE(ifq, m);					\
	else								\
		_IF_DEQUEUE(ifq, m);					\
} while (0)

#define	IFQ_DEQUEUE(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_DEQUEUE_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_POLL_NOLOCK(ifq, m)						\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL);			\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_POLL(ifq, m);					\
	else								\
		_IF_POLL(ifq, m);					\
} while (0)

#define	IFQ_POLL(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_POLL_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)
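
/*
 * Example (illustrative sketch only): a driver that must not commit to a
 * packet before knowing it can transmit it may peek first and only dequeue
 * afterwards, all under a single lock hold.  "xx_ring_full" is a
 * hypothetical helper.
 *
 *	IFQ_LOCK(&ifp->if_snd);
 *	IFQ_POLL_NOLOCK(&ifp->if_snd, m);
 *	if (m == NULL || xx_ring_full(sc)) {
 *		IFQ_UNLOCK(&ifp->if_snd);
 *		return;
 *	}
 *	IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
 *	IFQ_UNLOCK(&ifp->if_snd);
 *	// m is now owned by the driver and must be transmitted or freed
 */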

#define	IFQ_PURGE_NOLOCK(ifq)						\
do {									\
	if (ALTQ_IS_ENABLED(ifq)) {					\
		ALTQ_PURGE(ifq);					\
	} else								\
		_IF_DRAIN(ifq);						\
} while (0)

#define	IFQ_PURGE(ifq)							\
do {									\
	IF_LOCK(ifq);							\
	IFQ_PURGE_NOLOCK(ifq);						\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_SET_READY(ifq)						\
	do { ((ifq)->altq_flags |= ALTQF_READY); } while (0)

#define	IFQ_LOCK(ifq)			IF_LOCK(ifq)
#define	IFQ_UNLOCK(ifq)			IF_UNLOCK(ifq)
#define	IFQ_LOCK_ASSERT(ifq)		IF_LOCK_ASSERT(ifq)
#define	IFQ_IS_EMPTY(ifq)		((ifq)->ifq_len == 0)
#define	IFQ_INC_LEN(ifq)		((ifq)->ifq_len++)
#define	IFQ_DEC_LEN(ifq)		(--(ifq)->ifq_len)
#define	IFQ_SET_MAXLEN(ifq, len)	((ifq)->ifq_maxlen = (len))

/*
 * The IFF_DRV_OACTIVE test should really occur in the device driver, not in
 * the handoff logic, as that flag is locked by the device driver.
 */
#define	IFQ_HANDOFF_ADJ(ifp, m, adj, err)				\
do {									\
	int len;							\
	short mflags;							\
									\
	len = (m)->m_pkthdr.len;					\
	mflags = (m)->m_flags;						\
	IFQ_ENQUEUE(&(ifp)->if_snd, m, err);				\
	if ((err) == 0) {						\
		if_inc_counter((ifp), IFCOUNTER_OBYTES, len + (adj));	\
		if (mflags & M_MCAST)					\
			if_inc_counter((ifp), IFCOUNTER_OMCASTS, 1);	\
		if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0)	\
			if_start(ifp);					\
	} else								\
		if_inc_counter((ifp), IFCOUNTER_OQDROPS, 1);		\
} while (0)

#define	IFQ_HANDOFF(ifp, m, err)					\
	IFQ_HANDOFF_ADJ(ifp, m, 0, err)
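
/*
 * Example (illustrative sketch only): an output path that still uses the
 * legacy if_snd queue can hand a packet off with IFQ_HANDOFF(), which
 * enqueues, updates the interface counters and kicks if_start() as needed.
 *
 *	int error;
 *
 *	IFQ_HANDOFF(ifp, m, error);
 *	if (error != 0)
 *		// m has already been freed and OQDROPS accounted
 *		return (error);
 */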

#define	IFQ_DRV_DEQUEUE(ifq, m)						\
do {									\
	(m) = (ifq)->ifq_drv_head;					\
	if (m) {							\
		if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_drv_tail = NULL;			\
		(m)->m_nextpkt = NULL;					\
		(ifq)->ifq_drv_len--;					\
	} else {							\
		IFQ_LOCK(ifq);						\
		IFQ_DEQUEUE_NOLOCK(ifq, m);				\
		while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) {	\
			struct mbuf *m0;				\
			IFQ_DEQUEUE_NOLOCK(ifq, m0);			\
			if (m0 == NULL)					\
				break;					\
			m0->m_nextpkt = NULL;				\
			if ((ifq)->ifq_drv_tail == NULL)		\
				(ifq)->ifq_drv_head = m0;		\
			else						\
				(ifq)->ifq_drv_tail->m_nextpkt = m0;	\
			(ifq)->ifq_drv_tail = m0;			\
			(ifq)->ifq_drv_len++;				\
		}							\
		IFQ_UNLOCK(ifq);					\
	}								\
} while (0)

#define	IFQ_DRV_PREPEND(ifq, m)						\
do {									\
	(m)->m_nextpkt = (ifq)->ifq_drv_head;				\
	if ((ifq)->ifq_drv_tail == NULL)				\
		(ifq)->ifq_drv_tail = (m);				\
	(ifq)->ifq_drv_head = (m);					\
	(ifq)->ifq_drv_len++;						\
} while (0)

#define	IFQ_DRV_IS_EMPTY(ifq)						\
	(((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0))

#define	IFQ_DRV_PURGE(ifq)						\
do {									\
	struct mbuf *m, *n = (ifq)->ifq_drv_head;			\
	while ((m = n) != NULL) {					\
		n = m->m_nextpkt;					\
		m_freem(m);						\
	}								\
	(ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL;		\
	(ifq)->ifq_drv_len = 0;						\
	IFQ_PURGE(ifq);							\
} while (0)
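
/*
 * Example (illustrative sketch only): the classic if_start loop built on
 * the IFQ_DRV_* macros, which batch packets from if_snd into the unlocked
 * driver queue.  "xx_encap" and the softc layout are hypothetical.
 *
 *	static void
 *	xx_start_locked(struct ifnet *ifp)
 *	{
 *		struct xx_softc *sc = ifp->if_softc;
 *		struct mbuf *m;
 *
 *		while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 *			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 *			if (m == NULL)
 *				break;
 *			if (xx_encap(sc, &m) != 0) {
 *				// out of descriptors; requeue and stall
 *				if (m != NULL)
 *					IFQ_DRV_PREPEND(&ifp->if_snd, m);
 *				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 *				break;
 *			}
 *			// m now belongs to the hardware ring
 *		}
 *	}
 */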

/*
 * Driver buf_ring (drbr) wrappers: enqueue a packet on the transmit ring,
 * or on the ALTQ queue when ALTQ is enabled; the mbuf is freed if it
 * cannot be queued.
 */
static __inline int
drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
{
	int error = 0;

#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
		if (error)
			if_inc_counter((ifp), IFCOUNTER_OQDROPS, 1);
		return (error);
	}
#endif
	error = buf_ring_enqueue(br, m);
	if (error)
		m_freem(m);

	return (error);
}

static __inline void
drbr_putback(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m_new)
{
	/*
	 * The top of the list needs to be swapped
	 * for this one.
	 */
#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * The peek in the ALTQ case actually dequeued the mbuf,
		 * so put it back.
		 */
		IFQ_DRV_PREPEND(&ifp->if_snd, m_new);
		return;
	}
#endif
	buf_ring_putback_sc(br, m_new);
}

static __inline struct mbuf *
drbr_peek(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * Pull the mbuf off as a full dequeue, since drbr_advance()
		 * does nothing in the ALTQ case and drbr_putback() will use
		 * the prepend path above.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return ((struct mbuf *)buf_ring_peek_clear_sc(br));
}

static __inline void
drbr_flush(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *m;

#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		IFQ_PURGE(&ifp->if_snd);
#endif
	while ((m = (struct mbuf *)buf_ring_dequeue_sc(br)) != NULL)
		m_freem(m);
}

static __inline void
drbr_free(struct buf_ring *br, struct malloc_type *type)
{

	drbr_flush(NULL, br);
	buf_ring_free(br, type);
}

static __inline struct mbuf *
drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;

	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return ((struct mbuf *)buf_ring_dequeue_sc(br));
}

static __inline void
drbr_advance(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	/* Nothing to do here since peek dequeues in altq case */
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		return;
#endif
	return (buf_ring_advance_sc(br));
}

static __inline struct mbuf *
drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
    int (*func) (struct mbuf *, void *), void *arg)
{
	struct mbuf *m;
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m != NULL && func(m, arg) == 0) {
			IFQ_UNLOCK(&ifp->if_snd);
			return (NULL);
		}
		IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
		IFQ_UNLOCK(&ifp->if_snd);
		return (m);
	}
#endif
	m = (struct mbuf *)buf_ring_peek(br);
	if (m == NULL || func(m, arg) == 0)
		return (NULL);

	return ((struct mbuf *)buf_ring_dequeue_sc(br));
}

static __inline int
drbr_empty(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (IFQ_IS_EMPTY(&ifp->if_snd));
#endif
	return (buf_ring_empty(br));
}

static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (!buf_ring_empty(br));
}

static __inline int
drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (ifp->if_snd.ifq_len);
#endif
	return (buf_ring_count(br));
}
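
/*
 * Example (illustrative sketch only): the usual multiqueue pattern built on
 * the drbr_* wrappers above: if_transmit enqueues onto the ring and defers
 * the actual work; the transmit routine peeks, tries to encapsulate, and
 * either advances past the packet or puts it back.  "xx_encap",
 * "xx_tx_task" and the softc layout are hypothetical.
 *
 *	static int
 *	xx_transmit(struct ifnet *ifp, struct mbuf *m)
 *	{
 *		struct xx_softc *sc = ifp->if_softc;
 *		int error;
 *
 *		error = drbr_enqueue(ifp, sc->xx_br, m);
 *		if (error == 0)
 *			taskqueue_enqueue(sc->xx_tq, &sc->xx_tx_task);
 *		return (error);
 *	}
 *
 *	// transmit task, with the TX lock held
 *	while ((m = drbr_peek(ifp, sc->xx_br)) != NULL) {
 *		if (xx_encap(sc, &m) != 0) {
 *			if (m == NULL)
 *				drbr_advance(ifp, sc->xx_br);
 *			else
 *				drbr_putback(ifp, sc->xx_br, m);
 *			break;
 *		}
 *		drbr_advance(ifp, sc->xx_br);
 *	}
 */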

extern	int ifqmaxlen;

void	if_qflush(struct ifnet *);
void	ifq_init(struct ifaltq *, struct ifnet *ifp);
void	ifq_delete(struct ifaltq *);

#endif /* _KERNEL */
#endif /* !_NET_IFQ_H_ */