/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

struct netdev_queue_stats_rx {
	u64 bytes;
	u64 packets;
};

struct netdev_queue_stats_tx {
	u64 bytes;
	u64 packets;
};

/**
 * struct netdev_stat_ops - netdev ops for fine-grained stats
 * @get_queue_stats_rx:	get stats for a given Rx queue
 * @get_queue_stats_tx:	get stats for a given Tx queue
 * @get_base_stats:	get base stats (not belonging to any live instance)
 *
 * Query stats for a given object. The values of the statistics are undefined
 * on entry (specifically they are *not* zero-initialized). Drivers should
 * assign values only to the statistics they collect. Statistics which are not
 * collected must be left undefined.
 *
 * Queue objects are not necessarily persistent, and only currently active
 * queues are queried by the per-queue callbacks. This means that per-queue
 * statistics will not generally add up to the total number of events for
 * the device. The @get_base_stats callback allows filling in the delta
 * between events for currently live queues and overall device history.
 * When the statistics for the entire device are queried, first @get_base_stats
 * is issued to collect the delta, and then a series of per-queue callbacks.
 * Only statistics which are set in @get_base_stats will be reported
 * at the device level, meaning that unlike in queue callbacks, setting
 * a statistic to zero in @get_base_stats is a legitimate thing to do.
 * This is because @get_base_stats has a second function of designating which
 * statistics are in fact correct for the entire device (e.g. when history
 * for some of the events is not maintained, and a reliable "total" cannot
 * be provided).
 *
 * Device drivers can assume that when collecting total device stats,
 * the @get_base_stats and subsequent per-queue calls are performed
 * "atomically" (without releasing the rtnl_lock).
 *
 * Device drivers are encouraged to reset the per-queue statistics when
 * the number of queues changes. This is because the primary use case for
 * per-queue statistics is currently to detect traffic imbalance.
 */
struct netdev_stat_ops {
	void (*get_queue_stats_rx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats);
	void (*get_queue_stats_tx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats);
	void (*get_base_stats)(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx);
};
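
/* Example (illustrative sketch, not part of the kernel API): a hypothetical
 * driver with per-ring counters might wire these ops up as below, pointing
 * dev->stat_ops at the table during probe. The mydrv_* names, struct mydrv
 * and its fields are assumptions made up for illustration;
 * mydrv_get_queue_stats_tx would be analogous to the Rx callback and is
 * omitted.
 *
 *	static void mydrv_get_queue_stats_rx(struct net_device *dev, int idx,
 *					     struct netdev_queue_stats_rx *stats)
 *	{
 *		struct mydrv *priv = netdev_priv(dev);
 *
 *		// Assign only what we actually count, leave the rest undefined.
 *		stats->packets = priv->rx_rings[idx].packets;
 *		stats->bytes = priv->rx_rings[idx].bytes;
 *	}
 *
 *	static void mydrv_get_base_stats(struct net_device *dev,
 *					 struct netdev_queue_stats_rx *rx,
 *					 struct netdev_queue_stats_tx *tx)
 *	{
 *		struct mydrv *priv = netdev_priv(dev);
 *
 *		// Counters carried over from rings which have been closed.
 *		// Zero is legitimate here, it marks the stat as reported.
 *		rx->packets = priv->base_rx.packets;
 *		rx->bytes = priv->base_rx.bytes;
 *		tx->packets = priv->base_tx.packets;
 *		tx->bytes = priv->base_tx.bytes;
 *	}
 *
 *	static const struct netdev_stat_ops mydrv_stat_ops = {
 *		.get_queue_stats_rx	= mydrv_get_queue_stats_rx,
 *		.get_queue_stats_tx	= mydrv_get_queue_stats_tx,
 *		.get_base_stats		= mydrv_get_base_stats,
 *	};
 */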

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while wake-up should be triggered from NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake-ups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @stop_thrs:	minimal number of available descriptors for queue to be left
 *		enabled
 * @start_thrs:	minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit, see the comment at the top
 * of the file.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})
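
/* Example (illustrative sketch): stopping the queue at the end of a
 * hypothetical driver's ndo_start_xmit, once the frame has been posted
 * to the ring. mydrv_ring_free() and its ring argument are assumptions
 * made up for illustration; MAX_SKB_FRAGS + 1 is a common choice of
 * threshold since that is the most descriptors one skb can consume.
 *
 *	// Ring indexes are fully updated before the check, as required.
 *	netif_txq_maybe_stop(txq, mydrv_ring_free(ring),
 *			     MAX_SKB_FRAGS + 1, MAX_SKB_FRAGS + 1);
 *
 * Note that mydrv_ring_free(ring) is passed as an expression, not a
 * cached value, because the macro re-evaluates it after the barrier.
 */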

/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @pkts:	number of packets completed
 * @bytes:	number of bytes completed
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @start_thrs:	minimal number of descriptors to re-enable the queue
 * @down_cond:	down condition, predicate indicating that the queue should
 *		not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggyback on its barrier.		\
		 * Barrier makes sure that anybody stopping the queue	\
		 * after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)
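
/* Example (illustrative sketch): waking the queue from a hypothetical
 * driver's Tx completion handler, run from NAPI poll. total_pkts and
 * total_bytes would be accumulated while reaping completed descriptors;
 * mydrv_ring_free() and the other mydrv_* names are assumptions made up
 * for illustration.
 *
 *	// The consumer index is already updated when this runs.
 *	netif_txq_completed_wake(txq, total_pkts, total_bytes,
 *				 mydrv_ring_free(ring),
 *				 MAX_SKB_FRAGS + 1);
 *
 * Drivers which must suppress waking while e.g. a reset is pending can
 * use __netif_txq_completed_wake() directly and pass the extra predicate
 * as @down_cond.
 */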

/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(txq, get_desc, start_thrs);		\
	})

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})
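
/* Example (illustrative sketch): drivers which track rings by queue index
 * rather than by struct netdev_queue pointer can use the subqueue variants
 * directly, letting the macro look the queue up. The names below are
 * assumptions made up for illustration.
 *
 *	netif_subqueue_maybe_stop(dev, ring->index, mydrv_ring_free(ring),
 *				  MAX_SKB_FRAGS + 1, MAX_SKB_FRAGS + 1);
 */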

#endif