/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/**
 * struct netdev_config - queue-related configuration for a netdev
 * @hds_thresh:		Header-data split (HDS) threshold value.
 * @hds_config:		HDS setting requested by userspace.
 */
struct netdev_config {
	u32	hds_thresh;
	u8	hds_config;
};

/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
	u64 bytes;
	u64 packets;
	u64 alloc_fail;

	u64 hw_drops;
	u64 hw_drop_overruns;

	u64 csum_complete;
	u64 csum_unnecessary;
	u64 csum_none;
	u64 csum_bad;

	u64 hw_gro_packets;
	u64 hw_gro_bytes;
	u64 hw_gro_wire_packets;
	u64 hw_gro_wire_bytes;

	u64 hw_drop_ratelimits;
};

struct netdev_queue_stats_tx {
	u64 bytes;
	u64 packets;

	u64 hw_drops;
	u64 hw_drop_errors;

	u64 csum_none;
	u64 needs_csum;

	u64 hw_gso_packets;
	u64 hw_gso_bytes;
	u64 hw_gso_wire_packets;
	u64 hw_gso_wire_bytes;

	u64 hw_drop_ratelimits;

	u64 stop;
	u64 wake;
};

/**
 * struct netdev_stat_ops - netdev ops for fine-grained stats
 * @get_queue_stats_rx:	get stats for a given Rx queue
 * @get_queue_stats_tx:	get stats for a given Tx queue
 * @get_base_stats:	get base stats (not belonging to any live instance)
 *
 * Query stats for a given object. The values of the statistics are undefined
 * on entry (specifically they are *not* zero-initialized). Drivers should
 * assign values only to the statistics they collect. Statistics which are not
 * collected must be left undefined.
 *
 * Queue objects are not necessarily persistent, and only currently active
 * queues are queried by the per-queue callbacks. This means that per-queue
 * statistics will not generally add up to the total number of events for
 * the device. The @get_base_stats callback allows filling in the delta
 * between events for currently live queues and overall device history.
 * @get_base_stats can also be used to report any miscellaneous packets
 * transferred outside of the main set of queues used by the networking stack.
 * When the statistics for the entire device are queried, first @get_base_stats
 * is issued to collect the delta, and then a series of per-queue callbacks.
 * Only statistics which are set in @get_base_stats will be reported
 * at the device level, meaning that unlike in queue callbacks, setting
 * a statistic to zero in @get_base_stats is a legitimate thing to do.
 * This is because @get_base_stats has a second function of designating which
 * statistics are in fact correct for the entire device (e.g. when history
 * for some of the events is not maintained, and a reliable "total" cannot
 * be provided).
 *
 * Ops are called under the instance lock if netdev_need_ops_lock()
 * returns true, otherwise under rtnl_lock.
 * Device drivers can assume that when collecting total device stats,
 * the @get_base_stats and subsequent per-queue calls are performed
 * "atomically" (without releasing the relevant lock).
 *
 * Device drivers are encouraged to reset the per-queue statistics when
 * the number of queues changes. This is because the primary use case for
 * per-queue statistics is currently to detect traffic imbalance.
 */
struct netdev_stat_ops {
	void (*get_queue_stats_rx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats);
	void (*get_queue_stats_tx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats);
	void (*get_base_stats)(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx);
};
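
/*
 * Example: a minimal sketch of a per-queue Rx callback. The driver type
 * ``struct my_priv``, its ``rx`` ring array and the counter names are
 * purely illustrative, not taken from any real driver. Note that only
 * the statistics this device collects are assigned; all other fields
 * are deliberately left untouched:
 *
 *	static void my_get_queue_stats_rx(struct net_device *dev, int idx,
 *					  struct netdev_queue_stats_rx *stats)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *		struct my_rx_ring *ring = &priv->rx[idx];
 *
 *		stats->packets = ring->packets;
 *		stats->bytes = ring->bytes;
 *		stats->alloc_fail = ring->alloc_fail;
 *	}
 */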

/* Sum the per-queue stats over the given Rx and Tx queue index ranges
 * into @rx_sum and @tx_sum.
 */
void netdev_stat_queue_sum(struct net_device *netdev,
			   int rx_start, int rx_end,
			   struct netdev_queue_stats_rx *rx_sum,
			   int tx_start, int tx_end,
			   struct netdev_queue_stats_tx *tx_sum);

/**
 * struct netdev_queue_mgmt_ops - netdev ops for queue management
 *
 * @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
 *
 * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
 *			 The new memory is written at the specified address.
 *
 * @ndo_queue_mem_free:	Free memory from an RX queue.
 *
 * @ndo_queue_start:	Start an RX queue with the specified memory and at the
 *			specified index.
 *
 * @ndo_queue_stop:	Stop the RX queue at the specified index. The stopped
 *			queue's memory is written at the specified address.
 *
 * @ndo_queue_get_dma_dev: Get the DMA device to be used for zero-copy
 *			   operations on this queue. Returns NULL on error.
 *
 * @ndo_queue_create: Create a new RX queue which can be leased to another queue.
 *		      Ops on this queue are redirected to the leased queue,
 *		      e.g. when opening a memory provider. Returns the new
 *		      queue id on success, or a negative error code on failure.
 *
 * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
 * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
 * be called for an interface which is open.
 */
struct netdev_queue_mgmt_ops {
	size_t			ndo_queue_mem_size;
	int			(*ndo_queue_mem_alloc)(struct net_device *dev,
						       void *per_queue_mem,
						       int idx);
	void			(*ndo_queue_mem_free)(struct net_device *dev,
						      void *per_queue_mem);
	int			(*ndo_queue_start)(struct net_device *dev,
						   void *per_queue_mem,
						   int idx);
	int			(*ndo_queue_stop)(struct net_device *dev,
						  void *per_queue_mem,
						  int idx);
	struct device *		(*ndo_queue_get_dma_dev)(struct net_device *dev,
							 int idx);
	int			(*ndo_queue_create)(struct net_device *dev);
};
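
/*
 * Example: a minimal sketch of how a driver might wire these ops up
 * (the ``my_*`` callbacks and ``struct my_rx_ring`` are illustrative,
 * not from any real driver). The core allocates @ndo_queue_mem_size
 * bytes and passes that buffer to the callbacks as @per_queue_mem:
 *
 *	static const struct netdev_queue_mgmt_ops my_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct my_rx_ring),
 *		.ndo_queue_mem_alloc	= my_queue_mem_alloc,
 *		.ndo_queue_mem_free	= my_queue_mem_free,
 *		.ndo_queue_start	= my_queue_start,
 *		.ndo_queue_stop		= my_queue_stop,
 *	};
 */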

bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx);
bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while waking should be triggered from NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake-ups when
 * the ring is full! Drivers should check for a full ring at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @stop_thrs:	minimal number of available descriptors for queue to be left
 *		enabled
 * @start_thrs:	minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit; see the DOC comment above.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})
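
/*
 * Example: a typical call at the end of a driver's ndo_start_xmit, once
 * all ring indexes have been updated (``my_desc_avail()`` and the
 * thresholds below are illustrative, not from any real driver):
 *
 *	netif_txq_maybe_stop(txq, my_desc_avail(ring),
 *			     MAX_SKB_FRAGS + 1, 2 * (MAX_SKB_FRAGS + 1));
 */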

/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @pkts:	number of packets completed
 * @bytes:	number of bytes completed
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @start_thrs:	minimal number of descriptors to re-enable the queue
 * @down_cond:	down condition, predicate indicating that the queue should
 *		not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggyback on its barrier.		\
		 * Barrier makes sure that anybody stopping the queue	\
		 * after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)
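
/*
 * Example: a typical call from a driver's NAPI Tx completion handler,
 * after the consumer index has been updated (``my_desc_avail()`` and
 * the threshold below are illustrative, not from any real driver):
 *
 *	netif_txq_completed_wake(txq, done_pkts, done_bytes,
 *				 my_desc_avail(ring),
 *				 2 * (MAX_SKB_FRAGS + 1));
 */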

/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)		\
	({								\
		struct netdev_queue *_txq;				\
									\
		_txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(_txq, get_desc, start_thrs);		\
	})

/* Report bytes queued to BQL for the Tx queue with the given index
 * (a wrapper around netdev_tx_sent_queue()).
 */
static inline void netif_subqueue_sent(const struct net_device *dev,
				       unsigned int idx, unsigned int bytes)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, idx);
	netdev_tx_sent_queue(txq, bytes);
}

/* If the queue is stopped and the watchdog timeout has elapsed, return
 * how long ago (in ms) the last transmit was started; otherwise return 0.
 */
static inline unsigned int netif_xmit_timeout_ms(struct netdev_queue *txq)
{
	unsigned long trans_start = READ_ONCE(txq->trans_start);

	if (netif_xmit_stopped(txq) &&
	    time_after(jiffies, trans_start + txq->dev->watchdog_timeo))
		return jiffies_to_msecs(jiffies - trans_start);

	return 0;
}

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *_txq;				\
									\
		_txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(_txq, get_desc, stop_thrs, start_thrs); \
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *_txq;				\
									\
		_txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(_txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})
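
/*
 * Example: the subqueue variants look the struct netdev_queue up by
 * index, for drivers which track queue numbers rather than queue
 * pointers (a sketch; ``my_desc_avail()``, ``ring->qid`` and the
 * thresholds are illustrative):
 *
 *	netif_subqueue_maybe_stop(dev, ring->qid, my_desc_avail(ring),
 *				  MAX_SKB_FRAGS + 1,
 *				  2 * (MAX_SKB_FRAGS + 1));
 */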

struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
bool netdev_can_create_queue(const struct net_device *dev,
			     struct netlink_ext_ack *extack);
bool netdev_can_lease_queue(const struct net_device *dev,
			    struct netlink_ext_ack *extack);
bool netdev_queue_busy(struct net_device *dev, int idx,
		       struct netlink_ext_ack *extack);
#endif /* _LINUX_NET_QUEUES_H */