/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/**
 * struct netdev_config - queue-related configuration for a netdev
 * @hds_thresh:		Header-Data Split (HDS) threshold value.
 * @hds_config:		HDS setting requested by userspace.
 */
struct netdev_config {
	u32	hds_thresh;
	u8	hds_config;
};

struct netdev_queue_config {
	u32	rx_page_size;
};

/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
	u64 bytes;
	u64 packets;
	u64 alloc_fail;

	u64 hw_drops;
	u64 hw_drop_overruns;

	u64 csum_complete;
	u64 csum_unnecessary;
	u64 csum_none;
	u64 csum_bad;

	u64 hw_gro_packets;
	u64 hw_gro_bytes;
	u64 hw_gro_wire_packets;
	u64 hw_gro_wire_bytes;

	u64 hw_drop_ratelimits;
};

struct netdev_queue_stats_tx {
	u64 bytes;
	u64 packets;

	u64 hw_drops;
	u64 hw_drop_errors;

	u64 csum_none;
	u64 needs_csum;

	u64 hw_gso_packets;
	u64 hw_gso_bytes;
	u64 hw_gso_wire_packets;
	u64 hw_gso_wire_bytes;

	u64 hw_drop_ratelimits;

	u64 stop;
	u64 wake;
};

/**
 * struct netdev_stat_ops - netdev ops for fine grained stats
 * @get_queue_stats_rx:	get stats for a given Rx queue
 * @get_queue_stats_tx:	get stats for a given Tx queue
 * @get_base_stats:	get base stats (not belonging to any live instance)
 *
 * Query stats for a given object. The values of the statistics are undefined
 * on entry (specifically they are *not* zero-initialized). Drivers should
 * assign values only to the statistics they collect. Statistics which are not
 * collected must be left undefined.
 *
 * Queue objects are not necessarily persistent, and only currently active
 * queues are queried by the per-queue callbacks. This means that per-queue
 * statistics will not generally add up to the total number of events for
 * the device. The @get_base_stats callback allows filling in the delta
 * between events for currently live queues and overall device history.
 * @get_base_stats can also be used to report any miscellaneous packets
 * transferred outside of the main set of queues used by the networking stack.
 * When the statistics for the entire device are queried, first @get_base_stats
 * is issued to collect the delta, and then a series of per-queue callbacks.
 * Only statistics which are set in @get_base_stats will be reported
 * at the device level, meaning that unlike in queue callbacks, setting
 * a statistic to zero in @get_base_stats is a legitimate thing to do.
 * This is because @get_base_stats has a second function of designating which
 * statistics are in fact correct for the entire device (e.g. when history
 * for some of the events is not maintained, and a reliable "total" cannot
 * be provided).
 *
 * Ops are called under the instance lock if netdev_need_ops_lock()
 * returns true, otherwise under rtnl_lock.
 * Device drivers can assume that when collecting total device stats,
 * the @get_base_stats and subsequent per-queue calls are performed
 * "atomically" (without releasing the relevant lock).
 *
 * Device drivers are encouraged to reset the per-queue statistics when
 * the number of queues changes. This is because the primary use case for
 * per-queue statistics is currently to detect traffic imbalance.
 */
struct netdev_stat_ops {
	void (*get_queue_stats_rx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats);
	void (*get_queue_stats_tx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats);
	void (*get_base_stats)(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx);
};

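/* An illustrative sketch of wiring up these ops (the mydrv_* names and the
 * priv/ring layout are hypothetical, not a real driver). The per-queue
 * callback assigns only the statistics the driver collects; get_base_stats
 * zero-fills the same stats to designate them as valid device-wide.
 *
 *	static void mydrv_get_queue_stats_rx(struct net_device *dev, int idx,
 *					     struct netdev_queue_stats_rx *stats)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		stats->packets = priv->rx_ring[idx].pkts;
 *		stats->bytes = priv->rx_ring[idx].bytes;
 *		stats->alloc_fail = priv->rx_ring[idx].alloc_fail;
 *		// hw_drops etc. are left undefined - not collected here
 *	}
 *
 *	static void mydrv_get_base_stats(struct net_device *dev,
 *					 struct netdev_queue_stats_rx *rx,
 *					 struct netdev_queue_stats_tx *tx)
 *	{
 *		// Zero is meaningful here: only stats set in this callback
 *		// are reported at the device level.
 *		rx->packets = 0;
 *		rx->bytes = 0;
 *		rx->alloc_fail = 0;
 *		tx->packets = 0;
 *		tx->bytes = 0;
 *	}
 *
 *	static const struct netdev_stat_ops mydrv_stat_ops = {
 *		.get_queue_stats_rx	= mydrv_get_queue_stats_rx,
 *		.get_base_stats		= mydrv_get_base_stats,
 *	};
 */
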
void netdev_stat_queue_sum(struct net_device *netdev,
			   int rx_start, int rx_end,
			   struct netdev_queue_stats_rx *rx_sum,
			   int tx_start, int tx_end,
			   struct netdev_queue_stats_tx *tx_sum);

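/* Illustrative use (hypothetical context; assumes the device's stat_ops are
 * populated): summing the currently active queue ranges into totals.
 *
 *	struct netdev_queue_stats_rx rx_sum = {};
 *	struct netdev_queue_stats_tx tx_sum = {};
 *
 *	netdev_stat_queue_sum(dev, 0, dev->real_num_rx_queues, &rx_sum,
 *			      0, dev->real_num_tx_queues, &tx_sum);
 */
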
enum {
	/* The queue checks and honours the page size qcfg parameter */
	QCFG_RX_PAGE_SIZE	= 0x1,
};

/**
 * struct netdev_queue_mgmt_ops - netdev ops for queue management
 *
 * @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
 *
 * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
 *			 The new memory is written at the specified address.
 *
 * @ndo_queue_mem_free:	Free memory from an RX queue.
 *
 * @ndo_queue_start:	Start an RX queue with the specified memory and at the
 *			specified index.
 *
 * @ndo_queue_stop:	Stop the RX queue at the specified index. The stopped
 *			queue's memory is written at the specified address.
 *
 * @ndo_queue_get_dma_dev: Get the DMA device for zero-copy operations to be
 *			   used for this queue. Returns NULL on error.
 *
 * @ndo_default_qcfg:	(Optional) Populate queue config struct with defaults.
 *			Queue config structs are passed to this helper before
 *			the user-requested settings are applied.
 *
 * @ndo_validate_qcfg: (Optional) Check whether a queue config is supported.
 *			Called when configuration affecting a queue may be
 *			changing, either due to NIC-wide config or to config
 *			scoped to the queue at a specified index.
 *			When NIC-wide config is changed, the callback will
 *			be invoked for all queues.
 *
 * @supported_params:	Bitmask of supported parameters, see QCFG_*.
 *
 * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
 * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
 * be called for an interface which is open.
 */
struct netdev_queue_mgmt_ops {
	size_t	ndo_queue_mem_size;
	int	(*ndo_queue_mem_alloc)(struct net_device *dev,
				       struct netdev_queue_config *qcfg,
				       void *per_queue_mem,
				       int idx);
	void	(*ndo_queue_mem_free)(struct net_device *dev,
				      void *per_queue_mem);
	int	(*ndo_queue_start)(struct net_device *dev,
				   struct netdev_queue_config *qcfg,
				   void *per_queue_mem,
				   int idx);
	int	(*ndo_queue_stop)(struct net_device *dev,
				  void *per_queue_mem,
				  int idx);
	void	(*ndo_default_qcfg)(struct net_device *dev,
				    struct netdev_queue_config *qcfg);
	int	(*ndo_validate_qcfg)(struct net_device *dev,
				     struct netdev_queue_config *qcfg,
				     struct netlink_ext_ack *extack);
	struct device *	(*ndo_queue_get_dma_dev)(struct net_device *dev,
						 int idx);

	unsigned int supported_params;
};

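/* An illustrative sketch (mydrv_* names are hypothetical): a driver exposing
 * restartable Rx queues which also honours the rx_page_size parameter,
 * advertised via @supported_params.
 *
 *	static const struct netdev_queue_mgmt_ops mydrv_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct mydrv_rx_ring_mem),
 *		.ndo_queue_mem_alloc	= mydrv_queue_mem_alloc,
 *		.ndo_queue_mem_free	= mydrv_queue_mem_free,
 *		.ndo_queue_start	= mydrv_queue_start,
 *		.ndo_queue_stop		= mydrv_queue_stop,
 *		.ndo_validate_qcfg	= mydrv_validate_qcfg,
 *		.supported_params	= QCFG_RX_PAGE_SIZE,
 *	};
 */
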
void netdev_queue_config(struct net_device *dev, int rxq,
			 struct netdev_queue_config *qcfg);

bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while the wake-up should be triggered from the NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake-ups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @stop_thrs:	minimal number of available descriptors for queue to be left
 *		enabled
 * @start_thrs:	minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit; see the comment at the top of
 * the file.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})

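/* Illustrative use from an xmit handler (mydrv_* names and thresholds are
 * hypothetical). The free-descriptor expression is re-evaluated inside the
 * macro, as required above.
 *
 *	// in mydrv's ndo_start_xmit, after posting descriptors and
 *	// updating the producer index:
 *	netif_txq_maybe_stop(ring->txq, mydrv_tx_avail(ring),
 *			     MYDRV_TX_STOP_THRS, MYDRV_TX_START_THRS);
 */
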
/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @pkts:	number of packets completed
 * @bytes:	number of bytes completed
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @start_thrs:	minimal number of descriptors to re-enable the queue
 * @down_cond:	down condition, predicate indicating that the queue should
 *		not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggyback on its barrier.		\
		 * Barrier makes sure that anybody stopping the queue	\
		 * after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)

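/* Illustrative use from a Tx completion / NAPI poll path (mydrv_* names
 * hypothetical). Completed pkts/bytes are reported to BQL, and the queue is
 * only woken once @start_thrs descriptors are free again. The consumer
 * index must be updated before the call.
 *
 *	netif_txq_completed_wake(ring->txq, done_pkts, done_bytes,
 *				 mydrv_tx_avail(ring), MYDRV_TX_START_THRS);
 */
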
/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)		\
	({								\
		struct netdev_queue *_txq;				\
									\
		_txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(_txq, get_desc, start_thrs);		\
	})

static inline void netif_subqueue_sent(const struct net_device *dev,
				       unsigned int idx, unsigned int bytes)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, idx);
	netdev_tx_sent_queue(txq, bytes);
}

static inline unsigned int netif_xmit_timeout_ms(struct netdev_queue *txq)
{
	unsigned long trans_start = READ_ONCE(txq->trans_start);

	if (netif_xmit_stopped(txq) &&
	    time_after(jiffies, trans_start + txq->dev->watchdog_timeo))
		return jiffies_to_msecs(jiffies - trans_start);

	return 0;
}

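/* Illustrative use (hypothetical): reporting how long a queue has been
 * stuck, e.g. from an ndo_tx_timeout handler.
 *
 *	unsigned int ms;
 *
 *	ms = netif_xmit_timeout_ms(netdev_get_tx_queue(dev, txqueue));
 *	if (ms)
 *		netdev_warn(dev, "Tx queue %u stuck for %ums\n", txqueue, ms);
 */
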
#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *_txq;				\
									\
		_txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(_txq, get_desc, stop_thrs, start_thrs); \
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *_txq;				\
									\
		_txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(_txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})

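/* Illustrative pairing of the subqueue variants (hypothetical names), for
 * drivers that track Tx queues by index rather than by struct netdev_queue:
 *
 *	// xmit path: report queued bytes to BQL, then stop if the ring
 *	// ran low on descriptors
 *	netif_subqueue_sent(dev, qid, skb->len);
 *	netif_subqueue_maybe_stop(dev, qid, mydrv_tx_avail(ring),
 *				  MYDRV_TX_STOP_THRS, MYDRV_TX_START_THRS);
 */
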
struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);

#endif