xref: /linux/include/linux/netdevice.h (revision 64edfa65062dc4509ba75978116b2f6d392346f5)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the Interfaces handler.
8  *
9  * Version:	@(#)dev.h	1.0.10	08/12/93
10  *
11  * Authors:	Ross Biro
12  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
14  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
15  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
16  *		Bjorn Ekwall. <bj0rn@blox.se>
17  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
18  *
19  *		Moved to /usr/include/linux for NET3
20  */
21 #ifndef _LINUX_NETDEVICE_H
22 #define _LINUX_NETDEVICE_H
23 
24 #include <linux/timer.h>
25 #include <linux/bug.h>
26 #include <linux/delay.h>
27 #include <linux/atomic.h>
28 #include <linux/prefetch.h>
29 #include <asm/cache.h>
30 #include <asm/byteorder.h>
31 #include <asm/local.h>
32 
33 #include <linux/percpu.h>
34 #include <linux/rculist.h>
35 #include <linux/workqueue.h>
36 #include <linux/dynamic_queue_limits.h>
37 
38 #include <net/net_namespace.h>
39 #ifdef CONFIG_DCB
40 #include <net/dcbnl.h>
41 #endif
42 #include <net/netprio_cgroup.h>
43 #include <linux/netdev_features.h>
44 #include <linux/neighbour.h>
45 #include <linux/netdevice_xmit.h>
46 #include <uapi/linux/netdevice.h>
47 #include <uapi/linux/if_bonding.h>
48 #include <uapi/linux/pkt_cls.h>
49 #include <uapi/linux/netdev.h>
50 #include <linux/hashtable.h>
51 #include <linux/rbtree.h>
52 #include <net/net_trackers.h>
53 #include <net/net_debug.h>
54 #include <net/dropreason-core.h>
55 #include <net/neighbour_tables.h>
56 
57 struct netpoll_info;
58 struct device;
59 struct ethtool_ops;
60 struct kernel_hwtstamp_config;
61 struct phy_device;
62 struct dsa_port;
63 struct ip_tunnel_parm_kern;
64 struct macsec_context;
65 struct macsec_ops;
66 struct netdev_config;
67 struct netdev_name_node;
68 struct sd_flow_limit;
69 struct sfp_bus;
70 /* 802.11 specific */
71 struct wireless_dev;
72 /* 802.15.4 specific */
73 struct wpan_dev;
74 struct mpls_dev;
75 /* UDP Tunnel offloads */
76 struct udp_tunnel_info;
77 struct udp_tunnel_nic_info;
78 struct udp_tunnel_nic;
79 struct bpf_prog;
80 struct xdp_buff;
81 struct xdp_frame;
82 struct xdp_metadata_ops;
83 struct xdp_md;
84 struct ethtool_netdev_state;
85 struct phy_link_topology;
86 struct hwtstamp_provider;
87 
88 typedef u32 xdp_features_t;
89 
90 void synchronize_net(void);
91 void netdev_set_default_ethtool_ops(struct net_device *dev,
92 				    const struct ethtool_ops *ops);
93 void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
94 
95 /* Backlog congestion levels */
96 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
97 #define NET_RX_DROP		1	/* packet dropped */
98 
99 #define MAX_NEST_DEV 8
100 
101 /*
102  * Transmit return codes: transmit return codes originate from three different
103  * namespaces:
104  *
105  * - qdisc return codes
106  * - driver transmit return codes
107  * - errno values
108  *
109  * Drivers are allowed to return any one of those in their hard_start_xmit()
110  * function. Real network devices commonly used with qdiscs should only return
111  * the driver transmit return codes though - when qdiscs are used, the actual
112  * transmission happens asynchronously, so the value is not propagated to
113  * higher layers. Virtual network devices transmit synchronously; in this case
114  * the driver transmit return codes are consumed by dev_queue_xmit(), and all
115  * others are propagated to higher layers.
116  */
117 
118 /* qdisc ->enqueue() return codes. */
119 #define NET_XMIT_SUCCESS	0x00
120 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
121 #define NET_XMIT_CN		0x02	/* congestion notification	*/
122 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
123 
124 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
125  * indicates that the device will soon be dropping packets, or already drops
126  * some packets of the same priority; prompting us to send less aggressively. */
127 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
128 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
129 
130 /* Driver transmit return codes */
131 #define NETDEV_TX_MASK		0xf0
132 
133 enum netdev_tx {
134 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
135 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
136 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
137 };
138 typedef enum netdev_tx netdev_tx_t;
139 
140 /*
141  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
142  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
143  */
dev_xmit_complete(int rc)144 static inline bool dev_xmit_complete(int rc)
145 {
146 	/*
147 	 * Positive cases with an skb consumed by a driver:
148 	 * - successful transmission (rc == NETDEV_TX_OK)
149 	 * - error while transmitting (rc < 0)
150 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
151 	 */
152 	if (likely(rc < NET_XMIT_MASK))
153 		return true;
154 
155 	return false;
156 }
157 
158 /*
159  *	Compute the worst-case header length according to the protocols
160  *	used.
161  */
162 
163 #if defined(CONFIG_HYPERV_NET)
164 # define LL_MAX_HEADER 128
165 #elif defined(CONFIG_WLAN)
166 # if defined(CONFIG_MAC80211_MESH)
167 #  define LL_MAX_HEADER 128
168 # else
169 #  define LL_MAX_HEADER 96
170 # endif
171 #else
172 # define LL_MAX_HEADER 32
173 #endif
174 
175 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
176     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
177 #define MAX_HEADER LL_MAX_HEADER
178 #else
179 #define MAX_HEADER (LL_MAX_HEADER + 48)
180 #endif
181 
182 /*
183  *	Old network device statistics. Fields are native words
184  *	(unsigned long) so they can be read and written atomically.
185  */
186 
187 #define NET_DEV_STAT(FIELD)			\
188 	union {					\
189 		unsigned long FIELD;		\
190 		atomic_long_t __##FIELD;	\
191 	}
192 
193 struct net_device_stats {
194 	NET_DEV_STAT(rx_packets);
195 	NET_DEV_STAT(tx_packets);
196 	NET_DEV_STAT(rx_bytes);
197 	NET_DEV_STAT(tx_bytes);
198 	NET_DEV_STAT(rx_errors);
199 	NET_DEV_STAT(tx_errors);
200 	NET_DEV_STAT(rx_dropped);
201 	NET_DEV_STAT(tx_dropped);
202 	NET_DEV_STAT(multicast);
203 	NET_DEV_STAT(collisions);
204 	NET_DEV_STAT(rx_length_errors);
205 	NET_DEV_STAT(rx_over_errors);
206 	NET_DEV_STAT(rx_crc_errors);
207 	NET_DEV_STAT(rx_frame_errors);
208 	NET_DEV_STAT(rx_fifo_errors);
209 	NET_DEV_STAT(rx_missed_errors);
210 	NET_DEV_STAT(tx_aborted_errors);
211 	NET_DEV_STAT(tx_carrier_errors);
212 	NET_DEV_STAT(tx_fifo_errors);
213 	NET_DEV_STAT(tx_heartbeat_errors);
214 	NET_DEV_STAT(tx_window_errors);
215 	NET_DEV_STAT(rx_compressed);
216 	NET_DEV_STAT(tx_compressed);
217 };
218 #undef NET_DEV_STAT
219 
220 /* per-cpu stats, allocated on demand.
221  * Try to fit them in a single cache line, for dev_get_stats() sake.
222  */
223 struct net_device_core_stats {
224 	unsigned long	rx_dropped;
225 	unsigned long	tx_dropped;
226 	unsigned long	rx_nohandler;
227 	unsigned long	rx_otherhost_dropped;
228 } __aligned(4 * sizeof(unsigned long));
229 
230 #include <linux/cache.h>
231 #include <linux/skbuff.h>
232 
233 struct neighbour;
234 struct neigh_parms;
235 struct sk_buff;
236 
237 struct netdev_hw_addr {
238 	struct list_head	list;
239 	struct rb_node		node;
240 	unsigned char		addr[MAX_ADDR_LEN];
241 	unsigned char		type;
242 #define NETDEV_HW_ADDR_T_LAN		1
243 #define NETDEV_HW_ADDR_T_SAN		2
244 #define NETDEV_HW_ADDR_T_UNICAST	3
245 #define NETDEV_HW_ADDR_T_MULTICAST	4
246 	bool			global_use;
247 	int			sync_cnt;
248 	int			refcount;
249 	int			synced;
250 	struct rcu_head		rcu_head;
251 };
252 
253 struct netdev_hw_addr_list {
254 	struct list_head	list;
255 	int			count;
256 
257 	/* Auxiliary tree for faster lookup on addition and deletion */
258 	struct rb_root		tree;
259 };
260 
261 #define netdev_hw_addr_list_count(l) ((l)->count)
262 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
263 #define netdev_hw_addr_list_for_each(ha, l) \
264 	list_for_each_entry(ha, &(l)->list, list)
265 
266 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
267 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
268 #define netdev_for_each_uc_addr(ha, dev) \
269 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
270 #define netdev_for_each_synced_uc_addr(_ha, _dev) \
271 	netdev_for_each_uc_addr((_ha), (_dev)) \
272 		if ((_ha)->sync_cnt)
273 
274 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
275 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
276 #define netdev_for_each_mc_addr(ha, dev) \
277 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
278 #define netdev_for_each_synced_mc_addr(_ha, _dev) \
279 	netdev_for_each_mc_addr((_ha), (_dev)) \
280 		if ((_ha)->sync_cnt)
281 
282 struct hh_cache {
283 	unsigned int	hh_len;
284 	seqlock_t	hh_lock;
285 
286 	/* cached hardware header; allow for machine alignment needs.        */
287 #define HH_DATA_MOD	16
288 #define HH_DATA_OFF(__len) \
289 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
290 #define HH_DATA_ALIGN(__len) \
291 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
292 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
293 };
294 
295 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
296  * Alternative is:
297  *   dev->hard_header_len ? (dev->hard_header_len +
298  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
299  *
300  * We could use other alignment values, but we must maintain the
301  * relationship HH alignment <= LL alignment.
302  */
303 #define LL_RESERVED_SPACE(dev) \
304 	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
305 	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
306 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
307 	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
308 	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
309 
310 struct header_ops {
311 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
312 			   unsigned short type, const void *daddr,
313 			   const void *saddr, unsigned int len);
314 	int	(*parse)(const struct sk_buff *skb,
315 			 const struct net_device *dev,
316 			 unsigned char *haddr);
317 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
318 	void	(*cache_update)(struct hh_cache *hh,
319 				const struct net_device *dev,
320 				const unsigned char *haddr);
321 	bool	(*validate)(const char *ll_header, unsigned int len);
322 	__be16	(*parse_protocol)(const struct sk_buff *skb);
323 };
324 
325 /* These flag bits are private to the generic network queueing
326  * layer; they may not be explicitly referenced by any other
327  * code.
328  */
329 
330 enum netdev_state_t {
331 	__LINK_STATE_START,
332 	__LINK_STATE_PRESENT,
333 	__LINK_STATE_NOCARRIER,
334 	__LINK_STATE_LINKWATCH_PENDING,
335 	__LINK_STATE_DORMANT,
336 	__LINK_STATE_TESTING,
337 };
338 
339 struct gro_list {
340 	struct list_head	list;
341 	int			count;
342 };
343 
344 /*
345  * size of gro hash buckets, must be <= the number of bits in
346  * gro_node::bitmask
347  */
348 #define GRO_HASH_BUCKETS	8
349 
350 /**
351  * struct gro_node - structure to support Generic Receive Offload
352  * @bitmask: bitmask to indicate used buckets in @hash
353  * @hash: hashtable of pending aggregated skbs, separated by flows
354  * @rx_list: list of pending ``GRO_NORMAL`` skbs
355  * @rx_count: cached current length of @rx_list
356  * @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone
357  */
358 struct gro_node {
359 	unsigned long		bitmask;
360 	struct gro_list		hash[GRO_HASH_BUCKETS];
361 	struct list_head	rx_list;
362 	u32			rx_count;
363 	u32			cached_napi_id;
364 };
365 
366 /*
367  * Structure for per-NAPI config
368  */
369 struct napi_config {
370 	u64 gro_flush_timeout;
371 	u64 irq_suspend_timeout;
372 	u32 defer_hard_irqs;
373 	cpumask_t affinity_mask;
374 	u8 threaded;
375 	unsigned int napi_id;
376 };
377 
378 /*
379  * Structure for NAPI scheduling similar to tasklet but with weighting
380  */
381 struct napi_struct {
382 	/* This field should be first or softnet_data.backlog needs tweaks. */
383 	unsigned long		state;
384 	/* The poll_list must only be managed by the entity which
385 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
386 	 * whoever atomically sets that bit can add this napi_struct
387 	 * to the per-CPU poll_list, and whoever clears that bit
388 	 * can remove from the list right before clearing the bit.
389 	 */
390 	struct list_head	poll_list;
391 
392 	int			weight;
393 	u32			defer_hard_irqs_count;
394 	int			(*poll)(struct napi_struct *, int);
395 #ifdef CONFIG_NETPOLL
396 	/* CPU actively polling if netpoll is configured */
397 	int			poll_owner;
398 #endif
399 	/* CPU on which NAPI has been scheduled for processing */
400 	int			list_owner;
401 	struct net_device	*dev;
402 	struct sk_buff		*skb;
403 	struct gro_node		gro;
404 	struct hrtimer		timer;
405 	/* all fields past this point are write-protected by netdev_lock */
406 	struct task_struct	*thread;
407 	unsigned long		gro_flush_timeout;
408 	unsigned long		irq_suspend_timeout;
409 	u32			defer_hard_irqs;
410 	/* control-path-only fields follow */
411 	u32			napi_id;
412 	struct list_head	dev_list;
413 	struct hlist_node	napi_hash_node;
414 	int			irq;
415 	struct irq_affinity_notify notify;
416 	int			napi_rmap_idx;
417 	int			index;
418 	struct napi_config	*config;
419 };
420 
421 enum {
422 	NAPI_STATE_SCHED,		/* Poll is scheduled */
423 	NAPI_STATE_MISSED,		/* reschedule a napi */
424 	NAPI_STATE_DISABLE,		/* Disable pending */
425 	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
426 	NAPI_STATE_LISTED,		/* NAPI added to system lists */
427 	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
428 	NAPI_STATE_IN_BUSY_POLL,	/* Do not rearm NAPI interrupt */
429 	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing*/
430 	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread*/
431 	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
432 	NAPI_STATE_HAS_NOTIFIER,	/* Napi has an IRQ notifier */
433 	NAPI_STATE_THREADED_BUSY_POLL,	/* The threaded NAPI poller will busy poll */
434 };
435 
436 enum {
437 	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
438 	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
439 	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
440 	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
441 	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
442 	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
443 	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
444 	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
445 	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
446 	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
447 	NAPIF_STATE_HAS_NOTIFIER	= BIT(NAPI_STATE_HAS_NOTIFIER),
448 	NAPIF_STATE_THREADED_BUSY_POLL	= BIT(NAPI_STATE_THREADED_BUSY_POLL),
449 };
450 
451 enum gro_result {
452 	GRO_MERGED,
453 	GRO_MERGED_FREE,
454 	GRO_HELD,
455 	GRO_NORMAL,
456 	GRO_CONSUMED,
457 };
458 typedef enum gro_result gro_result_t;
459 
460 /*
461  * enum rx_handler_result - Possible return values for rx_handlers.
462  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
463  * further.
464  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
465  * case skb->dev was changed by rx_handler.
466  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
467  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
468  *
469  * rx_handlers are functions called from inside __netif_receive_skb(), to do
470  * special processing of the skb, prior to delivery to protocol handlers.
471  *
472  * Currently, a net_device can only have a single rx_handler registered. Trying
473  * to register a second rx_handler will return -EBUSY.
474  *
475  * To register a rx_handler on a net_device, use netdev_rx_handler_register().
476  * To unregister a rx_handler on a net_device, use
477  * netdev_rx_handler_unregister().
478  *
479  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
480  * do with the skb.
481  *
482  * If the rx_handler consumed the skb in some way, it should return
483  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
484  * the skb to be delivered in some other way.
485  *
486  * If the rx_handler changed skb->dev, to divert the skb to another
487  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
488  * new device will be called if it exists.
489  *
490  * If the rx_handler decides the skb should be ignored, it should return
491  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
492  * are registered on exact device (ptype->dev == skb->dev).
493  *
494  * If the rx_handler didn't change skb->dev, but wants the skb to be normally
495  * delivered, it should return RX_HANDLER_PASS.
496  *
497  * A device without a registered rx_handler will behave as if rx_handler
498  * returned RX_HANDLER_PASS.
499  */
500 
501 enum rx_handler_result {
502 	RX_HANDLER_CONSUMED,
503 	RX_HANDLER_ANOTHER,
504 	RX_HANDLER_EXACT,
505 	RX_HANDLER_PASS,
506 };
507 typedef enum rx_handler_result rx_handler_result_t;
508 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
509 
510 void __napi_schedule(struct napi_struct *n);
511 void __napi_schedule_irqoff(struct napi_struct *n);
512 
napi_disable_pending(struct napi_struct * n)513 static inline bool napi_disable_pending(struct napi_struct *n)
514 {
515 	return test_bit(NAPI_STATE_DISABLE, &n->state);
516 }
517 
napi_prefer_busy_poll(struct napi_struct * n)518 static inline bool napi_prefer_busy_poll(struct napi_struct *n)
519 {
520 	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
521 }
522 
523 /**
524  * napi_is_scheduled - test if NAPI is scheduled
525  * @n: NAPI context
526  *
527  * This check is "best-effort". With no locking implemented,
528  * a NAPI can be scheduled or terminate right after this check
529  * and produce not precise results.
530  *
531  * NAPI_STATE_SCHED is an internal state, napi_is_scheduled
532  * should not be used normally and napi_schedule should be
533  * used instead.
534  *
535  * Use only if the driver really needs to check if a NAPI
536  * is scheduled for example in the context of delayed timer
537  * that can be skipped if a NAPI is already scheduled.
538  *
539  * Return: True if NAPI is scheduled, False otherwise.
540  */
napi_is_scheduled(struct napi_struct * n)541 static inline bool napi_is_scheduled(struct napi_struct *n)
542 {
543 	return test_bit(NAPI_STATE_SCHED, &n->state);
544 }
545 
546 bool napi_schedule_prep(struct napi_struct *n);
547 
548 /**
549  *	napi_schedule - schedule NAPI poll
550  *	@n: NAPI context
551  *
552  * Schedule NAPI poll routine to be called if it is not already
553  * running.
554  * Return: true if we schedule a NAPI or false if not.
555  * Refer to napi_schedule_prep() for additional reason on why
556  * a NAPI might not be scheduled.
557  */
napi_schedule(struct napi_struct * n)558 static inline bool napi_schedule(struct napi_struct *n)
559 {
560 	if (napi_schedule_prep(n)) {
561 		__napi_schedule(n);
562 		return true;
563 	}
564 
565 	return false;
566 }
567 
568 /**
569  *	napi_schedule_irqoff - schedule NAPI poll
570  *	@n: NAPI context
571  *
572  * Variant of napi_schedule(), assuming hard irqs are masked.
573  */
napi_schedule_irqoff(struct napi_struct * n)574 static inline void napi_schedule_irqoff(struct napi_struct *n)
575 {
576 	if (napi_schedule_prep(n))
577 		__napi_schedule_irqoff(n);
578 }
579 
580 /**
581  * napi_complete_done - NAPI processing complete
582  * @n: NAPI context
583  * @work_done: number of packets processed
584  *
585  * Mark NAPI processing as complete. Should only be called if poll budget
586  * has not been completely consumed.
587  * Prefer over napi_complete().
588  * Return: false if device should avoid rearming interrupts.
589  */
590 bool napi_complete_done(struct napi_struct *n, int work_done);
591 
napi_complete(struct napi_struct * n)592 static inline bool napi_complete(struct napi_struct *n)
593 {
594 	return napi_complete_done(n, 0);
595 }
596 
597 void netif_threaded_enable(struct net_device *dev);
598 int dev_set_threaded(struct net_device *dev,
599 		     enum netdev_napi_threaded threaded);
600 
601 void napi_disable(struct napi_struct *n);
602 void napi_disable_locked(struct napi_struct *n);
603 
604 void napi_enable(struct napi_struct *n);
605 void napi_enable_locked(struct napi_struct *n);
606 
607 /**
608  *	napi_synchronize - wait until NAPI is not running
609  *	@n: NAPI context
610  *
611  * Wait until NAPI is done being scheduled on this context.
612  * Waits till any outstanding processing completes but
613  * does not disable future activations.
614  */
napi_synchronize(const struct napi_struct * n)615 static inline void napi_synchronize(const struct napi_struct *n)
616 {
617 	if (IS_ENABLED(CONFIG_SMP))
618 		while (test_bit(NAPI_STATE_SCHED, &n->state))
619 			msleep(1);
620 	else
621 		barrier();
622 }
623 
624 /**
625  *	napi_if_scheduled_mark_missed - if napi is running, set the
626  *	NAPIF_STATE_MISSED
627  *	@n: NAPI context
628  *
629  * If napi is running, set the NAPIF_STATE_MISSED, and return true if
630  * NAPI is scheduled.
631  **/
napi_if_scheduled_mark_missed(struct napi_struct * n)632 static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
633 {
634 	unsigned long val, new;
635 
636 	val = READ_ONCE(n->state);
637 	do {
638 		if (val & NAPIF_STATE_DISABLE)
639 			return true;
640 
641 		if (!(val & NAPIF_STATE_SCHED))
642 			return false;
643 
644 		new = val | NAPIF_STATE_MISSED;
645 	} while (!try_cmpxchg(&n->state, &val, new));
646 
647 	return true;
648 }
649 
650 enum netdev_queue_state_t {
651 	__QUEUE_STATE_DRV_XOFF,
652 	__QUEUE_STATE_STACK_XOFF,
653 	__QUEUE_STATE_FROZEN,
654 };
655 
656 #define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
657 #define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
658 #define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)
659 
660 #define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
661 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
662 					QUEUE_STATE_FROZEN)
663 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
664 					QUEUE_STATE_FROZEN)
665 
666 /*
667  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
668  * netif_tx_* functions below are used to manipulate this flag.  The
669  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
670  * queue independently.  The netif_xmit_*stopped functions below are called
671  * to check if the queue has been stopped by the driver or stack (either
672  * of the XOFF bits are set in the state).  Drivers should not need to call
673  * netif_xmit*stopped functions, they should only be using netif_tx_*.
674  */
675 
676 struct netdev_queue {
677 /*
678  * read-mostly part
679  */
680 	struct net_device	*dev;
681 	netdevice_tracker	dev_tracker;
682 
683 	struct Qdisc __rcu	*qdisc;
684 	struct Qdisc __rcu	*qdisc_sleeping;
685 #ifdef CONFIG_SYSFS
686 	struct kobject		kobj;
687 	const struct attribute_group	**groups;
688 #endif
689 	unsigned long		tx_maxrate;
690 	/*
691 	 * Number of TX timeouts for this queue
692 	 * (/sys/class/net/DEV/Q/trans_timeout)
693 	 */
694 	atomic_long_t		trans_timeout;
695 
696 	/* Subordinate device that the queue has been assigned to */
697 	struct net_device	*sb_dev;
698 #ifdef CONFIG_XDP_SOCKETS
699 	/* "ops protected", see comment about net_device::lock */
700 	struct xsk_buff_pool    *pool;
701 #endif
702 
703 /*
704  * write-mostly part
705  */
706 #ifdef CONFIG_BQL
707 	struct dql		dql;
708 #endif
709 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
710 	int			xmit_lock_owner;
711 	/*
712 	 * Time (in jiffies) of last Tx
713 	 */
714 	unsigned long		trans_start;
715 
716 	unsigned long		state;
717 
718 /*
719  * slow- / control-path part
720  */
721 	/* NAPI instance for the queue
722 	 * "ops protected", see comment about net_device::lock
723 	 */
724 	struct napi_struct	*napi;
725 
726 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
727 	int			numa_node;
728 #endif
729 } ____cacheline_aligned_in_smp;
730 
731 extern int sysctl_fb_tunnels_only_for_init_net;
732 extern int sysctl_devconf_inherit_init_net;
733 
734 /*
735  * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
736  *                                     == 1 : For initns only
737  *                                     == 2 : For none.
738  */
net_has_fallback_tunnels(const struct net * net)739 static inline bool net_has_fallback_tunnels(const struct net *net)
740 {
741 #if IS_ENABLED(CONFIG_SYSCTL)
742 	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
743 
744 	return !fb_tunnels_only_for_init_net ||
745 		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
746 #else
747 	return true;
748 #endif
749 }
750 
net_inherit_devconf(void)751 static inline int net_inherit_devconf(void)
752 {
753 #if IS_ENABLED(CONFIG_SYSCTL)
754 	return READ_ONCE(sysctl_devconf_inherit_init_net);
755 #else
756 	return 0;
757 #endif
758 }
759 
netdev_queue_numa_node_read(const struct netdev_queue * q)760 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
761 {
762 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
763 	return q->numa_node;
764 #else
765 	return NUMA_NO_NODE;
766 #endif
767 }
768 
netdev_queue_numa_node_write(struct netdev_queue * q,int node)769 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
770 {
771 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
772 	q->numa_node = node;
773 #endif
774 }
775 
776 #ifdef CONFIG_RFS_ACCEL
777 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
778 			 u16 filter_id);
779 #endif
780 
781 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */
782 enum xps_map_type {
783 	XPS_CPUS = 0,
784 	XPS_RXQS,
785 	XPS_MAPS_MAX,
786 };
787 
788 #ifdef CONFIG_XPS
789 /*
790  * This structure holds an XPS map which can be of variable length.  The
791  * map is an array of queues.
792  */
793 struct xps_map {
794 	unsigned int len;
795 	unsigned int alloc_len;
796 	struct rcu_head rcu;
797 	u16 queues[];
798 };
799 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
800 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
801        - sizeof(struct xps_map)) / sizeof(u16))
802 
803 /*
804  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
805  *
806  * We keep track of the number of cpus/rxqs used when the struct is allocated,
807  * in nr_ids. This will help not accessing out-of-bound memory.
808  *
809  * We keep track of the number of traffic classes used when the struct is
810  * allocated, in num_tc. This will be used to navigate the maps, to ensure we're
811  * not crossing its upper bound, as the original dev->num_tc can be updated in
812  * the meantime.
813  */
814 struct xps_dev_maps {
815 	struct rcu_head rcu;
816 	unsigned int nr_ids;
817 	s16 num_tc;
818 	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
819 };
820 
821 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
822 	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
823 
824 #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
825 	(_rxqs * (_tcs) * sizeof(struct xps_map *)))
826 
827 #endif /* CONFIG_XPS */
828 
829 #define TC_MAX_QUEUE	16
830 #define TC_BITMASK	15
831 /* HW offloaded queuing disciplines txq count and offset maps */
832 struct netdev_tc_txq {
833 	u16 count;
834 	u16 offset;
835 };
836 
837 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
838 /*
839  * This structure is to hold information about the device
840  * configured to run FCoE protocol stack.
841  */
842 struct netdev_fcoe_hbainfo {
843 	char	manufacturer[64];
844 	char	serial_number[64];
845 	char	hardware_version[64];
846 	char	driver_version[64];
847 	char	optionrom_version[64];
848 	char	firmware_version[64];
849 	char	model[256];
850 	char	model_description[256];
851 };
852 #endif
853 
854 #define MAX_PHYS_ITEM_ID_LEN 32
855 
856 /* This structure holds a unique identifier to identify some
857  * physical item (port for example) used by a netdevice.
858  */
859 struct netdev_phys_item_id {
860 	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
861 	unsigned char id_len;
862 };
863 
netdev_phys_item_id_same(struct netdev_phys_item_id * a,struct netdev_phys_item_id * b)864 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
865 					    struct netdev_phys_item_id *b)
866 {
867 	return a->id_len == b->id_len &&
868 	       memcmp(a->id, b->id, a->id_len) == 0;
869 }
870 
871 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
872 				       struct sk_buff *skb,
873 				       struct net_device *sb_dev);
874 
875 enum net_device_path_type {
876 	DEV_PATH_ETHERNET = 0,
877 	DEV_PATH_VLAN,
878 	DEV_PATH_BRIDGE,
879 	DEV_PATH_PPPOE,
880 	DEV_PATH_DSA,
881 	DEV_PATH_MTK_WDMA,
882 	DEV_PATH_TUN,
883 };
884 
885 struct net_device_path {
886 	enum net_device_path_type	type;
887 	const struct net_device		*dev;
888 	union {
889 		struct {
890 			u16		id;
891 			__be16		proto;
892 			u8		h_dest[ETH_ALEN];
893 		} encap;
894 		struct {
895 			union {
896 				struct in_addr	src_v4;
897 				struct in6_addr	src_v6;
898 			};
899 			union {
900 				struct in_addr	dst_v4;
901 				struct in6_addr	dst_v6;
902 			};
903 
904 			u8	l3_proto;
905 		} tun;
906 		struct {
907 			enum {
908 				DEV_PATH_BR_VLAN_KEEP,
909 				DEV_PATH_BR_VLAN_TAG,
910 				DEV_PATH_BR_VLAN_UNTAG,
911 				DEV_PATH_BR_VLAN_UNTAG_HW,
912 			}		vlan_mode;
913 			u16		vlan_id;
914 			__be16		vlan_proto;
915 		} bridge;
916 		struct {
917 			int port;
918 			u16 proto;
919 		} dsa;
920 		struct {
921 			u8 wdma_idx;
922 			u8 queue;
923 			u16 wcid;
924 			u8 bss;
925 			u8 amsdu;
926 		} mtk_wdma;
927 	};
928 };
929 
930 #define NET_DEVICE_PATH_STACK_MAX	5
931 #define NET_DEVICE_PATH_VLAN_MAX	2
932 
933 struct net_device_path_stack {
934 	int			num_paths;
935 	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
936 };
937 
938 struct net_device_path_ctx {
939 	const struct net_device *dev;
940 	u8			daddr[ETH_ALEN];
941 
942 	int			num_vlans;
943 	struct {
944 		u16		id;
945 		__be16		proto;
946 	} vlan[NET_DEVICE_PATH_VLAN_MAX];
947 };
948 
949 enum tc_setup_type {
950 	TC_QUERY_CAPS,
951 	TC_SETUP_QDISC_MQPRIO,
952 	TC_SETUP_CLSU32,
953 	TC_SETUP_CLSFLOWER,
954 	TC_SETUP_CLSMATCHALL,
955 	TC_SETUP_CLSBPF,
956 	TC_SETUP_BLOCK,
957 	TC_SETUP_QDISC_CBS,
958 	TC_SETUP_QDISC_RED,
959 	TC_SETUP_QDISC_PRIO,
960 	TC_SETUP_QDISC_MQ,
961 	TC_SETUP_QDISC_ETF,
962 	TC_SETUP_ROOT_QDISC,
963 	TC_SETUP_QDISC_GRED,
964 	TC_SETUP_QDISC_TAPRIO,
965 	TC_SETUP_FT,
966 	TC_SETUP_QDISC_ETS,
967 	TC_SETUP_QDISC_TBF,
968 	TC_SETUP_QDISC_FIFO,
969 	TC_SETUP_QDISC_HTB,
970 	TC_SETUP_ACT,
971 };
972 
973 /* These structures hold the attributes of bpf state that are being passed
974  * to the netdevice through the bpf op.
975  */
976 enum bpf_netdev_command {
977 	/* Set or clear a bpf program used in the earliest stages of packet
978 	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
979 	 * is responsible for calling bpf_prog_put on any old progs that are
980 	 * stored. In case of error, the callee need not release the new prog
981 	 * reference, but on success it takes ownership and must bpf_prog_put
982 	 * when it is no longer used.
983 	 */
984 	XDP_SETUP_PROG,
985 	XDP_SETUP_PROG_HW,
986 	/* BPF program for offload callbacks, invoked at program load time. */
987 	BPF_OFFLOAD_MAP_ALLOC,
988 	BPF_OFFLOAD_MAP_FREE,
989 	XDP_SETUP_XSK_POOL,
990 };
991 
992 struct bpf_prog_offload_ops;
993 struct netlink_ext_ack;
994 struct xdp_umem;
995 struct xdp_dev_bulk_queue;
996 struct bpf_xdp_link;
997 
998 enum bpf_xdp_mode {
999 	XDP_MODE_SKB = 0,
1000 	XDP_MODE_DRV = 1,
1001 	XDP_MODE_HW = 2,
1002 	__MAX_XDP_MODE
1003 };
1004 
1005 struct bpf_xdp_entity {
1006 	struct bpf_prog *prog;
1007 	struct bpf_xdp_link *link;
1008 };
1009 
1010 struct netdev_bpf {
1011 	enum bpf_netdev_command command;
1012 	union {
1013 		/* XDP_SETUP_PROG */
1014 		struct {
1015 			u32 flags;
1016 			struct bpf_prog *prog;
1017 			struct netlink_ext_ack *extack;
1018 		};
1019 		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
1020 		struct {
1021 			struct bpf_offloaded_map *offmap;
1022 		};
1023 		/* XDP_SETUP_XSK_POOL */
1024 		struct {
1025 			struct xsk_buff_pool *pool;
1026 			u16 queue_id;
1027 		} xsk;
1028 	};
1029 };
1030 
1031 /* Flags for ndo_xsk_wakeup. */
1032 #define XDP_WAKEUP_RX (1 << 0)
1033 #define XDP_WAKEUP_TX (1 << 1)
1034 
1035 #ifdef CONFIG_XFRM_OFFLOAD
1036 struct xfrmdev_ops {
1037 	int	(*xdo_dev_state_add)(struct net_device *dev,
1038 				     struct xfrm_state *x,
1039 				     struct netlink_ext_ack *extack);
1040 	void	(*xdo_dev_state_delete)(struct net_device *dev,
1041 					struct xfrm_state *x);
1042 	void	(*xdo_dev_state_free)(struct net_device *dev,
1043 				      struct xfrm_state *x);
1044 	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
1045 				       struct xfrm_state *x);
1046 	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
1047 	void	(*xdo_dev_state_update_stats) (struct xfrm_state *x);
1048 	int	(*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
1049 	void	(*xdo_dev_policy_delete) (struct xfrm_policy *x);
1050 	void	(*xdo_dev_policy_free) (struct xfrm_policy *x);
1051 };
1052 #endif
1053 
1054 struct dev_ifalias {
1055 	struct rcu_head rcuhead;
1056 	char ifalias[];
1057 };
1058 
1059 struct devlink;
1060 struct tlsdev_ops;
1061 
1062 struct netdev_net_notifier {
1063 	struct list_head list;
1064 	struct notifier_block *nb;
1065 };
1066 
1067 /*
1068  * This structure defines the management hooks for network devices.
1069  * The following hooks can be defined; unless noted otherwise, they are
1070  * optional and can be filled with a null pointer.
1071  *
1072  * int (*ndo_init)(struct net_device *dev);
1073  *     This function is called once when a network device is registered.
1074  *     The network device can use this for any late stage initialization
1075  *     or semantic validation. It can fail with an error code which will
1076  *     be propagated back to register_netdev.
1077  *
1078  * void (*ndo_uninit)(struct net_device *dev);
1079  *     This function is called when device is unregistered or when registration
1080  *     fails. It is not called if init fails.
1081  *
1082  * int (*ndo_open)(struct net_device *dev);
1083  *     This function is called when a network device transitions to the up
1084  *     state.
1085  *
1086  * int (*ndo_stop)(struct net_device *dev);
1087  *     This function is called when a network device transitions to the down
1088  *     state.
1089  *
1090  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1091  *                               struct net_device *dev);
1092  *	Called when a packet needs to be transmitted.
1093  *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
1094  *	the queue before that can happen; it's for obsolete devices and weird
1095  *	corner cases, but the stack really does a non-trivial amount
1096  *	of useless work if you return NETDEV_TX_BUSY.
1097  *	Required; cannot be NULL.
1098  *
1099  * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1100  *					   struct net_device *dev
1101  *					   netdev_features_t features);
1102  *	Called by core transmit path to determine if device is capable of
1103  *	performing offload operations on a given packet. This is to give
1104  *	the device an opportunity to implement any restrictions that cannot
1105  *	be otherwise expressed by feature flags. The check is called with
1106  *	the set of features that the stack has calculated and it returns
1107  *	those the driver believes to be appropriate.
1108  *
1109  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
1110  *                         struct net_device *sb_dev);
1111  *	Called to decide which queue to use when device supports multiple
1112  *	transmit queues.
1113  *
1114  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
1115  *	This function is called to allow device receiver to make
1116  *	changes to configuration when multicast or promiscuous is enabled.
1117  *
1118  * void (*ndo_set_rx_mode)(struct net_device *dev);
1119  *	This function is called device changes address list filtering.
1120  *	If driver handles unicast address filtering, it should set
1121  *	IFF_UNICAST_FLT in its priv_flags.
1122  *	Cannot sleep, called with netif_addr_lock_bh held.
1123  *	Deprecated in favor of ndo_set_rx_mode_async.
1124  *
1125  * void (*ndo_set_rx_mode_async)(struct net_device *dev,
1126  *				 struct netdev_hw_addr_list *uc,
1127  *				 struct netdev_hw_addr_list *mc);
1128  *	Async version of ndo_set_rx_mode which runs in process context
1129  *	with rtnl_lock and netdev_lock_ops(dev) held. The uc/mc parameters
1130  *	are snapshots of the address lists - iterate with
1131  *	netdev_hw_addr_list_for_each(ha, uc).
1132  *
1133  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
1134  *	This function  is called when the Media Access Control address
1135  *	needs to be changed. If this interface is not defined, the
1136  *	MAC address can not be changed.
1137  *
1138  * int (*ndo_validate_addr)(struct net_device *dev);
1139  *	Test if Media Access Control address is valid for the device.
1140  *
1141  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1142  *	Old-style ioctl entry point. This is used internally by the
1143  *	ieee802154 subsystem but is no longer called by the device
1144  *	ioctl handler.
1145  *
1146  * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
1147  *	Used by the bonding driver for its device specific ioctls:
1148  *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
1149  *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
1150  *
1151  * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1152  *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
1153  *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
1154  *
1155  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
1156  *	Used to set network devices bus interface parameters. This interface
1157  *	is retained for legacy reasons; new devices should use the bus
1158  *	interface (PCI) for low level management.
1159  *
1160  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
1161  *	Called when a user wants to change the Maximum Transfer Unit
1162  *	of a device.
1163  *
1164  * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
1165  *	Callback used when the transmitter has not made any progress
1166  *	for dev->watchdog ticks.
1167  *
1168  * void (*ndo_get_stats64)(struct net_device *dev,
1169  *                         struct rtnl_link_stats64 *storage);
1170  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1171  *	Called when a user wants to get the network device usage
1172  *	statistics. Drivers must do one of the following:
1173  *	1. Define @ndo_get_stats64 to fill in a zero-initialised
1174  *	   rtnl_link_stats64 structure passed by the caller.
1175  *	2. Define @ndo_get_stats to update a net_device_stats structure
1176  *	   (which should normally be dev->stats) and return a pointer to
1177  *	   it. The structure may be changed asynchronously only if each
1178  *	   field is written atomically.
1179  *	3. Update dev->stats asynchronously and atomically, and define
1180  *	   neither operation.
1181  *
1182  * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
1183  *	Return true if this device supports offload stats of this attr_id.
1184  *
1185  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
1186  *	void *attr_data)
1187  *	Get statistics for offload operations by attr_id. Write it into the
1188  *	attr_data pointer.
1189  *
1190  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
1191  *	If device supports VLAN filtering this function is called when a
1192  *	VLAN id is registered.
1193  *
1194  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
1195  *	If device supports VLAN filtering this function is called when a
1196  *	VLAN id is unregistered.
1197  *
1198  * void (*ndo_poll_controller)(struct net_device *dev);
1199  *
1200  *	SR-IOV management functions.
1201  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
1202  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
1203  *			  u8 qos, __be16 proto);
1204  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
1205  *			  int max_tx_rate);
1206  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
1207  * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
1208  * int (*ndo_get_vf_config)(struct net_device *dev,
1209  *			    int vf, struct ifla_vf_info *ivf);
1210  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
1211  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
1212  *			  struct nlattr *port[]);
1213  *
1214  *      Enable or disable the VF ability to query its RSS Redirection Table and
1215  *      Hash Key. This is needed since on some devices VF share this information
1216  *      with PF and querying it may introduce a theoretical security risk.
1217  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
1218  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
1219  * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
1220  *		       void *type_data);
1221  *	Called to setup any 'tc' scheduler, classifier or action on @dev.
1222  *	This is always called from the stack with the rtnl lock held and netif
1223  *	tx queues stopped. This allows the netdevice to perform queue
1224  *	management safely.
1225  *
1226  *	Fiber Channel over Ethernet (FCoE) offload functions.
1227  * int (*ndo_fcoe_enable)(struct net_device *dev);
1228  *	Called when the FCoE protocol stack wants to start using LLD for FCoE
1229  *	so the underlying device can perform whatever needed configuration or
1230  *	initialization to support acceleration of FCoE traffic.
1231  *
1232  * int (*ndo_fcoe_disable)(struct net_device *dev);
1233  *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
1234  *	so the underlying device can perform whatever needed clean-ups to
1235  *	stop supporting acceleration of FCoE traffic.
1236  *
1237  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
1238  *			     struct scatterlist *sgl, unsigned int sgc);
1239  *	Called when the FCoE Initiator wants to initialize an I/O that
1240  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
1241  *	perform necessary setup and returns 1 to indicate the device is set up
1242  *	successfully to perform DDP on this I/O, otherwise this returns 0.
1243  *
1244  * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
1245  *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
1246  *	indicated by the FC exchange id 'xid', so the underlying device can
1247  *	clean up and reuse resources for later DDP requests.
1248  *
1249  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
1250  *			      struct scatterlist *sgl, unsigned int sgc);
1251  *	Called when the FCoE Target wants to initialize an I/O that
1252  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
1253  *	perform necessary setup and returns 1 to indicate the device is set up
1254  *	successfully to perform DDP on this I/O, otherwise this returns 0.
1255  *
1256  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1257  *			       struct netdev_fcoe_hbainfo *hbainfo);
1258  *	Called when the FCoE Protocol stack wants information on the underlying
1259  *	device. This information is utilized by the FCoE protocol stack to
1260  *	register attributes with Fiber Channel management service as per the
1261  *	FC-GS Fabric Device Management Information(FDMI) specification.
1262  *
1263  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1264  *	Called when the underlying device wants to override default World Wide
1265  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1266  *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1267  *	protocol stack to use.
1268  *
1269  *	RFS acceleration.
1270  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1271  *			    u16 rxq_index, u32 flow_id);
1272  *	Set hardware filter for RFS.  rxq_index is the target queue index;
1273  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1274  *	Return the filter ID on success, or a negative error code.
1275  *
1276  *	Slave management functions (for bridge, bonding, etc).
1277  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1278  *	Called to make another netdev an underling.
1279  *
1280  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1281  *	Called to release previously enslaved netdev.
1282  *
1283  * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
1284  *					    struct sk_buff *skb,
1285  *					    bool all_slaves);
1286  *	Get the xmit slave of master device. If all_slaves is true, function
1287  *	assume all the slaves can transmit.
1288  *
1289  *      Feature/offload setting functions.
1290  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1291  *		netdev_features_t features);
1292  *	Adjusts the requested feature flags according to device-specific
1293  *	constraints, and returns the resulting flags. Must not modify
1294  *	the device state.
1295  *
1296  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1297  *	Called to update device configuration to new features. Passed
1298  *	feature set might be less than what was returned by ndo_fix_features()).
1299  *	Must return >0 or -errno if it changed dev->features itself.
1300  *
1301  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1302  *		      struct net_device *dev,
1303  *		      const unsigned char *addr, u16 vid, u16 flags,
1304  *		      bool *notified, struct netlink_ext_ack *extack);
1305  *	Adds an FDB entry to dev for addr.
1306  *	Callee shall set *notified to true if it sent any appropriate
1307  *	notification(s). Otherwise core will send a generic one.
1308  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1309  *		      struct net_device *dev,
1310  *		      const unsigned char *addr, u16 vid
1311  *		      bool *notified, struct netlink_ext_ack *extack);
1312  *	Deletes the FDB entry from dev corresponding to addr.
1313  *	Callee shall set *notified to true if it sent any appropriate
1314  *	notification(s). Otherwise core will send a generic one.
1315  * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
1316  *			   struct netlink_ext_ack *extack);
1317  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1318  *		       struct net_device *dev, struct net_device *filter_dev,
1319  *		       int *idx)
1320  *	Used to add FDB entries to dump requests. Implementers should add
1321  *	entries to skb and update idx with the number of entries.
1322  *
1323  * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
1324  *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
1325  *	Adds an MDB entry to dev.
1326  * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
1327  *		      struct netlink_ext_ack *extack);
1328  *	Deletes the MDB entry from dev.
1329  * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
1330  *			   struct netlink_ext_ack *extack);
1331  *	Bulk deletes MDB entries from dev.
1332  * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
1333  *		       struct netlink_callback *cb);
1334  *	Dumps MDB entries from dev. The first argument (marker) in the netlink
1335  *	callback is used by core rtnetlink code.
1336  *
1337  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1338  *			     u16 flags, struct netlink_ext_ack *extack)
1339  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1340  *			     struct net_device *dev, u32 filter_mask,
1341  *			     int nlflags)
1342  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1343  *			     u16 flags);
1344  *
1345  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1346  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
1347  *	which do not represent real hardware may define this to allow their
1348  *	userspace components to manage their virtual carrier state. Devices
1349  *	that determine carrier state from physical hardware properties (eg
1350  *	network cables) or protocol-dependent mechanisms (eg
1351  *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
1352  *
1353  * int (*ndo_get_phys_port_id)(struct net_device *dev,
1354  *			       struct netdev_phys_item_id *ppid);
1355  *	Called to get ID of physical port of this device. If driver does
1356  *	not implement this, it is assumed that the hw is not able to have
1357  *	multiple net devices on single physical port.
1358  *
1359  * int (*ndo_get_port_parent_id)(struct net_device *dev,
1360  *				 struct netdev_phys_item_id *ppid)
1361  *	Called to get the parent ID of the physical port of this device.
1362  *
1363  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1364  *				 struct net_device *dev)
1365  *	Called by upper layer devices to accelerate switching or other
1366  *	station functionality into hardware. 'pdev is the lowerdev
1367  *	to use for the offload and 'dev' is the net device that will
1368  *	back the offload. Returns a pointer to the private structure
1369  *	the upper layer will maintain.
1370  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1371  *	Called by upper layer device to delete the station created
1372  *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1373  *	the station and priv is the structure returned by the add
1374  *	operation.
1375  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1376  *			     int queue_index, u32 maxrate);
1377  *	Called when a user wants to set a max-rate limitation of specific
1378  *	TX queue.
1379  * int (*ndo_get_iflink)(const struct net_device *dev);
1380  *	Called to get the iflink value of this device.
1381  * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1382  *	This function is used to get egress tunnel information for given skb.
1383  *	This is useful for retrieving outer tunnel header parameters while
1384  *	sampling packet.
1385  * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1386  *	This function is used to specify the headroom that the skb must
1387  *	consider when allocation skb during packet reception. Setting
1388  *	appropriate rx headroom value allows avoiding skb head copy on
1389  *	forward. Setting a negative value resets the rx headroom to the
1390  *	default value.
1391  * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
1392  *	This function is used to set or query state related to XDP on the
1393  *	netdevice and manage BPF offload. See definition of
1394  *	enum bpf_netdev_command for details.
1395  * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
1396  *			u32 flags);
1397  *	This function is used to submit @n XDP packets for transmit on a
1398  *	netdevice. Returns number of frames successfully transmitted, frames
1399  *	that got dropped are freed/returned via xdp_return_frame().
1400  *	Returns negative number, means general error invoking ndo, meaning
1401  *	no frames were xmit'ed and core-caller will free all frames.
1402  * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1403  *					        struct xdp_buff *xdp);
1404  *      Get the xmit slave of master device based on the xdp_buff.
1405  * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
1406  *      This function is used to wake up the softirq, ksoftirqd or kthread
1407  *	responsible for sending and/or receiving packets on a specific
1408  *	queue id bound to an AF_XDP socket. The flags field specifies if
1409  *	only RX, only Tx, or both should be woken up using the flags
1410  *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
1411  * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
1412  *			 int cmd);
1413  *	Add, change, delete or get information on an IPv4 tunnel.
1414  * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
1415  *	If a device is paired with a peer device, return the peer instance.
1416  *	The caller must be under RCU read context.
1417  * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
1418  *     Get the forwarding path to reach the real device from the HW destination address
1419  * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
1420  *			     const struct skb_shared_hwtstamps *hwtstamps,
1421  *			     bool cycles);
1422  *	Get hardware timestamp based on normal/adjustable time or free running
1423  *	cycle counter. This function is required if physical clock supports a
1424  *	free running cycle counter.
1425  *
1426  * int (*ndo_hwtstamp_get)(struct net_device *dev,
1427  *			   struct kernel_hwtstamp_config *kernel_config);
1428  *	Get the currently configured hardware timestamping parameters for the
1429  *	NIC device.
1430  *
1431  * int (*ndo_hwtstamp_set)(struct net_device *dev,
1432  *			   struct kernel_hwtstamp_config *kernel_config,
1433  *			   struct netlink_ext_ack *extack);
1434  *	Change the hardware timestamping parameters for NIC device.
1435  */
1436 struct net_device_ops {
1437 	int			(*ndo_init)(struct net_device *dev);
1438 	void			(*ndo_uninit)(struct net_device *dev);
1439 	int			(*ndo_open)(struct net_device *dev);
1440 	int			(*ndo_stop)(struct net_device *dev);
1441 	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
1442 						  struct net_device *dev);
1443 	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
1444 						      struct net_device *dev,
1445 						      netdev_features_t features);
1446 	u16			(*ndo_select_queue)(struct net_device *dev,
1447 						    struct sk_buff *skb,
1448 						    struct net_device *sb_dev);
1449 	void			(*ndo_change_rx_flags)(struct net_device *dev,
1450 						       int flags);
1451 	void			(*ndo_set_rx_mode)(struct net_device *dev);
1452 	void			(*ndo_set_rx_mode_async)(
1453 					struct net_device *dev,
1454 					struct netdev_hw_addr_list *uc,
1455 					struct netdev_hw_addr_list *mc);
1456 	int			(*ndo_set_mac_address)(struct net_device *dev,
1457 						       void *addr);
1458 	int			(*ndo_validate_addr)(struct net_device *dev);
1459 	int			(*ndo_do_ioctl)(struct net_device *dev,
1460 					        struct ifreq *ifr, int cmd);
1461 	int			(*ndo_eth_ioctl)(struct net_device *dev,
1462 						 struct ifreq *ifr, int cmd);
1463 	int			(*ndo_siocbond)(struct net_device *dev,
1464 						struct ifreq *ifr, int cmd);
1465 	int			(*ndo_siocwandev)(struct net_device *dev,
1466 						  struct if_settings *ifs);
1467 	int			(*ndo_siocdevprivate)(struct net_device *dev,
1468 						      struct ifreq *ifr,
1469 						      void __user *data, int cmd);
1470 	int			(*ndo_set_config)(struct net_device *dev,
1471 					          struct ifmap *map);
1472 	int			(*ndo_change_mtu)(struct net_device *dev,
1473 						  int new_mtu);
1474 	int			(*ndo_neigh_setup)(struct net_device *dev,
1475 						   struct neigh_parms *);
1476 	void			(*ndo_tx_timeout) (struct net_device *dev,
1477 						   unsigned int txqueue);
1478 
1479 	void			(*ndo_get_stats64)(struct net_device *dev,
1480 						   struct rtnl_link_stats64 *storage);
1481 	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1482 	int			(*ndo_get_offload_stats)(int attr_id,
1483 							 const struct net_device *dev,
1484 							 void *attr_data);
1485 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1486 
1487 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
1488 						       __be16 proto, u16 vid);
1489 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1490 						        __be16 proto, u16 vid);
1491 #ifdef CONFIG_NET_POLL_CONTROLLER
1492 	void                    (*ndo_poll_controller)(struct net_device *dev);
1493 	int			(*ndo_netpoll_setup)(struct net_device *dev);
1494 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
1495 #endif
1496 	int			(*ndo_set_vf_mac)(struct net_device *dev,
1497 						  int queue, u8 *mac);
1498 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
1499 						   int queue, u16 vlan,
1500 						   u8 qos, __be16 proto);
1501 	int			(*ndo_set_vf_rate)(struct net_device *dev,
1502 						   int vf, int min_tx_rate,
1503 						   int max_tx_rate);
1504 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
1505 						       int vf, bool setting);
1506 	int			(*ndo_set_vf_trust)(struct net_device *dev,
1507 						    int vf, bool setting);
1508 	int			(*ndo_get_vf_config)(struct net_device *dev,
1509 						     int vf,
1510 						     struct ifla_vf_info *ivf);
1511 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
1512 							 int vf, int link_state);
1513 	int			(*ndo_get_vf_stats)(struct net_device *dev,
1514 						    int vf,
1515 						    struct ifla_vf_stats
1516 						    *vf_stats);
1517 	int			(*ndo_set_vf_port)(struct net_device *dev,
1518 						   int vf,
1519 						   struct nlattr *port[]);
1520 	int			(*ndo_get_vf_port)(struct net_device *dev,
1521 						   int vf, struct sk_buff *skb);
1522 	int			(*ndo_get_vf_guid)(struct net_device *dev,
1523 						   int vf,
1524 						   struct ifla_vf_guid *node_guid,
1525 						   struct ifla_vf_guid *port_guid);
1526 	int			(*ndo_set_vf_guid)(struct net_device *dev,
1527 						   int vf, u64 guid,
1528 						   int guid_type);
1529 	int			(*ndo_set_vf_rss_query_en)(
1530 						   struct net_device *dev,
1531 						   int vf, bool setting);
1532 	int			(*ndo_setup_tc)(struct net_device *dev,
1533 						enum tc_setup_type type,
1534 						void *type_data);
1535 #if IS_ENABLED(CONFIG_FCOE)
1536 	int			(*ndo_fcoe_enable)(struct net_device *dev);
1537 	int			(*ndo_fcoe_disable)(struct net_device *dev);
1538 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
1539 						      u16 xid,
1540 						      struct scatterlist *sgl,
1541 						      unsigned int sgc);
1542 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
1543 						     u16 xid);
1544 	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
1545 						       u16 xid,
1546 						       struct scatterlist *sgl,
1547 						       unsigned int sgc);
1548 	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1549 							struct netdev_fcoe_hbainfo *hbainfo);
1550 #endif
1551 
1552 #if IS_ENABLED(CONFIG_LIBFCOE)
1553 #define NETDEV_FCOE_WWNN 0
1554 #define NETDEV_FCOE_WWPN 1
1555 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
1556 						    u64 *wwn, int type);
1557 #endif
1558 
1559 #ifdef CONFIG_RFS_ACCEL
1560 	int			(*ndo_rx_flow_steer)(struct net_device *dev,
1561 						     const struct sk_buff *skb,
1562 						     u16 rxq_index,
1563 						     u32 flow_id);
1564 #endif
1565 	int			(*ndo_add_slave)(struct net_device *dev,
1566 						 struct net_device *slave_dev,
1567 						 struct netlink_ext_ack *extack);
1568 	int			(*ndo_del_slave)(struct net_device *dev,
1569 						 struct net_device *slave_dev);
1570 	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
1571 						      struct sk_buff *skb,
1572 						      bool all_slaves);
1573 	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
1574 							struct sock *sk);
1575 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
1576 						    netdev_features_t features);
1577 	int			(*ndo_set_features)(struct net_device *dev,
1578 						    netdev_features_t features);
1579 	int			(*ndo_neigh_construct)(struct net_device *dev,
1580 						       struct neighbour *n);
1581 	void			(*ndo_neigh_destroy)(struct net_device *dev,
1582 						     struct neighbour *n);
1583 
1584 	int			(*ndo_fdb_add)(struct ndmsg *ndm,
1585 					       struct nlattr *tb[],
1586 					       struct net_device *dev,
1587 					       const unsigned char *addr,
1588 					       u16 vid,
1589 					       u16 flags,
1590 					       bool *notified,
1591 					       struct netlink_ext_ack *extack);
1592 	int			(*ndo_fdb_del)(struct ndmsg *ndm,
1593 					       struct nlattr *tb[],
1594 					       struct net_device *dev,
1595 					       const unsigned char *addr,
1596 					       u16 vid,
1597 					       bool *notified,
1598 					       struct netlink_ext_ack *extack);
1599 	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
1600 						    struct net_device *dev,
1601 						    struct netlink_ext_ack *extack);
1602 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
1603 						struct netlink_callback *cb,
1604 						struct net_device *dev,
1605 						struct net_device *filter_dev,
1606 						int *idx);
1607 	int			(*ndo_fdb_get)(struct sk_buff *skb,
1608 					       struct nlattr *tb[],
1609 					       struct net_device *dev,
1610 					       const unsigned char *addr,
1611 					       u16 vid, u32 portid, u32 seq,
1612 					       struct netlink_ext_ack *extack);
1613 	int			(*ndo_mdb_add)(struct net_device *dev,
1614 					       struct nlattr *tb[],
1615 					       u16 nlmsg_flags,
1616 					       struct netlink_ext_ack *extack);
1617 	int			(*ndo_mdb_del)(struct net_device *dev,
1618 					       struct nlattr *tb[],
1619 					       struct netlink_ext_ack *extack);
1620 	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
1621 						    struct nlattr *tb[],
1622 						    struct netlink_ext_ack *extack);
1623 	int			(*ndo_mdb_dump)(struct net_device *dev,
1624 						struct sk_buff *skb,
1625 						struct netlink_callback *cb);
1626 	int			(*ndo_mdb_get)(struct net_device *dev,
1627 					       struct nlattr *tb[], u32 portid,
1628 					       u32 seq,
1629 					       struct netlink_ext_ack *extack);
1630 	int			(*ndo_bridge_setlink)(struct net_device *dev,
1631 						      struct nlmsghdr *nlh,
1632 						      u16 flags,
1633 						      struct netlink_ext_ack *extack);
1634 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
1635 						      u32 pid, u32 seq,
1636 						      struct net_device *dev,
1637 						      u32 filter_mask,
1638 						      int nlflags);
1639 	int			(*ndo_bridge_dellink)(struct net_device *dev,
1640 						      struct nlmsghdr *nlh,
1641 						      u16 flags);
1642 	int			(*ndo_change_carrier)(struct net_device *dev,
1643 						      bool new_carrier);
1644 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
1645 							struct netdev_phys_item_id *ppid);
1646 	int			(*ndo_get_port_parent_id)(struct net_device *dev,
1647 							  struct netdev_phys_item_id *ppid);
1648 	int			(*ndo_get_phys_port_name)(struct net_device *dev,
1649 							  char *name, size_t len);
1650 	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
1651 							struct net_device *dev);
1652 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
1653 							void *priv);
1654 
1655 	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
1656 						      int queue_index,
1657 						      u32 maxrate);
1658 	int			(*ndo_get_iflink)(const struct net_device *dev);
1659 	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
1660 						       struct sk_buff *skb);
1661 	void			(*ndo_set_rx_headroom)(struct net_device *dev,
1662 						       int needed_headroom);
1663 	int			(*ndo_bpf)(struct net_device *dev,
1664 					   struct netdev_bpf *bpf);
1665 	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
1666 						struct xdp_frame **xdp,
1667 						u32 flags);
1668 	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1669 							  struct xdp_buff *xdp);
1670 	int			(*ndo_xsk_wakeup)(struct net_device *dev,
1671 						  u32 queue_id, u32 flags);
1672 	int			(*ndo_tunnel_ctl)(struct net_device *dev,
1673 						  struct ip_tunnel_parm_kern *p,
1674 						  int cmd);
1675 	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
1676 	int                     (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
1677                                                          struct net_device_path *path);
1678 	ktime_t			(*ndo_get_tstamp)(struct net_device *dev,
1679 						  const struct skb_shared_hwtstamps *hwtstamps,
1680 						  bool cycles);
1681 	int			(*ndo_hwtstamp_get)(struct net_device *dev,
1682 						    struct kernel_hwtstamp_config *kernel_config);
1683 	int			(*ndo_hwtstamp_set)(struct net_device *dev,
1684 						    struct kernel_hwtstamp_config *kernel_config,
1685 						    struct netlink_ext_ack *extack);
1686 
1687 #if IS_ENABLED(CONFIG_NET_SHAPER)
1688 	/**
1689 	 * @net_shaper_ops: Device shaping offload operations
1690 	 * see include/net/net_shapers.h
1691 	 */
1692 	const struct net_shaper_ops *net_shaper_ops;
1693 #endif
1694 };
1695 
1696 /**
1697  * enum netdev_priv_flags - &struct net_device priv_flags
1698  *
1699  * These are the &struct net_device, they are only set internally
1700  * by drivers and used in the kernel. These flags are invisible to
1701  * userspace; this means that the order of these flags can change
1702  * during any kernel release.
1703  *
1704  * You should add bitfield booleans after either net_device::priv_flags
1705  * (hotpath) or ::threaded (slowpath) instead of extending these flags.
1706  *
1707  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1708  * @IFF_EBRIDGE: Ethernet bridging device
1709  * @IFF_BONDING: bonding master or slave
1710  * @IFF_ISATAP: ISATAP interface (RFC4214)
1711  * @IFF_WAN_HDLC: WAN HDLC device
1712  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1713  *	release skb->dst
1714  * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1715  * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1716  * @IFF_MACVLAN_PORT: device used as macvlan port
1717  * @IFF_BRIDGE_PORT: device used as bridge port
1718  * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1719  * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1720  * @IFF_UNICAST_FLT: Supports unicast filtering
1721  * @IFF_TEAM_PORT: device used as team port
1722  * @IFF_SUPP_NOFCS: device supports sending custom FCS
1723  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1724  *	change when it's running
1725  * @IFF_MACVLAN: Macvlan device
1726  * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1727  *	underlying stacked devices
1728  * @IFF_L3MDEV_MASTER: device is an L3 master device
1729  * @IFF_NO_QUEUE: device can run without qdisc attached
1730  * @IFF_OPENVSWITCH: device is a Open vSwitch master
1731  * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1732  * @IFF_TEAM: device is a team device
1733  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1734  *	entity (i.e. the master device for bridged veth)
1735  * @IFF_MACSEC: device is a MACsec device
1736  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1737  * @IFF_FAILOVER: device is a failover master device
1738  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1739  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1740  * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
1741  * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
1742  *	skb_headlen(skb) == 0 (data starts from frag0)
1743  */
1744 enum netdev_priv_flags {
1745 	IFF_802_1Q_VLAN			= 1<<0,
1746 	IFF_EBRIDGE			= 1<<1,
1747 	IFF_BONDING			= 1<<2,
1748 	IFF_ISATAP			= 1<<3,
1749 	IFF_WAN_HDLC			= 1<<4,
1750 	IFF_XMIT_DST_RELEASE		= 1<<5,
1751 	IFF_DONT_BRIDGE			= 1<<6,
1752 	IFF_DISABLE_NETPOLL		= 1<<7,
1753 	IFF_MACVLAN_PORT		= 1<<8,
1754 	IFF_BRIDGE_PORT			= 1<<9,
1755 	IFF_OVS_DATAPATH		= 1<<10,
1756 	IFF_TX_SKB_SHARING		= 1<<11,
1757 	IFF_UNICAST_FLT			= 1<<12,
1758 	IFF_TEAM_PORT			= 1<<13,
1759 	IFF_SUPP_NOFCS			= 1<<14,
1760 	IFF_LIVE_ADDR_CHANGE		= 1<<15,
1761 	IFF_MACVLAN			= 1<<16,
1762 	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
1763 	IFF_L3MDEV_MASTER		= 1<<18,
1764 	IFF_NO_QUEUE			= 1<<19,
1765 	IFF_OPENVSWITCH			= 1<<20,
1766 	IFF_L3MDEV_SLAVE		= 1<<21,
1767 	IFF_TEAM			= 1<<22,
1768 	IFF_PHONY_HEADROOM		= 1<<24,
1769 	IFF_MACSEC			= 1<<25,
1770 	IFF_NO_RX_HANDLER		= 1<<26,
1771 	IFF_FAILOVER			= 1<<27,
1772 	IFF_FAILOVER_SLAVE		= 1<<28,
1773 	IFF_L3MDEV_RX_HANDLER		= 1<<29,
1774 	IFF_NO_ADDRCONF			= BIT_ULL(30),
1775 	IFF_TX_SKB_NO_LINEAR		= BIT_ULL(31),
1776 };
1777 
1778 /* Specifies the type of the struct net_device::ml_priv pointer */
1779 enum netdev_ml_priv_type {
1780 	ML_PRIV_NONE,
1781 	ML_PRIV_CAN,
1782 };
1783 
1784 enum netdev_stat_type {
1785 	NETDEV_PCPU_STAT_NONE,
1786 	NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
1787 	NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
1788 	NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
1789 };
1790 
1791 enum netdev_reg_state {
1792 	NETREG_UNINITIALIZED = 0,
1793 	NETREG_REGISTERED,	/* completed register_netdevice */
1794 	NETREG_UNREGISTERING,	/* called unregister_netdevice */
1795 	NETREG_UNREGISTERED,	/* completed unregister todo */
1796 	NETREG_RELEASED,	/* called free_netdev */
1797 	NETREG_DUMMY,		/* dummy device for NAPI poll */
1798 };
1799 
1800 /**
1801  *	struct net_device - The DEVICE structure.
1802  *
1803  *	Actually, this whole structure is a big mistake.  It mixes I/O
1804  *	data with strictly "high-level" data, and it has to know about
1805  *	almost every data structure used in the INET module.
1806  *
1807  *	@priv_flags:	flags invisible to userspace defined as bits, see
1808  *			enum netdev_priv_flags for the definitions
1809  *	@lltx:		device supports lockless Tx. Deprecated for real HW
1810  *			drivers. Mainly used by logical interfaces, such as
1811  *			bonding and tunnels
1812  *	@netmem_tx:	device support netmem_tx.
1813  *
1814  *	@name:	This is the first field of the "visible" part of this structure
1815  *		(i.e. as seen by users in the "Space.c" file).  It is the name
1816  *		of the interface.
1817  *
1818  *	@name_node:	Name hashlist node
1819  *	@ifalias:	SNMP alias
1820  *	@mem_end:	Shared memory end
1821  *	@mem_start:	Shared memory start
1822  *	@base_addr:	Device I/O address
1823  *	@irq:		Device IRQ number
1824  *
1825  *	@state:		Generic network queuing layer state, see netdev_state_t
1826  *	@dev_list:	The global list of network devices
1827  *	@napi_list:	List entry used for polling NAPI devices
1828  *	@unreg_list:	List entry  when we are unregistering the
1829  *			device; see the function unregister_netdev
1830  *	@close_list:	List entry used when we are closing the device
1831  *	@ptype_all:     Device-specific packet handlers for all protocols
1832  *	@ptype_specific: Device-specific, protocol-specific packet handlers
1833  *
1834  *	@adj_list:	Directly linked devices, like slaves for bonding
1835  *	@features:	Currently active device features
1836  *	@hw_features:	User-changeable features
1837  *
1838  *	@wanted_features:	User-requested features
1839  *	@vlan_features:		Mask of features inheritable by VLAN devices
1840  *
1841  *	@hw_enc_features:	Mask of features inherited by encapsulating devices
1842  *				This field indicates what encapsulation
1843  *				offloads the hardware is capable of doing,
1844  *				and drivers will need to set them appropriately.
1845  *
1846  *	@mpls_features:	Mask of features inheritable by MPLS
1847  *	@gso_partial_features: value(s) from NETIF_F_GSO\*
1848  *	@mangleid_features:	Mask of features requiring MANGLEID, will be
1849  *				disabled together with the latter.
1850  *
1851  *	@ifindex:	interface index
1852  *	@group:		The group the device belongs to
1853  *
1854  *	@stats:		Statistics struct, which was left as a legacy, use
1855  *			rtnl_link_stats64 instead
1856  *
1857  *	@core_stats:	core networking counters,
1858  *			do not use this in drivers
1859  *	@carrier_up_count:	Number of times the carrier has been up
1860  *	@carrier_down_count:	Number of times the carrier has been down
1861  *
1862  *	@wireless_handlers:	List of functions to handle Wireless Extensions,
1863  *				instead of ioctl,
1864  *				see <net/iw_handler.h> for details.
1865  *
1866  *	@netdev_ops:	Includes several pointers to callbacks,
1867  *			if one wants to override the ndo_*() functions
1868  *	@xdp_metadata_ops:	Includes pointers to XDP metadata callbacks.
1869  *	@xsk_tx_metadata_ops:	Includes pointers to AF_XDP TX metadata callbacks.
1870  *	@ethtool_ops:	Management operations
1871  *	@l3mdev_ops:	Layer 3 master device operations
1872  *	@ndisc_ops:	Includes callbacks for different IPv6 neighbour
1873  *			discovery handling. Necessary for e.g. 6LoWPAN.
1874  *	@xfrmdev_ops:	Transformation offload operations
1875  *	@tlsdev_ops:	Transport Layer Security offload operations
1876  *	@header_ops:	Includes callbacks for creating,parsing,caching,etc
1877  *			of Layer 2 headers.
1878  *
1879  *	@flags:		Interface flags (a la BSD)
1880  *	@xdp_features:	XDP capability supported by the device
1881  *	@gflags:	Global flags ( kept as legacy )
1882  *	@priv_len:	Size of the ->priv flexible array
1883  *	@priv:		Flexible array containing private data
1884  *	@operstate:	RFC2863 operstate
1885  *	@link_mode:	Mapping policy to operstate
1886  *	@if_port:	Selectable AUI, TP, ...
1887  *	@dma:		DMA channel
1888  *	@mtu:		Interface MTU value
1889  *	@min_mtu:	Interface Minimum MTU value
1890  *	@max_mtu:	Interface Maximum MTU value
1891  *	@type:		Interface hardware type
1892  *	@hard_header_len: Maximum hardware header length.
1893  *	@min_header_len:  Minimum hardware header length
1894  *
1895  *	@needed_headroom: Extra headroom the hardware may need, but not in all
1896  *			  cases can this be guaranteed
1897  *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
1898  *			  cases can this be guaranteed. Some cases also use
1899  *			  LL_MAX_HEADER instead to allocate the skb
1900  *
1901  *	interface address info:
1902  *
1903  * 	@perm_addr:		Permanent hw address
1904  * 	@addr_assign_type:	Hw address assignment type
1905  * 	@addr_len:		Hardware address length
1906  *	@upper_level:		Maximum depth level of upper devices.
1907  *	@lower_level:		Maximum depth level of lower devices.
1908  *	@threaded:		napi threaded state.
1909  *	@neigh_priv_len:	Used in neigh_alloc()
1910  * 	@dev_id:		Used to differentiate devices that share
1911  * 				the same link layer address
1912  * 	@dev_port:		Used to differentiate devices that share
1913  * 				the same function
1914  *	@addr_list_lock:	XXX: need comments on this one
1915  *	@name_assign_type:	network interface name assignment type
1916  *	@uc_promisc:		Counter that indicates promiscuous mode
1917  *				has been enabled due to the need to listen to
1918  *				additional unicast addresses in a device that
1919  *				does not implement ndo_set_rx_mode()
1920  *	@rx_mode_node:		List entry for rx_mode work processing
1921  *	@rx_mode_tracker:	Refcount tracker for rx_mode work
1922  *	@rx_mode_addr_cache:	Recycled snapshot entries for rx_mode work
1923  *	@uc:			unicast mac addresses
1924  *	@mc:			multicast mac addresses
1925  *	@dev_addrs:		list of device hw addresses
1926  *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
1927  *	@promiscuity:		Number of times the NIC is told to work in
1928  *				promiscuous mode; if it becomes 0 the NIC will
1929  *				exit promiscuous mode
1930  *	@allmulti:		Counter, enables or disables allmulticast mode
1931  *
1932  *	@vlan_info:	VLAN info
1933  *	@dsa_ptr:	dsa specific data
1934  *	@tipc_ptr:	TIPC specific data
1935  *	@atalk_ptr:	AppleTalk link
1936  *	@ip_ptr:	IPv4 specific data
1937  *	@ip6_ptr:	IPv6 specific data
1938  *	@ax25_ptr:	AX.25 specific data
1939  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
1940  *	@ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1941  *			 device struct
1942  *	@mpls_ptr:	mpls_dev struct pointer
1943  *	@mctp_ptr:	MCTP specific data
1944  *	@psp_dev:	PSP crypto device registered for this netdev
1945  *
1946  *	@dev_addr:	Hw address (before bcast,
1947  *			because most packets are unicast)
1948  *
1949  *	@_rx:			Array of RX queues
1950  *	@num_rx_queues:		Number of RX queues
1951  *				allocated at register_netdev() time
1952  *	@real_num_rx_queues: 	Number of RX queues currently active in device
1953  *	@xdp_prog:		XDP sockets filter program pointer
1954  *
1955  *	@rx_handler:		handler for received packets
1956  *	@rx_handler_data: 	XXX: need comments on this one
1957  *	@tcx_ingress:		BPF & clsact qdisc specific data for ingress processing
1958  *	@ingress_queue:		XXX: need comments on this one
1959  *	@nf_hooks_ingress:	netfilter hooks executed for ingress packets
1960  *	@broadcast:		hw bcast address
1961  *
1962  *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
1963  *			indexed by RX queue number. Assigned by driver.
1964  *			This must only be set if the ndo_rx_flow_steer
1965  *			operation is defined
1966  *	@index_hlist:		Device index hash chain
1967  *
1968  *	@_tx:			Array of TX queues
1969  *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
1970  *	@real_num_tx_queues: 	Number of TX queues currently active in device
1971  *	@qdisc:			Root qdisc from userspace point of view
1972  *	@tx_queue_len:		Max frames per queue allowed
1973  *	@tx_global_lock: 	XXX: need comments on this one
1974  *	@xdp_bulkq:		XDP device bulk queue
1975  *	@xps_maps:		all CPUs/RXQs maps for XPS device
1976  *
1977  *	@xps_maps:	XXX: need comments on this one
1978  *	@tcx_egress:		BPF & clsact qdisc specific data for egress processing
1979  *	@nf_hooks_egress:	netfilter hooks executed for egress packets
1980  *	@qdisc_hash:		qdisc hash table
1981  *	@watchdog_timeo:	Represents the timeout that is used by
1982  *				the watchdog (see dev_watchdog())
1983  *	@watchdog_timer:	List of timers
1984  *
1985  *	@proto_down_reason:	reason a netdev interface is held down
1986  *	@pcpu_refcnt:		Number of references to this device
1987  *	@dev_refcnt:		Number of references to this device
1988  *	@refcnt_tracker:	Tracker directory for tracked references to this device
1989  *	@todo_list:		Delayed register/unregister
1990  *	@link_watch_list:	XXX: need comments on this one
1991  *
1992  *	@reg_state:		Register/unregister state machine
1993  *	@dismantle:		Device is going to be freed
1994  *	@needs_free_netdev:	Should unregister perform free_netdev?
1995  *	@priv_destructor:	Called from unregister
1996  *	@npinfo:		XXX: need comments on this one
1997  * 	@nd_net:		Network namespace this network device is inside
1998  *				protected by @lock
1999  *
2000  * 	@ml_priv:	Mid-layer private
2001  *	@ml_priv_type:  Mid-layer private type
2002  *
2003  *	@pcpu_stat_type:	Type of device statistics which the core should
2004  *				allocate/free: none, lstats, tstats, dstats. none
2005  *				means the driver is handling statistics allocation/
2006  *				freeing internally.
2007  *	@lstats:		Loopback statistics: packets, bytes
2008  *	@tstats:		Tunnel statistics: RX/TX packets, RX/TX bytes
2009  *	@dstats:		Dummy statistics: RX/TX/drop packets, RX/TX bytes
2010  *
2011  *	@garp_port:	GARP
2012  *	@mrp_port:	MRP
2013  *
2014  *	@dm_private:	Drop monitor private
2015  *
2016  *	@dev:		Class/net/name entry
2017  *	@sysfs_groups:	Space for optional device, statistics and wireless
2018  *			sysfs groups
2019  *
2020  *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
2021  *	@rtnl_link_ops:	Rtnl_link_ops
2022  *	@stat_ops:	Optional ops for queue-aware statistics
2023  *	@queue_mgmt_ops:	Optional ops for queue management
2024  *
2025  *	@gso_max_size:	Maximum size of generic segmentation offload
2026  *	@tso_max_size:	Device (as in HW) limit on the max TSO request size
2027  *	@gso_max_segs:	Maximum number of segments that can be passed to the
2028  *			NIC for GSO
2029  *	@tso_max_segs:	Device (as in HW) limit on the max TSO segment count
2030  * 	@gso_ipv4_max_size:	Maximum size of generic segmentation offload,
2031  * 				for IPv4.
2032  *
2033  *	@dcbnl_ops:	Data Center Bridging netlink ops
2034  *	@num_tc:	Number of traffic classes in the net device
2035  *	@tc_to_txq:	XXX: need comments on this one
2036  *	@prio_tc_map:	XXX: need comments on this one
2037  *
2038  *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
2039  *
2040  *	@priomap:	XXX: need comments on this one
2041  *	@link_topo:	Physical link topology tracking attached PHYs
2042  *	@phydev:	Physical device may attach itself
2043  *			for hardware timestamping
2044  *	@sfp_bus:	attached &struct sfp_bus structure.
2045  *
2046  *	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
2047  *
2048  *	@proto_down:	protocol port state information can be sent to the
2049  *			switch driver and used to set the phys state of the
2050  *			switch port.
2051  *
2052  *	@irq_affinity_auto: driver wants the core to store and re-assign the IRQ
2053  *			    affinity. Set by netif_enable_irq_affinity(), then
2054  *			    the driver must create a persistent napi by
2055  *			    netif_napi_add_config() and finally bind the napi to
2056  *			    IRQ (via netif_napi_set_irq()).
2057  *
2058  *	@rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap.
2059  *	                   Set by calling netif_enable_cpu_rmap().
2060  *
2061  *	@see_all_hwtstamp_requests: device wants to see calls to
2062  *			ndo_hwtstamp_set() for all timestamp requests
2063  *			regardless of source, even if those aren't
2064  *			HWTSTAMP_SOURCE_NETDEV
2065  *	@change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
2066  *	@netns_immutable: interface can't change network namespaces
2067  *	@fcoe_mtu:	device supports maximum FCoE MTU, 2158 bytes
2068  *
2069  *	@net_notifier_list:	List of per-net netdev notifier block
2070  *				that follow this device when it is moved
2071  *				to another network namespace.
2072  *
2073  *	@macsec_ops:    MACsec offloading ops
2074  *
2075  *	@udp_tunnel_nic_info:	static structure describing the UDP tunnel
2076  *				offload capabilities of the device
2077  *	@udp_tunnel_nic:	UDP tunnel offload state
2078  *	@ethtool:	ethtool related state
2079  *	@xdp_state:		stores info on attached XDP BPF programs
2080  *
2081  *	@nested_level:	Used as a parameter of spin_lock_nested() of
2082  *			dev->addr_list_lock.
2083  *	@unlink_list:	As netif_addr_lock() can be called recursively,
2084  *			keep a list of interfaces to be deleted.
2085  *	@gro_max_size:	Maximum size of aggregated packet in generic
2086  *			receive offload (GRO)
2087  * 	@gro_ipv4_max_size:	Maximum size of aggregated packet in generic
2088  * 				receive offload (GRO), for IPv4.
2089  *	@xdp_zc_max_segs:	Maximum number of segments supported by AF_XDP
2090  *				zero copy driver
2091  *
2092  *	@dev_addr_shadow:	Copy of @dev_addr to catch direct writes.
2093  *	@linkwatch_dev_tracker:	refcount tracker used by linkwatch.
2094  *	@watchdog_dev_tracker:	refcount tracker used by watchdog.
2095  *	@dev_registered_tracker:	tracker for reference held while
2096  *					registered
2097  *	@offload_xstats_l3:	L3 HW stats for this netdevice.
2098  *
2099  *	@devlink_port:	Pointer to related devlink port structure.
2100  *			Assigned by a driver before netdev registration using
2101  *			SET_NETDEV_DEVLINK_PORT macro. This pointer is static
2102  *			during the time netdevice is registered.
2103  *
2104  *	@dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
2105  *		   where the clock is recovered.
2106  *
2107  *	@max_pacing_offload_horizon: max EDT offload horizon in nsec.
2108  *	@napi_config: An array of napi_config structures containing per-NAPI
2109  *		      settings.
2110  *	@num_napi_configs:	number of allocated NAPI config structs,
2111  *		always >= max(num_rx_queues, num_tx_queues).
2112  *	@gro_flush_timeout:	timeout for GRO layer in NAPI
2113  *	@napi_defer_hard_irqs:	If not zero, provides a counter that would
2114  *				allow to avoid NIC hard IRQ, on busy queues.
2115  *
2116  *	@neighbours:	List heads pointing to this device's neighbours'
2117  *			dev_list, one per address-family.
2118  *	@hwprov: Tracks which PTP performs hardware packet time stamping.
2119  *
2120  *	FIXME: cleanup struct net_device such that network protocol info
2121  *	moves out.
2122  */
2123 
2124 struct net_device {
2125 	/* Cacheline organization can be found documented in
2126 	 * Documentation/networking/net_cachelines/net_device.rst.
2127 	 * Please update the document when adding new fields.
2128 	 */
2129 
2130 	/* TX read-mostly hotpath */
2131 	__cacheline_group_begin(net_device_read_tx);
2132 	struct_group(priv_flags_fast,
2133 		unsigned long		priv_flags:32;
2134 		unsigned long		lltx:1;
2135 		unsigned long		netmem_tx:1;
2136 	);
2137 	const struct net_device_ops *netdev_ops;
2138 	const struct header_ops *header_ops;
2139 	struct netdev_queue	*_tx;
2140 	netdev_features_t	gso_partial_features;
2141 	unsigned int		real_num_tx_queues;
2142 	unsigned int		gso_max_size;
2143 	unsigned int		gso_ipv4_max_size;
2144 	u16			gso_max_segs;
2145 	s16			num_tc;
2146 	/* Note : dev->mtu is often read without holding a lock.
2147 	 * Writers usually hold RTNL.
2148 	 * It is recommended to use READ_ONCE() to annotate the reads,
2149 	 * and to use WRITE_ONCE() to annotate the writes.
2150 	 */
2151 	unsigned int		mtu;
2152 	unsigned short		needed_headroom;
2153 	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
2154 #ifdef CONFIG_XPS
2155 	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
2156 #endif
2157 #ifdef CONFIG_NETFILTER_EGRESS
2158 	struct nf_hook_entries __rcu *nf_hooks_egress;
2159 #endif
2160 #ifdef CONFIG_NET_XGRESS
2161 	struct bpf_mprog_entry __rcu *tcx_egress;
2162 #endif
2163 	__cacheline_group_end(net_device_read_tx);
2164 
2165 	/* TXRX read-mostly hotpath */
2166 	__cacheline_group_begin(net_device_read_txrx);
2167 	union {
2168 		struct pcpu_lstats __percpu		*lstats;
2169 		struct pcpu_sw_netstats __percpu	*tstats;
2170 		struct pcpu_dstats __percpu		*dstats;
2171 	};
2172 	unsigned long		state;
2173 	unsigned int		flags;
2174 	unsigned short		hard_header_len;
2175 	enum netdev_stat_type	pcpu_stat_type:8;
2176 	netdev_features_t	features;
2177 	struct inet6_dev __rcu	*ip6_ptr;
2178 	__cacheline_group_end(net_device_read_txrx);
2179 
2180 	/* RX read-mostly hotpath */
2181 	__cacheline_group_begin(net_device_read_rx);
2182 	struct bpf_prog __rcu	*xdp_prog;
2183 	struct list_head	ptype_specific;
2184 	int			ifindex;
2185 	unsigned int		real_num_rx_queues;
2186 	struct netdev_rx_queue	*_rx;
2187 	unsigned int		gro_max_size;
2188 	unsigned int		gro_ipv4_max_size;
2189 	rx_handler_func_t __rcu	*rx_handler;
2190 	void __rcu		*rx_handler_data;
2191 	possible_net_t			nd_net;
2192 #ifdef CONFIG_NETPOLL
2193 	struct netpoll_info __rcu	*npinfo;
2194 #endif
2195 #ifdef CONFIG_NET_XGRESS
2196 	struct bpf_mprog_entry __rcu *tcx_ingress;
2197 #endif
2198 	__cacheline_group_end(net_device_read_rx);
2199 
2200 	char			name[IFNAMSIZ];
2201 	struct netdev_name_node	*name_node;
2202 	struct dev_ifalias	__rcu *ifalias;
2203 	/*
2204 	 *	I/O specific fields
2205 	 *	FIXME: Merge these and struct ifmap into one
2206 	 */
2207 	unsigned long		mem_end;
2208 	unsigned long		mem_start;
2209 	unsigned long		base_addr;
2210 
2211 	/*
2212 	 *	Some hardware also needs these fields (state,dev_list,
2213 	 *	napi_list,unreg_list,close_list) but they are not
2214 	 *	part of the usual set specified in Space.c.
2215 	 */
2216 
2217 
2218 	struct list_head	dev_list;
2219 	struct list_head	napi_list;
2220 	struct list_head	unreg_list;
2221 	struct list_head	close_list;
2222 	struct list_head	ptype_all;
2223 
2224 	struct {
2225 		struct list_head upper;
2226 		struct list_head lower;
2227 	} adj_list;
2228 
2229 	/* Read-mostly cache-line for fast-path access */
2230 	xdp_features_t		xdp_features;
2231 	const struct xdp_metadata_ops *xdp_metadata_ops;
2232 	const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
2233 	unsigned short		gflags;
2234 
2235 	unsigned short		needed_tailroom;
2236 
2237 	netdev_features_t	hw_features;
2238 	netdev_features_t	wanted_features;
2239 	netdev_features_t	vlan_features;
2240 	netdev_features_t	hw_enc_features;
2241 	netdev_features_t	mpls_features;
2242 	netdev_features_t	mangleid_features;
2243 
2244 	unsigned int		min_mtu;
2245 	unsigned int		max_mtu;
2246 	unsigned short		type;
2247 	unsigned char		min_header_len;
2248 	unsigned char		name_assign_type;
2249 
2250 	int			group;
2251 
2252 	struct net_device_stats	stats; /* not used by modern drivers */
2253 
2254 	struct net_device_core_stats __percpu *core_stats;
2255 
2256 	/* Stats to monitor link on/off, flapping */
2257 	atomic_t		carrier_up_count;
2258 	atomic_t		carrier_down_count;
2259 
2260 #ifdef CONFIG_WIRELESS_EXT
2261 	const struct iw_handler_def *wireless_handlers;
2262 #endif
2263 	const struct ethtool_ops *ethtool_ops;
2264 #ifdef CONFIG_NET_L3_MASTER_DEV
2265 	const struct l3mdev_ops	*l3mdev_ops;
2266 #endif
2267 #if IS_ENABLED(CONFIG_IPV6)
2268 	const struct ndisc_ops *ndisc_ops;
2269 #endif
2270 
2271 #ifdef CONFIG_XFRM_OFFLOAD
2272 	const struct xfrmdev_ops *xfrmdev_ops;
2273 #endif
2274 
2275 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2276 	const struct tlsdev_ops *tlsdev_ops;
2277 #endif
2278 
2279 	unsigned int		operstate;
2280 	unsigned char		link_mode;
2281 
2282 	unsigned char		if_port;
2283 	unsigned char		dma;
2284 
2285 	/* Interface address info. */
2286 	unsigned char		perm_addr[MAX_ADDR_LEN];
2287 	unsigned char		addr_assign_type;
2288 	unsigned char		addr_len;
2289 	unsigned char		upper_level;
2290 	unsigned char		lower_level;
2291 	u8			threaded;
2292 
2293 	unsigned short		neigh_priv_len;
2294 	unsigned short          dev_id;
2295 	unsigned short          dev_port;
2296 	int			irq;
2297 	u32			priv_len;
2298 
2299 	spinlock_t		addr_list_lock;
2300 
2301 	struct netdev_hw_addr_list	uc;
2302 	struct netdev_hw_addr_list	mc;
2303 	struct netdev_hw_addr_list	dev_addrs;
2304 
2305 #ifdef CONFIG_SYSFS
2306 	struct kset		*queues_kset;
2307 #endif
2308 #ifdef CONFIG_LOCKDEP
2309 	struct list_head	unlink_list;
2310 #endif
2311 	unsigned int		promiscuity;
2312 	unsigned int		allmulti;
2313 	bool			uc_promisc;
2314 	struct list_head	rx_mode_node;
2315 	netdevice_tracker	rx_mode_tracker;
2316 	struct netdev_hw_addr_list	rx_mode_addr_cache;
2317 #ifdef CONFIG_LOCKDEP
2318 	unsigned char		nested_level;
2319 #endif
2320 
2321 
2322 	/* Protocol-specific pointers */
2323 	struct in_device __rcu	*ip_ptr;
2324 	/** @fib_nh_head: nexthops associated with this netdev */
2325 	struct hlist_head	fib_nh_head;
2326 
2327 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2328 	struct vlan_info __rcu	*vlan_info;
2329 #endif
2330 #if IS_ENABLED(CONFIG_NET_DSA)
2331 	struct dsa_port		*dsa_ptr;
2332 #endif
2333 #if IS_ENABLED(CONFIG_TIPC)
2334 	struct tipc_bearer __rcu *tipc_ptr;
2335 #endif
2336 #if IS_ENABLED(CONFIG_ATALK)
2337 	void 			*atalk_ptr;
2338 #endif
2339 #if IS_ENABLED(CONFIG_CFG80211)
2340 	struct wireless_dev	*ieee80211_ptr;
2341 #endif
2342 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
2343 	struct wpan_dev		*ieee802154_ptr;
2344 #endif
2345 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
2346 	struct mpls_dev __rcu	*mpls_ptr;
2347 #endif
2348 #if IS_ENABLED(CONFIG_MCTP)
2349 	struct mctp_dev __rcu	*mctp_ptr;
2350 #endif
2351 #if IS_ENABLED(CONFIG_INET_PSP)
2352 	struct psp_dev __rcu	*psp_dev;
2353 #endif
2354 
2355 /*
2356  * Cache lines mostly used on receive path (including eth_type_trans())
2357  */
2358 	/* Interface address info used in eth_type_trans() */
2359 	const unsigned char	*dev_addr;
2360 
2361 	unsigned int		num_rx_queues;
2362 #define GRO_LEGACY_MAX_SIZE	65536u
2363 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2364  * and shinfo->gso_segs is a 16bit field.
2365  */
2366 #define GRO_MAX_SIZE		(8 * 65535u)
2367 	unsigned int		xdp_zc_max_segs;
2368 	struct netdev_queue __rcu *ingress_queue;
2369 #ifdef CONFIG_NETFILTER_INGRESS
2370 	struct nf_hook_entries __rcu *nf_hooks_ingress;
2371 #endif
2372 
2373 	unsigned char		broadcast[MAX_ADDR_LEN];
2374 #ifdef CONFIG_RFS_ACCEL
2375 	struct cpu_rmap		*rx_cpu_rmap;
2376 #endif
2377 	struct hlist_node	index_hlist;
2378 
2379 /*
2380  * Cache lines mostly used on transmit path
2381  */
2382 	unsigned int		num_tx_queues;
2383 	struct Qdisc __rcu	*qdisc;
2384 	unsigned int		tx_queue_len;
2385 	spinlock_t		tx_global_lock;
2386 
2387 	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2388 
2389 #ifdef CONFIG_NET_SCHED
2390 	DECLARE_HASHTABLE	(qdisc_hash, 4);
2391 #endif
2392 	/* These may be needed for future network-power-down code. */
2393 	struct timer_list	watchdog_timer;
2394 	int			watchdog_timeo;
2395 
2396 	u32                     proto_down_reason;
2397 
2398 	struct list_head	todo_list;
2399 
2400 #ifdef CONFIG_PCPU_DEV_REFCNT
2401 	int __percpu		*pcpu_refcnt;
2402 #else
2403 	refcount_t		dev_refcnt;
2404 #endif
2405 	struct ref_tracker_dir	refcnt_tracker;
2406 
2407 	struct list_head	link_watch_list;
2408 
2409 	u8 reg_state;
2410 
2411 	bool dismantle;
2412 
2413 	/** @moving_ns: device is changing netns, protected by @lock */
2414 	bool moving_ns;
2415 	/** @rtnl_link_initializing: Device being created, suppress events */
2416 	bool rtnl_link_initializing;
2417 
2418 	bool needs_free_netdev;
2419 	void (*priv_destructor)(struct net_device *dev);
2420 
2421 	/* mid-layer private */
2422 	void				*ml_priv;
2423 	enum netdev_ml_priv_type	ml_priv_type;
2424 
2425 #if IS_ENABLED(CONFIG_GARP)
2426 	struct garp_port __rcu	*garp_port;
2427 #endif
2428 #if IS_ENABLED(CONFIG_MRP)
2429 	struct mrp_port __rcu	*mrp_port;
2430 #endif
2431 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
2432 	struct dm_hw_stat_delta __rcu *dm_private;
2433 #endif
2434 	struct device		dev;
2435 	const struct attribute_group *sysfs_groups[5];
2436 	const struct attribute_group *sysfs_rx_queue_group;
2437 
2438 	const struct rtnl_link_ops *rtnl_link_ops;
2439 
2440 	const struct netdev_stat_ops *stat_ops;
2441 
2442 	const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
2443 
2444 	/* for setting kernel sock attribute on TCP connection setup */
2445 #define GSO_MAX_SEGS		65535u
2446 #define GSO_LEGACY_MAX_SIZE	65536u
2447 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2448  * and shinfo->gso_segs is a 16bit field.
2449  */
2450 #define GSO_MAX_SIZE		(8 * GSO_MAX_SEGS)
2451 
2452 #define TSO_LEGACY_MAX_SIZE	65536
2453 #define TSO_MAX_SIZE		UINT_MAX
2454 	unsigned int		tso_max_size;
2455 #define TSO_MAX_SEGS		U16_MAX
2456 	u16			tso_max_segs;
2457 
2458 #ifdef CONFIG_DCB
2459 	const struct dcbnl_rtnl_ops *dcbnl_ops;
2460 #endif
2461 	u8			prio_tc_map[TC_BITMASK + 1];
2462 
2463 #if IS_ENABLED(CONFIG_FCOE)
2464 	unsigned int		fcoe_ddp_xid;
2465 #endif
2466 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2467 	struct netprio_map __rcu *priomap;
2468 #endif
2469 	struct phy_link_topology	*link_topo;
2470 	struct phy_device	*phydev;
2471 	struct sfp_bus		*sfp_bus;
2472 	struct lock_class_key	*qdisc_tx_busylock;
2473 	bool			proto_down;
2474 	bool			irq_affinity_auto;
2475 	bool			rx_cpu_rmap_auto;
2476 
2477 	/* priv_flags_slow, ungrouped to save space */
2478 	unsigned long		see_all_hwtstamp_requests:1;
2479 	unsigned long		change_proto_down:1;
2480 	unsigned long		netns_immutable:1;
2481 	unsigned long		fcoe_mtu:1;
2482 
2483 	struct list_head	net_notifier_list;
2484 
2485 #if IS_ENABLED(CONFIG_MACSEC)
2486 	/* MACsec management functions */
2487 	const struct macsec_ops *macsec_ops;
2488 #endif
2489 	const struct udp_tunnel_nic_info	*udp_tunnel_nic_info;
2490 	struct udp_tunnel_nic	*udp_tunnel_nic;
2491 
2492 	/** @cfg: net_device queue-related configuration */
2493 	struct netdev_config	*cfg;
2494 	/**
2495 	 * @cfg_pending: same as @cfg but when device is being actively
2496 	 *	reconfigured includes any changes to the configuration
2497 	 *	requested by the user, but which may or may not be rejected.
2498 	 */
2499 	struct netdev_config	*cfg_pending;
2500 	struct ethtool_netdev_state *ethtool;
2501 
2502 	/* protected by rtnl_lock */
2503 	struct bpf_xdp_entity	xdp_state[__MAX_XDP_MODE];
2504 
2505 	u8 dev_addr_shadow[MAX_ADDR_LEN];
2506 	netdevice_tracker	linkwatch_dev_tracker;
2507 	netdevice_tracker	watchdog_dev_tracker;
2508 	netdevice_tracker	dev_registered_tracker;
2509 	struct rtnl_hw_stats64	*offload_xstats_l3;
2510 
2511 	struct devlink_port	*devlink_port;
2512 
2513 #if IS_ENABLED(CONFIG_DPLL)
2514 	struct dpll_pin	__rcu	*dpll_pin;
2515 #endif
2516 #if IS_ENABLED(CONFIG_PAGE_POOL)
2517 	/** @page_pools: page pools created for this netdevice */
2518 	struct hlist_head	page_pools;
2519 #endif
2520 
2521 	/** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
2522 	struct dim_irq_moder	*irq_moder;
2523 
2524 	u64			max_pacing_offload_horizon;
2525 	struct napi_config	*napi_config;
2526 	u32			num_napi_configs;
2527 	u32			napi_defer_hard_irqs;
2528 	unsigned long		gro_flush_timeout;
2529 
2530 	/**
2531 	 * @up: copy of @state's IFF_UP, but safe to read with just @lock.
2532 	 *	May report false negatives while the device is being opened
2533 	 *	or closed (@lock does not protect .ndo_open, or .ndo_close).
2534 	 */
2535 	bool			up;
2536 
2537 	/**
2538 	 * @request_ops_lock: request the core to run all @netdev_ops and
2539 	 * @ethtool_ops under the @lock.
2540 	 */
2541 	bool			request_ops_lock;
2542 
2543 	/**
2544 	 * @lock: netdev-scope lock, protects a small selection of fields.
2545 	 * Should always be taken using netdev_lock() / netdev_unlock() helpers.
2546 	 * Drivers are free to use it for other protection.
2547 	 *
2548 	 * For the drivers that implement shaper or queue API, the scope
2549 	 * of this lock is expanded to cover most ndo/queue/ethtool/sysfs
2550 	 * operations. Drivers may opt-in to this behavior by setting
2551 	 * @request_ops_lock.
2552 	 *
2553 	 * @lock protection mixes with rtnl_lock in multiple ways, fields are
2554 	 * either:
2555 	 *
2556 	 * - simply protected by the instance @lock;
2557 	 *
2558 	 * - double protected - writers hold both locks, readers hold either;
2559 	 *
2560 	 * - ops protected - protected by the lock held around the NDOs
2561 	 *   and other callbacks, that is the instance lock on devices for
2562 	 *   which netdev_need_ops_lock() returns true, otherwise by rtnl_lock;
2563 	 *
2564 	 * - double ops protected - always protected by rtnl_lock but for
2565 	 *   devices for which netdev_need_ops_lock() returns true - also
2566 	 *   the instance lock.
2567 	 *
2568 	 * Simply protects:
2569 	 *	@gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
2570 	 *	@net_shaper_hierarchy, @reg_state, @threaded
2571 	 *
2572 	 * Double protects:
2573 	 *	@up, @moving_ns, @nd_net, @xdp_features
2574 	 *
2575 	 * Double ops protects:
2576 	 *	@real_num_rx_queues, @real_num_tx_queues
2577 	 *
2578 	 * Also protects some fields in:
2579 	 *	struct napi_struct, struct netdev_queue, struct netdev_rx_queue
2580 	 *
2581 	 * Ordering:
2582 	 *
2583 	 * - take after rtnl_lock
2584 	 *
2585 	 * - for the case of netdev queue leasing, the netdev-scope lock is
2586 	 *   taken for both the virtual and the physical device; to prevent
2587 	 *   deadlocks, the virtual device's lock must always be acquired
2588 	 *   before the physical device's (see netdev_nl_queue_create_doit)
2589 	 */
2590 	struct mutex		lock;
2591 
2592 #if IS_ENABLED(CONFIG_NET_SHAPER)
2593 	/**
2594 	 * @net_shaper_hierarchy: data tracking the current shaper status
2595 	 *  see include/net/net_shapers.h
2596 	 */
2597 	struct net_shaper_hierarchy *net_shaper_hierarchy;
2598 #endif
2599 
2600 	struct hlist_head neighbours[NEIGH_NR_TABLES];
2601 
2602 	struct hwtstamp_provider __rcu	*hwprov;
2603 
2604 	u8			priv[] ____cacheline_aligned
2605 				       __counted_by(priv_len);
2606 } ____cacheline_aligned;
2607 #define to_net_dev(d) container_of(d, struct net_device, dev)
2608 
2609 /*
2610  * Driver should use this to assign devlink port instance to a netdevice
2611  * before it registers the netdevice. Therefore devlink_port is static
2612  * during the netdev lifetime after it is registered.
2613  */
2614 #define SET_NETDEV_DEVLINK_PORT(dev, port)			\
2615 ({								\
2616 	WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED);	\
2617 	((dev)->devlink_port = (port));				\
2618 })
2619 
netif_elide_gro(const struct net_device * dev)2620 static inline bool netif_elide_gro(const struct net_device *dev)
2621 {
2622 	if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2623 		return true;
2624 	return false;
2625 }
2626 
2627 #define	NETDEV_ALIGN		32
2628 
2629 static inline
netdev_get_prio_tc_map(const struct net_device * dev,u32 prio)2630 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2631 {
2632 	return dev->prio_tc_map[prio & TC_BITMASK];
2633 }
2634 
2635 static inline
netdev_set_prio_tc_map(struct net_device * dev,u8 prio,u8 tc)2636 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2637 {
2638 	if (tc >= dev->num_tc)
2639 		return -EINVAL;
2640 
2641 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2642 	return 0;
2643 }
2644 
2645 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2646 void netdev_reset_tc(struct net_device *dev);
2647 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2648 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2649 
2650 static inline
netdev_get_num_tc(struct net_device * dev)2651 int netdev_get_num_tc(struct net_device *dev)
2652 {
2653 	return dev->num_tc;
2654 }
2655 
net_prefetch(void * p)2656 static inline void net_prefetch(void *p)
2657 {
2658 	prefetch(p);
2659 #if L1_CACHE_BYTES < 128
2660 	prefetch((u8 *)p + L1_CACHE_BYTES);
2661 #endif
2662 }
2663 
net_prefetchw(void * p)2664 static inline void net_prefetchw(void *p)
2665 {
2666 	prefetchw(p);
2667 #if L1_CACHE_BYTES < 128
2668 	prefetchw((u8 *)p + L1_CACHE_BYTES);
2669 #endif
2670 }
2671 
2672 void netdev_unbind_sb_channel(struct net_device *dev,
2673 			      struct net_device *sb_dev);
2674 int netdev_bind_sb_channel_queue(struct net_device *dev,
2675 				 struct net_device *sb_dev,
2676 				 u8 tc, u16 count, u16 offset);
2677 int netdev_set_sb_channel(struct net_device *dev, u16 channel);
netdev_get_sb_channel(struct net_device * dev)2678 static inline int netdev_get_sb_channel(struct net_device *dev)
2679 {
2680 	return max_t(int, -dev->num_tc, 0);
2681 }
2682 
2683 static inline
netdev_get_tx_queue(const struct net_device * dev,unsigned int index)2684 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2685 					 unsigned int index)
2686 {
2687 	DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
2688 	return &dev->_tx[index];
2689 }
2690 
skb_get_tx_queue(const struct net_device * dev,const struct sk_buff * skb)2691 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2692 						    const struct sk_buff *skb)
2693 {
2694 	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2695 }
2696 
netdev_for_each_tx_queue(struct net_device * dev,void (* f)(struct net_device *,struct netdev_queue *,void *),void * arg)2697 static inline void netdev_for_each_tx_queue(struct net_device *dev,
2698 					    void (*f)(struct net_device *,
2699 						      struct netdev_queue *,
2700 						      void *),
2701 					    void *arg)
2702 {
2703 	unsigned int i;
2704 
2705 	for (i = 0; i < dev->num_tx_queues; i++)
2706 		f(dev, &dev->_tx[i], arg);
2707 }
2708 
2709 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2710 		     struct net_device *sb_dev);
2711 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2712 					 struct sk_buff *skb,
2713 					 struct net_device *sb_dev);
2714 
2715 /* returns the headroom that the master device needs to take in account
2716  * when forwarding to this dev
2717  */
netdev_get_fwd_headroom(struct net_device * dev)2718 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2719 {
2720 	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2721 }
2722 
netdev_set_rx_headroom(struct net_device * dev,int new_hr)2723 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2724 {
2725 	if (dev->netdev_ops->ndo_set_rx_headroom)
2726 		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2727 }
2728 
2729 /* set the device rx headroom to the dev's default */
netdev_reset_rx_headroom(struct net_device * dev)2730 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2731 {
2732 	netdev_set_rx_headroom(dev, -1);
2733 }
2734 
netdev_get_ml_priv(struct net_device * dev,enum netdev_ml_priv_type type)2735 static inline void *netdev_get_ml_priv(struct net_device *dev,
2736 				       enum netdev_ml_priv_type type)
2737 {
2738 	if (dev->ml_priv_type != type)
2739 		return NULL;
2740 
2741 	return dev->ml_priv;
2742 }
2743 
netdev_set_ml_priv(struct net_device * dev,void * ml_priv,enum netdev_ml_priv_type type)2744 static inline void netdev_set_ml_priv(struct net_device *dev,
2745 				      void *ml_priv,
2746 				      enum netdev_ml_priv_type type)
2747 {
2748 	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2749 	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2750 	     dev->ml_priv_type, type);
2751 	WARN(!dev->ml_priv_type && dev->ml_priv,
2752 	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2753 
2754 	dev->ml_priv = ml_priv;
2755 	dev->ml_priv_type = type;
2756 }
2757 
2758 /*
2759  * Net namespace inlines
2760  */
2761 static inline
dev_net(const struct net_device * dev)2762 struct net *dev_net(const struct net_device *dev)
2763 {
2764 	return read_pnet(&dev->nd_net);
2765 }
2766 
2767 static inline
dev_net_rcu(const struct net_device * dev)2768 struct net *dev_net_rcu(const struct net_device *dev)
2769 {
2770 	return read_pnet_rcu(&dev->nd_net);
2771 }
2772 
2773 static inline
dev_net_set(struct net_device * dev,struct net * net)2774 void dev_net_set(struct net_device *dev, struct net *net)
2775 {
2776 	write_pnet(&dev->nd_net, net);
2777 }
2778 
2779 /**
2780  *	netdev_priv - access network device private data
2781  *	@dev: network device
2782  *
2783  * Get network device private data
2784  */
netdev_priv(const struct net_device * dev)2785 static inline void *netdev_priv(const struct net_device *dev)
2786 {
2787 	return (void *)dev->priv;
2788 }
2789 
2790 /**
2791  * netdev_from_priv() - get network device from priv
2792  * @priv: network device private data
2793  *
2794  * Returns: net_device to which @priv belongs
2795  */
netdev_from_priv(const void * priv)2796 static inline struct net_device *netdev_from_priv(const void *priv)
2797 {
2798 	return container_of(priv, struct net_device, priv);
2799 }
2800 
2801 /* Set the sysfs physical device reference for the network logical device
2802  * if set prior to registration will cause a symlink during initialization.
2803  */
2804 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
2805 
2806 /* Set the sysfs device type for the network logical device to allow
2807  * fine-grained identification of different network device types. For
2808  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2809  */
2810 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
2811 
2812 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
2813 			  enum netdev_queue_type type,
2814 			  struct napi_struct *napi);
2815 
netdev_lock(struct net_device * dev)2816 static inline void netdev_lock(struct net_device *dev)
2817 {
2818 	mutex_lock(&dev->lock);
2819 }
2820 
netdev_unlock(struct net_device * dev)2821 static inline void netdev_unlock(struct net_device *dev)
2822 {
2823 	mutex_unlock(&dev->lock);
2824 }
2825 /* Additional netdev_lock()-related helpers are in net/netdev_lock.h */
2826 
2827 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
2828 
netif_napi_set_irq(struct napi_struct * napi,int irq)2829 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
2830 {
2831 	netdev_lock(napi->dev);
2832 	netif_napi_set_irq_locked(napi, irq);
2833 	netdev_unlock(napi->dev);
2834 }
2835 
2836 /* Default NAPI poll() weight
2837  * Device drivers are strongly advised to not use bigger value
2838  */
2839 #define NAPI_POLL_WEIGHT 64
2840 
2841 void netif_napi_add_weight_locked(struct net_device *dev,
2842 				  struct napi_struct *napi,
2843 				  int (*poll)(struct napi_struct *, int),
2844 				  int weight);
2845 
2846 static inline void
netif_napi_add_weight(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int),int weight)2847 netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
2848 		      int (*poll)(struct napi_struct *, int), int weight)
2849 {
2850 	netdev_lock(dev);
2851 	netif_napi_add_weight_locked(dev, napi, poll, weight);
2852 	netdev_unlock(dev);
2853 }
2854 
2855 /**
2856  * netif_napi_add() - initialize a NAPI context
2857  * @dev:  network device
2858  * @napi: NAPI context
2859  * @poll: polling function
2860  *
2861  * netif_napi_add() must be used to initialize a NAPI context prior to calling
2862  * *any* of the other NAPI-related functions.
2863  */
2864 static inline void
netif_napi_add(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int))2865 netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2866 	       int (*poll)(struct napi_struct *, int))
2867 {
2868 	netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2869 }
2870 
2871 static inline void
netif_napi_add_locked(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int))2872 netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
2873 		      int (*poll)(struct napi_struct *, int))
2874 {
2875 	netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2876 }
2877 
2878 static inline void
netif_napi_add_tx_weight(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int),int weight)2879 netif_napi_add_tx_weight(struct net_device *dev,
2880 			 struct napi_struct *napi,
2881 			 int (*poll)(struct napi_struct *, int),
2882 			 int weight)
2883 {
2884 	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2885 	netif_napi_add_weight(dev, napi, poll, weight);
2886 }
2887 
2888 static inline void
netif_napi_add_config_locked(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int),int index)2889 netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
2890 			     int (*poll)(struct napi_struct *, int), int index)
2891 {
2892 	napi->index = index;
2893 	napi->config = &dev->napi_config[index];
2894 	netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2895 }
2896 
2897 /**
2898  * netif_napi_add_config - initialize a NAPI context with persistent config
2899  * @dev: network device
2900  * @napi: NAPI context
2901  * @poll: polling function
2902  * @index: the NAPI index
2903  */
2904 static inline void
netif_napi_add_config(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int),int index)2905 netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
2906 		      int (*poll)(struct napi_struct *, int), int index)
2907 {
2908 	netdev_lock(dev);
2909 	netif_napi_add_config_locked(dev, napi, poll, index);
2910 	netdev_unlock(dev);
2911 }
2912 
2913 /**
2914  * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2915  * @dev:  network device
2916  * @napi: NAPI context
2917  * @poll: polling function
2918  *
2919  * This variant of netif_napi_add() should be used from drivers using NAPI
2920  * to exclusively poll a TX queue.
2921  * This will avoid we add it into napi_hash[], thus polluting this hash table.
2922  */
netif_napi_add_tx(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int))2923 static inline void netif_napi_add_tx(struct net_device *dev,
2924 				     struct napi_struct *napi,
2925 				     int (*poll)(struct napi_struct *, int))
2926 {
2927 	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2928 }
2929 
2930 void __netif_napi_del_locked(struct napi_struct *napi);
2931 
2932 /**
2933  *  __netif_napi_del - remove a NAPI context
2934  *  @napi: NAPI context
2935  *
2936  * Warning: caller must observe RCU grace period before freeing memory
2937  * containing @napi. Drivers might want to call this helper to combine
2938  * all the needed RCU grace periods into a single one.
2939  */
__netif_napi_del(struct napi_struct * napi)2940 static inline void __netif_napi_del(struct napi_struct *napi)
2941 {
2942 	netdev_lock(napi->dev);
2943 	__netif_napi_del_locked(napi);
2944 	netdev_unlock(napi->dev);
2945 }
2946 
netif_napi_del_locked(struct napi_struct * napi)2947 static inline void netif_napi_del_locked(struct napi_struct *napi)
2948 {
2949 	__netif_napi_del_locked(napi);
2950 	synchronize_net();
2951 }
2952 
2953 /**
2954  *  netif_napi_del - remove a NAPI context
2955  *  @napi: NAPI context
2956  *
2957  *  netif_napi_del() removes a NAPI context from the network device NAPI list
2958  */
netif_napi_del(struct napi_struct * napi)2959 static inline void netif_napi_del(struct napi_struct *napi)
2960 {
2961 	__netif_napi_del(napi);
2962 	synchronize_net();
2963 }
2964 
2965 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs);
2966 void netif_set_affinity_auto(struct net_device *dev);
2967 
2968 struct packet_type {
2969 	__be16			type;	/* This is really htons(ether_type). */
2970 	bool			ignore_outgoing;
2971 	struct net_device	*dev;	/* NULL is wildcarded here	     */
2972 	netdevice_tracker	dev_tracker;
2973 	int			(*func) (struct sk_buff *,
2974 					 struct net_device *,
2975 					 struct packet_type *,
2976 					 struct net_device *);
2977 	void			(*list_func) (struct list_head *,
2978 					      struct packet_type *,
2979 					      struct net_device *);
2980 	bool			(*id_match)(struct packet_type *ptype,
2981 					    struct sock *sk);
2982 	struct net		*af_packet_net;
2983 	void			*af_packet_priv;
2984 	struct list_head	list;
2985 };
2986 
2987 struct offload_callbacks {
2988 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
2989 						netdev_features_t features);
2990 	struct sk_buff		*(*gro_receive)(struct list_head *head,
2991 						struct sk_buff *skb);
2992 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
2993 };
2994 
2995 struct packet_offload {
2996 	__be16			 type;	/* This is really htons(ether_type). */
2997 	u16			 priority;
2998 	struct offload_callbacks callbacks;
2999 	struct list_head	 list;
3000 };
3001 
3002 /* often modified stats are per-CPU, other are shared (netdev->stats) */
3003 struct pcpu_sw_netstats {
3004 	u64_stats_t		rx_packets;
3005 	u64_stats_t		rx_bytes;
3006 	u64_stats_t		tx_packets;
3007 	u64_stats_t		tx_bytes;
3008 	struct u64_stats_sync   syncp;
3009 } __aligned(4 * sizeof(u64));
3010 
3011 struct pcpu_dstats {
3012 	u64_stats_t		rx_packets;
3013 	u64_stats_t		rx_bytes;
3014 	u64_stats_t		tx_packets;
3015 	u64_stats_t		tx_bytes;
3016 	u64_stats_t		rx_drops;
3017 	u64_stats_t		tx_drops;
3018 	struct u64_stats_sync	syncp;
3019 } __aligned(8 * sizeof(u64));
3020 
3021 struct pcpu_lstats {
3022 	u64_stats_t packets;
3023 	u64_stats_t bytes;
3024 	struct u64_stats_sync syncp;
3025 } __aligned(2 * sizeof(u64));
3026 
3027 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
3028 
dev_sw_netstats_rx_add(struct net_device * dev,unsigned int len)3029 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
3030 {
3031 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
3032 
3033 	u64_stats_update_begin(&tstats->syncp);
3034 	u64_stats_add(&tstats->rx_bytes, len);
3035 	u64_stats_inc(&tstats->rx_packets);
3036 	u64_stats_update_end(&tstats->syncp);
3037 }
3038 
dev_sw_netstats_tx_add(struct net_device * dev,unsigned int packets,unsigned int len)3039 static inline void dev_sw_netstats_tx_add(struct net_device *dev,
3040 					  unsigned int packets,
3041 					  unsigned int len)
3042 {
3043 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
3044 
3045 	u64_stats_update_begin(&tstats->syncp);
3046 	u64_stats_add(&tstats->tx_bytes, len);
3047 	u64_stats_add(&tstats->tx_packets, packets);
3048 	u64_stats_update_end(&tstats->syncp);
3049 }
3050 
dev_lstats_add(struct net_device * dev,unsigned int len)3051 static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
3052 {
3053 	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
3054 
3055 	u64_stats_update_begin(&lstats->syncp);
3056 	u64_stats_add(&lstats->bytes, len);
3057 	u64_stats_inc(&lstats->packets);
3058 	u64_stats_update_end(&lstats->syncp);
3059 }
3060 
dev_dstats_rx_add(struct net_device * dev,unsigned int len)3061 static inline void dev_dstats_rx_add(struct net_device *dev,
3062 				     unsigned int len)
3063 {
3064 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3065 
3066 	u64_stats_update_begin(&dstats->syncp);
3067 	u64_stats_inc(&dstats->rx_packets);
3068 	u64_stats_add(&dstats->rx_bytes, len);
3069 	u64_stats_update_end(&dstats->syncp);
3070 }
3071 
dev_dstats_rx_dropped(struct net_device * dev)3072 static inline void dev_dstats_rx_dropped(struct net_device *dev)
3073 {
3074 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3075 
3076 	u64_stats_update_begin(&dstats->syncp);
3077 	u64_stats_inc(&dstats->rx_drops);
3078 	u64_stats_update_end(&dstats->syncp);
3079 }
3080 
dev_dstats_rx_dropped_add(struct net_device * dev,unsigned int packets)3081 static inline void dev_dstats_rx_dropped_add(struct net_device *dev,
3082 					     unsigned int packets)
3083 {
3084 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3085 
3086 	u64_stats_update_begin(&dstats->syncp);
3087 	u64_stats_add(&dstats->rx_drops, packets);
3088 	u64_stats_update_end(&dstats->syncp);
3089 }
3090 
dev_dstats_tx_add(struct net_device * dev,unsigned int len)3091 static inline void dev_dstats_tx_add(struct net_device *dev,
3092 				     unsigned int len)
3093 {
3094 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3095 
3096 	u64_stats_update_begin(&dstats->syncp);
3097 	u64_stats_inc(&dstats->tx_packets);
3098 	u64_stats_add(&dstats->tx_bytes, len);
3099 	u64_stats_update_end(&dstats->syncp);
3100 }
3101 
dev_dstats_tx_dropped(struct net_device * dev)3102 static inline void dev_dstats_tx_dropped(struct net_device *dev)
3103 {
3104 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3105 
3106 	u64_stats_update_begin(&dstats->syncp);
3107 	u64_stats_inc(&dstats->tx_drops);
3108 	u64_stats_update_end(&dstats->syncp);
3109 }
3110 
3111 #define __netdev_alloc_pcpu_stats(type, gfp)				\
3112 ({									\
3113 	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
3114 	if (pcpu_stats)	{						\
3115 		int __cpu;						\
3116 		for_each_possible_cpu(__cpu) {				\
3117 			typeof(type) *stat;				\
3118 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
3119 			u64_stats_init(&stat->syncp);			\
3120 		}							\
3121 	}								\
3122 	pcpu_stats;							\
3123 })
3124 
3125 #define netdev_alloc_pcpu_stats(type)					\
3126 	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
3127 
3128 #define devm_netdev_alloc_pcpu_stats(dev, type)				\
3129 ({									\
3130 	typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
3131 	if (pcpu_stats) {						\
3132 		int __cpu;						\
3133 		for_each_possible_cpu(__cpu) {				\
3134 			typeof(type) *stat;				\
3135 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
3136 			u64_stats_init(&stat->syncp);			\
3137 		}							\
3138 	}								\
3139 	pcpu_stats;							\
3140 })
3141 
3142 enum netdev_lag_tx_type {
3143 	NETDEV_LAG_TX_TYPE_UNKNOWN,
3144 	NETDEV_LAG_TX_TYPE_RANDOM,
3145 	NETDEV_LAG_TX_TYPE_BROADCAST,
3146 	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
3147 	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
3148 	NETDEV_LAG_TX_TYPE_HASH,
3149 };
3150 
3151 enum netdev_lag_hash {
3152 	NETDEV_LAG_HASH_NONE,
3153 	NETDEV_LAG_HASH_L2,
3154 	NETDEV_LAG_HASH_L34,
3155 	NETDEV_LAG_HASH_L23,
3156 	NETDEV_LAG_HASH_E23,
3157 	NETDEV_LAG_HASH_E34,
3158 	NETDEV_LAG_HASH_VLAN_SRCMAC,
3159 	NETDEV_LAG_HASH_UNKNOWN,
3160 };
3161 
3162 struct netdev_lag_upper_info {
3163 	enum netdev_lag_tx_type tx_type;
3164 	enum netdev_lag_hash hash_type;
3165 };
3166 
3167 struct netdev_lag_lower_state_info {
3168 	u8 link_up : 1,
3169 	   tx_enabled : 1;
3170 };
3171 
3172 #include <linux/notifier.h>
3173 
3174 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
3175  * and the rtnetlink notification exclusion list in rtnetlink_event() when
3176  * adding new types.
3177  */
3178 enum netdev_cmd {
3179 	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
3180 	NETDEV_DOWN,
3181 	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
3182 				   detected a hardware crash and restarted
3183 				   - we can use this eg to kick tcp sessions
3184 				   once done */
3185 	NETDEV_CHANGE,		/* Notify device state change */
3186 	NETDEV_REGISTER,
3187 	NETDEV_UNREGISTER,
3188 	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
3189 	NETDEV_CHANGEADDR,	/* notify after the address change */
3190 	NETDEV_PRE_CHANGEADDR,	/* notify before the address change */
3191 	NETDEV_GOING_DOWN,
3192 	NETDEV_CHANGENAME,
3193 	NETDEV_FEAT_CHANGE,
3194 	NETDEV_BONDING_FAILOVER,
3195 	NETDEV_PRE_UP,
3196 	NETDEV_PRE_TYPE_CHANGE,
3197 	NETDEV_POST_TYPE_CHANGE,
3198 	NETDEV_POST_INIT,
3199 	NETDEV_PRE_UNINIT,
3200 	NETDEV_RELEASE,
3201 	NETDEV_NOTIFY_PEERS,
3202 	NETDEV_JOIN,
3203 	NETDEV_CHANGEUPPER,
3204 	NETDEV_RESEND_IGMP,
3205 	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
3206 	NETDEV_CHANGEINFODATA,
3207 	NETDEV_BONDING_INFO,
3208 	NETDEV_PRECHANGEUPPER,
3209 	NETDEV_CHANGELOWERSTATE,
3210 	NETDEV_UDP_TUNNEL_PUSH_INFO,
3211 	NETDEV_UDP_TUNNEL_DROP_INFO,
3212 	NETDEV_CHANGE_TX_QUEUE_LEN,
3213 	NETDEV_CVLAN_FILTER_PUSH_INFO,
3214 	NETDEV_CVLAN_FILTER_DROP_INFO,
3215 	NETDEV_SVLAN_FILTER_PUSH_INFO,
3216 	NETDEV_SVLAN_FILTER_DROP_INFO,
3217 	NETDEV_OFFLOAD_XSTATS_ENABLE,
3218 	NETDEV_OFFLOAD_XSTATS_DISABLE,
3219 	NETDEV_OFFLOAD_XSTATS_REPORT_USED,
3220 	NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
3221 	NETDEV_XDP_FEAT_CHANGE,
3222 };
3223 const char *netdev_cmd_to_name(enum netdev_cmd cmd);
3224 
3225 int register_netdevice_notifier(struct notifier_block *nb);
3226 int unregister_netdevice_notifier(struct notifier_block *nb);
3227 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
3228 int unregister_netdevice_notifier_net(struct net *net,
3229 				      struct notifier_block *nb);
3230 int register_netdevice_notifier_dev_net(struct net_device *dev,
3231 					struct notifier_block *nb,
3232 					struct netdev_net_notifier *nn);
3233 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
3234 					  struct notifier_block *nb,
3235 					  struct netdev_net_notifier *nn);
3236 
3237 struct netdev_notifier_info {
3238 	struct net_device	*dev;
3239 	struct netlink_ext_ack	*extack;
3240 };
3241 
3242 struct netdev_notifier_info_ext {
3243 	struct netdev_notifier_info info; /* must be first */
3244 	union {
3245 		u32 mtu;
3246 	} ext;
3247 };
3248 
3249 struct netdev_notifier_change_info {
3250 	struct netdev_notifier_info info; /* must be first */
3251 	unsigned int flags_changed;
3252 };
3253 
3254 struct netdev_notifier_changeupper_info {
3255 	struct netdev_notifier_info info; /* must be first */
3256 	struct net_device *upper_dev; /* new upper dev */
3257 	bool master; /* is upper dev master */
3258 	bool linking; /* is the notification for link or unlink */
3259 	void *upper_info; /* upper dev info */
3260 };
3261 
3262 struct netdev_notifier_changelowerstate_info {
3263 	struct netdev_notifier_info info; /* must be first */
3264 	void *lower_state_info; /* is lower dev state */
3265 };
3266 
3267 struct netdev_notifier_pre_changeaddr_info {
3268 	struct netdev_notifier_info info; /* must be first */
3269 	const unsigned char *dev_addr;
3270 };
3271 
3272 enum netdev_offload_xstats_type {
3273 	NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
3274 };
3275 
3276 struct netdev_notifier_offload_xstats_info {
3277 	struct netdev_notifier_info info; /* must be first */
3278 	enum netdev_offload_xstats_type type;
3279 
3280 	union {
3281 		/* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
3282 		struct netdev_notifier_offload_xstats_rd *report_delta;
3283 		/* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
3284 		struct netdev_notifier_offload_xstats_ru *report_used;
3285 	};
3286 };
3287 
3288 int netdev_offload_xstats_enable(struct net_device *dev,
3289 				 enum netdev_offload_xstats_type type,
3290 				 struct netlink_ext_ack *extack);
3291 int netdev_offload_xstats_disable(struct net_device *dev,
3292 				  enum netdev_offload_xstats_type type);
3293 bool netdev_offload_xstats_enabled(const struct net_device *dev,
3294 				   enum netdev_offload_xstats_type type);
3295 int netdev_offload_xstats_get(struct net_device *dev,
3296 			      enum netdev_offload_xstats_type type,
3297 			      struct rtnl_hw_stats64 *stats, bool *used,
3298 			      struct netlink_ext_ack *extack);
3299 void
3300 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
3301 				   const struct rtnl_hw_stats64 *stats);
3302 void
3303 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
3304 void netdev_offload_xstats_push_delta(struct net_device *dev,
3305 				      enum netdev_offload_xstats_type type,
3306 				      const struct rtnl_hw_stats64 *stats);
3307 
netdev_notifier_info_init(struct netdev_notifier_info * info,struct net_device * dev)3308 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
3309 					     struct net_device *dev)
3310 {
3311 	info->dev = dev;
3312 	info->extack = NULL;
3313 }
3314 
3315 static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info * info)3316 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
3317 {
3318 	return info->dev;
3319 }
3320 
3321 static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info * info)3322 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
3323 {
3324 	return info->extack;
3325 }
3326 
3327 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
3328 int call_netdevice_notifiers_info(unsigned long val,
3329 				  struct netdev_notifier_info *info);
3330 
3331 #define for_each_netdev(net, d)		\
3332 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
3333 #define for_each_netdev_reverse(net, d)	\
3334 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
3335 #define for_each_netdev_rcu(net, d)		\
3336 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
3337 #define for_each_netdev_safe(net, d, n)	\
3338 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
3339 #define for_each_netdev_continue(net, d)		\
3340 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
3341 #define for_each_netdev_continue_reverse(net, d)		\
3342 		list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
3343 						     dev_list)
3344 #define for_each_netdev_continue_rcu(net, d)		\
3345 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
3346 #define for_each_netdev_in_bond_rcu(bond, slave)	\
3347 		for_each_netdev_rcu(dev_net_rcu(bond), slave)	\
3348 			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
3349 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
3350 
3351 #define for_each_netdev_dump(net, d, ifindex)				\
3352 	for (; (d = xa_find(&(net)->dev_by_index, &ifindex,		\
3353 			    ULONG_MAX, XA_PRESENT)); ifindex++)
3354 
next_net_device(struct net_device * dev)3355 static inline struct net_device *next_net_device(struct net_device *dev)
3356 {
3357 	struct list_head *lh;
3358 	struct net *net;
3359 
3360 	net = dev_net(dev);
3361 	lh = dev->dev_list.next;
3362 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3363 }
3364 
next_net_device_rcu(struct net_device * dev)3365 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
3366 {
3367 	struct list_head *lh;
3368 	struct net *net;
3369 
3370 	net = dev_net(dev);
3371 	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
3372 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3373 }
3374 
first_net_device(struct net * net)3375 static inline struct net_device *first_net_device(struct net *net)
3376 {
3377 	return list_empty(&net->dev_base_head) ? NULL :
3378 		net_device_entry(net->dev_base_head.next);
3379 }
3380 
3381 int netdev_boot_setup_check(struct net_device *dev);
3382 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
3383 				   const char *hwaddr);
3384 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
3385 				       const char *hwaddr);
3386 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
3387 void dev_add_pack(struct packet_type *pt);
3388 void dev_remove_pack(struct packet_type *pt);
3389 void __dev_remove_pack(struct packet_type *pt);
3390 void dev_add_offload(struct packet_offload *po);
3391 void dev_remove_offload(struct packet_offload *po);
3392 
3393 int dev_get_iflink(const struct net_device *dev);
3394 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
3395 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3396 			  struct net_device_path_stack *stack);
3397 struct net_device *dev_get_by_name(struct net *net, const char *name);
3398 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
3399 struct net_device *__dev_get_by_name(struct net *net, const char *name);
3400 bool netdev_name_in_use(struct net *net, const char *name);
3401 int dev_alloc_name(struct net_device *dev, const char *name);
3402 int netif_open(struct net_device *dev, struct netlink_ext_ack *extack);
3403 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
3404 void netif_close(struct net_device *dev);
3405 void dev_close(struct net_device *dev);
3406 void netif_close_many(struct list_head *head, bool unlink);
3407 void netif_disable_lro(struct net_device *dev);
3408 void dev_disable_lro(struct net_device *dev);
3409 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
3410 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3411 		     struct net_device *sb_dev);
3412 
3413 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
3414 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
3415 
dev_queue_xmit(struct sk_buff * skb)3416 static inline int dev_queue_xmit(struct sk_buff *skb)
3417 {
3418 	return __dev_queue_xmit(skb, NULL);
3419 }
3420 
dev_queue_xmit_accel(struct sk_buff * skb,struct net_device * sb_dev)3421 static inline int dev_queue_xmit_accel(struct sk_buff *skb,
3422 				       struct net_device *sb_dev)
3423 {
3424 	return __dev_queue_xmit(skb, sb_dev);
3425 }
3426 
dev_direct_xmit(struct sk_buff * skb,u16 queue_id)3427 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3428 {
3429 	int ret;
3430 
3431 	ret = __dev_direct_xmit(skb, queue_id);
3432 	if (!dev_xmit_complete(ret))
3433 		kfree_skb(skb);
3434 	return ret;
3435 }
3436 
3437 int register_netdevice(struct net_device *dev);
3438 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
3439 void unregister_netdevice_many(struct list_head *head);
3440 bool unregister_netdevice_queued(const struct net_device *dev);
3441 
unregister_netdevice(struct net_device * dev)3442 static inline void unregister_netdevice(struct net_device *dev)
3443 {
3444 	unregister_netdevice_queue(dev, NULL);
3445 }
3446 
3447 int netdev_refcnt_read(const struct net_device *dev);
3448 void free_netdev(struct net_device *dev);
3449 
3450 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
3451 					 struct sk_buff *skb,
3452 					 bool all_slaves);
3453 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
3454 					    struct sock *sk);
3455 struct net_device *dev_get_by_index(struct net *net, int ifindex);
3456 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
3457 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
3458 				       netdevice_tracker *tracker, gfp_t gfp);
3459 struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
3460 struct net_device *netdev_get_by_name(struct net *net, const char *name,
3461 				      netdevice_tracker *tracker, gfp_t gfp);
3462 struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
3463 					   unsigned short flags, unsigned short mask);
3464 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
3465 void netdev_copy_name(struct net_device *dev, char *name);
3466 
dev_hard_header(struct sk_buff * skb,struct net_device * dev,unsigned short type,const void * daddr,const void * saddr,unsigned int len)3467 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3468 				  unsigned short type,
3469 				  const void *daddr, const void *saddr,
3470 				  unsigned int len)
3471 {
3472 	if (!dev->header_ops || !dev->header_ops->create)
3473 		return 0;
3474 
3475 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3476 }
3477 
dev_parse_header(const struct sk_buff * skb,unsigned char * haddr)3478 static inline int dev_parse_header(const struct sk_buff *skb,
3479 				   unsigned char *haddr)
3480 {
3481 	const struct net_device *dev = skb->dev;
3482 
3483 	if (!dev->header_ops || !dev->header_ops->parse)
3484 		return 0;
3485 	return dev->header_ops->parse(skb, dev, haddr);
3486 }
3487 
dev_parse_header_protocol(const struct sk_buff * skb)3488 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3489 {
3490 	const struct net_device *dev = skb->dev;
3491 
3492 	if (!dev->header_ops || !dev->header_ops->parse_protocol)
3493 		return 0;
3494 	return dev->header_ops->parse_protocol(skb);
3495 }
3496 
3497 /* ll_header must have at least hard_header_len allocated */
dev_validate_header(const struct net_device * dev,char * ll_header,int len)3498 static inline bool dev_validate_header(const struct net_device *dev,
3499 				       char *ll_header, int len)
3500 {
3501 	if (likely(len >= dev->hard_header_len))
3502 		return true;
3503 	if (len < dev->min_header_len)
3504 		return false;
3505 
3506 	if (capable(CAP_SYS_RAWIO)) {
3507 		memset(ll_header + len, 0, dev->hard_header_len - len);
3508 		return true;
3509 	}
3510 
3511 	if (dev->header_ops && dev->header_ops->validate)
3512 		return dev->header_ops->validate(ll_header, len);
3513 
3514 	return false;
3515 }
3516 
dev_has_header(const struct net_device * dev)3517 static inline bool dev_has_header(const struct net_device *dev)
3518 {
3519 	return dev->header_ops && dev->header_ops->create;
3520 }
3521 
3522 struct numa_drop_counters {
3523 	atomic_t	drops0 ____cacheline_aligned_in_smp;
3524 	atomic_t	drops1 ____cacheline_aligned_in_smp;
3525 };
3526 
numa_drop_read(const struct numa_drop_counters * ndc)3527 static inline int numa_drop_read(const struct numa_drop_counters *ndc)
3528 {
3529 	return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1);
3530 }
3531 
numa_drop_add(struct numa_drop_counters * ndc,int val)3532 static inline void numa_drop_add(struct numa_drop_counters *ndc, int val)
3533 {
3534 	int n = numa_node_id() % 2;
3535 
3536 	if (n)
3537 		atomic_add(val, &ndc->drops1);
3538 	else
3539 		atomic_add(val, &ndc->drops0);
3540 }
3541 
numa_drop_reset(struct numa_drop_counters * ndc)3542 static inline void numa_drop_reset(struct numa_drop_counters *ndc)
3543 {
3544 	atomic_set(&ndc->drops0, 0);
3545 	atomic_set(&ndc->drops1, 0);
3546 }
3547 
3548 /*
3549  * Incoming packets are placed on per-CPU queues
3550  */
3551 struct softnet_data {
3552 	struct list_head	poll_list;
3553 	struct sk_buff_head	process_queue;
3554 	local_lock_t		process_queue_bh_lock;
3555 
3556 	/* stats */
3557 	unsigned int		processed;
3558 	unsigned int		time_squeeze;
3559 #ifdef CONFIG_RPS
3560 	struct softnet_data	*rps_ipi_list;
3561 #endif
3562 
3563 	unsigned int		received_rps;
3564 	bool			in_net_rx_action;
3565 	bool			in_napi_threaded_poll;
3566 
3567 #ifdef CONFIG_NET_FLOW_LIMIT
3568 	struct sd_flow_limit __rcu *flow_limit;
3569 #endif
3570 	struct Qdisc		*output_queue;
3571 	struct Qdisc		**output_queue_tailp;
3572 	struct sk_buff		*completion_queue;
3573 #ifdef CONFIG_XFRM_OFFLOAD
3574 	struct sk_buff_head	xfrm_backlog;
3575 #endif
3576 	/* written and read only by owning cpu: */
3577 	struct netdev_xmit xmit;
3578 #ifdef CONFIG_RPS
3579 	/* input_queue_head should be written by cpu owning this struct,
3580 	 * and only read by other cpus. Worth using a cache line.
3581 	 */
3582 	unsigned int		input_queue_head ____cacheline_aligned_in_smp;
3583 
3584 	/* Elements below can be accessed between CPUs for RPS/RFS */
3585 	call_single_data_t	csd ____cacheline_aligned_in_smp;
3586 	struct softnet_data	*rps_ipi_next;
3587 	unsigned int		cpu;
3588 
3589 	/* We force a cacheline alignment from here, to hold together
3590 	 * input_queue_tail, input_pkt_queue and backlog.state.
3591 	 * We add holes so that backlog.state is the last field
3592 	 * of this cache line.
3593 	 */
3594 	long			pad[3] ____cacheline_aligned_in_smp;
3595 	unsigned int		input_queue_tail;
3596 #endif
3597 	struct sk_buff_head	input_pkt_queue;
3598 
3599 	struct napi_struct	backlog;
3600 
3601 	struct numa_drop_counters drop_counters;
3602 
3603 	int			defer_ipi_scheduled ____cacheline_aligned_in_smp;
3604 	call_single_data_t	defer_csd;
3605 };
3606 
3607 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3608 
3609 struct page_pool_bh {
3610 	struct page_pool *pool;
3611 	local_lock_t bh_lock;
3612 };
3613 DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
3614 
3615 #define XMIT_RECURSION_LIMIT	8
3616 
3617 #ifndef CONFIG_PREEMPT_RT
dev_recursion_level(void)3618 static inline int dev_recursion_level(void)
3619 {
3620 	return this_cpu_read(softnet_data.xmit.recursion);
3621 }
3622 
dev_xmit_recursion(void)3623 static inline bool dev_xmit_recursion(void)
3624 {
3625 	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
3626 			XMIT_RECURSION_LIMIT);
3627 }
3628 
dev_xmit_recursion_inc(void)3629 static inline void dev_xmit_recursion_inc(void)
3630 {
3631 	__this_cpu_inc(softnet_data.xmit.recursion);
3632 }
3633 
dev_xmit_recursion_dec(void)3634 static inline void dev_xmit_recursion_dec(void)
3635 {
3636 	__this_cpu_dec(softnet_data.xmit.recursion);
3637 }
3638 #else
dev_recursion_level(void)3639 static inline int dev_recursion_level(void)
3640 {
3641 	return current->net_xmit.recursion;
3642 }
3643 
dev_xmit_recursion(void)3644 static inline bool dev_xmit_recursion(void)
3645 {
3646 	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
3647 }
3648 
dev_xmit_recursion_inc(void)3649 static inline void dev_xmit_recursion_inc(void)
3650 {
3651 	current->net_xmit.recursion++;
3652 }
3653 
dev_xmit_recursion_dec(void)3654 static inline void dev_xmit_recursion_dec(void)
3655 {
3656 	current->net_xmit.recursion--;
3657 }
3658 #endif
3659 
3660 void __netif_schedule(struct Qdisc *q);
3661 void netif_schedule_queue(struct netdev_queue *txq);
3662 
netif_tx_schedule_all(struct net_device * dev)3663 static inline void netif_tx_schedule_all(struct net_device *dev)
3664 {
3665 	unsigned int i;
3666 
3667 	for (i = 0; i < dev->num_tx_queues; i++)
3668 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
3669 }
3670 
netif_tx_start_queue(struct netdev_queue * dev_queue)3671 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3672 {
3673 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3674 }
3675 
3676 /**
3677  *	netif_start_queue - allow transmit
3678  *	@dev: network device
3679  *
3680  *	Allow upper layers to call the device hard_start_xmit routine.
3681  */
netif_start_queue(struct net_device * dev)3682 static inline void netif_start_queue(struct net_device *dev)
3683 {
3684 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3685 }
3686 
netif_tx_start_all_queues(struct net_device * dev)3687 static inline void netif_tx_start_all_queues(struct net_device *dev)
3688 {
3689 	unsigned int i;
3690 
3691 	for (i = 0; i < dev->num_tx_queues; i++) {
3692 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3693 		netif_tx_start_queue(txq);
3694 	}
3695 }
3696 
3697 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3698 
3699 /**
3700  *	netif_wake_queue - restart transmit
3701  *	@dev: network device
3702  *
3703  *	Allow upper layers to call the device hard_start_xmit routine.
3704  *	Used for flow control when transmit resources are available.
3705  */
netif_wake_queue(struct net_device * dev)3706 static inline void netif_wake_queue(struct net_device *dev)
3707 {
3708 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3709 }
3710 
netif_tx_wake_all_queues(struct net_device * dev)3711 static inline void netif_tx_wake_all_queues(struct net_device *dev)
3712 {
3713 	unsigned int i;
3714 
3715 	for (i = 0; i < dev->num_tx_queues; i++) {
3716 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3717 		netif_tx_wake_queue(txq);
3718 	}
3719 }
3720 
netif_tx_stop_queue(struct netdev_queue * dev_queue)3721 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3722 {
3723 	/* Paired with READ_ONCE() from dev_watchdog() */
3724 	WRITE_ONCE(dev_queue->trans_start, jiffies);
3725 
3726 	/* This barrier is paired with smp_mb() from dev_watchdog() */
3727 	smp_mb__before_atomic();
3728 
3729 	/* Must be an atomic op see netif_txq_try_stop() */
3730 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3731 }
3732 
3733 /**
3734  *	netif_stop_queue - stop transmitted packets
3735  *	@dev: network device
3736  *
3737  *	Stop upper layers calling the device hard_start_xmit routine.
3738  *	Used for flow control when transmit resources are unavailable.
3739  */
netif_stop_queue(struct net_device * dev)3740 static inline void netif_stop_queue(struct net_device *dev)
3741 {
3742 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3743 }
3744 
3745 void netif_tx_stop_all_queues(struct net_device *dev);
3746 
netif_tx_queue_stopped(const struct netdev_queue * dev_queue)3747 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3748 {
3749 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3750 }
3751 
3752 /**
3753  *	netif_queue_stopped - test if transmit queue is flowblocked
3754  *	@dev: network device
3755  *
3756  *	Test if transmit queue on device is currently unable to send.
3757  */
netif_queue_stopped(const struct net_device * dev)3758 static inline bool netif_queue_stopped(const struct net_device *dev)
3759 {
3760 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3761 }
3762 
netif_xmit_stopped(const struct netdev_queue * dev_queue)3763 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3764 {
3765 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3766 }
3767 
3768 static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue * dev_queue)3769 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3770 {
3771 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3772 }
3773 
3774 static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue * dev_queue)3775 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3776 {
3777 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3778 }
3779 
3780 /**
3781  *	netdev_queue_set_dql_min_limit - set dql minimum limit
3782  *	@dev_queue: pointer to transmit queue
3783  *	@min_limit: dql minimum limit
3784  *
3785  * Forces xmit_more() to return true until the minimum threshold
3786  * defined by @min_limit is reached (or until the tx queue is
3787  * empty). Warning: to be use with care, misuse will impact the
3788  * latency.
3789  */
netdev_queue_set_dql_min_limit(struct netdev_queue * dev_queue,unsigned int min_limit)3790 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
3791 						  unsigned int min_limit)
3792 {
3793 #ifdef CONFIG_BQL
3794 	dev_queue->dql.min_limit = min_limit;
3795 #endif
3796 }
3797 
netdev_queue_dql_avail(const struct netdev_queue * txq)3798 static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
3799 {
3800 #ifdef CONFIG_BQL
3801 	/* Non-BQL migrated drivers will return 0, too. */
3802 	return dql_avail(&txq->dql);
3803 #else
3804 	return 0;
3805 #endif
3806 }
3807 
3808 /**
3809  *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3810  *	@dev_queue: pointer to transmit queue
3811  *
3812  * BQL enabled drivers might use this helper in their ndo_start_xmit(),
3813  * to give appropriate hint to the CPU.
3814  */
netdev_txq_bql_enqueue_prefetchw(struct netdev_queue * dev_queue)3815 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3816 {
3817 #ifdef CONFIG_BQL
3818 	prefetchw(&dev_queue->dql.num_queued);
3819 #endif
3820 }
3821 
3822 /**
3823  *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3824  *	@dev_queue: pointer to transmit queue
3825  *
3826  * BQL enabled drivers might use this helper in their TX completion path,
3827  * to give appropriate hint to the CPU.
3828  */
netdev_txq_bql_complete_prefetchw(struct netdev_queue * dev_queue)3829 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3830 {
3831 #ifdef CONFIG_BQL
3832 	prefetchw(&dev_queue->dql.limit);
3833 #endif
3834 }
3835 
3836 /**
3837  *	netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3838  *	@dev_queue: network device queue
3839  *	@bytes: number of bytes queued to the device queue
3840  *
3841  *	Report the number of bytes queued for sending/completion to the network
3842  *	device hardware queue. @bytes should be a good approximation and should
3843  *	exactly match netdev_completed_queue() @bytes.
3844  *	This is typically called once per packet, from ndo_start_xmit().
3845  */
netdev_tx_sent_queue(struct netdev_queue * dev_queue,unsigned int bytes)3846 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3847 					unsigned int bytes)
3848 {
3849 #ifdef CONFIG_BQL
3850 	dql_queued(&dev_queue->dql, bytes);
3851 
3852 	if (likely(dql_avail(&dev_queue->dql) >= 0))
3853 		return;
3854 
3855 	/* Paired with READ_ONCE() from dev_watchdog() */
3856 	WRITE_ONCE(dev_queue->trans_start, jiffies);
3857 
3858 	/* This barrier is paired with smp_mb() from dev_watchdog() */
3859 	smp_mb__before_atomic();
3860 
3861 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3862 
3863 	/*
3864 	 * The XOFF flag must be set before checking the dql_avail below,
3865 	 * because in netdev_tx_completed_queue we update the dql_completed
3866 	 * before checking the XOFF flag.
3867 	 */
3868 	smp_mb__after_atomic();
3869 
3870 	/* check again in case another CPU has just made room avail */
3871 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3872 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3873 #endif
3874 }
3875 
3876 /* Variant of netdev_tx_sent_queue() for drivers that are aware
3877  * that they should not test BQL status themselves.
3878  * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3879  * skb of a batch.
3880  * Returns true if the doorbell must be used to kick the NIC.
3881  */
__netdev_tx_sent_queue(struct netdev_queue * dev_queue,unsigned int bytes,bool xmit_more)3882 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3883 					  unsigned int bytes,
3884 					  bool xmit_more)
3885 {
3886 	if (xmit_more) {
3887 #ifdef CONFIG_BQL
3888 		dql_queued(&dev_queue->dql, bytes);
3889 #endif
3890 		return netif_tx_queue_stopped(dev_queue);
3891 	}
3892 	netdev_tx_sent_queue(dev_queue, bytes);
3893 	return true;
3894 }
3895 
3896 /**
3897  *	netdev_sent_queue - report the number of bytes queued to hardware
3898  *	@dev: network device
3899  *	@bytes: number of bytes queued to the hardware device queue
3900  *
3901  *	Report the number of bytes queued for sending/completion to the network
3902  *	device hardware queue#0. @bytes should be a good approximation and should
3903  *	exactly match netdev_completed_queue() @bytes.
3904  *	This is typically called once per packet, from ndo_start_xmit().
3905  */
netdev_sent_queue(struct net_device * dev,unsigned int bytes)3906 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3907 {
3908 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3909 }
3910 
__netdev_sent_queue(struct net_device * dev,unsigned int bytes,bool xmit_more)3911 static inline bool __netdev_sent_queue(struct net_device *dev,
3912 				       unsigned int bytes,
3913 				       bool xmit_more)
3914 {
3915 	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3916 				      xmit_more);
3917 }
3918 
3919 /**
3920  *	netdev_tx_completed_queue - report number of packets/bytes at TX completion.
3921  *	@dev_queue: network device queue
3922  *	@pkts: number of packets (currently ignored)
3923  *	@bytes: number of bytes dequeued from the device queue
3924  *
3925  *	Must be called at most once per TX completion round (and not per
3926  *	individual packet), so that BQL can adjust its limits appropriately.
3927  */
netdev_tx_completed_queue(struct netdev_queue * dev_queue,unsigned int pkts,unsigned int bytes)3928 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3929 					     unsigned int pkts, unsigned int bytes)
3930 {
3931 #ifdef CONFIG_BQL
3932 	if (unlikely(!bytes))
3933 		return;
3934 
3935 	dql_completed(&dev_queue->dql, bytes);
3936 
3937 	/*
3938 	 * Without the memory barrier there is a small possibility that
3939 	 * netdev_tx_sent_queue will miss the update and cause the queue to
3940 	 * be stopped forever
3941 	 */
3942 	smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
3943 
3944 	if (unlikely(dql_avail(&dev_queue->dql) < 0))
3945 		return;
3946 
3947 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3948 		netif_schedule_queue(dev_queue);
3949 #endif
3950 }
3951 
3952 /**
3953  * 	netdev_completed_queue - report bytes and packets completed by device
3954  * 	@dev: network device
3955  * 	@pkts: actual number of packets sent over the medium
3956  * 	@bytes: actual number of bytes sent over the medium
3957  *
3958  * 	Report the number of bytes and packets transmitted by the network device
3959  * 	hardware queue over the physical medium, @bytes must exactly match the
3960  * 	@bytes amount passed to netdev_sent_queue()
3961  */
netdev_completed_queue(struct net_device * dev,unsigned int pkts,unsigned int bytes)3962 static inline void netdev_completed_queue(struct net_device *dev,
3963 					  unsigned int pkts, unsigned int bytes)
3964 {
3965 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3966 }
3967 
netdev_tx_reset_queue(struct netdev_queue * q)3968 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3969 {
3970 #ifdef CONFIG_BQL
3971 	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3972 	dql_reset(&q->dql);
3973 #endif
3974 }
3975 
3976 /**
3977  * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
3978  * @dev: network device
3979  * @qid: stack index of the queue to reset
3980  */
netdev_tx_reset_subqueue(const struct net_device * dev,u32 qid)3981 static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
3982 					    u32 qid)
3983 {
3984 	netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
3985 }
3986 
3987 /**
3988  * 	netdev_reset_queue - reset the packets and bytes count of a network device
3989  * 	@dev_queue: network device
3990  *
3991  * 	Reset the bytes and packet count of a network device and clear the
3992  * 	software flow control OFF bit for this network device
3993  */
netdev_reset_queue(struct net_device * dev_queue)3994 static inline void netdev_reset_queue(struct net_device *dev_queue)
3995 {
3996 	netdev_tx_reset_subqueue(dev_queue, 0);
3997 }
3998 
3999 /**
4000  * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
4001  * 	@dev: network device
4002  * 	@queue_index: given tx queue index
4003  *
4004  * 	Returns 0 if given tx queue index >= number of device tx queues,
4005  * 	otherwise returns the originally passed tx queue index.
4006  */
netdev_cap_txqueue(struct net_device * dev,u16 queue_index)4007 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
4008 {
4009 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
4010 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
4011 				     dev->name, queue_index,
4012 				     dev->real_num_tx_queues);
4013 		return 0;
4014 	}
4015 
4016 	return queue_index;
4017 }
4018 
4019 /**
4020  *	netif_running - test if up
4021  *	@dev: network device
4022  *
4023  *	Test if the device has been brought up.
4024  */
netif_running(const struct net_device * dev)4025 static inline bool netif_running(const struct net_device *dev)
4026 {
4027 	return test_bit(__LINK_STATE_START, &dev->state);
4028 }
4029 
4030 /*
4031  * Routines to manage the subqueues on a device.  We only need start,
4032  * stop, and a check if it's stopped.  All other device management is
4033  * done at the overall netdevice level.
4034  * Also test the device if we're multiqueue.
4035  */
4036 
4037 /**
4038  *	netif_start_subqueue - allow sending packets on subqueue
4039  *	@dev: network device
4040  *	@queue_index: sub queue index
4041  *
4042  * Start individual transmit queue of a device with multiple transmit queues.
4043  */
netif_start_subqueue(struct net_device * dev,u16 queue_index)4044 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
4045 {
4046 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
4047 
4048 	netif_tx_start_queue(txq);
4049 }
4050 
4051 /**
4052  *	netif_stop_subqueue - stop sending packets on subqueue
4053  *	@dev: network device
4054  *	@queue_index: sub queue index
4055  *
4056  * Stop individual transmit queue of a device with multiple transmit queues.
4057  */
netif_stop_subqueue(struct net_device * dev,u16 queue_index)4058 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
4059 {
4060 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
4061 	netif_tx_stop_queue(txq);
4062 }
4063 
4064 /**
4065  *	__netif_subqueue_stopped - test status of subqueue
4066  *	@dev: network device
4067  *	@queue_index: sub queue index
4068  *
4069  * Check individual transmit queue of a device with multiple transmit queues.
4070  */
__netif_subqueue_stopped(const struct net_device * dev,u16 queue_index)4071 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
4072 					    u16 queue_index)
4073 {
4074 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
4075 
4076 	return netif_tx_queue_stopped(txq);
4077 }
4078 
4079 /**
4080  *	netif_subqueue_stopped - test status of subqueue
4081  *	@dev: network device
4082  *	@skb: sub queue buffer pointer
4083  *
4084  * Check individual transmit queue of a device with multiple transmit queues.
4085  */
netif_subqueue_stopped(const struct net_device * dev,struct sk_buff * skb)4086 static inline bool netif_subqueue_stopped(const struct net_device *dev,
4087 					  struct sk_buff *skb)
4088 {
4089 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
4090 }
4091 
4092 /**
4093  *	netif_wake_subqueue - allow sending packets on subqueue
4094  *	@dev: network device
4095  *	@queue_index: sub queue index
4096  *
4097  * Resume individual transmit queue of a device with multiple transmit queues.
4098  */
netif_wake_subqueue(struct net_device * dev,u16 queue_index)4099 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
4100 {
4101 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
4102 
4103 	netif_tx_wake_queue(txq);
4104 }
4105 
4106 #ifdef CONFIG_XPS
4107 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
4108 			u16 index);
4109 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
4110 			  u16 index, enum xps_map_type type);
4111 
4112 /**
4113  *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
4114  *	@j: CPU/Rx queue index
4115  *	@mask: bitmask of all cpus/rx queues
4116  *	@nr_bits: number of bits in the bitmask
4117  *
4118  * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
4119  */
netif_attr_test_mask(unsigned long j,const unsigned long * mask,unsigned int nr_bits)4120 static inline bool netif_attr_test_mask(unsigned long j,
4121 					const unsigned long *mask,
4122 					unsigned int nr_bits)
4123 {
4124 	cpu_max_bits_warn(j, nr_bits);
4125 	return test_bit(j, mask);
4126 }
4127 
4128 /**
4129  *	netif_attr_test_online - Test for online CPU/Rx queue
4130  *	@j: CPU/Rx queue index
4131  *	@online_mask: bitmask for CPUs/Rx queues that are online
4132  *	@nr_bits: number of bits in the bitmask
4133  *
4134  * Returns: true if a CPU/Rx queue is online.
4135  */
netif_attr_test_online(unsigned long j,const unsigned long * online_mask,unsigned int nr_bits)4136 static inline bool netif_attr_test_online(unsigned long j,
4137 					  const unsigned long *online_mask,
4138 					  unsigned int nr_bits)
4139 {
4140 	cpu_max_bits_warn(j, nr_bits);
4141 
4142 	if (online_mask)
4143 		return test_bit(j, online_mask);
4144 
4145 	return (j < nr_bits);
4146 }
4147 
4148 /**
4149  *	netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
4150  *	@n: CPU/Rx queue index
4151  *	@srcp: the cpumask/Rx queue mask pointer
4152  *	@nr_bits: number of bits in the bitmask
4153  *
4154  * Returns: next (after n) CPU/Rx queue index in the mask;
4155  * >= nr_bits if no further CPUs/Rx queues set.
4156  */
netif_attrmask_next(int n,const unsigned long * srcp,unsigned int nr_bits)4157 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
4158 					       unsigned int nr_bits)
4159 {
4160 	/* -1 is a legal arg here. */
4161 	if (n != -1)
4162 		cpu_max_bits_warn(n, nr_bits);
4163 
4164 	if (srcp)
4165 		return find_next_bit(srcp, nr_bits, n + 1);
4166 
4167 	return n + 1;
4168 }
4169 
4170 /**
4171  *	netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
4172  *	@n: CPU/Rx queue index
4173  *	@src1p: the first CPUs/Rx queues mask pointer
4174  *	@src2p: the second CPUs/Rx queues mask pointer
4175  *	@nr_bits: number of bits in the bitmask
4176  *
4177  * Returns: next (after n) CPU/Rx queue index set in both masks;
4178  * >= nr_bits if no further CPUs/Rx queues set in both.
4179  */
netif_attrmask_next_and(int n,const unsigned long * src1p,const unsigned long * src2p,unsigned int nr_bits)4180 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
4181 					  const unsigned long *src2p,
4182 					  unsigned int nr_bits)
4183 {
4184 	/* -1 is a legal arg here. */
4185 	if (n != -1)
4186 		cpu_max_bits_warn(n, nr_bits);
4187 
4188 	if (src1p && src2p)
4189 		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
4190 	else if (src1p)
4191 		return find_next_bit(src1p, nr_bits, n + 1);
4192 	else if (src2p)
4193 		return find_next_bit(src2p, nr_bits, n + 1);
4194 
4195 	return n + 1;
4196 }
4197 #else
netif_set_xps_queue(struct net_device * dev,const struct cpumask * mask,u16 index)4198 static inline int netif_set_xps_queue(struct net_device *dev,
4199 				      const struct cpumask *mask,
4200 				      u16 index)
4201 {
4202 	return 0;
4203 }
4204 
__netif_set_xps_queue(struct net_device * dev,const unsigned long * mask,u16 index,enum xps_map_type type)4205 static inline int __netif_set_xps_queue(struct net_device *dev,
4206 					const unsigned long *mask,
4207 					u16 index, enum xps_map_type type)
4208 {
4209 	return 0;
4210 }
4211 #endif
4212 
4213 /**
4214  *	netif_is_multiqueue - test if device has multiple transmit queues
4215  *	@dev: network device
4216  *
4217  * Check if device has multiple transmit queues
4218  */
netif_is_multiqueue(const struct net_device * dev)4219 static inline bool netif_is_multiqueue(const struct net_device *dev)
4220 {
4221 	return dev->num_tx_queues > 1;
4222 }
4223 
4224 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
4225 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
4226 int netif_set_real_num_queues(struct net_device *dev,
4227 			      unsigned int txq, unsigned int rxq);
4228 
4229 int netif_get_num_default_rss_queues(void);
4230 
4231 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
4232 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
4233 
4234 /*
4235  * It is not allowed to call kfree_skb() or consume_skb() from hardware
4236  * interrupt context or with hardware interrupts being disabled.
4237  * (in_hardirq() || irqs_disabled())
4238  *
4239  * We provide four helpers that can be used in following contexts :
4240  *
4241  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
4242  *  replacing kfree_skb(skb)
4243  *
4244  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
4245  *  Typically used in place of consume_skb(skb) in TX completion path
4246  *
4247  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
4248  *  replacing kfree_skb(skb)
4249  *
4250  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
4251  *  and consumed a packet. Used in place of consume_skb(skb)
4252  */
dev_kfree_skb_irq(struct sk_buff * skb)4253 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
4254 {
4255 	dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4256 }
4257 
dev_consume_skb_irq(struct sk_buff * skb)4258 static inline void dev_consume_skb_irq(struct sk_buff *skb)
4259 {
4260 	dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
4261 }
4262 
dev_kfree_skb_any(struct sk_buff * skb)4263 static inline void dev_kfree_skb_any(struct sk_buff *skb)
4264 {
4265 	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4266 }
4267 
dev_consume_skb_any(struct sk_buff * skb)4268 static inline void dev_consume_skb_any(struct sk_buff *skb)
4269 {
4270 	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
4271 }
4272 
4273 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4274 			     const struct bpf_prog *xdp_prog);
4275 void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
4276 int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
4277 int netif_rx(struct sk_buff *skb);
4278 int __netif_rx(struct sk_buff *skb);
4279 
4280 int netif_receive_skb(struct sk_buff *skb);
4281 int netif_receive_skb_core(struct sk_buff *skb);
4282 void netif_receive_skb_list_internal(struct list_head *head);
4283 void netif_receive_skb_list(struct list_head *head);
4284 gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb);
4285 
napi_gro_receive(struct napi_struct * napi,struct sk_buff * skb)4286 static inline gro_result_t napi_gro_receive(struct napi_struct *napi,
4287 					    struct sk_buff *skb)
4288 {
4289 	return gro_receive_skb(&napi->gro, skb);
4290 }
4291 
4292 struct sk_buff *napi_get_frags(struct napi_struct *napi);
4293 gro_result_t napi_gro_frags(struct napi_struct *napi);
4294 
napi_free_frags(struct napi_struct * napi)4295 static inline void napi_free_frags(struct napi_struct *napi)
4296 {
4297 	kfree_skb(napi->skb);
4298 	napi->skb = NULL;
4299 }
4300 
4301 bool netdev_is_rx_handler_busy(struct net_device *dev);
4302 int netdev_rx_handler_register(struct net_device *dev,
4303 			       rx_handler_func_t *rx_handler,
4304 			       void *rx_handler_data);
4305 void netdev_rx_handler_unregister(struct net_device *dev);
4306 
4307 bool dev_valid_name(const char *name);
is_socket_ioctl_cmd(unsigned int cmd)4308 static inline bool is_socket_ioctl_cmd(unsigned int cmd)
4309 {
4310 	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
4311 }
4312 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
4313 int put_user_ifreq(struct ifreq *ifr, void __user *arg);
4314 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
4315 		void __user *data, bool *need_copyout);
4316 int dev_ifconf(struct net *net, struct ifconf __user *ifc);
4317 int dev_eth_ioctl(struct net_device *dev,
4318 		  struct ifreq *ifr, unsigned int cmd);
4319 int generic_hwtstamp_get_lower(struct net_device *dev,
4320 			       struct kernel_hwtstamp_config *kernel_cfg);
4321 int generic_hwtstamp_set_lower(struct net_device *dev,
4322 			       struct kernel_hwtstamp_config *kernel_cfg,
4323 			       struct netlink_ext_ack *extack);
4324 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
4325 unsigned int netif_get_flags(const struct net_device *dev);
4326 int __dev_change_flags(struct net_device *dev, unsigned int flags,
4327 		       struct netlink_ext_ack *extack);
4328 int netif_change_flags(struct net_device *dev, unsigned int flags,
4329 		       struct netlink_ext_ack *extack);
4330 int dev_change_flags(struct net_device *dev, unsigned int flags,
4331 		     struct netlink_ext_ack *extack);
4332 int netif_set_alias(struct net_device *dev, const char *alias, size_t len);
4333 int dev_set_alias(struct net_device *, const char *, size_t);
4334 int dev_get_alias(const struct net_device *, char *, size_t);
4335 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
4336 			       const char *pat, int new_ifindex,
4337 			       struct netlink_ext_ack *extack);
4338 int dev_change_net_namespace(struct net_device *dev, struct net *net,
4339 			     const char *pat);
4340 int __netif_set_mtu(struct net_device *dev, int new_mtu);
4341 int netif_set_mtu(struct net_device *dev, int new_mtu);
4342 int dev_set_mtu(struct net_device *, int);
4343 int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
4344 				struct netlink_ext_ack *extack);
4345 int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
4346 			  struct netlink_ext_ack *extack);
4347 int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
4348 			struct netlink_ext_ack *extack);
4349 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss,
4350 			     struct netlink_ext_ack *extack);
4351 int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
4352 int netif_get_port_parent_id(struct net_device *dev,
4353 			     struct netdev_phys_item_id *ppid, bool recurse);
4354 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
4355 
4356 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
4357 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
4358 				    struct netdev_queue *txq, int *ret);
4359 
4360 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
4361 u8 dev_xdp_prog_count(struct net_device *dev);
4362 int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
4363 int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
4364 u8 dev_xdp_sb_prog_count(struct net_device *dev);
4365 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
4366 
4367 u32 dev_get_min_mp_channel_count(const struct net_device *dev);
4368 
4369 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4370 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4371 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
4372 bool is_skb_forwardable(const struct net_device *dev,
4373 			const struct sk_buff *skb);
4374 
__is_skb_forwardable(const struct net_device * dev,const struct sk_buff * skb,const bool check_mtu)4375 static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
4376 						 const struct sk_buff *skb,
4377 						 const bool check_mtu)
4378 {
4379 	const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
4380 	unsigned int len;
4381 
4382 	if (!(dev->flags & IFF_UP))
4383 		return false;
4384 
4385 	if (!check_mtu)
4386 		return true;
4387 
4388 	len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
4389 	if (skb->len <= len)
4390 		return true;
4391 
4392 	/* if TSO is enabled, we don't care about the length as the packet
4393 	 * could be forwarded without being segmented before
4394 	 */
4395 	if (skb_is_gso(skb))
4396 		return true;
4397 
4398 	return false;
4399 }
4400 
4401 void netdev_core_stats_inc(struct net_device *dev, u32 offset);
4402 
4403 #define DEV_CORE_STATS_INC(FIELD)						\
4404 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)		\
4405 {										\
4406 	netdev_core_stats_inc(dev,						\
4407 			offsetof(struct net_device_core_stats, FIELD));		\
4408 }
4409 DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)4410 DEV_CORE_STATS_INC(tx_dropped)
4411 DEV_CORE_STATS_INC(rx_nohandler)
4412 DEV_CORE_STATS_INC(rx_otherhost_dropped)
4413 #undef DEV_CORE_STATS_INC
4414 
4415 static __always_inline int ____dev_forward_skb(struct net_device *dev,
4416 					       struct sk_buff *skb,
4417 					       const bool check_mtu)
4418 {
4419 	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
4420 	    unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
4421 		dev_core_stats_rx_dropped_inc(dev);
4422 		kfree_skb(skb);
4423 		return NET_RX_DROP;
4424 	}
4425 
4426 	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
4427 	skb->priority = 0;
4428 	return 0;
4429 }
4430 
4431 bool dev_nit_active_rcu(const struct net_device *dev);
dev_nit_active(const struct net_device * dev)4432 static inline bool dev_nit_active(const struct net_device *dev)
4433 {
4434 	bool ret;
4435 
4436 	rcu_read_lock();
4437 	ret = dev_nit_active_rcu(dev);
4438 	rcu_read_unlock();
4439 	return ret;
4440 }
4441 
4442 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
4443 
__dev_put(struct net_device * dev)4444 static inline void __dev_put(struct net_device *dev)
4445 {
4446 	if (dev) {
4447 #ifdef CONFIG_PCPU_DEV_REFCNT
4448 		this_cpu_dec(*dev->pcpu_refcnt);
4449 #else
4450 		refcount_dec(&dev->dev_refcnt);
4451 #endif
4452 	}
4453 }
4454 
__dev_hold(struct net_device * dev)4455 static inline void __dev_hold(struct net_device *dev)
4456 {
4457 	if (dev) {
4458 #ifdef CONFIG_PCPU_DEV_REFCNT
4459 		this_cpu_inc(*dev->pcpu_refcnt);
4460 #else
4461 		refcount_inc(&dev->dev_refcnt);
4462 #endif
4463 	}
4464 }
4465 
__netdev_tracker_alloc(struct net_device * dev,netdevice_tracker * tracker,gfp_t gfp)4466 static inline void __netdev_tracker_alloc(struct net_device *dev,
4467 					  netdevice_tracker *tracker,
4468 					  gfp_t gfp)
4469 {
4470 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4471 	ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
4472 #endif
4473 }
4474 
4475 /* netdev_tracker_alloc() can upgrade a prior untracked reference
4476  * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
4477  */
netdev_tracker_alloc(struct net_device * dev,netdevice_tracker * tracker,gfp_t gfp)4478 static inline void netdev_tracker_alloc(struct net_device *dev,
4479 					netdevice_tracker *tracker, gfp_t gfp)
4480 {
4481 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4482 	refcount_dec(&dev->refcnt_tracker.no_tracker);
4483 	__netdev_tracker_alloc(dev, tracker, gfp);
4484 #endif
4485 }
4486 
netdev_tracker_free(struct net_device * dev,netdevice_tracker * tracker)4487 static inline void netdev_tracker_free(struct net_device *dev,
4488 				       netdevice_tracker *tracker)
4489 {
4490 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4491 	ref_tracker_free(&dev->refcnt_tracker, tracker);
4492 #endif
4493 }
4494 
netdev_hold(struct net_device * dev,netdevice_tracker * tracker,gfp_t gfp)4495 static inline void netdev_hold(struct net_device *dev,
4496 			       netdevice_tracker *tracker, gfp_t gfp)
4497 {
4498 	if (dev) {
4499 		__dev_hold(dev);
4500 		__netdev_tracker_alloc(dev, tracker, gfp);
4501 	}
4502 }
4503 
netdev_put(struct net_device * dev,netdevice_tracker * tracker)4504 static inline void netdev_put(struct net_device *dev,
4505 			      netdevice_tracker *tracker)
4506 {
4507 	if (dev) {
4508 		netdev_tracker_free(dev, tracker);
4509 		__dev_put(dev);
4510 	}
4511 }
4512 
4513 /**
4514  *	dev_hold - get reference to device
4515  *	@dev: network device
4516  *
4517  * Hold reference to device to keep it from being freed.
4518  * Try using netdev_hold() instead.
4519  */
dev_hold(struct net_device * dev)4520 static inline void dev_hold(struct net_device *dev)
4521 {
4522 	netdev_hold(dev, NULL, GFP_ATOMIC);
4523 }
4524 
4525 /**
4526  *	dev_put - release reference to device
4527  *	@dev: network device
4528  *
4529  * Release reference to device to allow it to be freed.
4530  * Try using netdev_put() instead.
4531  */
dev_put(struct net_device * dev)4532 static inline void dev_put(struct net_device *dev)
4533 {
4534 	netdev_put(dev, NULL);
4535 }
4536 
DEFINE_FREE(dev_put,struct net_device *,if (_T)dev_put (_T))4537 DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
4538 
4539 static inline void netdev_ref_replace(struct net_device *odev,
4540 				      struct net_device *ndev,
4541 				      netdevice_tracker *tracker,
4542 				      gfp_t gfp)
4543 {
4544 	if (odev)
4545 		netdev_tracker_free(odev, tracker);
4546 
4547 	__dev_hold(ndev);
4548 	__dev_put(odev);
4549 
4550 	if (ndev)
4551 		__netdev_tracker_alloc(ndev, tracker, gfp);
4552 }
4553 
4554 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
4555  * and _off may be called from IRQ context, but it is caller
4556  * who is responsible for serialization of these calls.
4557  *
4558  * The name carrier is inappropriate, these functions should really be
4559  * called netif_lowerlayer_*() because they represent the state of any
4560  * kind of lower layer not just hardware media.
4561  */
4562 void linkwatch_fire_event(struct net_device *dev);
4563 
4564 /**
4565  * linkwatch_sync_dev - sync linkwatch for the given device
4566  * @dev: network device to sync linkwatch for
4567  *
4568  * Sync linkwatch for the given device, removing it from the
4569  * pending work list (if queued).
4570  */
4571 void linkwatch_sync_dev(struct net_device *dev);
4572 void __linkwatch_sync_dev(struct net_device *dev);
4573 
4574 /**
4575  *	netif_carrier_ok - test if carrier present
4576  *	@dev: network device
4577  *
4578  * Check if carrier is present on device
4579  */
netif_carrier_ok(const struct net_device * dev)4580 static inline bool netif_carrier_ok(const struct net_device *dev)
4581 {
4582 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4583 }
4584 
4585 unsigned long dev_trans_start(struct net_device *dev);
4586 
4587 void netdev_watchdog_up(struct net_device *dev);
4588 
4589 void netif_carrier_on(struct net_device *dev);
4590 void netif_carrier_off(struct net_device *dev);
4591 void netif_carrier_event(struct net_device *dev);
4592 
4593 /**
4594  *	netif_dormant_on - mark device as dormant.
4595  *	@dev: network device
4596  *
4597  * Mark device as dormant (as per RFC2863).
4598  *
4599  * The dormant state indicates that the relevant interface is not
4600  * actually in a condition to pass packets (i.e., it is not 'up') but is
4601  * in a "pending" state, waiting for some external event.  For "on-
4602  * demand" interfaces, this new state identifies the situation where the
4603  * interface is waiting for events to place it in the up state.
4604  */
netif_dormant_on(struct net_device * dev)4605 static inline void netif_dormant_on(struct net_device *dev)
4606 {
4607 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4608 		linkwatch_fire_event(dev);
4609 }
4610 
4611 /**
4612  *	netif_dormant_off - set device as not dormant.
4613  *	@dev: network device
4614  *
4615  * Device is not in dormant state.
4616  */
netif_dormant_off(struct net_device * dev)4617 static inline void netif_dormant_off(struct net_device *dev)
4618 {
4619 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4620 		linkwatch_fire_event(dev);
4621 }
4622 
4623 /**
4624  *	netif_dormant - test if device is dormant
4625  *	@dev: network device
4626  *
4627  * Check if device is dormant.
4628  */
netif_dormant(const struct net_device * dev)4629 static inline bool netif_dormant(const struct net_device *dev)
4630 {
4631 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
4632 }
4633 
4634 
4635 /**
4636  *	netif_testing_on - mark device as under test.
4637  *	@dev: network device
4638  *
4639  * Mark device as under test (as per RFC2863).
4640  *
4641  * The testing state indicates that some test(s) must be performed on
4642  * the interface. After completion, of the test, the interface state
4643  * will change to up, dormant, or down, as appropriate.
4644  */
netif_testing_on(struct net_device * dev)4645 static inline void netif_testing_on(struct net_device *dev)
4646 {
4647 	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4648 		linkwatch_fire_event(dev);
4649 }
4650 
4651 /**
4652  *	netif_testing_off - set device as not under test.
4653  *	@dev: network device
4654  *
4655  * Device is not in testing state.
4656  */
netif_testing_off(struct net_device * dev)4657 static inline void netif_testing_off(struct net_device *dev)
4658 {
4659 	if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4660 		linkwatch_fire_event(dev);
4661 }
4662 
4663 /**
4664  *	netif_testing - test if device is under test
4665  *	@dev: network device
4666  *
4667  * Check if device is under test
4668  */
netif_testing(const struct net_device * dev)4669 static inline bool netif_testing(const struct net_device *dev)
4670 {
4671 	return test_bit(__LINK_STATE_TESTING, &dev->state);
4672 }
4673 
4674 
4675 /**
4676  *	netif_oper_up - test if device is operational
4677  *	@dev: network device
4678  *
4679  * Check if carrier is operational
4680  */
netif_oper_up(const struct net_device * dev)4681 static inline bool netif_oper_up(const struct net_device *dev)
4682 {
4683 	unsigned int operstate = READ_ONCE(dev->operstate);
4684 
4685 	return	operstate == IF_OPER_UP ||
4686 		operstate == IF_OPER_UNKNOWN /* backward compat */;
4687 }
4688 
4689 /**
4690  *	netif_device_present - is device available or removed
4691  *	@dev: network device
4692  *
4693  * Check if device has not been removed from system.
4694  */
netif_device_present(const struct net_device * dev)4695 static inline bool netif_device_present(const struct net_device *dev)
4696 {
4697 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
4698 }
4699 
4700 void netif_device_detach(struct net_device *dev);
4701 
4702 void netif_device_attach(struct net_device *dev);
4703 
4704 /*
4705  * Network interface message level settings
4706  */
4707 
4708 enum {
4709 	NETIF_MSG_DRV_BIT,
4710 	NETIF_MSG_PROBE_BIT,
4711 	NETIF_MSG_LINK_BIT,
4712 	NETIF_MSG_TIMER_BIT,
4713 	NETIF_MSG_IFDOWN_BIT,
4714 	NETIF_MSG_IFUP_BIT,
4715 	NETIF_MSG_RX_ERR_BIT,
4716 	NETIF_MSG_TX_ERR_BIT,
4717 	NETIF_MSG_TX_QUEUED_BIT,
4718 	NETIF_MSG_INTR_BIT,
4719 	NETIF_MSG_TX_DONE_BIT,
4720 	NETIF_MSG_RX_STATUS_BIT,
4721 	NETIF_MSG_PKTDATA_BIT,
4722 	NETIF_MSG_HW_BIT,
4723 	NETIF_MSG_WOL_BIT,
4724 
4725 	/* When you add a new bit above, update netif_msg_class_names array
4726 	 * in net/ethtool/common.c
4727 	 */
4728 	NETIF_MSG_CLASS_COUNT,
4729 };
4730 /* Both ethtool_ops interface and internal driver implementation use u32 */
4731 static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4732 
4733 #define __NETIF_MSG_BIT(bit)	((u32)1 << (bit))
4734 #define __NETIF_MSG(name)	__NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4735 
4736 #define NETIF_MSG_DRV		__NETIF_MSG(DRV)
4737 #define NETIF_MSG_PROBE		__NETIF_MSG(PROBE)
4738 #define NETIF_MSG_LINK		__NETIF_MSG(LINK)
4739 #define NETIF_MSG_TIMER		__NETIF_MSG(TIMER)
4740 #define NETIF_MSG_IFDOWN	__NETIF_MSG(IFDOWN)
4741 #define NETIF_MSG_IFUP		__NETIF_MSG(IFUP)
4742 #define NETIF_MSG_RX_ERR	__NETIF_MSG(RX_ERR)
4743 #define NETIF_MSG_TX_ERR	__NETIF_MSG(TX_ERR)
4744 #define NETIF_MSG_TX_QUEUED	__NETIF_MSG(TX_QUEUED)
4745 #define NETIF_MSG_INTR		__NETIF_MSG(INTR)
4746 #define NETIF_MSG_TX_DONE	__NETIF_MSG(TX_DONE)
4747 #define NETIF_MSG_RX_STATUS	__NETIF_MSG(RX_STATUS)
4748 #define NETIF_MSG_PKTDATA	__NETIF_MSG(PKTDATA)
4749 #define NETIF_MSG_HW		__NETIF_MSG(HW)
4750 #define NETIF_MSG_WOL		__NETIF_MSG(WOL)
4751 
4752 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
4753 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
4754 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
4755 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
4756 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
4757 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
4758 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
4759 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
4760 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4761 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
4762 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
4763 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
4764 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
4765 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
4766 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
4767 
netif_msg_init(int debug_value,int default_msg_enable_bits)4768 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4769 {
4770 	/* use default */
4771 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4772 		return default_msg_enable_bits;
4773 	if (debug_value == 0)	/* no output */
4774 		return 0;
4775 	/* set low N bits */
4776 	return (1U << debug_value) - 1;
4777 }
4778 
__netif_tx_lock(struct netdev_queue * txq,int cpu)4779 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4780 {
4781 	spin_lock(&txq->_xmit_lock);
4782 	/* Pairs with READ_ONCE() in netif_tx_owned() */
4783 	WRITE_ONCE(txq->xmit_lock_owner, cpu);
4784 }
4785 
__netif_tx_acquire(struct netdev_queue * txq)4786 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4787 {
4788 	__acquire(&txq->_xmit_lock);
4789 	return true;
4790 }
4791 
__netif_tx_release(struct netdev_queue * txq)4792 static inline void __netif_tx_release(struct netdev_queue *txq)
4793 {
4794 	__release(&txq->_xmit_lock);
4795 }
4796 
__netif_tx_lock_bh(struct netdev_queue * txq)4797 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4798 {
4799 	spin_lock_bh(&txq->_xmit_lock);
4800 	/* Pairs with READ_ONCE() in netif_tx_owned() */
4801 	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4802 }
4803 
__netif_tx_trylock(struct netdev_queue * txq)4804 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4805 {
4806 	bool ok = spin_trylock(&txq->_xmit_lock);
4807 
4808 	if (likely(ok)) {
4809 		/* Pairs with READ_ONCE() in netif_tx_owned() */
4810 		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4811 	}
4812 	return ok;
4813 }
4814 
__netif_tx_unlock(struct netdev_queue * txq)4815 static inline void __netif_tx_unlock(struct netdev_queue *txq)
4816 {
4817 	/* Pairs with READ_ONCE() in netif_tx_owned() */
4818 	WRITE_ONCE(txq->xmit_lock_owner, -1);
4819 	spin_unlock(&txq->_xmit_lock);
4820 }
4821 
__netif_tx_unlock_bh(struct netdev_queue * txq)4822 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4823 {
4824 	/* Pairs with READ_ONCE() in netif_tx_owned() */
4825 	WRITE_ONCE(txq->xmit_lock_owner, -1);
4826 	spin_unlock_bh(&txq->_xmit_lock);
4827 }
4828 
4829 /*
4830  * txq->trans_start can be read locklessly from dev_watchdog()
4831  */
txq_trans_update(const struct net_device * dev,struct netdev_queue * txq)4832 static inline void txq_trans_update(const struct net_device *dev,
4833 				    struct netdev_queue *txq)
4834 {
4835 	if (!dev->lltx)
4836 		WRITE_ONCE(txq->trans_start, jiffies);
4837 }
4838 
txq_trans_cond_update(struct netdev_queue * txq)4839 static inline void txq_trans_cond_update(struct netdev_queue *txq)
4840 {
4841 	unsigned long now = jiffies;
4842 
4843 	if (READ_ONCE(txq->trans_start) != now)
4844 		WRITE_ONCE(txq->trans_start, now);
4845 }
4846 
4847 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
netif_trans_update(struct net_device * dev)4848 static inline void netif_trans_update(struct net_device *dev)
4849 {
4850 	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4851 
4852 	txq_trans_cond_update(txq);
4853 }
4854 
4855 /**
4856  *	netif_tx_lock - grab network device transmit lock
4857  *	@dev: network device
4858  *
4859  * Get network device transmit lock
4860  */
4861 void netif_tx_lock(struct net_device *dev);
4862 
netif_tx_lock_bh(struct net_device * dev)4863 static inline void netif_tx_lock_bh(struct net_device *dev)
4864 {
4865 	local_bh_disable();
4866 	netif_tx_lock(dev);
4867 }
4868 
4869 void netif_tx_unlock(struct net_device *dev);
4870 
netif_tx_unlock_bh(struct net_device * dev)4871 static inline void netif_tx_unlock_bh(struct net_device *dev)
4872 {
4873 	netif_tx_unlock(dev);
4874 	local_bh_enable();
4875 }
4876 
4877 #define HARD_TX_LOCK(dev, txq, cpu) {			\
4878 	if (!(dev)->lltx) {				\
4879 		__netif_tx_lock(txq, cpu);		\
4880 	} else {					\
4881 		__netif_tx_acquire(txq);		\
4882 	}						\
4883 }
4884 
4885 #define HARD_TX_TRYLOCK(dev, txq)			\
4886 	(!(dev)->lltx ?					\
4887 		__netif_tx_trylock(txq) :		\
4888 		__netif_tx_acquire(txq))
4889 
4890 #define HARD_TX_UNLOCK(dev, txq) {			\
4891 	if (!(dev)->lltx) {				\
4892 		__netif_tx_unlock(txq);			\
4893 	} else {					\
4894 		__netif_tx_release(txq);		\
4895 	}						\
4896 }
4897 
netif_tx_disable(struct net_device * dev)4898 static inline void netif_tx_disable(struct net_device *dev)
4899 {
4900 	unsigned int i;
4901 	int cpu;
4902 
4903 	local_bh_disable();
4904 	cpu = smp_processor_id();
4905 	spin_lock(&dev->tx_global_lock);
4906 	for (i = 0; i < dev->num_tx_queues; i++) {
4907 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4908 
4909 		__netif_tx_lock(txq, cpu);
4910 		netif_tx_stop_queue(txq);
4911 		__netif_tx_unlock(txq);
4912 	}
4913 	spin_unlock(&dev->tx_global_lock);
4914 	local_bh_enable();
4915 }
4916 
4917 #ifndef CONFIG_PREEMPT_RT
netif_tx_owned(struct netdev_queue * txq,unsigned int cpu)4918 static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4919 {
4920 	/* Other cpus might concurrently change txq->xmit_lock_owner
4921 	 * to -1 or to their cpu id, but not to our id.
4922 	 */
4923 	return READ_ONCE(txq->xmit_lock_owner) == cpu;
4924 }
4925 
4926 #else
netif_tx_owned(struct netdev_queue * txq,unsigned int cpu)4927 static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4928 {
4929 	return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
4930 }
4931 
4932 #endif
4933 
netif_addr_lock(struct net_device * dev)4934 static inline void netif_addr_lock(struct net_device *dev)
4935 {
4936 	unsigned char nest_level = 0;
4937 
4938 #ifdef CONFIG_LOCKDEP
4939 	nest_level = dev->nested_level;
4940 #endif
4941 	spin_lock_nested(&dev->addr_list_lock, nest_level);
4942 }
4943 
netif_addr_lock_bh(struct net_device * dev)4944 static inline void netif_addr_lock_bh(struct net_device *dev)
4945 {
4946 	unsigned char nest_level = 0;
4947 
4948 #ifdef CONFIG_LOCKDEP
4949 	nest_level = dev->nested_level;
4950 #endif
4951 	local_bh_disable();
4952 	spin_lock_nested(&dev->addr_list_lock, nest_level);
4953 }
4954 
netif_addr_unlock(struct net_device * dev)4955 static inline void netif_addr_unlock(struct net_device *dev)
4956 {
4957 	spin_unlock(&dev->addr_list_lock);
4958 }
4959 
netif_addr_unlock_bh(struct net_device * dev)4960 static inline void netif_addr_unlock_bh(struct net_device *dev)
4961 {
4962 	spin_unlock_bh(&dev->addr_list_lock);
4963 }
4964 
4965 /*
4966  * dev_addrs walker. Should be used only for read access. Call with
4967  * rcu_read_lock held.
4968  */
4969 #define for_each_dev_addr(dev, ha) \
4970 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4971 
4972 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
4973 
4974 void ether_setup(struct net_device *dev);
4975 
4976 /* Allocate dummy net_device */
4977 struct net_device *alloc_netdev_dummy(int sizeof_priv);
4978 
4979 /* Support for loadable net-drivers */
4980 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4981 				    unsigned char name_assign_type,
4982 				    void (*setup)(struct net_device *),
4983 				    unsigned int txqs, unsigned int rxqs);
4984 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4985 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4986 
4987 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4988 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4989 			 count)
4990 
4991 int register_netdev(struct net_device *dev);
4992 void unregister_netdev(struct net_device *dev);
4993 
4994 int devm_register_netdev(struct device *dev, struct net_device *ndev);
4995 
4996 /* General hardware address lists handling functions */
4997 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4998 		   struct netdev_hw_addr_list *from_list, int addr_len);
4999 int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
5000 			    struct netdev_hw_addr_list *from_list,
5001 			    int addr_len);
5002 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
5003 		      struct netdev_hw_addr_list *from_list, int addr_len);
5004 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
5005 		       struct net_device *dev,
5006 		       int (*sync)(struct net_device *, const unsigned char *),
5007 		       int (*unsync)(struct net_device *,
5008 				     const unsigned char *));
5009 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
5010 			   struct net_device *dev,
5011 			   int (*sync)(struct net_device *,
5012 				       const unsigned char *, int),
5013 			   int (*unsync)(struct net_device *,
5014 					 const unsigned char *, int));
5015 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
5016 			      struct net_device *dev,
5017 			      int (*unsync)(struct net_device *,
5018 					    const unsigned char *, int));
5019 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
5020 			  struct net_device *dev,
5021 			  int (*unsync)(struct net_device *,
5022 					const unsigned char *));
5023 void __hw_addr_init(struct netdev_hw_addr_list *list);
5024 void __hw_addr_flush(struct netdev_hw_addr_list *list);
5025 int __hw_addr_list_snapshot(struct netdev_hw_addr_list *snap,
5026 			    const struct netdev_hw_addr_list *list,
5027 			    int addr_len, struct netdev_hw_addr_list *cache);
5028 void __hw_addr_list_reconcile(struct netdev_hw_addr_list *real_list,
5029 			      struct netdev_hw_addr_list *work,
5030 			      struct netdev_hw_addr_list *ref, int addr_len,
5031 			      struct netdev_hw_addr_list *cache);
5032 
5033 /* Functions used for device addresses handling */
5034 void dev_addr_mod(struct net_device *dev, unsigned int offset,
5035 		  const void *addr, size_t len);
5036 
5037 static inline void
__dev_addr_set(struct net_device * dev,const void * addr,size_t len)5038 __dev_addr_set(struct net_device *dev, const void *addr, size_t len)
5039 {
5040 	dev_addr_mod(dev, 0, addr, len);
5041 }
5042 
dev_addr_set(struct net_device * dev,const u8 * addr)5043 static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
5044 {
5045 	__dev_addr_set(dev, addr, dev->addr_len);
5046 }
5047 
5048 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
5049 		 unsigned char addr_type);
5050 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
5051 		 unsigned char addr_type);
5052 
5053 /* Functions used for unicast addresses handling */
5054 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
5055 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
5056 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
5057 int dev_uc_sync(struct net_device *to, struct net_device *from);
5058 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
5059 void dev_uc_unsync(struct net_device *to, struct net_device *from);
5060 void dev_uc_flush(struct net_device *dev);
5061 void dev_uc_init(struct net_device *dev);
5062 
5063 /**
5064  *  __dev_uc_sync - Synchronize device's unicast list
5065  *  @dev:  device to sync
5066  *  @sync: function to call if address should be added
5067  *  @unsync: function to call if address should be removed
5068  *
5069  *  Add newly added addresses to the interface, and release
5070  *  addresses that have been deleted.
5071  */
__dev_uc_sync(struct net_device * dev,int (* sync)(struct net_device *,const unsigned char *),int (* unsync)(struct net_device *,const unsigned char *))5072 static inline int __dev_uc_sync(struct net_device *dev,
5073 				int (*sync)(struct net_device *,
5074 					    const unsigned char *),
5075 				int (*unsync)(struct net_device *,
5076 					      const unsigned char *))
5077 {
5078 	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
5079 }
5080 
5081 /**
5082  *  __dev_uc_unsync - Remove synchronized addresses from device
5083  *  @dev:  device to sync
5084  *  @unsync: function to call if address should be removed
5085  *
5086  *  Remove all addresses that were added to the device by dev_uc_sync().
5087  */
__dev_uc_unsync(struct net_device * dev,int (* unsync)(struct net_device *,const unsigned char *))5088 static inline void __dev_uc_unsync(struct net_device *dev,
5089 				   int (*unsync)(struct net_device *,
5090 						 const unsigned char *))
5091 {
5092 	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
5093 }
5094 
5095 /* Functions used for multicast addresses handling */
5096 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
5097 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
5098 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
5099 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
5100 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
5101 int dev_mc_sync(struct net_device *to, struct net_device *from);
5102 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
5103 void dev_mc_unsync(struct net_device *to, struct net_device *from);
5104 void dev_mc_flush(struct net_device *dev);
5105 void dev_mc_init(struct net_device *dev);
5106 
5107 /**
5108  *  __dev_mc_sync - Synchronize device's multicast list
5109  *  @dev:  device to sync
5110  *  @sync: function to call if address should be added
5111  *  @unsync: function to call if address should be removed
5112  *
5113  *  Add newly added addresses to the interface, and release
5114  *  addresses that have been deleted.
5115  */
__dev_mc_sync(struct net_device * dev,int (* sync)(struct net_device *,const unsigned char *),int (* unsync)(struct net_device *,const unsigned char *))5116 static inline int __dev_mc_sync(struct net_device *dev,
5117 				int (*sync)(struct net_device *,
5118 					    const unsigned char *),
5119 				int (*unsync)(struct net_device *,
5120 					      const unsigned char *))
5121 {
5122 	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
5123 }
5124 
5125 /**
5126  *  __dev_mc_unsync - Remove synchronized addresses from device
5127  *  @dev:  device to sync
5128  *  @unsync: function to call if address should be removed
5129  *
5130  *  Remove all addresses that were added to the device by dev_mc_sync().
5131  */
__dev_mc_unsync(struct net_device * dev,int (* unsync)(struct net_device *,const unsigned char *))5132 static inline void __dev_mc_unsync(struct net_device *dev,
5133 				   int (*unsync)(struct net_device *,
5134 						 const unsigned char *))
5135 {
5136 	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
5137 }
5138 
5139 /* Functions used for secondary unicast and multicast support */
5140 void dev_set_rx_mode(struct net_device *dev);
5141 int netif_set_promiscuity(struct net_device *dev, int inc);
5142 int dev_set_promiscuity(struct net_device *dev, int inc);
5143 int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
5144 int dev_set_allmulti(struct net_device *dev, int inc);
5145 void netif_state_change(struct net_device *dev);
5146 void netdev_state_change(struct net_device *dev);
5147 void __netdev_notify_peers(struct net_device *dev);
5148 void netdev_notify_peers(struct net_device *dev);
5149 void netdev_features_change(struct net_device *dev);
5150 /* Load a device via the kmod */
5151 void dev_load(struct net *net, const char *name);
5152 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5153 					struct rtnl_link_stats64 *storage);
5154 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5155 			     const struct net_device_stats *netdev_stats);
5156 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
5157 			   const struct pcpu_sw_netstats __percpu *netstats);
5158 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
5159 
5160 enum {
5161 	NESTED_SYNC_IMM_BIT,
5162 	NESTED_SYNC_TODO_BIT,
5163 };
5164 
5165 #define __NESTED_SYNC_BIT(bit)	((u32)1 << (bit))
5166 #define __NESTED_SYNC(name)	__NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
5167 
5168 #define NESTED_SYNC_IMM		__NESTED_SYNC(IMM)
5169 #define NESTED_SYNC_TODO	__NESTED_SYNC(TODO)
5170 
5171 struct netdev_nested_priv {
5172 	unsigned char flags;
5173 	void *data;
5174 };
5175 
5176 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
5177 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5178 						     struct list_head **iter);
5179 
5180 /* iterate through upper list, must be called under RCU read lock */
5181 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
5182 	for (iter = &(dev)->adj_list.upper, \
5183 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
5184 	     updev; \
5185 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
5186 
5187 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
5188 				  int (*fn)(struct net_device *upper_dev,
5189 					    struct netdev_nested_priv *priv),
5190 				  struct netdev_nested_priv *priv);
5191 
5192 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5193 				  struct net_device *upper_dev);
5194 
5195 bool netdev_has_any_upper_dev(struct net_device *dev);
5196 
5197 void *netdev_lower_get_next_private(struct net_device *dev,
5198 				    struct list_head **iter);
5199 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5200 					struct list_head **iter);
5201 
5202 #define netdev_for_each_lower_private(dev, priv, iter) \
5203 	for (iter = (dev)->adj_list.lower.next, \
5204 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
5205 	     priv; \
5206 	     priv = netdev_lower_get_next_private(dev, &(iter)))
5207 
5208 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
5209 	for (iter = &(dev)->adj_list.lower, \
5210 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
5211 	     priv; \
5212 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
5213 
5214 void *netdev_lower_get_next(struct net_device *dev,
5215 				struct list_head **iter);
5216 
5217 #define netdev_for_each_lower_dev(dev, ldev, iter) \
5218 	for (iter = (dev)->adj_list.lower.next, \
5219 	     ldev = netdev_lower_get_next(dev, &(iter)); \
5220 	     ldev; \
5221 	     ldev = netdev_lower_get_next(dev, &(iter)))
5222 
5223 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
5224 					     struct list_head **iter);
5225 int netdev_walk_all_lower_dev(struct net_device *dev,
5226 			      int (*fn)(struct net_device *lower_dev,
5227 					struct netdev_nested_priv *priv),
5228 			      struct netdev_nested_priv *priv);
5229 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
5230 				  int (*fn)(struct net_device *lower_dev,
5231 					    struct netdev_nested_priv *priv),
5232 				  struct netdev_nested_priv *priv);
5233 
5234 void *netdev_adjacent_get_private(struct list_head *adj_list);
5235 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
5236 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
5237 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
5238 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
5239 			  struct netlink_ext_ack *extack);
5240 int netdev_master_upper_dev_link(struct net_device *dev,
5241 				 struct net_device *upper_dev,
5242 				 void *upper_priv, void *upper_info,
5243 				 struct netlink_ext_ack *extack);
5244 void netdev_upper_dev_unlink(struct net_device *dev,
5245 			     struct net_device *upper_dev);
5246 int netdev_adjacent_change_prepare(struct net_device *old_dev,
5247 				   struct net_device *new_dev,
5248 				   struct net_device *dev,
5249 				   struct netlink_ext_ack *extack);
5250 void netdev_adjacent_change_commit(struct net_device *old_dev,
5251 				   struct net_device *new_dev,
5252 				   struct net_device *dev);
5253 void netdev_adjacent_change_abort(struct net_device *old_dev,
5254 				  struct net_device *new_dev,
5255 				  struct net_device *dev);
5256 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
5257 void *netdev_lower_dev_get_private(struct net_device *dev,
5258 				   struct net_device *lower_dev);
5259 void netdev_lower_state_changed(struct net_device *lower_dev,
5260 				void *lower_state_info);
5261 
5262 #define NETDEV_RSS_KEY_LEN 256
5263 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
5264 void netdev_rss_key_fill(void *buffer, size_t len);
5265 
5266 int skb_checksum_help(struct sk_buff *skb);
5267 int skb_crc32c_csum_help(struct sk_buff *skb);
5268 int skb_csum_hwoffload_help(struct sk_buff *skb,
5269 			    const netdev_features_t features);
5270 
5271 struct netdev_bonding_info {
5272 	ifslave	slave;
5273 	ifbond	master;
5274 };
5275 
5276 struct netdev_notifier_bonding_info {
5277 	struct netdev_notifier_info info; /* must be first */
5278 	struct netdev_bonding_info  bonding_info;
5279 };
5280 
5281 void netdev_bonding_info_change(struct net_device *dev,
5282 				struct netdev_bonding_info *bonding_info);
5283 
5284 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
5285 void ethtool_notify(struct net_device *dev, unsigned int cmd);
5286 #else
ethtool_notify(struct net_device * dev,unsigned int cmd)5287 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd)
5288 {
5289 }
5290 #endif
5291 
5292 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
5293 
can_checksum_protocol(netdev_features_t features,__be16 protocol)5294 static inline bool can_checksum_protocol(netdev_features_t features,
5295 					 __be16 protocol)
5296 {
5297 	if (protocol == htons(ETH_P_FCOE))
5298 		return !!(features & NETIF_F_FCOE_CRC);
5299 
5300 	/* Assume this is an IP checksum (not SCTP CRC) */
5301 
5302 	if (features & NETIF_F_HW_CSUM) {
5303 		/* Can checksum everything */
5304 		return true;
5305 	}
5306 
5307 	switch (protocol) {
5308 	case htons(ETH_P_IP):
5309 		return !!(features & NETIF_F_IP_CSUM);
5310 	case htons(ETH_P_IPV6):
5311 		return !!(features & NETIF_F_IPV6_CSUM);
5312 	default:
5313 		return false;
5314 	}
5315 }
5316 
5317 #ifdef CONFIG_BUG
5318 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
5319 #else
netdev_rx_csum_fault(struct net_device * dev,struct sk_buff * skb)5320 static inline void netdev_rx_csum_fault(struct net_device *dev,
5321 					struct sk_buff *skb)
5322 {
5323 }
5324 #endif
5325 /* rx skb timestamps */
5326 void net_enable_timestamp(void);
5327 void net_disable_timestamp(void);
5328 
netdev_get_tstamp(struct net_device * dev,const struct skb_shared_hwtstamps * hwtstamps,bool cycles)5329 static inline ktime_t netdev_get_tstamp(struct net_device *dev,
5330 					const struct skb_shared_hwtstamps *hwtstamps,
5331 					bool cycles)
5332 {
5333 	const struct net_device_ops *ops = dev->netdev_ops;
5334 
5335 	if (ops->ndo_get_tstamp)
5336 		return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
5337 
5338 	return hwtstamps->hwtstamp;
5339 }
5340 
5341 #ifndef CONFIG_PREEMPT_RT
netdev_xmit_set_more(bool more)5342 static inline void netdev_xmit_set_more(bool more)
5343 {
5344 	__this_cpu_write(softnet_data.xmit.more, more);
5345 }
5346 
netdev_xmit_more(void)5347 static inline bool netdev_xmit_more(void)
5348 {
5349 	return __this_cpu_read(softnet_data.xmit.more);
5350 }
5351 #else
netdev_xmit_set_more(bool more)5352 static inline void netdev_xmit_set_more(bool more)
5353 {
5354 	current->net_xmit.more = more;
5355 }
5356 
netdev_xmit_more(void)5357 static inline bool netdev_xmit_more(void)
5358 {
5359 	return current->net_xmit.more;
5360 }
5361 #endif
5362 
__netdev_start_xmit(const struct net_device_ops * ops,struct sk_buff * skb,struct net_device * dev,bool more)5363 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
5364 					      struct sk_buff *skb, struct net_device *dev,
5365 					      bool more)
5366 {
5367 	netdev_xmit_set_more(more);
5368 	return ops->ndo_start_xmit(skb, dev);
5369 }
5370 
netdev_start_xmit(struct sk_buff * skb,struct net_device * dev,struct netdev_queue * txq,bool more)5371 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
5372 					    struct netdev_queue *txq, bool more)
5373 {
5374 	const struct net_device_ops *ops = dev->netdev_ops;
5375 	netdev_tx_t rc;
5376 
5377 	rc = __netdev_start_xmit(ops, skb, dev, more);
5378 	if (rc == NETDEV_TX_OK)
5379 		txq_trans_update(dev, txq);
5380 
5381 	return rc;
5382 }
5383 
5384 int netdev_class_create_file_ns(const struct class_attribute *class_attr,
5385 				const struct ns_common *ns);
5386 void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
5387 				 const struct ns_common *ns);
5388 
5389 extern const struct kobj_ns_type_operations net_ns_type_operations;
5390 
5391 const char *netdev_drivername(const struct net_device *dev);
5392 
netdev_intersect_features(netdev_features_t f1,netdev_features_t f2)5393 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
5394 							  netdev_features_t f2)
5395 {
5396 	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
5397 		if (f1 & NETIF_F_HW_CSUM)
5398 			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5399 		else
5400 			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5401 	}
5402 
5403 	return f1 & f2;
5404 }
5405 
netdev_get_wanted_features(struct net_device * dev)5406 static inline netdev_features_t netdev_get_wanted_features(
5407 	struct net_device *dev)
5408 {
5409 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
5410 }
5411 netdev_features_t netdev_increment_features(netdev_features_t all,
5412 	netdev_features_t one, netdev_features_t mask);
5413 
5414 /* Allow TSO being used on stacked device :
5415  * Performing the GSO segmentation before last device
5416  * is a performance improvement.
5417  */
netdev_add_tso_features(netdev_features_t features,netdev_features_t mask)5418 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
5419 							netdev_features_t mask)
5420 {
5421 	return netdev_increment_features(features, NETIF_F_ALL_TSO |
5422 					 NETIF_F_ALL_FOR_ALL, mask);
5423 }
5424 
5425 int __netdev_update_features(struct net_device *dev);
5426 void netdev_update_features(struct net_device *dev);
5427 void netdev_change_features(struct net_device *dev);
5428 void netdev_compute_master_upper_features(struct net_device *dev, bool update_header);
5429 
5430 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5431 					struct net_device *dev);
5432 
5433 netdev_features_t passthru_features_check(struct sk_buff *skb,
5434 					  struct net_device *dev,
5435 					  netdev_features_t features);
5436 netdev_features_t netif_skb_features(struct sk_buff *skb);
5437 void skb_warn_bad_offload(const struct sk_buff *skb);
5438 
net_gso_ok(netdev_features_t features,int gso_type)5439 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
5440 {
5441 	netdev_features_t feature;
5442 
5443 	if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
5444 		gso_type |= __SKB_GSO_TCP_FIXEDID;
5445 
5446 	feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;
5447 
5448 	/* check flags correspondence */
5449 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
5450 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
5451 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
5452 	BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
5453 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
5454 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
5455 	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
5456 	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
5457 	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
5458 	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
5459 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
5460 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
5461 	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
5462 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
5463 	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
5464 	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
5465 	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
5466 	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
5467 	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
5468 	BUILD_BUG_ON(SKB_GSO_TCP_ACCECN !=
5469 		     (NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT));
5470 
5471 	return (features & feature) == feature;
5472 }
5473 
skb_gso_ok(struct sk_buff * skb,netdev_features_t features)5474 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
5475 {
5476 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
5477 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
5478 }
5479 
netif_needs_gso(struct sk_buff * skb,netdev_features_t features)5480 static inline bool netif_needs_gso(struct sk_buff *skb,
5481 				   netdev_features_t features)
5482 {
5483 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
5484 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
5485 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
5486 }
5487 
5488 void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
5489 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
5490 void netif_inherit_tso_max(struct net_device *to,
5491 			   const struct net_device *from);
5492 
5493 static inline unsigned int
netif_get_gro_max_size(const struct net_device * dev,const struct sk_buff * skb)5494 netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
5495 {
5496 	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
5497 	return skb->protocol == htons(ETH_P_IPV6) ?
5498 	       READ_ONCE(dev->gro_max_size) :
5499 	       READ_ONCE(dev->gro_ipv4_max_size);
5500 }
5501 
5502 static inline unsigned int
netif_get_gso_max_size(const struct net_device * dev,const struct sk_buff * skb)5503 netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
5504 {
5505 	/* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
5506 	return skb->protocol == htons(ETH_P_IPV6) ?
5507 	       READ_ONCE(dev->gso_max_size) :
5508 	       READ_ONCE(dev->gso_ipv4_max_size);
5509 }
5510 
netif_is_macsec(const struct net_device * dev)5511 static inline bool netif_is_macsec(const struct net_device *dev)
5512 {
5513 	return dev->priv_flags & IFF_MACSEC;
5514 }
5515 
netif_is_macvlan(const struct net_device * dev)5516 static inline bool netif_is_macvlan(const struct net_device *dev)
5517 {
5518 	return dev->priv_flags & IFF_MACVLAN;
5519 }
5520 
netif_is_macvlan_port(const struct net_device * dev)5521 static inline bool netif_is_macvlan_port(const struct net_device *dev)
5522 {
5523 	return dev->priv_flags & IFF_MACVLAN_PORT;
5524 }
5525 
netif_is_bond_master(const struct net_device * dev)5526 static inline bool netif_is_bond_master(const struct net_device *dev)
5527 {
5528 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
5529 }
5530 
netif_is_bond_slave(const struct net_device * dev)5531 static inline bool netif_is_bond_slave(const struct net_device *dev)
5532 {
5533 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
5534 }
5535 
netif_supports_nofcs(struct net_device * dev)5536 static inline bool netif_supports_nofcs(struct net_device *dev)
5537 {
5538 	return dev->priv_flags & IFF_SUPP_NOFCS;
5539 }
5540 
netif_has_l3_rx_handler(const struct net_device * dev)5541 static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
5542 {
5543 	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
5544 }
5545 
netif_is_l3_master(const struct net_device * dev)5546 static inline bool netif_is_l3_master(const struct net_device *dev)
5547 {
5548 	return dev->priv_flags & IFF_L3MDEV_MASTER;
5549 }
5550 
netif_is_l3_slave(const struct net_device * dev)5551 static inline bool netif_is_l3_slave(const struct net_device *dev)
5552 {
5553 	return dev->priv_flags & IFF_L3MDEV_SLAVE;
5554 }
5555 
dev_sdif(const struct net_device * dev)5556 static inline int dev_sdif(const struct net_device *dev)
5557 {
5558 #ifdef CONFIG_NET_L3_MASTER_DEV
5559 	if (netif_is_l3_slave(dev))
5560 		return dev->ifindex;
5561 #endif
5562 	return 0;
5563 }
5564 
netif_is_bridge_master(const struct net_device * dev)5565 static inline bool netif_is_bridge_master(const struct net_device *dev)
5566 {
5567 	return dev->priv_flags & IFF_EBRIDGE;
5568 }
5569 
netif_is_bridge_port(const struct net_device * dev)5570 static inline bool netif_is_bridge_port(const struct net_device *dev)
5571 {
5572 	return dev->priv_flags & IFF_BRIDGE_PORT;
5573 }
5574 
netif_is_ovs_master(const struct net_device * dev)5575 static inline bool netif_is_ovs_master(const struct net_device *dev)
5576 {
5577 	return dev->priv_flags & IFF_OPENVSWITCH;
5578 }
5579 
netif_is_ovs_port(const struct net_device * dev)5580 static inline bool netif_is_ovs_port(const struct net_device *dev)
5581 {
5582 	return dev->priv_flags & IFF_OVS_DATAPATH;
5583 }
5584 
netif_is_any_bridge_master(const struct net_device * dev)5585 static inline bool netif_is_any_bridge_master(const struct net_device *dev)
5586 {
5587 	return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
5588 }
5589 
netif_is_any_bridge_port(const struct net_device * dev)5590 static inline bool netif_is_any_bridge_port(const struct net_device *dev)
5591 {
5592 	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
5593 }
5594 
netif_is_team_master(const struct net_device * dev)5595 static inline bool netif_is_team_master(const struct net_device *dev)
5596 {
5597 	return dev->priv_flags & IFF_TEAM;
5598 }
5599 
netif_is_team_port(const struct net_device * dev)5600 static inline bool netif_is_team_port(const struct net_device *dev)
5601 {
5602 	return dev->priv_flags & IFF_TEAM_PORT;
5603 }
5604 
netif_is_lag_master(const struct net_device * dev)5605 static inline bool netif_is_lag_master(const struct net_device *dev)
5606 {
5607 	return netif_is_bond_master(dev) || netif_is_team_master(dev);
5608 }
5609 
netif_is_lag_port(const struct net_device * dev)5610 static inline bool netif_is_lag_port(const struct net_device *dev)
5611 {
5612 	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
5613 }
5614 
5615 bool netif_is_rxfh_configured(const struct net_device *dev);
5616 
netif_is_failover(const struct net_device * dev)5617 static inline bool netif_is_failover(const struct net_device *dev)
5618 {
5619 	return dev->priv_flags & IFF_FAILOVER;
5620 }
5621 
netif_is_failover_slave(const struct net_device * dev)5622 static inline bool netif_is_failover_slave(const struct net_device *dev)
5623 {
5624 	return dev->priv_flags & IFF_FAILOVER_SLAVE;
5625 }
5626 
5627 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
netif_keep_dst(struct net_device * dev)5628 static inline void netif_keep_dst(struct net_device *dev)
5629 {
5630 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
5631 }
5632 
5633 /* return true if dev can't cope with mtu frames that need vlan tag insertion */
netif_reduces_vlan_mtu(struct net_device * dev)5634 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
5635 {
5636 	/* TODO: reserve and use an additional IFF bit, if we get more users */
5637 	return netif_is_macsec(dev);
5638 }
5639 
5640 extern struct pernet_operations __net_initdata loopback_net_ops;
5641 
5642 /* Logging, debugging and troubleshooting/diagnostic helpers. */
5643 
5644 /* netdev_printk helpers, similar to dev_printk */
5645 
netdev_name(const struct net_device * dev)5646 static inline const char *netdev_name(const struct net_device *dev)
5647 {
5648 	if (!dev->name[0] || strchr(dev->name, '%'))
5649 		return "(unnamed net_device)";
5650 	return dev->name;
5651 }
5652 
netdev_reg_state(const struct net_device * dev)5653 static inline const char *netdev_reg_state(const struct net_device *dev)
5654 {
5655 	u8 reg_state = READ_ONCE(dev->reg_state);
5656 
5657 	switch (reg_state) {
5658 	case NETREG_UNINITIALIZED: return " (uninitialized)";
5659 	case NETREG_REGISTERED: return "";
5660 	case NETREG_UNREGISTERING: return " (unregistering)";
5661 	case NETREG_UNREGISTERED: return " (unregistered)";
5662 	case NETREG_RELEASED: return " (released)";
5663 	case NETREG_DUMMY: return " (dummy)";
5664 	}
5665 
5666 	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
5667 	return " (unknown)";
5668 }
5669 
5670 #define MODULE_ALIAS_NETDEV(device) \
5671 	MODULE_ALIAS("netdev-" device)
5672 
5673 /*
5674  * netdev_WARN() acts like dev_printk(), but with the key difference
5675  * of using a WARN/WARN_ON to get the message out, including the
5676  * file/line information and a backtrace.
5677  */
5678 #define netdev_WARN(dev, format, args...)			\
5679 	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
5680 	     netdev_reg_state(dev), ##args)
5681 
5682 #define netdev_WARN_ONCE(dev, format, args...)				\
5683 	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
5684 		  netdev_reg_state(dev), ##args)
5685 
5686 /*
5687  *	The list of packet types we will receive (as opposed to discard)
5688  *	and the routines to invoke.
5689  *
5690  *	Why 16. Because with 16 the only overlap we get on a hash of the
5691  *	low nibble of the protocol value is RARP/SNAP/X.25.
5692  *
5693  *		0800	IP
5694  *		0001	802.3
5695  *		0002	AX.25
5696  *		0004	802.2
5697  *		8035	RARP
5698  *		0005	SNAP
5699  *		0805	X.25
5700  *		0806	ARP
5701  *		8137	IPX
5702  *		0009	Localtalk
5703  *		86DD	IPv6
5704  */
5705 #define PTYPE_HASH_SIZE	(16)
5706 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
5707 
5708 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
5709 
5710 extern struct net_device *blackhole_netdev;
5711 
5712 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
5713 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5714 #define DEV_STATS_ADD(DEV, FIELD, VAL) 	\
5715 		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
5716 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
5717 
5718 #endif	/* _LINUX_NETDEVICE_H */
5719