xref: /linux/include/linux/netdevice.h (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the Interfaces handler.
8  *
9  * Version:	@(#)dev.h	1.0.10	08/12/93
10  *
11  * Authors:	Ross Biro
12  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
14  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
15  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
16  *		Bjorn Ekwall. <bj0rn@blox.se>
17  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
18  *
19  *		Moved to /usr/include/linux for NET3
20  */
21 #ifndef _LINUX_NETDEVICE_H
22 #define _LINUX_NETDEVICE_H
23 
24 #include <linux/timer.h>
25 #include <linux/bug.h>
26 #include <linux/delay.h>
27 #include <linux/atomic.h>
28 #include <linux/prefetch.h>
29 #include <asm/cache.h>
30 #include <asm/byteorder.h>
31 #include <asm/local.h>
32 
33 #include <linux/percpu.h>
34 #include <linux/rculist.h>
35 #include <linux/workqueue.h>
36 #include <linux/dynamic_queue_limits.h>
37 
38 #include <net/net_namespace.h>
39 #ifdef CONFIG_DCB
40 #include <net/dcbnl.h>
41 #endif
42 #include <net/netprio_cgroup.h>
43 #include <linux/netdev_features.h>
44 #include <linux/neighbour.h>
45 #include <linux/netdevice_xmit.h>
46 #include <uapi/linux/netdevice.h>
47 #include <uapi/linux/if_bonding.h>
48 #include <uapi/linux/pkt_cls.h>
49 #include <uapi/linux/netdev.h>
50 #include <linux/hashtable.h>
51 #include <linux/rbtree.h>
52 #include <net/net_trackers.h>
53 #include <net/net_debug.h>
54 #include <net/dropreason-core.h>
55 #include <net/neighbour_tables.h>
56 
57 struct netpoll_info;
58 struct device;
59 struct ethtool_ops;
60 struct kernel_hwtstamp_config;
61 struct phy_device;
62 struct dsa_port;
63 struct ip_tunnel_parm_kern;
64 struct macsec_context;
65 struct macsec_ops;
66 struct netdev_config;
67 struct netdev_name_node;
68 struct sd_flow_limit;
69 struct sfp_bus;
70 /* 802.11 specific */
71 struct wireless_dev;
72 /* 802.15.4 specific */
73 struct wpan_dev;
74 struct mpls_dev;
75 /* UDP Tunnel offloads */
76 struct udp_tunnel_info;
77 struct udp_tunnel_nic_info;
78 struct udp_tunnel_nic;
79 struct bpf_prog;
80 struct xdp_buff;
81 struct xdp_frame;
82 struct xdp_metadata_ops;
83 struct xdp_md;
84 struct ethtool_netdev_state;
85 struct phy_link_topology;
86 struct hwtstamp_provider;
87 
88 typedef u32 xdp_features_t;
89 
90 void synchronize_net(void);
91 void netdev_set_default_ethtool_ops(struct net_device *dev,
92 				    const struct ethtool_ops *ops);
93 void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
94 
95 /* Backlog congestion levels */
96 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
97 #define NET_RX_DROP		1	/* packet dropped */
98 
99 #define MAX_NEST_DEV 8
100 
101 /*
102  * Transmit return codes: transmit return codes originate from three different
103  * namespaces:
104  *
105  * - qdisc return codes
106  * - driver transmit return codes
107  * - errno values
108  *
109  * Drivers are allowed to return any one of those in their hard_start_xmit()
110  * function. Real network devices commonly used with qdiscs should only return
111  * the driver transmit return codes though - when qdiscs are used, the actual
112  * transmission happens asynchronously, so the value is not propagated to
113  * higher layers. Virtual network devices transmit synchronously; in this case
114  * the driver transmit return codes are consumed by dev_queue_xmit(), and all
115  * others are propagated to higher layers.
116  */
117 
118 /* qdisc ->enqueue() return codes. */
119 #define NET_XMIT_SUCCESS	0x00
120 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
121 #define NET_XMIT_CN		0x02	/* congestion notification	*/
122 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
123 
124 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
125  * indicates that the device will soon be dropping packets, or already drops
126  * some packets of the same priority, prompting us to send less aggressively. */
127 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
128 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
129 
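/* Example (illustrative sketch, not from this header): protocol code that
 * calls dev_queue_xmit() typically treats NET_XMIT_CN as success and maps
 * the remaining qdisc codes to an errno with these helpers:
 *
 *	err = dev_queue_xmit(skb);
 *	if (err > 0)
 *		err = net_xmit_errno(err);	// NET_XMIT_CN -> 0, else -ENOBUFS
 *	if (err)
 *		dev->stats.tx_errors++;		// "dev" is the transmitting device
 */
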
130 /* Driver transmit return codes */
131 #define NETDEV_TX_MASK		0xf0
132 
133 enum netdev_tx {
134 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
135 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
136 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
137 };
138 typedef enum netdev_tx netdev_tx_t;
139 
140 /*
141  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
142  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
143  */
144 static inline bool dev_xmit_complete(int rc)
145 {
146 	/*
147 	 * Positive cases with an skb consumed by a driver:
148 	 * - successful transmission (rc == NETDEV_TX_OK)
149 	 * - error while transmitting (rc < 0)
150 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
151 	 */
152 	if (likely(rc < NET_XMIT_MASK))
153 		return true;
154 
155 	return false;
156 }
157 
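/* Example (illustrative sketch): a transmit loop only advances past an skb
 * once the driver has consumed it; netdev_start_xmit() is the core helper
 * that invokes ndo_start_xmit():
 *
 *	rc = netdev_start_xmit(skb, dev, txq, false);
 *	if (!dev_xmit_complete(rc)) {
 *		// driver was busy: the skb is still owned by the caller
 *		// and must be requeued (or eventually dropped)
 *	}
 */
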
158 /*
159  *	Compute the worst-case header length according to the protocols
160  *	used.
161  */
162 
163 #if defined(CONFIG_HYPERV_NET)
164 # define LL_MAX_HEADER 128
165 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
166 # if defined(CONFIG_MAC80211_MESH)
167 #  define LL_MAX_HEADER 128
168 # else
169 #  define LL_MAX_HEADER 96
170 # endif
171 #else
172 # define LL_MAX_HEADER 32
173 #endif
174 
175 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
176     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
177 #define MAX_HEADER LL_MAX_HEADER
178 #else
179 #define MAX_HEADER (LL_MAX_HEADER + 48)
180 #endif
181 
182 /*
183  *	Old network device statistics. Fields are native words
184  *	(unsigned long) so they can be read and written atomically.
185  */
186 
187 #define NET_DEV_STAT(FIELD)			\
188 	union {					\
189 		unsigned long FIELD;		\
190 		atomic_long_t __##FIELD;	\
191 	}
192 
193 struct net_device_stats {
194 	NET_DEV_STAT(rx_packets);
195 	NET_DEV_STAT(tx_packets);
196 	NET_DEV_STAT(rx_bytes);
197 	NET_DEV_STAT(tx_bytes);
198 	NET_DEV_STAT(rx_errors);
199 	NET_DEV_STAT(tx_errors);
200 	NET_DEV_STAT(rx_dropped);
201 	NET_DEV_STAT(tx_dropped);
202 	NET_DEV_STAT(multicast);
203 	NET_DEV_STAT(collisions);
204 	NET_DEV_STAT(rx_length_errors);
205 	NET_DEV_STAT(rx_over_errors);
206 	NET_DEV_STAT(rx_crc_errors);
207 	NET_DEV_STAT(rx_frame_errors);
208 	NET_DEV_STAT(rx_fifo_errors);
209 	NET_DEV_STAT(rx_missed_errors);
210 	NET_DEV_STAT(tx_aborted_errors);
211 	NET_DEV_STAT(tx_carrier_errors);
212 	NET_DEV_STAT(tx_fifo_errors);
213 	NET_DEV_STAT(tx_heartbeat_errors);
214 	NET_DEV_STAT(tx_window_errors);
215 	NET_DEV_STAT(rx_compressed);
216 	NET_DEV_STAT(tx_compressed);
217 };
218 #undef NET_DEV_STAT
219 
220 /* per-cpu stats, allocated on demand.
221  * Try to fit them in a single cache line, for dev_get_stats() sake.
222  */
223 struct net_device_core_stats {
224 	unsigned long	rx_dropped;
225 	unsigned long	tx_dropped;
226 	unsigned long	rx_nohandler;
227 	unsigned long	rx_otherhost_dropped;
228 } __aligned(4 * sizeof(unsigned long));
229 
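/* These counters are bumped through the dev_core_stats_*_inc() helpers
 * (defined further down in this header), which allocate the per-CPU storage
 * on first use. A minimal sketch:
 *
 *	if (unlikely(!pt_prev))
 *		dev_core_stats_rx_nohandler_inc(skb->dev);
 */
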
230 #include <linux/cache.h>
231 #include <linux/skbuff.h>
232 
233 struct neighbour;
234 struct neigh_parms;
235 struct sk_buff;
236 
237 struct netdev_hw_addr {
238 	struct list_head	list;
239 	struct rb_node		node;
240 	unsigned char		addr[MAX_ADDR_LEN];
241 	unsigned char		type;
242 #define NETDEV_HW_ADDR_T_LAN		1
243 #define NETDEV_HW_ADDR_T_SAN		2
244 #define NETDEV_HW_ADDR_T_UNICAST	3
245 #define NETDEV_HW_ADDR_T_MULTICAST	4
246 	bool			global_use;
247 	int			sync_cnt;
248 	int			refcount;
249 	int			synced;
250 	struct rcu_head		rcu_head;
251 };
252 
253 struct netdev_hw_addr_list {
254 	struct list_head	list;
255 	int			count;
256 
257 	/* Auxiliary tree for faster lookup on addition and deletion */
258 	struct rb_root		tree;
259 };
260 
261 #define netdev_hw_addr_list_count(l) ((l)->count)
262 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
263 #define netdev_hw_addr_list_for_each(ha, l) \
264 	list_for_each_entry(ha, &(l)->list, list)
265 
266 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
267 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
268 #define netdev_for_each_uc_addr(ha, dev) \
269 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
270 #define netdev_for_each_synced_uc_addr(_ha, _dev) \
271 	netdev_for_each_uc_addr((_ha), (_dev)) \
272 		if ((_ha)->sync_cnt)
273 
274 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
275 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
276 #define netdev_for_each_mc_addr(ha, dev) \
277 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
278 #define netdev_for_each_synced_mc_addr(_ha, _dev) \
279 	netdev_for_each_mc_addr((_ha), (_dev)) \
280 		if ((_ha)->sync_cnt)
281 
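/* Example (hedged sketch): a driver's ndo_set_rx_mode() commonly walks the
 * multicast list to program hardware filters; the "my_*" names are
 * hypothetical:
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *		struct netdev_hw_addr *ha;
 *
 *		my_hw_clear_mc_filters(priv);
 *		netdev_for_each_mc_addr(ha, dev)
 *			my_hw_add_mc_filter(priv, ha->addr);
 *	}
 */
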
282 struct hh_cache {
283 	unsigned int	hh_len;
284 	seqlock_t	hh_lock;
285 
286 	/* cached hardware header; allow for machine alignment needs.        */
287 #define HH_DATA_MOD	16
288 #define HH_DATA_OFF(__len) \
289 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
290 #define HH_DATA_ALIGN(__len) \
291 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
292 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
293 };
294 
295 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
296  * Alternative is:
297  *   dev->hard_header_len ? (dev->hard_header_len +
298  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
299  *
300  * We could use other alignment values, but we must maintain the
301  * relationship HH alignment <= LL alignment.
302  */
303 #define LL_RESERVED_SPACE(dev) \
304 	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
305 	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
306 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
307 	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
308 	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
309 
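/* Example (illustrative): allocating an skb for transmission with room for
 * the link-layer header, as ARP-style code does; "payload_len" is a
 * hypothetical length computed by the caller:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */
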
310 struct header_ops {
311 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
312 			   unsigned short type, const void *daddr,
313 			   const void *saddr, unsigned int len);
314 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
315 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
316 	void	(*cache_update)(struct hh_cache *hh,
317 				const struct net_device *dev,
318 				const unsigned char *haddr);
319 	bool	(*validate)(const char *ll_header, unsigned int len);
320 	__be16	(*parse_protocol)(const struct sk_buff *skb);
321 };
322 
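/* A hedged sketch: Ethernet drivers normally use the stock eth_header_ops;
 * a custom link layer might supply only the hooks it needs (the "my_*"
 * callbacks are hypothetical):
 *
 *	static const struct header_ops my_header_ops = {
 *		.create	= my_header_create,
 *		.parse	= my_header_parse,
 *	};
 *
 *	dev->header_ops = &my_header_ops;
 */
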
323 /* These flag bits are private to the generic network queueing
324  * layer; they may not be explicitly referenced by any other
325  * code.
326  */
327 
328 enum netdev_state_t {
329 	__LINK_STATE_START,
330 	__LINK_STATE_PRESENT,
331 	__LINK_STATE_NOCARRIER,
332 	__LINK_STATE_LINKWATCH_PENDING,
333 	__LINK_STATE_DORMANT,
334 	__LINK_STATE_TESTING,
335 };
336 
337 struct gro_list {
338 	struct list_head	list;
339 	int			count;
340 };
341 
342 /*
343  * size of gro hash buckets, must be <= the number of bits in
344  * gro_node::bitmask
345  */
346 #define GRO_HASH_BUCKETS	8
347 
348 /**
349  * struct gro_node - structure to support Generic Receive Offload
350  * @bitmask: bitmask to indicate used buckets in @hash
351  * @hash: hashtable of pending aggregated skbs, separated by flows
352  * @rx_list: list of pending ``GRO_NORMAL`` skbs
353  * @rx_count: cached current length of @rx_list
354  * @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone
355  */
356 struct gro_node {
357 	unsigned long		bitmask;
358 	struct gro_list		hash[GRO_HASH_BUCKETS];
359 	struct list_head	rx_list;
360 	u32			rx_count;
361 	u32			cached_napi_id;
362 };
363 
364 /*
365  * Structure for per-NAPI config
366  */
367 struct napi_config {
368 	u64 gro_flush_timeout;
369 	u64 irq_suspend_timeout;
370 	u32 defer_hard_irqs;
371 	cpumask_t affinity_mask;
372 	unsigned int napi_id;
373 };
374 
375 /*
376  * Structure for NAPI scheduling similar to tasklet but with weighting
377  */
378 struct napi_struct {
379 	/* The poll_list must only be managed by the entity which
380 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
381 	 * whoever atomically sets that bit can add this napi_struct
382 	 * to the per-CPU poll_list, and whoever clears that bit
383 	 * can remove from the list right before clearing the bit.
384 	 */
385 	struct list_head	poll_list;
386 
387 	unsigned long		state;
388 	int			weight;
389 	u32			defer_hard_irqs_count;
390 	int			(*poll)(struct napi_struct *, int);
391 #ifdef CONFIG_NETPOLL
392 	/* CPU actively polling if netpoll is configured */
393 	int			poll_owner;
394 #endif
395 	/* CPU on which NAPI has been scheduled for processing */
396 	int			list_owner;
397 	struct net_device	*dev;
398 	struct sk_buff		*skb;
399 	struct gro_node		gro;
400 	struct hrtimer		timer;
401 	/* all fields past this point are write-protected by netdev_lock */
402 	struct task_struct	*thread;
403 	unsigned long		gro_flush_timeout;
404 	unsigned long		irq_suspend_timeout;
405 	u32			defer_hard_irqs;
406 	/* control-path-only fields follow */
407 	u32			napi_id;
408 	struct list_head	dev_list;
409 	struct hlist_node	napi_hash_node;
410 	int			irq;
411 	struct irq_affinity_notify notify;
412 	int			napi_rmap_idx;
413 	int			index;
414 	struct napi_config	*config;
415 };
416 
417 enum {
418 	NAPI_STATE_SCHED,		/* Poll is scheduled */
419 	NAPI_STATE_MISSED,		/* reschedule a napi */
420 	NAPI_STATE_DISABLE,		/* Disable pending */
421 	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
422 	NAPI_STATE_LISTED,		/* NAPI added to system lists */
423 	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
424 	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
425 	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
426 	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
427 	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
428 	NAPI_STATE_HAS_NOTIFIER,	/* Napi has an IRQ notifier */
429 };
430 
431 enum {
432 	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
433 	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
434 	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
435 	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
436 	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
437 	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
438 	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
439 	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
440 	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
441 	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
442 	NAPIF_STATE_HAS_NOTIFIER	= BIT(NAPI_STATE_HAS_NOTIFIER),
443 };
444 
445 enum gro_result {
446 	GRO_MERGED,
447 	GRO_MERGED_FREE,
448 	GRO_HELD,
449 	GRO_NORMAL,
450 	GRO_CONSUMED,
451 };
452 typedef enum gro_result gro_result_t;
453 
454 /*
455  * enum rx_handler_result - Possible return values for rx_handlers.
456  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
457  * further.
458  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
459  * case skb->dev was changed by rx_handler.
460  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
461  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
462  *
463  * rx_handlers are functions called from inside __netif_receive_skb(), to do
464  * special processing of the skb, prior to delivery to protocol handlers.
465  *
466  * Currently, a net_device can only have a single rx_handler registered. Trying
467  * to register a second rx_handler will return -EBUSY.
468  *
469  * To register a rx_handler on a net_device, use netdev_rx_handler_register().
470  * To unregister a rx_handler on a net_device, use
471  * netdev_rx_handler_unregister().
472  *
473  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
474  * do with the skb.
475  *
476  * If the rx_handler consumed the skb in some way, it should return
477  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
478  * the skb to be delivered in some other way.
479  *
480  * If the rx_handler changed skb->dev, to divert the skb to another
481  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
482  * new device will be called if it exists.
483  *
484  * If the rx_handler decides the skb should be ignored, it should return
485  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
486  * are registered on the exact device (ptype->dev == skb->dev).
487  *
488  * If the rx_handler didn't change skb->dev, but wants the skb to be normally
489  * delivered, it should return RX_HANDLER_PASS.
490  *
491  * A device without a registered rx_handler will behave as if rx_handler
492  * returned RX_HANDLER_PASS.
493  */
494 
495 enum rx_handler_result {
496 	RX_HANDLER_CONSUMED,
497 	RX_HANDLER_ANOTHER,
498 	RX_HANDLER_EXACT,
499 	RX_HANDLER_PASS,
500 };
501 typedef enum rx_handler_result rx_handler_result_t;
502 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
503 
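/* Example (hedged sketch): registering an rx_handler, as bridge/bonding-style
 * code does. Must run under rtnl_lock(); "my_handle_frame" and "my_priv" are
 * hypothetical:
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		// inspect or redirect skb here
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, my_handle_frame, my_priv);
 */
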
504 void __napi_schedule(struct napi_struct *n);
505 void __napi_schedule_irqoff(struct napi_struct *n);
506 
507 static inline bool napi_disable_pending(struct napi_struct *n)
508 {
509 	return test_bit(NAPI_STATE_DISABLE, &n->state);
510 }
511 
512 static inline bool napi_prefer_busy_poll(struct napi_struct *n)
513 {
514 	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
515 }
516 
517 /**
518  * napi_is_scheduled - test if NAPI is scheduled
519  * @n: NAPI context
520  *
521  * This check is "best-effort": with no locking in place, a NAPI
522  * can be scheduled or can terminate right after this check, so
523  * the result may be imprecise.
524  *
525  * NAPI_STATE_SCHED is an internal state; napi_is_scheduled()
526  * should not normally be used. Call napi_schedule()
527  * instead.
528  *
529  * Use only if the driver really needs to check if a NAPI
530  * is scheduled for example in the context of delayed timer
531  * that can be skipped if a NAPI is already scheduled.
532  *
533  * Return: True if NAPI is scheduled, False otherwise.
534  */
535 static inline bool napi_is_scheduled(struct napi_struct *n)
536 {
537 	return test_bit(NAPI_STATE_SCHED, &n->state);
538 }
539 
540 bool napi_schedule_prep(struct napi_struct *n);
541 
542 /**
543  *	napi_schedule - schedule NAPI poll
544  *	@n: NAPI context
545  *
546  * Schedule NAPI poll routine to be called if it is not already
547  * running.
548  * Return: true if the NAPI was scheduled, false otherwise.
549  * Refer to napi_schedule_prep() for the reasons why
550  * a NAPI might not be scheduled.
551  */
552 static inline bool napi_schedule(struct napi_struct *n)
553 {
554 	if (napi_schedule_prep(n)) {
555 		__napi_schedule(n);
556 		return true;
557 	}
558 
559 	return false;
560 }
561 
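/* Example (hedged sketch): the canonical interrupt/poll split. The hard IRQ
 * handler masks device interrupts and schedules NAPI; the poll callback
 * re-enables them only after napi_complete_done() says it is safe. The
 * "my_*" helpers and the priv layout are hypothetical:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_hw_mask_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_rx_clean(priv, budget);
 *
 *		if (done < budget && napi_complete_done(napi, done))
 *			my_hw_unmask_irqs(priv);
 *		return done;
 *	}
 */
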
562 /**
563  *	napi_schedule_irqoff - schedule NAPI poll
564  *	@n: NAPI context
565  *
566  * Variant of napi_schedule(), assuming hard irqs are masked.
567  */
568 static inline void napi_schedule_irqoff(struct napi_struct *n)
569 {
570 	if (napi_schedule_prep(n))
571 		__napi_schedule_irqoff(n);
572 }
573 
574 /**
575  * napi_complete_done - NAPI processing complete
576  * @n: NAPI context
577  * @work_done: number of packets processed
578  *
579  * Mark NAPI processing as complete. Should only be called if poll budget
580  * has not been completely consumed.
581  * Prefer over napi_complete().
582  * Return: false if device should avoid rearming interrupts.
583  */
584 bool napi_complete_done(struct napi_struct *n, int work_done);
585 
586 static inline bool napi_complete(struct napi_struct *n)
587 {
588 	return napi_complete_done(n, 0);
589 }
590 
591 int dev_set_threaded(struct net_device *dev, bool threaded);
592 
593 void napi_disable(struct napi_struct *n);
594 void napi_disable_locked(struct napi_struct *n);
595 
596 void napi_enable(struct napi_struct *n);
597 void napi_enable_locked(struct napi_struct *n);
598 
599 /**
600  *	napi_synchronize - wait until NAPI is not running
601  *	@n: NAPI context
602  *
603  * Wait until NAPI is done being scheduled on this context.
604  * Waits till any outstanding processing completes but
605  * does not disable future activations.
606  */
607 static inline void napi_synchronize(const struct napi_struct *n)
608 {
609 	if (IS_ENABLED(CONFIG_SMP))
610 		while (test_bit(NAPI_STATE_SCHED, &n->state))
611 			msleep(1);
612 	else
613 		barrier();
614 }
615 
616 /**
617  *	napi_if_scheduled_mark_missed - if napi is running, set the
618  *	NAPIF_STATE_MISSED
619  *	@n: NAPI context
620  *
621  * If napi is running, set the NAPIF_STATE_MISSED bit, and return true if
622  * NAPI is scheduled.
623  **/
624 static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
625 {
626 	unsigned long val, new;
627 
628 	val = READ_ONCE(n->state);
629 	do {
630 		if (val & NAPIF_STATE_DISABLE)
631 			return true;
632 
633 		if (!(val & NAPIF_STATE_SCHED))
634 			return false;
635 
636 		new = val | NAPIF_STATE_MISSED;
637 	} while (!try_cmpxchg(&n->state, &val, new));
638 
639 	return true;
640 }
641 
642 enum netdev_queue_state_t {
643 	__QUEUE_STATE_DRV_XOFF,
644 	__QUEUE_STATE_STACK_XOFF,
645 	__QUEUE_STATE_FROZEN,
646 };
647 
648 #define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
649 #define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
650 #define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)
651 
652 #define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
653 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
654 					QUEUE_STATE_FROZEN)
655 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
656 					QUEUE_STATE_FROZEN)
657 
658 /*
659  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
660  * netif_tx_* functions below are used to manipulate this flag.  The
661  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
662  * queue independently.  The netif_xmit_*stopped functions below are called
663  * to check if the queue has been stopped by the driver or stack (either
664  * of the XOFF bits are set in the state).  Drivers should not need to call
665  * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
666  */
667 
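/* Example (hedged sketch): a driver stops its own queue when the TX ring
 * fills, so that returning NETDEV_TX_BUSY stays a rare corner case; the
 * queue is woken again from TX-completion handling. The "my_*" helpers are
 * hypothetical:
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *		struct netdev_queue *txq;
 *
 *		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 *		my_post_tx_descriptor(priv, skb);
 *		if (my_tx_ring_full(priv))
 *			netif_tx_stop_queue(txq);
 *		return NETDEV_TX_OK;
 *	}
 */
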
668 struct netdev_queue {
669 /*
670  * read-mostly part
671  */
672 	struct net_device	*dev;
673 	netdevice_tracker	dev_tracker;
674 
675 	struct Qdisc __rcu	*qdisc;
676 	struct Qdisc __rcu	*qdisc_sleeping;
677 #ifdef CONFIG_SYSFS
678 	struct kobject		kobj;
679 	const struct attribute_group	**groups;
680 #endif
681 	unsigned long		tx_maxrate;
682 	/*
683 	 * Number of TX timeouts for this queue
684 	 * (/sys/class/net/DEV/Q/trans_timeout)
685 	 */
686 	atomic_long_t		trans_timeout;
687 
688 	/* Subordinate device that the queue has been assigned to */
689 	struct net_device	*sb_dev;
690 #ifdef CONFIG_XDP_SOCKETS
691 	struct xsk_buff_pool    *pool;
692 #endif
693 
694 /*
695  * write-mostly part
696  */
697 #ifdef CONFIG_BQL
698 	struct dql		dql;
699 #endif
700 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
701 	int			xmit_lock_owner;
702 	/*
703 	 * Time (in jiffies) of last Tx
704 	 */
705 	unsigned long		trans_start;
706 
707 	unsigned long		state;
708 
709 /*
710  * slow- / control-path part
711  */
712 	/* NAPI instance for the queue
713 	 * "ops protected", see comment about net_device::lock
714 	 */
715 	struct napi_struct	*napi;
716 
717 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
718 	int			numa_node;
719 #endif
720 } ____cacheline_aligned_in_smp;
721 
722 extern int sysctl_fb_tunnels_only_for_init_net;
723 extern int sysctl_devconf_inherit_init_net;
724 
725 /*
726  * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
727  *                                     == 1 : For initns only
728  *                                     == 2 : For none.
729  */
730 static inline bool net_has_fallback_tunnels(const struct net *net)
731 {
732 #if IS_ENABLED(CONFIG_SYSCTL)
733 	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
734 
735 	return !fb_tunnels_only_for_init_net ||
736 		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
737 #else
738 	return true;
739 #endif
740 }
741 
742 static inline int net_inherit_devconf(void)
743 {
744 #if IS_ENABLED(CONFIG_SYSCTL)
745 	return READ_ONCE(sysctl_devconf_inherit_init_net);
746 #else
747 	return 0;
748 #endif
749 }
750 
751 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
752 {
753 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
754 	return q->numa_node;
755 #else
756 	return NUMA_NO_NODE;
757 #endif
758 }
759 
760 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
761 {
762 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
763 	q->numa_node = node;
764 #endif
765 }
766 
767 #ifdef CONFIG_RFS_ACCEL
768 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
769 			 u16 filter_id);
770 #endif
771 
772 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */
773 enum xps_map_type {
774 	XPS_CPUS = 0,
775 	XPS_RXQS,
776 	XPS_MAPS_MAX,
777 };
778 
779 #ifdef CONFIG_XPS
780 /*
781  * This structure holds an XPS map which can be of variable length.  The
782  * map is an array of queues.
783  */
784 struct xps_map {
785 	unsigned int len;
786 	unsigned int alloc_len;
787 	struct rcu_head rcu;
788 	u16 queues[];
789 };
790 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
791 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
792        - sizeof(struct xps_map)) / sizeof(u16))
793 
794 /*
795  * This structure holds all XPS maps for a device.  Maps are indexed by CPU.
796  *
797  * We keep track of the number of cpus/rxqs used when the struct is allocated,
798  * in nr_ids. This helps avoid accessing out-of-bounds memory.
799  *
800  * We keep track of the number of traffic classes used when the struct is
801  * allocated, in num_tc. This will be used to navigate the maps, to ensure we
802  * do not cross their upper bound, as the original dev->num_tc can be updated
803  * in the meantime.
804  */
805 struct xps_dev_maps {
806 	struct rcu_head rcu;
807 	unsigned int nr_ids;
808 	s16 num_tc;
809 	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
810 };
811 
812 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
813 	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
814 
815 #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
816 	(_rxqs * (_tcs) * sizeof(struct xps_map *)))
817 
818 #endif /* CONFIG_XPS */
819 
820 #define TC_MAX_QUEUE	16
821 #define TC_BITMASK	15
822 /* HW offloaded queuing disciplines txq count and offset maps */
823 struct netdev_tc_txq {
824 	u16 count;
825 	u16 offset;
826 };
827 
828 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
829 /*
830  * This structure holds information about a device that is
831  * configured to run the FCoE protocol stack.
832  */
833 struct netdev_fcoe_hbainfo {
834 	char	manufacturer[64];
835 	char	serial_number[64];
836 	char	hardware_version[64];
837 	char	driver_version[64];
838 	char	optionrom_version[64];
839 	char	firmware_version[64];
840 	char	model[256];
841 	char	model_description[256];
842 };
843 #endif
844 
845 #define MAX_PHYS_ITEM_ID_LEN 32
846 
847 /* This structure holds a unique identifier for some physical
848  * item (a port, for example) used by a netdevice.
849  */
850 struct netdev_phys_item_id {
851 	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
852 	unsigned char id_len;
853 };
854 
855 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
856 					    struct netdev_phys_item_id *b)
857 {
858 	return a->id_len == b->id_len &&
859 	       memcmp(a->id, b->id, a->id_len) == 0;
860 }
861 
862 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
863 				       struct sk_buff *skb,
864 				       struct net_device *sb_dev);
865 
866 enum net_device_path_type {
867 	DEV_PATH_ETHERNET = 0,
868 	DEV_PATH_VLAN,
869 	DEV_PATH_BRIDGE,
870 	DEV_PATH_PPPOE,
871 	DEV_PATH_DSA,
872 	DEV_PATH_MTK_WDMA,
873 };
874 
875 struct net_device_path {
876 	enum net_device_path_type	type;
877 	const struct net_device		*dev;
878 	union {
879 		struct {
880 			u16		id;
881 			__be16		proto;
882 			u8		h_dest[ETH_ALEN];
883 		} encap;
884 		struct {
885 			enum {
886 				DEV_PATH_BR_VLAN_KEEP,
887 				DEV_PATH_BR_VLAN_TAG,
888 				DEV_PATH_BR_VLAN_UNTAG,
889 				DEV_PATH_BR_VLAN_UNTAG_HW,
890 			}		vlan_mode;
891 			u16		vlan_id;
892 			__be16		vlan_proto;
893 		} bridge;
894 		struct {
895 			int port;
896 			u16 proto;
897 		} dsa;
898 		struct {
899 			u8 wdma_idx;
900 			u8 queue;
901 			u16 wcid;
902 			u8 bss;
903 			u8 amsdu;
904 		} mtk_wdma;
905 	};
906 };
907 
908 #define NET_DEVICE_PATH_STACK_MAX	5
909 #define NET_DEVICE_PATH_VLAN_MAX	2
910 
911 struct net_device_path_stack {
912 	int			num_paths;
913 	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
914 };
915 
916 struct net_device_path_ctx {
917 	const struct net_device *dev;
918 	u8			daddr[ETH_ALEN];
919 
920 	int			num_vlans;
921 	struct {
922 		u16		id;
923 		__be16		proto;
924 	} vlan[NET_DEVICE_PATH_VLAN_MAX];
925 };
926 
927 enum tc_setup_type {
928 	TC_QUERY_CAPS,
929 	TC_SETUP_QDISC_MQPRIO,
930 	TC_SETUP_CLSU32,
931 	TC_SETUP_CLSFLOWER,
932 	TC_SETUP_CLSMATCHALL,
933 	TC_SETUP_CLSBPF,
934 	TC_SETUP_BLOCK,
935 	TC_SETUP_QDISC_CBS,
936 	TC_SETUP_QDISC_RED,
937 	TC_SETUP_QDISC_PRIO,
938 	TC_SETUP_QDISC_MQ,
939 	TC_SETUP_QDISC_ETF,
940 	TC_SETUP_ROOT_QDISC,
941 	TC_SETUP_QDISC_GRED,
942 	TC_SETUP_QDISC_TAPRIO,
943 	TC_SETUP_FT,
944 	TC_SETUP_QDISC_ETS,
945 	TC_SETUP_QDISC_TBF,
946 	TC_SETUP_QDISC_FIFO,
947 	TC_SETUP_QDISC_HTB,
948 	TC_SETUP_ACT,
949 };
950 
951 /* These structures hold the attributes of bpf state that are being passed
952  * to the netdevice through the bpf op.
953  */
954 enum bpf_netdev_command {
955 	/* Set or clear a bpf program used in the earliest stages of packet
956 	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
957 	 * is responsible for calling bpf_prog_put on any old progs that are
958 	 * stored. In case of error, the callee need not release the new prog
959 	 * reference, but on success it takes ownership and must bpf_prog_put
960 	 * when it is no longer used.
961 	 */
962 	XDP_SETUP_PROG,
963 	XDP_SETUP_PROG_HW,
964 	/* BPF program for offload callbacks, invoked at program load time. */
965 	BPF_OFFLOAD_MAP_ALLOC,
966 	BPF_OFFLOAD_MAP_FREE,
967 	XDP_SETUP_XSK_POOL,
968 };
969 
970 struct bpf_prog_offload_ops;
971 struct netlink_ext_ack;
972 struct xdp_umem;
973 struct xdp_dev_bulk_queue;
974 struct bpf_xdp_link;
975 
976 enum bpf_xdp_mode {
977 	XDP_MODE_SKB = 0,
978 	XDP_MODE_DRV = 1,
979 	XDP_MODE_HW = 2,
980 	__MAX_XDP_MODE
981 };
982 
983 struct bpf_xdp_entity {
984 	struct bpf_prog *prog;
985 	struct bpf_xdp_link *link;
986 };
987 
988 struct netdev_bpf {
989 	enum bpf_netdev_command command;
990 	union {
991 		/* XDP_SETUP_PROG */
992 		struct {
993 			u32 flags;
994 			struct bpf_prog *prog;
995 			struct netlink_ext_ack *extack;
996 		};
997 		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
998 		struct {
999 			struct bpf_offloaded_map *offmap;
1000 		};
1001 		/* XDP_SETUP_XSK_POOL */
1002 		struct {
1003 			struct xsk_buff_pool *pool;
1004 			u16 queue_id;
1005 		} xsk;
1006 	};
1007 };
1008 
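/* Example (hedged sketch): a driver's ndo_bpf() dispatches on the command;
 * the "my_*" helpers are hypothetical:
 *
 *	static int my_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return my_xdp_setup(dev, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return my_xsk_pool_setup(dev, bpf->xsk.pool,
 *						 bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */
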
1009 /* Flags for ndo_xsk_wakeup. */
1010 #define XDP_WAKEUP_RX (1 << 0)
1011 #define XDP_WAKEUP_TX (1 << 1)
1012 
1013 #ifdef CONFIG_XFRM_OFFLOAD
1014 struct xfrmdev_ops {
1015 	int	(*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
1016 	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
1017 	void	(*xdo_dev_state_free) (struct xfrm_state *x);
1018 	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
1019 				       struct xfrm_state *x);
1020 	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
1021 	void	(*xdo_dev_state_update_stats) (struct xfrm_state *x);
1022 	int	(*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
1023 	void	(*xdo_dev_policy_delete) (struct xfrm_policy *x);
1024 	void	(*xdo_dev_policy_free) (struct xfrm_policy *x);
1025 };
1026 #endif
1027 
1028 struct dev_ifalias {
1029 	struct rcu_head rcuhead;
1030 	char ifalias[];
1031 };
1032 
1033 struct devlink;
1034 struct tlsdev_ops;
1035 
1036 struct netdev_net_notifier {
1037 	struct list_head list;
1038 	struct notifier_block *nb;
1039 };
1040 
1041 /*
1042  * This structure defines the management hooks for network devices.
1043  * The following hooks can be defined; unless noted otherwise, they are
1044  * optional and can be filled with a null pointer.
1045  *
1046  * int (*ndo_init)(struct net_device *dev);
1047  *     This function is called once when a network device is registered.
1048  *     The network device can use this for any late stage initialization
1049  *     or semantic validation. It can fail with an error code which will
1050  *     be propagated back to register_netdev.
1051  *
1052  * void (*ndo_uninit)(struct net_device *dev);
1053  *     This function is called when a device is unregistered or when registration
1054  *     fails. It is not called if init fails.
1055  *
1056  * int (*ndo_open)(struct net_device *dev);
1057  *     This function is called when a network device transitions to the up
1058  *     state.
1059  *
1060  * int (*ndo_stop)(struct net_device *dev);
1061  *     This function is called when a network device transitions to the down
1062  *     state.
1063  *
1064  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1065  *                               struct net_device *dev);
1066  *	Called when a packet needs to be transmitted.
1067  *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
1068  *	the queue before that can happen; it's for obsolete devices and weird
1069  *	corner cases, but the stack really does a non-trivial amount
1070  *	of useless work if you return NETDEV_TX_BUSY.
1071  *	Required; cannot be NULL.
1072  *
1073  * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1074  *					   struct net_device *dev
1075  *					   netdev_features_t features);
1076  *	Called by core transmit path to determine if device is capable of
1077  *	performing offload operations on a given packet. This is to give
1078  *	the device an opportunity to implement any restrictions that cannot
1079  *	be otherwise expressed by feature flags. The check is called with
1080  *	the set of features that the stack has calculated and it returns
1081  *	those the driver believes to be appropriate.
1082  *
1083  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
1084  *                         struct net_device *sb_dev);
1085  *	Called to decide which queue to use when device supports multiple
1086  *	transmit queues.
1087  *
1088  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
1089  *	This function is called to allow the device receiver to make
1090  *	changes to its configuration when multicast or promiscuous mode is enabled.
1091  *
1092  * void (*ndo_set_rx_mode)(struct net_device *dev);
1093  *	This function is called when the device's address list filtering changes.
1094  *	If driver handles unicast address filtering, it should set
1095  *	IFF_UNICAST_FLT in its priv_flags.
1096  *
1097  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
1098  *	This function  is called when the Media Access Control address
1099  *	needs to be changed. If this interface is not defined, the
1100  *	MAC address cannot be changed.
1101  *
1102  * int (*ndo_validate_addr)(struct net_device *dev);
1103  *	Test if Media Access Control address is valid for the device.
1104  *
1105  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1106  *	Old-style ioctl entry point. This is used internally by the
1107  *	ieee802154 subsystem but is no longer called by the device
1108  *	ioctl handler.
1109  *
1110  * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
1111  *	Used by the bonding driver for its device specific ioctls:
1112  *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
1113  *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
1114  *
1115  * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1116  *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
1117  *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
1118  *
1119  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
1120  *	Used to set a network device's bus interface parameters. This interface
1121  *	is retained for legacy reasons; new devices should use the bus
1122  *	interface (PCI) for low level management.
1123  *
1124  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
1125  *	Called when a user wants to change the Maximum Transmission Unit (MTU)
1126  *	of a device.
1127  *
1128  * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
1129  *	Callback used when the transmitter has not made any progress
1130  *	for dev->watchdog ticks.
1131  *
1132  * void (*ndo_get_stats64)(struct net_device *dev,
1133  *                         struct rtnl_link_stats64 *storage);
1134  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1135  *	Called when a user wants to get the network device usage
1136  *	statistics. Drivers must do one of the following:
1137  *	1. Define @ndo_get_stats64 to fill in a zero-initialised
1138  *	   rtnl_link_stats64 structure passed by the caller.
1139  *	2. Define @ndo_get_stats to update a net_device_stats structure
1140  *	   (which should normally be dev->stats) and return a pointer to
1141  *	   it. The structure may be changed asynchronously only if each
1142  *	   field is written atomically.
1143  *	3. Update dev->stats asynchronously and atomically, and define
1144  *	   neither operation.
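 *
 *	A minimal sketch of option 1, summing hypothetical per-CPU counters
 *	(real drivers also use u64_stats_fetch_begin()/..._retry() so the
 *	64-bit reads are tear-free on 32-bit hosts):
 *
 *		static void my_get_stats64(struct net_device *dev,
 *					   struct rtnl_link_stats64 *stats)
 *		{
 *			struct my_priv *priv = netdev_priv(dev);
 *			int cpu;
 *
 *			for_each_possible_cpu(cpu) {
 *				const struct my_pcpu_stats *s;
 *
 *				s = per_cpu_ptr(priv->pcpu_stats, cpu);
 *				stats->rx_packets += s->rx_packets;
 *				stats->rx_bytes += s->rx_bytes;
 *			}
 *		}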
1145  *
1146  * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
1147  *	Return true if this device supports offload stats of this attr_id.
1148  *
1149  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
1150  *	void *attr_data)
1151  *	Get statistics for offload operations by attr_id. Write them into the
1152  *	attr_data pointer.
1153  *
1154  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
1155  *	If the device supports VLAN filtering, this function is called when a
1156  *	VLAN id is registered.
1157  *
1158  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
1159  *	If the device supports VLAN filtering, this function is called when a
1160  *	VLAN id is unregistered.
1161  *
1162  * void (*ndo_poll_controller)(struct net_device *dev);
1163  *
1164  *	SR-IOV management functions.
1165  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
1166  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
1167  *			  u8 qos, __be16 proto);
1168  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
1169  *			  int max_tx_rate);
1170  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
1171  * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
1172  * int (*ndo_get_vf_config)(struct net_device *dev,
1173  *			    int vf, struct ifla_vf_info *ivf);
1174  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
1175  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
1176  *			  struct nlattr *port[]);
1177  *
1178  *      Enable or disable a VF's ability to query its RSS Redirection Table and
1179  *      Hash Key. This is needed since on some devices VFs share this information
1180  *      with the PF, and querying it may introduce a theoretical security risk.
1181  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
1182  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
1183  * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
1184  *		       void *type_data);
1185  *	Called to setup any 'tc' scheduler, classifier or action on @dev.
1186  *	This is always called from the stack with the rtnl lock held and netif
1187  *	tx queues stopped. This allows the netdevice to perform queue
1188  *	management safely.
1189  *
1190  *	Fiber Channel over Ethernet (FCoE) offload functions.
1191  * int (*ndo_fcoe_enable)(struct net_device *dev);
1192  *	Called when the FCoE protocol stack wants to start using LLD for FCoE
1193  *	so the underlying device can perform whatever needed configuration or
1194  *	initialization to support acceleration of FCoE traffic.
1195  *
1196  * int (*ndo_fcoe_disable)(struct net_device *dev);
1197  *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
1198  *	so the underlying device can perform whatever needed clean-ups to
1199  *	stop supporting acceleration of FCoE traffic.
1200  *
1201  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
1202  *			     struct scatterlist *sgl, unsigned int sgc);
1203  *	Called when the FCoE Initiator wants to initialize an I/O that
1204  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
1205  *	perform the necessary setup and return 1 to indicate that the device is
1206  *	set up successfully to perform DDP on this I/O; otherwise it returns 0.
1207  *
1208  * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
1209  *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
1210  *	indicated by the FC exchange id 'xid', so the underlying device can
1211  *	clean up and reuse resources for later DDP requests.
1212  *
1213  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
1214  *			      struct scatterlist *sgl, unsigned int sgc);
1215  *	Called when the FCoE Target wants to initialize an I/O that
1216  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
1217  *	perform the necessary setup and return 1 to indicate that the device is
1218  *	set up successfully to perform DDP on this I/O; otherwise it returns 0.
1219  *
1220  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1221  *			       struct netdev_fcoe_hbainfo *hbainfo);
1222  *	Called when the FCoE Protocol stack wants information on the underlying
1223  *	device. This information is utilized by the FCoE protocol stack to
1224  *	register attributes with Fiber Channel management service as per the
1225  *	FC-GS Fabric Device Management Information (FDMI) specification.
1226  *
1227  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1228  *	Called when the underlying device wants to override default World Wide
1229  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1230  *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1231  *	protocol stack to use.
1232  *
1233  *	RFS acceleration.
1234  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1235  *			    u16 rxq_index, u32 flow_id);
1236  *	Set hardware filter for RFS.  rxq_index is the target queue index;
1237  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1238  *	Return the filter ID on success, or a negative error code.
1239  *
1240  *	Slave management functions (for bridge, bonding, etc).
1241  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1242  *	Called to make another netdev an underling.
1243  *
1244  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1245  *	Called to release previously enslaved netdev.
1246  *
1247  * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
1248  *					    struct sk_buff *skb,
1249  *					    bool all_slaves);
1250  *	Get the xmit slave of the master device. If all_slaves is true, the
1251  *	function assumes all the slaves can transmit.
1252  *
1253  *      Feature/offload setting functions.
1254  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1255  *		netdev_features_t features);
1256  *	Adjusts the requested feature flags according to device-specific
1257  *	constraints, and returns the resulting flags. Must not modify
1258  *	the device state.
1259  *
1260  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1261  *	Called to update device configuration to new features. Passed
1262  *	feature set might be less than what was returned by ndo_fix_features().
1263  *	Must return >0 or -errno if it changed dev->features itself.
1264  *
1265  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1266  *		      struct net_device *dev,
1267  *		      const unsigned char *addr, u16 vid, u16 flags,
1268  *		      bool *notified, struct netlink_ext_ack *extack);
1269  *	Adds an FDB entry to dev for addr.
1270  *	Callee shall set *notified to true if it sent any appropriate
1271  *	notification(s). Otherwise core will send a generic one.
1272  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1273  *		      struct net_device *dev,
1274  *		      const unsigned char *addr, u16 vid
1275  *		      bool *notified, struct netlink_ext_ack *extack);
1276  *	Deletes the FDB entry from dev corresponding to addr.
1277  *	Callee shall set *notified to true if it sent any appropriate
1278  *	notification(s). Otherwise core will send a generic one.
1279  * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
1280  *			   struct netlink_ext_ack *extack);
1281  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1282  *		       struct net_device *dev, struct net_device *filter_dev,
1283  *		       int *idx)
1284  *	Used to add FDB entries to dump requests. Implementers should add
1285  *	entries to skb and update idx with the number of entries.
1286  *
1287  * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
1288  *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
1289  *	Adds an MDB entry to dev.
1290  * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
1291  *		      struct netlink_ext_ack *extack);
1292  *	Deletes the MDB entry from dev.
1293  * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
1294  *			   struct netlink_ext_ack *extack);
1295  *	Bulk deletes MDB entries from dev.
1296  * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
1297  *		       struct netlink_callback *cb);
1298  *	Dumps MDB entries from dev. The first argument (marker) in the netlink
1299  *	callback is used by core rtnetlink code.
1300  *
1301  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1302  *			     u16 flags, struct netlink_ext_ack *extack)
1303  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1304  *			     struct net_device *dev, u32 filter_mask,
1305  *			     int nlflags)
1306  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1307  *			     u16 flags);
1308  *
1309  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1310  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
1311  *	which do not represent real hardware may define this to allow their
1312  *	userspace components to manage their virtual carrier state. Devices
1313  *	that determine carrier state from physical hardware properties (eg
1314  *	network cables) or protocol-dependent mechanisms (eg
1315  *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
1316  *
1317  * int (*ndo_get_phys_port_id)(struct net_device *dev,
1318  *			       struct netdev_phys_item_id *ppid);
1319  *	Called to get the ID of the physical port of this device. If the driver
1320  *	does not implement this, it is assumed that the hw is not able to have
1321  *	multiple net devices on a single physical port.
1322  *
1323  * int (*ndo_get_port_parent_id)(struct net_device *dev,
1324  *				 struct netdev_phys_item_id *ppid)
1325  *	Called to get the parent ID of the physical port of this device.
1326  *
1327  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1328  *				 struct net_device *dev)
1329  *	Called by upper layer devices to accelerate switching or other
1330  *	station functionality into hardware. 'pdev' is the lowerdev
1331  *	to use for the offload and 'dev' is the net device that will
1332  *	back the offload. Returns a pointer to the private structure
1333  *	the upper layer will maintain.
1334  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1335  *	Called by upper layer device to delete the station created
1336  *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1337  *	the station and priv is the structure returned by the add
1338  *	operation.
1339  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1340  *			     int queue_index, u32 maxrate);
1341  *	Called when a user wants to set a max-rate limitation on a specific
1342  *	TX queue.
1343  * int (*ndo_get_iflink)(const struct net_device *dev);
1344  *	Called to get the iflink value of this device.
1345  * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1346  *	This function is used to get egress tunnel information for a given skb.
1347  *	This is useful for retrieving outer tunnel header parameters while
1348  *	sampling a packet.
1349  * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1350  *	This function is used to specify the headroom that the skb must
1351  *	consider when allocating an skb during packet reception. Setting
1352  *	appropriate rx headroom value allows avoiding skb head copy on
1353  *	forward. Setting a negative value resets the rx headroom to the
1354  *	default value.
1355  * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
1356  *	This function is used to set or query state related to XDP on the
1357  *	netdevice and manage BPF offload. See definition of
1358  *	enum bpf_netdev_command for details.
1359  * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
1360  *			u32 flags);
1361  *	This function is used to submit @n XDP packets for transmit on a
1362  *	netdevice. Returns the number of frames successfully transmitted;
1363  *	frames that were dropped are freed/returned via xdp_return_frame().
1364  *	A negative return value indicates a general error invoking the ndo:
1365  *	no frames were transmitted and the caller will free all frames.
1366  * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1367  *					        struct xdp_buff *xdp);
1368  *      Get the xmit slave of master device based on the xdp_buff.
1369  * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
1370  *      This function is used to wake up the softirq, ksoftirqd or kthread
1371  *	responsible for sending and/or receiving packets on a specific
1372  *	queue id bound to an AF_XDP socket. The flags field specifies if
1373  *	only RX, only Tx, or both should be woken up using the flags
1374  *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
1375  * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
1376  *			 int cmd);
1377  *	Add, change, delete or get information on an IPv4 tunnel.
1378  * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
1379  *	If a device is paired with a peer device, return the peer instance.
1380  *	The caller must be under RCU read context.
1381  * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
1382  *     Get the forwarding path to reach the real device from the HW destination address
1383  * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
1384  *			     const struct skb_shared_hwtstamps *hwtstamps,
1385  *			     bool cycles);
1386  *	Get hardware timestamp based on normal/adjustable time or free running
1387  *	cycle counter. This function is required if the physical clock supports a
1388  *	free running cycle counter.
1389  *
1390  * int (*ndo_hwtstamp_get)(struct net_device *dev,
1391  *			   struct kernel_hwtstamp_config *kernel_config);
1392  *	Get the currently configured hardware timestamping parameters for the
1393  *	NIC device.
1394  *
1395  * int (*ndo_hwtstamp_set)(struct net_device *dev,
1396  *			   struct kernel_hwtstamp_config *kernel_config,
1397  *			   struct netlink_ext_ack *extack);
1398  *	Change the hardware timestamping parameters for the NIC device.
1399  */
1400 struct net_device_ops {
1401 	int			(*ndo_init)(struct net_device *dev);
1402 	void			(*ndo_uninit)(struct net_device *dev);
1403 	int			(*ndo_open)(struct net_device *dev);
1404 	int			(*ndo_stop)(struct net_device *dev);
1405 	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
1406 						  struct net_device *dev);
1407 	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
1408 						      struct net_device *dev,
1409 						      netdev_features_t features);
1410 	u16			(*ndo_select_queue)(struct net_device *dev,
1411 						    struct sk_buff *skb,
1412 						    struct net_device *sb_dev);
1413 	void			(*ndo_change_rx_flags)(struct net_device *dev,
1414 						       int flags);
1415 	void			(*ndo_set_rx_mode)(struct net_device *dev);
1416 	int			(*ndo_set_mac_address)(struct net_device *dev,
1417 						       void *addr);
1418 	int			(*ndo_validate_addr)(struct net_device *dev);
1419 	int			(*ndo_do_ioctl)(struct net_device *dev,
1420 					        struct ifreq *ifr, int cmd);
1421 	int			(*ndo_eth_ioctl)(struct net_device *dev,
1422 						 struct ifreq *ifr, int cmd);
1423 	int			(*ndo_siocbond)(struct net_device *dev,
1424 						struct ifreq *ifr, int cmd);
1425 	int			(*ndo_siocwandev)(struct net_device *dev,
1426 						  struct if_settings *ifs);
1427 	int			(*ndo_siocdevprivate)(struct net_device *dev,
1428 						      struct ifreq *ifr,
1429 						      void __user *data, int cmd);
1430 	int			(*ndo_set_config)(struct net_device *dev,
1431 					          struct ifmap *map);
1432 	int			(*ndo_change_mtu)(struct net_device *dev,
1433 						  int new_mtu);
1434 	int			(*ndo_neigh_setup)(struct net_device *dev,
1435 						   struct neigh_parms *);
1436 	void			(*ndo_tx_timeout) (struct net_device *dev,
1437 						   unsigned int txqueue);
1438 
1439 	void			(*ndo_get_stats64)(struct net_device *dev,
1440 						   struct rtnl_link_stats64 *storage);
1441 	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1442 	int			(*ndo_get_offload_stats)(int attr_id,
1443 							 const struct net_device *dev,
1444 							 void *attr_data);
1445 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1446 
1447 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
1448 						       __be16 proto, u16 vid);
1449 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1450 						        __be16 proto, u16 vid);
1451 #ifdef CONFIG_NET_POLL_CONTROLLER
1452 	void                    (*ndo_poll_controller)(struct net_device *dev);
1453 	int			(*ndo_netpoll_setup)(struct net_device *dev);
1454 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
1455 #endif
1456 	int			(*ndo_set_vf_mac)(struct net_device *dev,
1457 						  int queue, u8 *mac);
1458 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
1459 						   int queue, u16 vlan,
1460 						   u8 qos, __be16 proto);
1461 	int			(*ndo_set_vf_rate)(struct net_device *dev,
1462 						   int vf, int min_tx_rate,
1463 						   int max_tx_rate);
1464 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
1465 						       int vf, bool setting);
1466 	int			(*ndo_set_vf_trust)(struct net_device *dev,
1467 						    int vf, bool setting);
1468 	int			(*ndo_get_vf_config)(struct net_device *dev,
1469 						     int vf,
1470 						     struct ifla_vf_info *ivf);
1471 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
1472 							 int vf, int link_state);
1473 	int			(*ndo_get_vf_stats)(struct net_device *dev,
1474 						    int vf,
1475 						    struct ifla_vf_stats
1476 						    *vf_stats);
1477 	int			(*ndo_set_vf_port)(struct net_device *dev,
1478 						   int vf,
1479 						   struct nlattr *port[]);
1480 	int			(*ndo_get_vf_port)(struct net_device *dev,
1481 						   int vf, struct sk_buff *skb);
1482 	int			(*ndo_get_vf_guid)(struct net_device *dev,
1483 						   int vf,
1484 						   struct ifla_vf_guid *node_guid,
1485 						   struct ifla_vf_guid *port_guid);
1486 	int			(*ndo_set_vf_guid)(struct net_device *dev,
1487 						   int vf, u64 guid,
1488 						   int guid_type);
1489 	int			(*ndo_set_vf_rss_query_en)(
1490 						   struct net_device *dev,
1491 						   int vf, bool setting);
1492 	int			(*ndo_setup_tc)(struct net_device *dev,
1493 						enum tc_setup_type type,
1494 						void *type_data);
1495 #if IS_ENABLED(CONFIG_FCOE)
1496 	int			(*ndo_fcoe_enable)(struct net_device *dev);
1497 	int			(*ndo_fcoe_disable)(struct net_device *dev);
1498 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
1499 						      u16 xid,
1500 						      struct scatterlist *sgl,
1501 						      unsigned int sgc);
1502 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
1503 						     u16 xid);
1504 	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
1505 						       u16 xid,
1506 						       struct scatterlist *sgl,
1507 						       unsigned int sgc);
1508 	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1509 							struct netdev_fcoe_hbainfo *hbainfo);
1510 #endif
1511 
1512 #if IS_ENABLED(CONFIG_LIBFCOE)
1513 #define NETDEV_FCOE_WWNN 0
1514 #define NETDEV_FCOE_WWPN 1
1515 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
1516 						    u64 *wwn, int type);
1517 #endif
1518 
1519 #ifdef CONFIG_RFS_ACCEL
1520 	int			(*ndo_rx_flow_steer)(struct net_device *dev,
1521 						     const struct sk_buff *skb,
1522 						     u16 rxq_index,
1523 						     u32 flow_id);
1524 #endif
1525 	int			(*ndo_add_slave)(struct net_device *dev,
1526 						 struct net_device *slave_dev,
1527 						 struct netlink_ext_ack *extack);
1528 	int			(*ndo_del_slave)(struct net_device *dev,
1529 						 struct net_device *slave_dev);
1530 	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
1531 						      struct sk_buff *skb,
1532 						      bool all_slaves);
1533 	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
1534 							struct sock *sk);
1535 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
1536 						    netdev_features_t features);
1537 	int			(*ndo_set_features)(struct net_device *dev,
1538 						    netdev_features_t features);
1539 	int			(*ndo_neigh_construct)(struct net_device *dev,
1540 						       struct neighbour *n);
1541 	void			(*ndo_neigh_destroy)(struct net_device *dev,
1542 						     struct neighbour *n);
1543 
1544 	int			(*ndo_fdb_add)(struct ndmsg *ndm,
1545 					       struct nlattr *tb[],
1546 					       struct net_device *dev,
1547 					       const unsigned char *addr,
1548 					       u16 vid,
1549 					       u16 flags,
1550 					       bool *notified,
1551 					       struct netlink_ext_ack *extack);
1552 	int			(*ndo_fdb_del)(struct ndmsg *ndm,
1553 					       struct nlattr *tb[],
1554 					       struct net_device *dev,
1555 					       const unsigned char *addr,
1556 					       u16 vid,
1557 					       bool *notified,
1558 					       struct netlink_ext_ack *extack);
1559 	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
1560 						    struct net_device *dev,
1561 						    struct netlink_ext_ack *extack);
1562 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
1563 						struct netlink_callback *cb,
1564 						struct net_device *dev,
1565 						struct net_device *filter_dev,
1566 						int *idx);
1567 	int			(*ndo_fdb_get)(struct sk_buff *skb,
1568 					       struct nlattr *tb[],
1569 					       struct net_device *dev,
1570 					       const unsigned char *addr,
1571 					       u16 vid, u32 portid, u32 seq,
1572 					       struct netlink_ext_ack *extack);
1573 	int			(*ndo_mdb_add)(struct net_device *dev,
1574 					       struct nlattr *tb[],
1575 					       u16 nlmsg_flags,
1576 					       struct netlink_ext_ack *extack);
1577 	int			(*ndo_mdb_del)(struct net_device *dev,
1578 					       struct nlattr *tb[],
1579 					       struct netlink_ext_ack *extack);
1580 	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
1581 						    struct nlattr *tb[],
1582 						    struct netlink_ext_ack *extack);
1583 	int			(*ndo_mdb_dump)(struct net_device *dev,
1584 						struct sk_buff *skb,
1585 						struct netlink_callback *cb);
1586 	int			(*ndo_mdb_get)(struct net_device *dev,
1587 					       struct nlattr *tb[], u32 portid,
1588 					       u32 seq,
1589 					       struct netlink_ext_ack *extack);
1590 	int			(*ndo_bridge_setlink)(struct net_device *dev,
1591 						      struct nlmsghdr *nlh,
1592 						      u16 flags,
1593 						      struct netlink_ext_ack *extack);
1594 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
1595 						      u32 pid, u32 seq,
1596 						      struct net_device *dev,
1597 						      u32 filter_mask,
1598 						      int nlflags);
1599 	int			(*ndo_bridge_dellink)(struct net_device *dev,
1600 						      struct nlmsghdr *nlh,
1601 						      u16 flags);
1602 	int			(*ndo_change_carrier)(struct net_device *dev,
1603 						      bool new_carrier);
1604 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
1605 							struct netdev_phys_item_id *ppid);
1606 	int			(*ndo_get_port_parent_id)(struct net_device *dev,
1607 							  struct netdev_phys_item_id *ppid);
1608 	int			(*ndo_get_phys_port_name)(struct net_device *dev,
1609 							  char *name, size_t len);
1610 	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
1611 							struct net_device *dev);
1612 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
1613 							void *priv);
1614 
1615 	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
1616 						      int queue_index,
1617 						      u32 maxrate);
1618 	int			(*ndo_get_iflink)(const struct net_device *dev);
1619 	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
1620 						       struct sk_buff *skb);
1621 	void			(*ndo_set_rx_headroom)(struct net_device *dev,
1622 						       int needed_headroom);
1623 	int			(*ndo_bpf)(struct net_device *dev,
1624 					   struct netdev_bpf *bpf);
1625 	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
1626 						struct xdp_frame **xdp,
1627 						u32 flags);
1628 	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1629 							  struct xdp_buff *xdp);
1630 	int			(*ndo_xsk_wakeup)(struct net_device *dev,
1631 						  u32 queue_id, u32 flags);
1632 	int			(*ndo_tunnel_ctl)(struct net_device *dev,
1633 						  struct ip_tunnel_parm_kern *p,
1634 						  int cmd);
1635 	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
1636 	int                     (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
1637                                                          struct net_device_path *path);
1638 	ktime_t			(*ndo_get_tstamp)(struct net_device *dev,
1639 						  const struct skb_shared_hwtstamps *hwtstamps,
1640 						  bool cycles);
1641 	int			(*ndo_hwtstamp_get)(struct net_device *dev,
1642 						    struct kernel_hwtstamp_config *kernel_config);
1643 	int			(*ndo_hwtstamp_set)(struct net_device *dev,
1644 						    struct kernel_hwtstamp_config *kernel_config,
1645 						    struct netlink_ext_ack *extack);
1646 
1647 #if IS_ENABLED(CONFIG_NET_SHAPER)
1648 	/**
1649 	 * @net_shaper_ops: Device shaping offload operations
1650 	 * see include/net/net_shapers.h
1651 	 */
1652 	const struct net_shaper_ops *net_shaper_ops;
1653 #endif
1654 };
1655 
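/* Illustrative sketch (not part of this header): how a driver might
 * populate &struct net_device_ops. All foo_*() callbacks are hypothetical
 * placeholders; eth_mac_addr() and eth_validate_addr() are the generic
 * Ethernet helpers from <linux/etherdevice.h>. Only .ndo_start_xmit is
 * required for a device that transmits.
 */
#if 0	/* example only */
static int foo_open(struct net_device *dev);
static int foo_stop(struct net_device *dev);
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev);

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,		/* "ip link set dev foo0 up" */
	.ndo_stop		= foo_stop,		/* "ip link set dev foo0 down" */
	.ndo_start_xmit		= foo_start_xmit,	/* hand one skb to the hardware */
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
#endif
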
1656 /**
1657  * enum netdev_priv_flags - &struct net_device priv_flags
1658  *
1659  * These are the &struct net_device private flags; they are set only
1660  * internally by drivers and used in the kernel. These flags are
1661  * invisible to userspace, which means that their order can change
1662  * in any kernel release.
1663  *
1664  * You should add bitfield booleans after either net_device::priv_flags
1665  * (hotpath) or ::threaded (slowpath) instead of extending these flags.
1666  *
1667  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1668  * @IFF_EBRIDGE: Ethernet bridging device
1669  * @IFF_BONDING: bonding master or slave
1670  * @IFF_ISATAP: ISATAP interface (RFC4214)
1671  * @IFF_WAN_HDLC: WAN HDLC device
1672  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1673  *	release skb->dst
1674  * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1675  * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1676  * @IFF_MACVLAN_PORT: device used as macvlan port
1677  * @IFF_BRIDGE_PORT: device used as bridge port
1678  * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1679  * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1680  * @IFF_UNICAST_FLT: Supports unicast filtering
1681  * @IFF_TEAM_PORT: device used as team port
1682  * @IFF_SUPP_NOFCS: device supports sending custom FCS
1683  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1684  *	change when it's running
1685  * @IFF_MACVLAN: Macvlan device
1686  * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1687  *	underlying stacked devices
1688  * @IFF_L3MDEV_MASTER: device is an L3 master device
1689  * @IFF_NO_QUEUE: device can run without qdisc attached
1690  * @IFF_OPENVSWITCH: device is an Open vSwitch master
1691  * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1692  * @IFF_TEAM: device is a team device
1693  * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1694  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1695  *	entity (i.e. the master device for bridged veth)
1696  * @IFF_MACSEC: device is a MACsec device
1697  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1698  * @IFF_FAILOVER: device is a failover master device
1699  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1700  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1701  * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
1702  * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
1703  *	skb_headlen(skb) == 0 (data starts from frag0)
1704  */
1705 enum netdev_priv_flags {
1706 	IFF_802_1Q_VLAN			= 1<<0,
1707 	IFF_EBRIDGE			= 1<<1,
1708 	IFF_BONDING			= 1<<2,
1709 	IFF_ISATAP			= 1<<3,
1710 	IFF_WAN_HDLC			= 1<<4,
1711 	IFF_XMIT_DST_RELEASE		= 1<<5,
1712 	IFF_DONT_BRIDGE			= 1<<6,
1713 	IFF_DISABLE_NETPOLL		= 1<<7,
1714 	IFF_MACVLAN_PORT		= 1<<8,
1715 	IFF_BRIDGE_PORT			= 1<<9,
1716 	IFF_OVS_DATAPATH		= 1<<10,
1717 	IFF_TX_SKB_SHARING		= 1<<11,
1718 	IFF_UNICAST_FLT			= 1<<12,
1719 	IFF_TEAM_PORT			= 1<<13,
1720 	IFF_SUPP_NOFCS			= 1<<14,
1721 	IFF_LIVE_ADDR_CHANGE		= 1<<15,
1722 	IFF_MACVLAN			= 1<<16,
1723 	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
1724 	IFF_L3MDEV_MASTER		= 1<<18,
1725 	IFF_NO_QUEUE			= 1<<19,
1726 	IFF_OPENVSWITCH			= 1<<20,
1727 	IFF_L3MDEV_SLAVE		= 1<<21,
1728 	IFF_TEAM			= 1<<22,
1729 	IFF_RXFH_CONFIGURED		= 1<<23,
1730 	IFF_PHONY_HEADROOM		= 1<<24,
1731 	IFF_MACSEC			= 1<<25,
1732 	IFF_NO_RX_HANDLER		= 1<<26,
1733 	IFF_FAILOVER			= 1<<27,
1734 	IFF_FAILOVER_SLAVE		= 1<<28,
1735 	IFF_L3MDEV_RX_HANDLER		= 1<<29,
1736 	IFF_NO_ADDRCONF			= BIT_ULL(30),
1737 	IFF_TX_SKB_NO_LINEAR		= BIT_ULL(31),
1738 };
1739 
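/* Usage sketch (example only): priv_flags is tested with plain bit masks;
 * IFF_NO_QUEUE is just one flag picked for illustration.
 */
#if 0
	if (dev->priv_flags & IFF_NO_QUEUE)
		netdev_dbg(dev, "running without a root qdisc\n");
#endif
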
1740 /* Specifies the type of the struct net_device::ml_priv pointer */
1741 enum netdev_ml_priv_type {
1742 	ML_PRIV_NONE,
1743 	ML_PRIV_CAN,
1744 };
1745 
1746 enum netdev_stat_type {
1747 	NETDEV_PCPU_STAT_NONE,
1748 	NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
1749 	NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
1750 	NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
1751 };
1752 
1753 enum netdev_reg_state {
1754 	NETREG_UNINITIALIZED = 0,
1755 	NETREG_REGISTERED,	/* completed register_netdevice */
1756 	NETREG_UNREGISTERING,	/* called unregister_netdevice */
1757 	NETREG_UNREGISTERED,	/* completed unregister todo */
1758 	NETREG_RELEASED,	/* called free_netdev */
1759 	NETREG_DUMMY,		/* dummy device for NAPI poll */
1760 };
1761 
1762 /**
1763  *	struct net_device - The DEVICE structure.
1764  *
1765  *	Actually, this whole structure is a big mistake.  It mixes I/O
1766  *	data with strictly "high-level" data, and it has to know about
1767  *	almost every data structure used in the INET module.
1768  *
1769  *	@priv_flags:	flags invisible to userspace defined as bits, see
1770  *			enum netdev_priv_flags for the definitions
1771  *	@lltx:		device supports lockless Tx. Deprecated for real HW
1772  *			drivers. Mainly used by logical interfaces, such as
1773  *			bonding and tunnels
1774  *
1775  *	@name:	This is the first field of the "visible" part of this structure
1776  *		(i.e. as seen by users in the "Space.c" file).  It is the name
1777  *		of the interface.
1778  *
1779  *	@name_node:	Name hashlist node
1780  *	@ifalias:	SNMP alias
1781  *	@mem_end:	Shared memory end
1782  *	@mem_start:	Shared memory start
1783  *	@base_addr:	Device I/O address
1784  *	@irq:		Device IRQ number
1785  *
1786  *	@state:		Generic network queuing layer state, see netdev_state_t
1787  *	@dev_list:	The global list of network devices
1788  *	@napi_list:	List entry used for polling NAPI devices
1789  *	@unreg_list:	List entry used when we are unregistering the
1790  *			device; see the function unregister_netdev
1791  *	@close_list:	List entry used when we are closing the device
1792  *	@ptype_all:     Device-specific packet handlers for all protocols
1793  *	@ptype_specific: Device-specific, protocol-specific packet handlers
1794  *
1795  *	@adj_list:	Directly linked devices, like slaves for bonding
1796  *	@features:	Currently active device features
1797  *	@hw_features:	User-changeable features
1798  *
1799  *	@wanted_features:	User-requested features
1800  *	@vlan_features:		Mask of features inheritable by VLAN devices
1801  *
1802  *	@hw_enc_features:	Mask of features inherited by encapsulating devices
1803  *				This field indicates what encapsulation
1804  *				offloads the hardware is capable of doing,
1805  *				and drivers will need to set them appropriately.
1806  *
1807  *	@mpls_features:	Mask of features inheritable by MPLS
1808  *	@gso_partial_features: value(s) from NETIF_F_GSO\*
1809  *
1810  *	@ifindex:	interface index
1811  *	@group:		The group the device belongs to
1812  *
1813  *	@stats:		Legacy statistics struct; use
1814  *			rtnl_link_stats64 instead
1815  *
1816  *	@core_stats:	core networking counters,
1817  *			do not use this in drivers
1818  *	@carrier_up_count:	Number of times the carrier has been up
1819  *	@carrier_down_count:	Number of times the carrier has been down
1820  *
1821  *	@wireless_handlers:	List of functions handling Wireless
1822  *				Extensions (used instead of ioctl);
1823  *				see <net/iw_handler.h> for details.
1824  *
1825  *	@netdev_ops:	Set of callback pointers through which the
1826  *			driver implements the ndo_*() operations
1827  *	@xdp_metadata_ops:	Includes pointers to XDP metadata callbacks.
1828  *	@xsk_tx_metadata_ops:	Includes pointers to AF_XDP TX metadata callbacks.
1829  *	@ethtool_ops:	Management operations
1830  *	@l3mdev_ops:	Layer 3 master device operations
1831  *	@ndisc_ops:	Includes callbacks for different IPv6 neighbour
1832  *			discovery handling. Necessary for e.g. 6LoWPAN.
1833  *	@xfrmdev_ops:	Transformation offload operations
1834  *	@tlsdev_ops:	Transport Layer Security offload operations
1835  *	@header_ops:	Includes callbacks for creating, parsing,
1836  *			caching, etc. of Layer 2 headers.
1837  *
1838  *	@flags:		Interface flags (a la BSD)
1839  *	@xdp_features:	XDP capability supported by the device
1840  *	@gflags:	Global flags (kept as legacy)
1841  *	@priv_len:	Size of the ->priv flexible array
1842  *	@priv:		Flexible array containing private data
1843  *	@operstate:	RFC2863 operstate
1844  *	@link_mode:	Mapping policy to operstate
1845  *	@if_port:	Selectable AUI, TP, ...
1846  *	@dma:		DMA channel
1847  *	@mtu:		Interface MTU value
1848  *	@min_mtu:	Interface Minimum MTU value
1849  *	@max_mtu:	Interface Maximum MTU value
1850  *	@type:		Interface hardware type
1851  *	@hard_header_len: Maximum hardware header length.
1852  *	@min_header_len:  Minimum hardware header length
1853  *
1854  *	@needed_headroom: Extra headroom the hardware may need; this cannot
1855  *			  be guaranteed in all cases
1856  *	@needed_tailroom: Extra tailroom the hardware may need; this cannot
1857  *			  be guaranteed in all cases either. Some callers also
1858  *			  use LL_MAX_HEADER instead to allocate the skb
1859  *
1860  *	interface address info:
1861  *
1862  * 	@perm_addr:		Permanent hw address
1863  * 	@addr_assign_type:	Hw address assignment type
1864  * 	@addr_len:		Hardware address length
1865  *	@upper_level:		Maximum depth level of upper devices.
1866  *	@lower_level:		Maximum depth level of lower devices.
1867  *	@neigh_priv_len:	Used in neigh_alloc()
1868  * 	@dev_id:		Used to differentiate devices that share
1869  * 				the same link layer address
1870  * 	@dev_port:		Used to differentiate devices that share
1871  * 				the same function
1872  *	@addr_list_lock:	XXX: need comments on this one
1873  *	@name_assign_type:	network interface name assignment type
1874  *	@uc_promisc:		Flag that indicates promiscuous mode
1875  *				has been enabled due to the need to listen to
1876  *				additional unicast addresses in a device that
1877  *				does not implement ndo_set_rx_mode()
1878  *	@uc:			unicast mac addresses
1879  *	@mc:			multicast mac addresses
1880  *	@dev_addrs:		list of device hw addresses
1881  *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
1882  *	@promiscuity:		Number of times the NIC is told to work in
1883  *				promiscuous mode; if it becomes 0 the NIC will
1884  *				exit promiscuous mode
1885  *	@allmulti:		Counter, enables or disables allmulticast mode
1886  *
1887  *	@vlan_info:	VLAN info
1888  *	@dsa_ptr:	dsa specific data
1889  *	@tipc_ptr:	TIPC specific data
1890  *	@atalk_ptr:	AppleTalk link
1891  *	@ip_ptr:	IPv4 specific data
1892  *	@ip6_ptr:	IPv6 specific data
1893  *	@ax25_ptr:	AX.25 specific data
1894  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
1895  *	@ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1896  *			 device struct
1897  *	@mpls_ptr:	mpls_dev struct pointer
1898  *	@mctp_ptr:	MCTP specific data
1899  *
1900  *	@dev_addr:	Hw address (before bcast,
1901  *			because most packets are unicast)
1902  *
1903  *	@_rx:			Array of RX queues
1904  *	@num_rx_queues:		Number of RX queues
1905  *				allocated at register_netdev() time
1906  *	@real_num_rx_queues: 	Number of RX queues currently active in device
1907  *	@xdp_prog:		Pointer to the attached XDP BPF program
1908  *
1909  *	@rx_handler:		handler for received packets
1910  *	@rx_handler_data: 	XXX: need comments on this one
1911  *	@tcx_ingress:		BPF & clsact qdisc specific data for ingress processing
1912  *	@ingress_queue:		XXX: need comments on this one
1913  *	@nf_hooks_ingress:	netfilter hooks executed for ingress packets
1914  *	@broadcast:		hw bcast address
1915  *
1916  *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
1917  *			indexed by RX queue number. Assigned by driver.
1918  *			This must only be set if the ndo_rx_flow_steer
1919  *			operation is defined
1920  *	@index_hlist:		Device index hash chain
1921  *
1922  *	@_tx:			Array of TX queues
1923  *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
1924  *	@real_num_tx_queues: 	Number of TX queues currently active in device
1925  *	@qdisc:			Root qdisc from userspace point of view
1926  *	@tx_queue_len:		Max frames per queue allowed
1927  *	@tx_global_lock: 	XXX: need comments on this one
1928  *	@xdp_bulkq:		XDP device bulk queue
1929  *	@xps_maps:		all CPUs/RXQs maps for XPS device
1930  *
1932  *	@tcx_egress:		BPF & clsact qdisc specific data for egress processing
1933  *	@nf_hooks_egress:	netfilter hooks executed for egress packets
1934  *	@qdisc_hash:		qdisc hash table
1935  *	@watchdog_timeo:	Represents the timeout that is used by
1936  *				the watchdog (see dev_watchdog())
1937  *	@watchdog_timer:	Timer used by the transmit watchdog
1938  *
1939  *	@proto_down_reason:	reason a netdev interface is held down
1940  *	@pcpu_refcnt:		Number of references to this device
1941  *	@dev_refcnt:		Number of references to this device
1942  *	@refcnt_tracker:	Tracker directory for tracked references to this device
1943  *	@todo_list:		Delayed register/unregister
1944  *	@link_watch_list:	XXX: need comments on this one
1945  *
1946  *	@reg_state:		Register/unregister state machine
1947  *	@dismantle:		Device is going to be freed
1948  *	@rtnl_link_state:	This enum represents the phases of creating
1949  *				a new link
1950  *
1951  *	@needs_free_netdev:	Should unregister perform free_netdev?
1952  *	@priv_destructor:	Called from unregister
1953  *	@npinfo:		XXX: need comments on this one
1954  * 	@nd_net:		Network namespace this network device is inside
1955  *
1956  * 	@ml_priv:	Mid-layer private
1957  *	@ml_priv_type:  Mid-layer private type
1958  *
1959  *	@pcpu_stat_type:	Type of device statistics which the core should
1960  *				allocate/free: none, lstats, tstats, dstats. none
1961  *				means the driver is handling statistics allocation/
1962  *				freeing internally.
1963  *	@lstats:		Loopback statistics: packets, bytes
1964  *	@tstats:		Tunnel statistics: RX/TX packets, RX/TX bytes
1965  *	@dstats:		Dummy statistics: RX/TX/drop packets, RX/TX bytes
1966  *
1967  *	@garp_port:	GARP
1968  *	@mrp_port:	MRP
1969  *
1970  *	@dm_private:	Drop monitor private
1971  *
1972  *	@dev:		Class/net/name entry
1973  *	@sysfs_groups:	Space for optional device, statistics and wireless
1974  *			sysfs groups
1975  *
1976  *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
1977  *	@rtnl_link_ops:	Rtnl_link_ops
1978  *	@stat_ops:	Optional ops for queue-aware statistics
1979  *	@queue_mgmt_ops:	Optional ops for queue management
1980  *
1981  *	@gso_max_size:	Maximum size of generic segmentation offload
1982  *	@tso_max_size:	Device (as in HW) limit on the max TSO request size
1983  *	@gso_max_segs:	Maximum number of segments that can be passed to the
1984  *			NIC for GSO
1985  *	@tso_max_segs:	Device (as in HW) limit on the max TSO segment count
1986  * 	@gso_ipv4_max_size:	Maximum size of generic segmentation offload,
1987  * 				for IPv4.
1988  *
1989  *	@dcbnl_ops:	Data Center Bridging netlink ops
1990  *	@num_tc:	Number of traffic classes in the net device
1991  *	@tc_to_txq:	XXX: need comments on this one
1992  *	@prio_tc_map:	XXX: need comments on this one
1993  *
1994  *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
1995  *
1996  *	@priomap:	XXX: need comments on this one
1997  *	@link_topo:	Physical link topology tracking attached PHYs
1998  *	@phydev:	Physical device may attach itself
1999  *			for hardware timestamping
2000  *	@sfp_bus:	attached &struct sfp_bus structure.
2001  *
2002  *	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
2003  *
2004  *	@proto_down:	protocol port state information can be sent to the
2005  *			switch driver and used to set the phys state of the
2006  *			switch port.
2007  *
2008  *	@threaded:	napi threaded mode is enabled
2009  *
2010  *	@irq_affinity_auto: driver wants the core to store and re-assign the IRQ
2011  *			    affinity. Set by netif_set_affinity_auto(), then
2012  *			    the driver must create a persistent napi by
2013  *			    netif_napi_add_config() and finally bind the napi to
2014  *			    IRQ (via netif_napi_set_irq()).
2015  *
2016  *	@rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap.
2017  *	                   Set by calling netif_enable_cpu_rmap().
2018  *
2019  *	@see_all_hwtstamp_requests: device wants to see calls to
2020  *			ndo_hwtstamp_set() for all timestamp requests
2021  *			regardless of source, even if those aren't
2022  *			HWTSTAMP_SOURCE_NETDEV
2023  *	@change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
2024  *	@netns_immutable: interface can't change network namespaces
2025  *	@fcoe_mtu:	device supports maximum FCoE MTU, 2158 bytes
2026  *
2027  *	@net_notifier_list:	List of per-net netdev notifier block
2028  *				that follow this device when it is moved
2029  *				to another network namespace.
2030  *
2031  *	@macsec_ops:    MACsec offloading ops
2032  *
2033  *	@udp_tunnel_nic_info:	static structure describing the UDP tunnel
2034  *				offload capabilities of the device
2035  *	@udp_tunnel_nic:	UDP tunnel offload state
2036  *	@ethtool:	ethtool related state
2037  *	@xdp_state:		stores info on attached XDP BPF programs
2038  *
2039  *	@nested_level:	Used as a parameter of spin_lock_nested() of
2040  *			dev->addr_list_lock.
2041  *	@unlink_list:	As netif_addr_lock() can be called recursively,
2042  *			keep a list of interfaces to be deleted.
2043  *	@gro_max_size:	Maximum size of aggregated packet in generic
2044  *			receive offload (GRO)
2045  * 	@gro_ipv4_max_size:	Maximum size of aggregated packet in generic
2046  * 				receive offload (GRO), for IPv4.
2047  *	@xdp_zc_max_segs:	Maximum number of segments supported by AF_XDP
2048  *				zero copy driver
2049  *
2050  *	@dev_addr_shadow:	Copy of @dev_addr to catch direct writes.
2051  *	@linkwatch_dev_tracker:	refcount tracker used by linkwatch.
2052  *	@watchdog_dev_tracker:	refcount tracker used by watchdog.
2053  *	@dev_registered_tracker:	tracker for reference held while
2054  *					registered
2055  *	@offload_xstats_l3:	L3 HW stats for this netdevice.
2056  *
2057  *	@devlink_port:	Pointer to related devlink port structure.
2058  *			Assigned by a driver before netdev registration using
2059  *			SET_NETDEV_DEVLINK_PORT macro. This pointer is static
2060  *			during the time netdevice is registered.
2061  *
2062  *	@dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
2063  *		   where the clock is recovered.
2064  *
2065  *	@max_pacing_offload_horizon: max EDT offload horizon in nsec.
2066  *	@napi_config: An array of napi_config structures containing per-NAPI
2067  *		      settings.
2068  *	@gro_flush_timeout:	timeout for GRO layer in NAPI
2069  *	@napi_defer_hard_irqs:	If not zero, a counter that allows NIC
2070  *				hard IRQs to be avoided on busy queues.
2071  *
2072  *	@neighbours:	List heads pointing to this device's neighbours'
2073  *			dev_list, one per address-family.
2074  *	@hwprov: Tracks which PTP performs hardware packet time stamping.
2075  *
2076  *	FIXME: cleanup struct net_device such that network protocol info
2077  *	moves out.
2078  */
2079 
2080 struct net_device {
2081 	/* Cacheline organization can be found documented in
2082 	 * Documentation/networking/net_cachelines/net_device.rst.
2083 	 * Please update the document when adding new fields.
2084 	 */
2085 
2086 	/* TX read-mostly hotpath */
2087 	__cacheline_group_begin(net_device_read_tx);
2088 	struct_group(priv_flags_fast,
2089 		unsigned long		priv_flags:32;
2090 		unsigned long		lltx:1;
2091 	);
2092 	const struct net_device_ops *netdev_ops;
2093 	const struct header_ops *header_ops;
2094 	struct netdev_queue	*_tx;
2095 	netdev_features_t	gso_partial_features;
2096 	unsigned int		real_num_tx_queues;
2097 	unsigned int		gso_max_size;
2098 	unsigned int		gso_ipv4_max_size;
2099 	u16			gso_max_segs;
2100 	s16			num_tc;
2101 	/* Note: dev->mtu is often read without holding a lock.
2102 	 * Writers usually hold RTNL.
2103 	 * It is recommended to use READ_ONCE() to annotate the reads,
2104 	 * and to use WRITE_ONCE() to annotate the writes.
2105 	 */
2106 	unsigned int		mtu;
2107 	unsigned short		needed_headroom;
2108 	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
2109 #ifdef CONFIG_XPS
2110 	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
2111 #endif
2112 #ifdef CONFIG_NETFILTER_EGRESS
2113 	struct nf_hook_entries __rcu *nf_hooks_egress;
2114 #endif
2115 #ifdef CONFIG_NET_XGRESS
2116 	struct bpf_mprog_entry __rcu *tcx_egress;
2117 #endif
2118 	__cacheline_group_end(net_device_read_tx);
2119 
2120 	/* TXRX read-mostly hotpath */
2121 	__cacheline_group_begin(net_device_read_txrx);
2122 	union {
2123 		struct pcpu_lstats __percpu		*lstats;
2124 		struct pcpu_sw_netstats __percpu	*tstats;
2125 		struct pcpu_dstats __percpu		*dstats;
2126 	};
2127 	unsigned long		state;
2128 	unsigned int		flags;
2129 	unsigned short		hard_header_len;
2130 	netdev_features_t	features;
2131 	struct inet6_dev __rcu	*ip6_ptr;
2132 	__cacheline_group_end(net_device_read_txrx);
2133 
2134 	/* RX read-mostly hotpath */
2135 	__cacheline_group_begin(net_device_read_rx);
2136 	struct bpf_prog __rcu	*xdp_prog;
2137 	struct list_head	ptype_specific;
2138 	int			ifindex;
2139 	unsigned int		real_num_rx_queues;
2140 	struct netdev_rx_queue	*_rx;
2141 	unsigned int		gro_max_size;
2142 	unsigned int		gro_ipv4_max_size;
2143 	rx_handler_func_t __rcu	*rx_handler;
2144 	void __rcu		*rx_handler_data;
2145 	possible_net_t			nd_net;
2146 #ifdef CONFIG_NETPOLL
2147 	struct netpoll_info __rcu	*npinfo;
2148 #endif
2149 #ifdef CONFIG_NET_XGRESS
2150 	struct bpf_mprog_entry __rcu *tcx_ingress;
2151 #endif
2152 	__cacheline_group_end(net_device_read_rx);
2153 
2154 	char			name[IFNAMSIZ];
2155 	struct netdev_name_node	*name_node;
2156 	struct dev_ifalias	__rcu *ifalias;
2157 	/*
2158 	 *	I/O specific fields
2159 	 *	FIXME: Merge these and struct ifmap into one
2160 	 */
2161 	unsigned long		mem_end;
2162 	unsigned long		mem_start;
2163 	unsigned long		base_addr;
2164 
2165 	/*
2166 	 *	Some hardware also needs these fields (state,dev_list,
2167 	 *	napi_list,unreg_list,close_list) but they are not
2168 	 *	part of the usual set specified in Space.c.
2169 	 */
2170 
2171 
2172 	struct list_head	dev_list;
2173 	struct list_head	napi_list;
2174 	struct list_head	unreg_list;
2175 	struct list_head	close_list;
2176 	struct list_head	ptype_all;
2177 
2178 	struct {
2179 		struct list_head upper;
2180 		struct list_head lower;
2181 	} adj_list;
2182 
2183 	/* Read-mostly cache-line for fast-path access */
2184 	xdp_features_t		xdp_features;
2185 	const struct xdp_metadata_ops *xdp_metadata_ops;
2186 	const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
2187 	unsigned short		gflags;
2188 
2189 	unsigned short		needed_tailroom;
2190 
2191 	netdev_features_t	hw_features;
2192 	netdev_features_t	wanted_features;
2193 	netdev_features_t	vlan_features;
2194 	netdev_features_t	hw_enc_features;
2195 	netdev_features_t	mpls_features;
2196 
2197 	unsigned int		min_mtu;
2198 	unsigned int		max_mtu;
2199 	unsigned short		type;
2200 	unsigned char		min_header_len;
2201 	unsigned char		name_assign_type;
2202 
2203 	int			group;
2204 
2205 	struct net_device_stats	stats; /* not used by modern drivers */
2206 
2207 	struct net_device_core_stats __percpu *core_stats;
2208 
2209 	/* Stats to monitor link on/off, flapping */
2210 	atomic_t		carrier_up_count;
2211 	atomic_t		carrier_down_count;
2212 
2213 #ifdef CONFIG_WIRELESS_EXT
2214 	const struct iw_handler_def *wireless_handlers;
2215 #endif
2216 	const struct ethtool_ops *ethtool_ops;
2217 #ifdef CONFIG_NET_L3_MASTER_DEV
2218 	const struct l3mdev_ops	*l3mdev_ops;
2219 #endif
2220 #if IS_ENABLED(CONFIG_IPV6)
2221 	const struct ndisc_ops *ndisc_ops;
2222 #endif
2223 
2224 #ifdef CONFIG_XFRM_OFFLOAD
2225 	const struct xfrmdev_ops *xfrmdev_ops;
2226 #endif
2227 
2228 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2229 	const struct tlsdev_ops *tlsdev_ops;
2230 #endif
2231 
2232 	unsigned int		operstate;
2233 	unsigned char		link_mode;
2234 
2235 	unsigned char		if_port;
2236 	unsigned char		dma;
2237 
2238 	/* Interface address info. */
2239 	unsigned char		perm_addr[MAX_ADDR_LEN];
2240 	unsigned char		addr_assign_type;
2241 	unsigned char		addr_len;
2242 	unsigned char		upper_level;
2243 	unsigned char		lower_level;
2244 
2245 	unsigned short		neigh_priv_len;
2246 	unsigned short          dev_id;
2247 	unsigned short          dev_port;
2248 	int			irq;
2249 	u32			priv_len;
2250 
2251 	spinlock_t		addr_list_lock;
2252 
2253 	struct netdev_hw_addr_list	uc;
2254 	struct netdev_hw_addr_list	mc;
2255 	struct netdev_hw_addr_list	dev_addrs;
2256 
2257 #ifdef CONFIG_SYSFS
2258 	struct kset		*queues_kset;
2259 #endif
2260 #ifdef CONFIG_LOCKDEP
2261 	struct list_head	unlink_list;
2262 #endif
2263 	unsigned int		promiscuity;
2264 	unsigned int		allmulti;
2265 	bool			uc_promisc;
2266 #ifdef CONFIG_LOCKDEP
2267 	unsigned char		nested_level;
2268 #endif
2269 
2270 
2271 	/* Protocol-specific pointers */
2272 	struct in_device __rcu	*ip_ptr;
2273 	/** @fib_nh_head: nexthops associated with this netdev */
2274 	struct hlist_head	fib_nh_head;
2275 
2276 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2277 	struct vlan_info __rcu	*vlan_info;
2278 #endif
2279 #if IS_ENABLED(CONFIG_NET_DSA)
2280 	struct dsa_port		*dsa_ptr;
2281 #endif
2282 #if IS_ENABLED(CONFIG_TIPC)
2283 	struct tipc_bearer __rcu *tipc_ptr;
2284 #endif
2285 #if IS_ENABLED(CONFIG_ATALK)
2286 	void 			*atalk_ptr;
2287 #endif
2288 #if IS_ENABLED(CONFIG_AX25)
2289 	struct ax25_dev	__rcu	*ax25_ptr;
2290 #endif
2291 #if IS_ENABLED(CONFIG_CFG80211)
2292 	struct wireless_dev	*ieee80211_ptr;
2293 #endif
2294 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
2295 	struct wpan_dev		*ieee802154_ptr;
2296 #endif
2297 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
2298 	struct mpls_dev __rcu	*mpls_ptr;
2299 #endif
2300 #if IS_ENABLED(CONFIG_MCTP)
2301 	struct mctp_dev __rcu	*mctp_ptr;
2302 #endif
2303 
2304 /*
2305  * Cache lines mostly used on receive path (including eth_type_trans())
2306  */
2307 	/* Interface address info used in eth_type_trans() */
2308 	const unsigned char	*dev_addr;
2309 
2310 	unsigned int		num_rx_queues;
2311 #define GRO_LEGACY_MAX_SIZE	65536u
2312 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2313  * and shinfo->gso_segs is a 16bit field.
2314  */
2315 #define GRO_MAX_SIZE		(8 * 65535u)
2316 	unsigned int		xdp_zc_max_segs;
2317 	struct netdev_queue __rcu *ingress_queue;
2318 #ifdef CONFIG_NETFILTER_INGRESS
2319 	struct nf_hook_entries __rcu *nf_hooks_ingress;
2320 #endif
2321 
2322 	unsigned char		broadcast[MAX_ADDR_LEN];
2323 #ifdef CONFIG_RFS_ACCEL
2324 	struct cpu_rmap		*rx_cpu_rmap;
2325 #endif
2326 	struct hlist_node	index_hlist;
2327 
2328 /*
2329  * Cache lines mostly used on transmit path
2330  */
2331 	unsigned int		num_tx_queues;
2332 	struct Qdisc __rcu	*qdisc;
2333 	unsigned int		tx_queue_len;
2334 	spinlock_t		tx_global_lock;
2335 
2336 	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2337 
2338 #ifdef CONFIG_NET_SCHED
2339 	DECLARE_HASHTABLE	(qdisc_hash, 4);
2340 #endif
2341 	/* These may be needed for future network-power-down code. */
2342 	struct timer_list	watchdog_timer;
2343 	int			watchdog_timeo;
2344 
2345 	u32                     proto_down_reason;
2346 
2347 	struct list_head	todo_list;
2348 
2349 #ifdef CONFIG_PCPU_DEV_REFCNT
2350 	int __percpu		*pcpu_refcnt;
2351 #else
2352 	refcount_t		dev_refcnt;
2353 #endif
2354 	struct ref_tracker_dir	refcnt_tracker;
2355 
2356 	struct list_head	link_watch_list;
2357 
2358 	u8 reg_state;
2359 
2360 	bool dismantle;
2361 
2362 	enum {
2363 		RTNL_LINK_INITIALIZED,
2364 		RTNL_LINK_INITIALIZING,
2365 	} rtnl_link_state:16;
2366 
2367 	bool needs_free_netdev;
2368 	void (*priv_destructor)(struct net_device *dev);
2369 
2370 	/* mid-layer private */
2371 	void				*ml_priv;
2372 	enum netdev_ml_priv_type	ml_priv_type;
2373 
2374 	enum netdev_stat_type		pcpu_stat_type:8;
2375 
2376 #if IS_ENABLED(CONFIG_GARP)
2377 	struct garp_port __rcu	*garp_port;
2378 #endif
2379 #if IS_ENABLED(CONFIG_MRP)
2380 	struct mrp_port __rcu	*mrp_port;
2381 #endif
2382 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
2383 	struct dm_hw_stat_delta __rcu *dm_private;
2384 #endif
2385 	struct device		dev;
2386 	const struct attribute_group *sysfs_groups[4];
2387 	const struct attribute_group *sysfs_rx_queue_group;
2388 
2389 	const struct rtnl_link_ops *rtnl_link_ops;
2390 
2391 	const struct netdev_stat_ops *stat_ops;
2392 
2393 	const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
2394 
2395 	/* for setting kernel sock attribute on TCP connection setup */
2396 #define GSO_MAX_SEGS		65535u
2397 #define GSO_LEGACY_MAX_SIZE	65536u
2398 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2399  * and shinfo->gso_segs is a 16bit field.
2400  */
2401 #define GSO_MAX_SIZE		(8 * GSO_MAX_SEGS)
2402 
2403 #define TSO_LEGACY_MAX_SIZE	65536
2404 #define TSO_MAX_SIZE		UINT_MAX
2405 	unsigned int		tso_max_size;
2406 #define TSO_MAX_SEGS		U16_MAX
2407 	u16			tso_max_segs;
2408 
2409 #ifdef CONFIG_DCB
2410 	const struct dcbnl_rtnl_ops *dcbnl_ops;
2411 #endif
2412 	u8			prio_tc_map[TC_BITMASK + 1];
2413 
2414 #if IS_ENABLED(CONFIG_FCOE)
2415 	unsigned int		fcoe_ddp_xid;
2416 #endif
2417 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2418 	struct netprio_map __rcu *priomap;
2419 #endif
2420 	struct phy_link_topology	*link_topo;
2421 	struct phy_device	*phydev;
2422 	struct sfp_bus		*sfp_bus;
2423 	struct lock_class_key	*qdisc_tx_busylock;
2424 	bool			proto_down;
2425 	bool			threaded;
2426 	bool			irq_affinity_auto;
2427 	bool			rx_cpu_rmap_auto;
2428 
2429 	/* priv_flags_slow, ungrouped to save space */
2430 	unsigned long		see_all_hwtstamp_requests:1;
2431 	unsigned long		change_proto_down:1;
2432 	unsigned long		netns_immutable:1;
2433 	unsigned long		fcoe_mtu:1;
2434 
2435 	struct list_head	net_notifier_list;
2436 
2437 #if IS_ENABLED(CONFIG_MACSEC)
2438 	/* MACsec management functions */
2439 	const struct macsec_ops *macsec_ops;
2440 #endif
2441 	const struct udp_tunnel_nic_info	*udp_tunnel_nic_info;
2442 	struct udp_tunnel_nic	*udp_tunnel_nic;
2443 
2444 	/** @cfg: net_device queue-related configuration */
2445 	struct netdev_config	*cfg;
2446 	/**
2447 	 * @cfg_pending: same as @cfg, but while the device is being actively
2448 	 *	reconfigured it also includes any configuration changes
2449 	 *	requested by the user, which may or may not be rejected.
2450 	 */
2451 	struct netdev_config	*cfg_pending;
2452 	struct ethtool_netdev_state *ethtool;
2453 
2454 	/* protected by rtnl_lock */
2455 	struct bpf_xdp_entity	xdp_state[__MAX_XDP_MODE];
2456 
2457 	u8 dev_addr_shadow[MAX_ADDR_LEN];
2458 	netdevice_tracker	linkwatch_dev_tracker;
2459 	netdevice_tracker	watchdog_dev_tracker;
2460 	netdevice_tracker	dev_registered_tracker;
2461 	struct rtnl_hw_stats64	*offload_xstats_l3;
2462 
2463 	struct devlink_port	*devlink_port;
2464 
2465 #if IS_ENABLED(CONFIG_DPLL)
2466 	struct dpll_pin	__rcu	*dpll_pin;
2467 #endif
2468 #if IS_ENABLED(CONFIG_PAGE_POOL)
2469 	/** @page_pools: page pools created for this netdevice */
2470 	struct hlist_head	page_pools;
2471 #endif
2472 
2473 	/** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
2474 	struct dim_irq_moder	*irq_moder;
2475 
2476 	u64			max_pacing_offload_horizon;
2477 	struct napi_config	*napi_config;
2478 	unsigned long		gro_flush_timeout;
2479 	u32			napi_defer_hard_irqs;
2480 
2481 	/**
2482 	 * @up: copy of @state's IFF_UP, but safe to read with just @lock.
2483 	 *	May report false negatives while the device is being opened
2484 	 *	or closed (@lock does not protect .ndo_open or .ndo_close).
2485 	 */
2486 	bool			up;
2487 
2488 	/**
2489 	 * @request_ops_lock: request the core to run all @netdev_ops and
2490 	 * @ethtool_ops under the @lock.
2491 	 */
2492 	bool			request_ops_lock;
2493 
2494 	/**
2495 	 * @lock: netdev-scope lock, protects a small selection of fields.
2496 	 * Should always be taken using netdev_lock() / netdev_unlock() helpers.
2497 	 * Drivers are free to use it for other protection.
2498 	 *
2499 	 * For the drivers that implement shaper or queue API, the scope
2500 	 * of this lock is expanded to cover most ndo/queue/ethtool/sysfs
2501 	 * operations. Drivers may opt-in to this behavior by setting
2502 	 * @request_ops_lock.
2503 	 *
2504 	 * @lock protection mixes with rtnl_lock in multiple ways, fields are
2505 	 * either:
2506 	 *
2507 	 * - simply protected by the instance @lock;
2508 	 *
2509 	 * - double protected - writers hold both locks, readers hold either;
2510 	 *
2511 	 * - ops protected - protected by the lock held around the NDOs
2512 	 *   and other callbacks, that is the instance lock on devices for
2513 	 *   which netdev_need_ops_lock() returns true, otherwise by rtnl_lock;
2514 	 *
2515 	 * - double ops protected - always protected by rtnl_lock but for
2516 	 *   devices for which netdev_need_ops_lock() returns true - also
2517 	 *   the instance lock.
2518 	 *
2519 	 * Simply protects:
2520 	 *	@gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
2521 	 *	@net_shaper_hierarchy, @reg_state, @threaded
2522 	 *
2523 	 * Double protects:
2524 	 *	@up
2525 	 *
2526 	 * Double ops protects:
2527 	 *	@real_num_rx_queues, @real_num_tx_queues
2528 	 *
2529 	 * Also protects some fields in:
2530 	 *	struct napi_struct, struct netdev_queue, struct netdev_rx_queue
2531 	 *
2532 	 * Ordering: take after rtnl_lock.
2533 	 */
2534 	struct mutex		lock;
2535 
2536 #if IS_ENABLED(CONFIG_NET_SHAPER)
2537 	/**
2538 	 * @net_shaper_hierarchy: data tracking the current shaper status
2539 	 *  see include/net/net_shapers.h
2540 	 */
2541 	struct net_shaper_hierarchy *net_shaper_hierarchy;
2542 #endif
2543 
2544 	struct hlist_head neighbours[NEIGH_NR_TABLES];
2545 
2546 	struct hwtstamp_provider __rcu	*hwprov;
2547 
2548 	u8			priv[] ____cacheline_aligned
2549 				       __counted_by(priv_len);
2550 } ____cacheline_aligned;
2551 #define to_net_dev(d) container_of(d, struct net_device, dev)
2552 
2553 /*
2554  * Driver should use this to assign devlink port instance to a netdevice
2555  * before it registers the netdevice. Therefore devlink_port is static
2556  * during the netdev lifetime after it is registered.
2557  */
2558 #define SET_NETDEV_DEVLINK_PORT(dev, port)			\
2559 ({								\
2560 	WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED);	\
2561 	((dev)->devlink_port = (port));				\
2562 })
2563 
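/* Usage sketch (hypothetical driver code): the devlink port is assigned
 * right before netdev registration; "port" is an assumed driver structure
 * embedding a struct devlink_port named dl_port.
 */
#if 0
	SET_NETDEV_DEVLINK_PORT(dev, &port->dl_port);
	err = register_netdev(dev);
#endif
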
2564 static inline bool netif_elide_gro(const struct net_device *dev)
2565 {
2566 	if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2567 		return true;
2568 	return false;
2569 }
2570 
2571 #define	NETDEV_ALIGN		32
2572 
2573 static inline
2574 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2575 {
2576 	return dev->prio_tc_map[prio & TC_BITMASK];
2577 }
2578 
2579 static inline
2580 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2581 {
2582 	if (tc >= dev->num_tc)
2583 		return -EINVAL;
2584 
2585 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2586 	return 0;
2587 }
2588 
2589 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2590 void netdev_reset_tc(struct net_device *dev);
2591 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2592 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2593 
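/* Usage sketch (hypothetical driver code): expose two traffic classes,
 * split the active TX queues between them, and map VLAN priorities 0-3
 * to TC 0 and 4-7 to TC 1.
 */
#if 0
static void foo_setup_two_tcs(struct net_device *dev)
{
	u16 half = dev->real_num_tx_queues / 2;
	u8 prio;

	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, half, 0);
	netdev_set_tc_queue(dev, 1, half, half);
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
}
#endif
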
2594 static inline
2595 int netdev_get_num_tc(struct net_device *dev)
2596 {
2597 	return dev->num_tc;
2598 }
2599 
2600 static inline void net_prefetch(void *p)
2601 {
2602 	prefetch(p);
2603 #if L1_CACHE_BYTES < 128
2604 	prefetch((u8 *)p + L1_CACHE_BYTES);
2605 #endif
2606 }
2607 
2608 static inline void net_prefetchw(void *p)
2609 {
2610 	prefetchw(p);
2611 #if L1_CACHE_BYTES < 128
2612 	prefetchw((u8 *)p + L1_CACHE_BYTES);
2613 #endif
2614 }
2615 
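/* Usage sketch (example only): RX fast paths typically prefetch packet
 * data before parsing headers; "rx_buf" is an assumed ring-buffer entry.
 */
#if 0
	net_prefetch(rx_buf->data);	/* warms up to 128B of the frame */
#endif
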
2616 void netdev_unbind_sb_channel(struct net_device *dev,
2617 			      struct net_device *sb_dev);
2618 int netdev_bind_sb_channel_queue(struct net_device *dev,
2619 				 struct net_device *sb_dev,
2620 				 u8 tc, u16 count, u16 offset);
2621 int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2622 static inline int netdev_get_sb_channel(struct net_device *dev)
2623 {
2624 	return max_t(int, -dev->num_tc, 0);
2625 }
2626 
2627 static inline
2628 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2629 					 unsigned int index)
2630 {
2631 	DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
2632 	return &dev->_tx[index];
2633 }
2634 
2635 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2636 						    const struct sk_buff *skb)
2637 {
2638 	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2639 }
2640 
2641 static inline void netdev_for_each_tx_queue(struct net_device *dev,
2642 					    void (*f)(struct net_device *,
2643 						      struct netdev_queue *,
2644 						      void *),
2645 					    void *arg)
2646 {
2647 	unsigned int i;
2648 
2649 	for (i = 0; i < dev->num_tx_queues; i++)
2650 		f(dev, &dev->_tx[i], arg);
2651 }
2652 
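/* Usage sketch (example only): per-queue driver state can be initialized
 * through the iterator; foo_init_one_txq() is a hypothetical callback
 * matching the required signature.
 */
#if 0
static void foo_init_one_txq(struct net_device *dev,
			     struct netdev_queue *txq, void *arg)
{
	/* per-queue setup goes here */
}

/* then, e.g. from ndo_open(): */
	netdev_for_each_tx_queue(dev, foo_init_one_txq, NULL);
#endif
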
2653 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2654 		     struct net_device *sb_dev);
2655 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2656 					 struct sk_buff *skb,
2657 					 struct net_device *sb_dev);
2658 
2659 /* returns the headroom that the master device needs to take into account
2660  * when forwarding to this dev
2661  */
2662 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2663 {
2664 	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2665 }
2666 
2667 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2668 {
2669 	if (dev->netdev_ops->ndo_set_rx_headroom)
2670 		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2671 }
2672 
2673 /* set the device rx headroom to the dev's default */
2674 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2675 {
2676 	netdev_set_rx_headroom(dev, -1);
2677 }
2678 
2679 static inline void *netdev_get_ml_priv(struct net_device *dev,
2680 				       enum netdev_ml_priv_type type)
2681 {
2682 	if (dev->ml_priv_type != type)
2683 		return NULL;
2684 
2685 	return dev->ml_priv;
2686 }
2687 
2688 static inline void netdev_set_ml_priv(struct net_device *dev,
2689 				      void *ml_priv,
2690 				      enum netdev_ml_priv_type type)
2691 {
2692 	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2693 	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2694 	     dev->ml_priv_type, type);
2695 	WARN(!dev->ml_priv_type && dev->ml_priv,
2696 	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2697 
2698 	dev->ml_priv = ml_priv;
2699 	dev->ml_priv_type = type;
2700 }
2701 
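/* Usage sketch (example only): the CAN mid-layer is the typed user of
 * ml_priv today; struct can_ml_priv and the call sites shown here are
 * assumptions for illustration.
 */
#if 0
	netdev_set_ml_priv(dev, can_ml, ML_PRIV_CAN);	/* at setup time */

	/* later, a type-checked lookup: */
	struct can_ml_priv *ml = netdev_get_ml_priv(dev, ML_PRIV_CAN);
	if (!ml)
		return;		/* not a CAN device */
#endif
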
2702 /*
2703  * Net namespace inlines
2704  */
2705 static inline
2706 struct net *dev_net(const struct net_device *dev)
2707 {
2708 	return read_pnet(&dev->nd_net);
2709 }
2710 
2711 static inline
2712 struct net *dev_net_rcu(const struct net_device *dev)
2713 {
2714 	return read_pnet_rcu(&dev->nd_net);
2715 }
2716 
2717 static inline
2718 void dev_net_set(struct net_device *dev, struct net *net)
2719 {
2720 	write_pnet(&dev->nd_net, net);
2721 }
2722 
2723 /**
2724  *	netdev_priv - access network device private data
2725  *	@dev: network device
2726  *
2727  * Get network device private data
2728  */
2729 static inline void *netdev_priv(const struct net_device *dev)
2730 {
2731 	return (void *)dev->priv;
2732 }
2733 
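/* Usage sketch (hypothetical driver code): private data is allocated
 * together with the netdev and then accessed via netdev_priv();
 * "struct foo_priv" is an assumed driver structure.
 */
#if 0
	struct net_device *dev;
	struct foo_priv *priv;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);
	priv->dev = dev;	/* back-pointer, a common driver pattern */
#endif
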
2734 /* Set the sysfs physical device reference for the network logical device;
2735  * if set prior to registration, a symlink is created during initialization.
2736  */
2737 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
2738 
2739 /* Set the sysfs device type for the network logical device to allow
2740  * fine-grained identification of different network device types. For
2741  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2742  */
2743 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
2744 
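/* Usage sketch (example only): a PCI driver ties the netdev to its
 * physical device before registration so /sys/class/net/<name>/device
 * points at the PCI function; "pdev" is an assumed struct pci_dev.
 */
#if 0
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
#endif
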
2745 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
2746 			  enum netdev_queue_type type,
2747 			  struct napi_struct *napi);
2748 
2749 static inline void netdev_lock(struct net_device *dev)
2750 {
2751 	mutex_lock(&dev->lock);
2752 }
2753 
2754 static inline void netdev_unlock(struct net_device *dev)
2755 {
2756 	mutex_unlock(&dev->lock);
2757 }
2758 /* Additional netdev_lock()-related helpers are in net/netdev_lock.h */
2759 
2760 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
2761 
2762 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
2763 {
2764 	netdev_lock(napi->dev);
2765 	netif_napi_set_irq_locked(napi, irq);
2766 	netdev_unlock(napi->dev);
2767 }
2768 
2769 /* Default NAPI poll() weight
2770  * Device drivers are strongly advised to not use bigger value
2771  * Device drivers are strongly advised not to use a bigger value.
2772 #define NAPI_POLL_WEIGHT 64
2773 
2774 void netif_napi_add_weight_locked(struct net_device *dev,
2775 				  struct napi_struct *napi,
2776 				  int (*poll)(struct napi_struct *, int),
2777 				  int weight);
2778 
2779 static inline void
2780 netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
2781 		      int (*poll)(struct napi_struct *, int), int weight)
2782 {
2783 	netdev_lock(dev);
2784 	netif_napi_add_weight_locked(dev, napi, poll, weight);
2785 	netdev_unlock(dev);
2786 }
2787 
2788 /**
2789  * netif_napi_add() - initialize a NAPI context
2790  * @dev:  network device
2791  * @napi: NAPI context
2792  * @poll: polling function
2793  *
2794  * netif_napi_add() must be used to initialize a NAPI context prior to calling
2795  * *any* of the other NAPI-related functions.
2796  */
2797 static inline void
2798 netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2799 	       int (*poll)(struct napi_struct *, int))
2800 {
2801 	netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2802 }
2803 
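/* Usage sketch (example only) of the usual NAPI lifecycle in a driver;
 * "ring" and foo_poll() are assumptions, foo_poll() being a poll
 * callback that returns the amount of work done.
 */
#if 0
	netif_napi_add(dev, &ring->napi, foo_poll);	/* at probe/setup */
	napi_enable(&ring->napi);			/* in ndo_open */

	napi_disable(&ring->napi);			/* in ndo_stop */
	netif_napi_del(&ring->napi);			/* at teardown */
#endif
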
2804 static inline void
2805 netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
2806 		      int (*poll)(struct napi_struct *, int))
2807 {
2808 	netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2809 }
2810 
2811 static inline void
2812 netif_napi_add_tx_weight(struct net_device *dev,
2813 			 struct napi_struct *napi,
2814 			 int (*poll)(struct napi_struct *, int),
2815 			 int weight)
2816 {
2817 	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2818 	netif_napi_add_weight(dev, napi, poll, weight);
2819 }
2820 
2821 static inline void
2822 netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
2823 			     int (*poll)(struct napi_struct *, int), int index)
2824 {
2825 	napi->index = index;
2826 	napi->config = &dev->napi_config[index];
2827 	netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2828 }
2829 
2830 /**
2831  * netif_napi_add_config - initialize a NAPI context with persistent config
2832  * @dev: network device
2833  * @napi: NAPI context
2834  * @poll: polling function
2835  * @index: the NAPI index
2836  */
2837 static inline void
2838 netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
2839 		      int (*poll)(struct napi_struct *, int), int index)
2840 {
2841 	netdev_lock(dev);
2842 	netif_napi_add_config_locked(dev, napi, poll, index);
2843 	netdev_unlock(dev);
2844 }
2845 
2846 /**
2847  * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2848  * @dev:  network device
2849  * @napi: NAPI context
2850  * @poll: polling function
2851  *
2852  * This variant of netif_napi_add() should be used from drivers using NAPI
2853  * to exclusively poll a TX queue.
2854  * This avoids adding it to napi_hash[], thus not polluting that hash table.
2855  */
2856 static inline void netif_napi_add_tx(struct net_device *dev,
2857 				     struct napi_struct *napi,
2858 				     int (*poll)(struct napi_struct *, int))
2859 {
2860 	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2861 }
2862 
2863 void __netif_napi_del_locked(struct napi_struct *napi);
2864 
2865 /**
2866  *  __netif_napi_del - remove a NAPI context
2867  *  @napi: NAPI context
2868  *
2869  * Warning: caller must observe RCU grace period before freeing memory
2870  * containing @napi. Drivers might want to call this helper to combine
2871  * all the needed RCU grace periods into a single one.
2872  */
2873 static inline void __netif_napi_del(struct napi_struct *napi)
2874 {
2875 	netdev_lock(napi->dev);
2876 	__netif_napi_del_locked(napi);
2877 	netdev_unlock(napi->dev);
2878 }
2879 
2880 static inline void netif_napi_del_locked(struct napi_struct *napi)
2881 {
2882 	__netif_napi_del_locked(napi);
2883 	synchronize_net();
2884 }
2885 
2886 /**
2887  *  netif_napi_del - remove a NAPI context
2888  *  @napi: NAPI context
2889  *
2890  *  netif_napi_del() removes a NAPI context from the network device NAPI list
2891  */
2892 static inline void netif_napi_del(struct napi_struct *napi)
2893 {
2894 	__netif_napi_del(napi);
2895 	synchronize_net();
2896 }
2897 
2898 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs);
2899 void netif_set_affinity_auto(struct net_device *dev);
2900 
2901 struct packet_type {
2902 	__be16			type;	/* This is really htons(ether_type). */
2903 	bool			ignore_outgoing;
2904 	struct net_device	*dev;	/* NULL is wildcarded here	     */
2905 	netdevice_tracker	dev_tracker;
2906 	int			(*func) (struct sk_buff *,
2907 					 struct net_device *,
2908 					 struct packet_type *,
2909 					 struct net_device *);
2910 	void			(*list_func) (struct list_head *,
2911 					      struct packet_type *,
2912 					      struct net_device *);
2913 	bool			(*id_match)(struct packet_type *ptype,
2914 					    struct sock *sk);
2915 	struct net		*af_packet_net;
2916 	void			*af_packet_priv;
2917 	struct list_head	list;
2918 };
2919 
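/* Usage sketch (example only): registering a receive handler for a
 * custom EtherType with dev_add_pack(); foo_rcv() is hypothetical and
 * 0x88B5 is the IEEE "local experimental" EtherType.
 */
#if 0
static struct packet_type foo_packet_type __read_mostly = {
	.type	= cpu_to_be16(0x88B5),
	.func	= foo_rcv,
};

/* then, typically from module init: */
	dev_add_pack(&foo_packet_type);
#endif
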
2920 struct offload_callbacks {
2921 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
2922 						netdev_features_t features);
2923 	struct sk_buff		*(*gro_receive)(struct list_head *head,
2924 						struct sk_buff *skb);
2925 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
2926 };
2927 
2928 struct packet_offload {
2929 	__be16			 type;	/* This is really htons(ether_type). */
2930 	u16			 priority;
2931 	struct offload_callbacks callbacks;
2932 	struct list_head	 list;
2933 };
2934 
2935 /* Often-modified stats are per-CPU; others are shared (netdev->stats). */
2936 struct pcpu_sw_netstats {
2937 	u64_stats_t		rx_packets;
2938 	u64_stats_t		rx_bytes;
2939 	u64_stats_t		tx_packets;
2940 	u64_stats_t		tx_bytes;
2941 	struct u64_stats_sync   syncp;
2942 } __aligned(4 * sizeof(u64));
2943 
2944 struct pcpu_dstats {
2945 	u64_stats_t		rx_packets;
2946 	u64_stats_t		rx_bytes;
2947 	u64_stats_t		tx_packets;
2948 	u64_stats_t		tx_bytes;
2949 	u64_stats_t		rx_drops;
2950 	u64_stats_t		tx_drops;
2951 	struct u64_stats_sync	syncp;
2952 } __aligned(8 * sizeof(u64));
2953 
2954 struct pcpu_lstats {
2955 	u64_stats_t packets;
2956 	u64_stats_t bytes;
2957 	struct u64_stats_sync syncp;
2958 } __aligned(2 * sizeof(u64));
2959 
2960 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2961 
2962 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2963 {
2964 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2965 
2966 	u64_stats_update_begin(&tstats->syncp);
2967 	u64_stats_add(&tstats->rx_bytes, len);
2968 	u64_stats_inc(&tstats->rx_packets);
2969 	u64_stats_update_end(&tstats->syncp);
2970 }
2971 
2972 static inline void dev_sw_netstats_tx_add(struct net_device *dev,
2973 					  unsigned int packets,
2974 					  unsigned int len)
2975 {
2976 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2977 
2978 	u64_stats_update_begin(&tstats->syncp);
2979 	u64_stats_add(&tstats->tx_bytes, len);
2980 	u64_stats_add(&tstats->tx_packets, packets);
2981 	u64_stats_update_end(&tstats->syncp);
2982 }
2983 
2984 static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2985 {
2986 	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2987 
2988 	u64_stats_update_begin(&lstats->syncp);
2989 	u64_stats_add(&lstats->bytes, len);
2990 	u64_stats_inc(&lstats->packets);
2991 	u64_stats_update_end(&lstats->syncp);
2992 }
2993 
2994 static inline void dev_dstats_rx_add(struct net_device *dev,
2995 				     unsigned int len)
2996 {
2997 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
2998 
2999 	u64_stats_update_begin(&dstats->syncp);
3000 	u64_stats_inc(&dstats->rx_packets);
3001 	u64_stats_add(&dstats->rx_bytes, len);
3002 	u64_stats_update_end(&dstats->syncp);
3003 }
3004 
3005 static inline void dev_dstats_rx_dropped(struct net_device *dev)
3006 {
3007 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3008 
3009 	u64_stats_update_begin(&dstats->syncp);
3010 	u64_stats_inc(&dstats->rx_drops);
3011 	u64_stats_update_end(&dstats->syncp);
3012 }
3013 
3014 static inline void dev_dstats_tx_add(struct net_device *dev,
3015 				     unsigned int len)
3016 {
3017 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3018 
3019 	u64_stats_update_begin(&dstats->syncp);
3020 	u64_stats_inc(&dstats->tx_packets);
3021 	u64_stats_add(&dstats->tx_bytes, len);
3022 	u64_stats_update_end(&dstats->syncp);
3023 }
3024 
3025 static inline void dev_dstats_tx_dropped(struct net_device *dev)
3026 {
3027 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3028 
3029 	u64_stats_update_begin(&dstats->syncp);
3030 	u64_stats_inc(&dstats->tx_drops);
3031 	u64_stats_update_end(&dstats->syncp);
3032 }
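/*
 * Illustrative sketch: a hypothetical virtual device whose datapath is
 * accounted with pcpu_dstats. skb->len is read before the skb is handed
 * off, since it may be consumed. This assumes dev->dstats was allocated
 * (e.g. with the per-CPU stats helpers below); "my_forward" is made up.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		unsigned int len = skb->len;
 *
 *		if (my_forward(skb) == 0)
 *			dev_dstats_tx_add(dev, len);
 *		else
 *			dev_dstats_tx_dropped(dev);
 *		return NETDEV_TX_OK;
 *	}
 */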
3033 
3034 #define __netdev_alloc_pcpu_stats(type, gfp)				\
3035 ({									\
3036 	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
3037 	if (pcpu_stats)	{						\
3038 		int __cpu;						\
3039 		for_each_possible_cpu(__cpu) {				\
3040 			typeof(type) *stat;				\
3041 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
3042 			u64_stats_init(&stat->syncp);			\
3043 		}							\
3044 	}								\
3045 	pcpu_stats;							\
3046 })
3047 
3048 #define netdev_alloc_pcpu_stats(type)					\
3049 	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
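/*
 * Illustrative sketch: a typical allocation in a hypothetical driver's
 * init path, with every syncp pre-initialized by the macro above, and
 * the matching release on teardown:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *
 *	free_percpu(dev->tstats);
 */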
3050 
3051 #define devm_netdev_alloc_pcpu_stats(dev, type)				\
3052 ({									\
3053 	typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
3054 	if (pcpu_stats) {						\
3055 		int __cpu;						\
3056 		for_each_possible_cpu(__cpu) {				\
3057 			typeof(type) *stat;				\
3058 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
3059 			u64_stats_init(&stat->syncp);			\
3060 		}							\
3061 	}								\
3062 	pcpu_stats;							\
3063 })
3064 
3065 enum netdev_lag_tx_type {
3066 	NETDEV_LAG_TX_TYPE_UNKNOWN,
3067 	NETDEV_LAG_TX_TYPE_RANDOM,
3068 	NETDEV_LAG_TX_TYPE_BROADCAST,
3069 	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
3070 	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
3071 	NETDEV_LAG_TX_TYPE_HASH,
3072 };
3073 
3074 enum netdev_lag_hash {
3075 	NETDEV_LAG_HASH_NONE,
3076 	NETDEV_LAG_HASH_L2,
3077 	NETDEV_LAG_HASH_L34,
3078 	NETDEV_LAG_HASH_L23,
3079 	NETDEV_LAG_HASH_E23,
3080 	NETDEV_LAG_HASH_E34,
3081 	NETDEV_LAG_HASH_VLAN_SRCMAC,
3082 	NETDEV_LAG_HASH_UNKNOWN,
3083 };
3084 
3085 struct netdev_lag_upper_info {
3086 	enum netdev_lag_tx_type tx_type;
3087 	enum netdev_lag_hash hash_type;
3088 };
3089 
3090 struct netdev_lag_lower_state_info {
3091 	u8 link_up : 1,
3092 	   tx_enabled : 1;
3093 };
3094 
3095 #include <linux/notifier.h>
3096 
3097 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
3098  * and the rtnetlink notification exclusion list in rtnetlink_event() when
3099  * adding new types.
3100  */
3101 enum netdev_cmd {
3102 	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
3103 	NETDEV_DOWN,
3104 	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
3105 				   detected a hardware crash and restarted
3106 				   - we can use this e.g. to kick TCP sessions
3107 				   once done */
3108 	NETDEV_CHANGE,		/* Notify device state change */
3109 	NETDEV_REGISTER,
3110 	NETDEV_UNREGISTER,
3111 	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
3112 	NETDEV_CHANGEADDR,	/* notify after the address change */
3113 	NETDEV_PRE_CHANGEADDR,	/* notify before the address change */
3114 	NETDEV_GOING_DOWN,
3115 	NETDEV_CHANGENAME,
3116 	NETDEV_FEAT_CHANGE,
3117 	NETDEV_BONDING_FAILOVER,
3118 	NETDEV_PRE_UP,
3119 	NETDEV_PRE_TYPE_CHANGE,
3120 	NETDEV_POST_TYPE_CHANGE,
3121 	NETDEV_POST_INIT,
3122 	NETDEV_PRE_UNINIT,
3123 	NETDEV_RELEASE,
3124 	NETDEV_NOTIFY_PEERS,
3125 	NETDEV_JOIN,
3126 	NETDEV_CHANGEUPPER,
3127 	NETDEV_RESEND_IGMP,
3128 	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
3129 	NETDEV_CHANGEINFODATA,
3130 	NETDEV_BONDING_INFO,
3131 	NETDEV_PRECHANGEUPPER,
3132 	NETDEV_CHANGELOWERSTATE,
3133 	NETDEV_UDP_TUNNEL_PUSH_INFO,
3134 	NETDEV_UDP_TUNNEL_DROP_INFO,
3135 	NETDEV_CHANGE_TX_QUEUE_LEN,
3136 	NETDEV_CVLAN_FILTER_PUSH_INFO,
3137 	NETDEV_CVLAN_FILTER_DROP_INFO,
3138 	NETDEV_SVLAN_FILTER_PUSH_INFO,
3139 	NETDEV_SVLAN_FILTER_DROP_INFO,
3140 	NETDEV_OFFLOAD_XSTATS_ENABLE,
3141 	NETDEV_OFFLOAD_XSTATS_DISABLE,
3142 	NETDEV_OFFLOAD_XSTATS_REPORT_USED,
3143 	NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
3144 	NETDEV_XDP_FEAT_CHANGE,
3145 };
3146 const char *netdev_cmd_to_name(enum netdev_cmd cmd);
3147 
3148 int register_netdevice_notifier(struct notifier_block *nb);
3149 int unregister_netdevice_notifier(struct notifier_block *nb);
3150 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
3151 int unregister_netdevice_notifier_net(struct net *net,
3152 				      struct notifier_block *nb);
3153 int register_netdevice_notifier_dev_net(struct net_device *dev,
3154 					struct notifier_block *nb,
3155 					struct netdev_net_notifier *nn);
3156 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
3157 					  struct notifier_block *nb,
3158 					  struct netdev_net_notifier *nn);
3159 
3160 struct netdev_notifier_info {
3161 	struct net_device	*dev;
3162 	struct netlink_ext_ack	*extack;
3163 };
3164 
3165 struct netdev_notifier_info_ext {
3166 	struct netdev_notifier_info info; /* must be first */
3167 	union {
3168 		u32 mtu;
3169 	} ext;
3170 };
3171 
3172 struct netdev_notifier_change_info {
3173 	struct netdev_notifier_info info; /* must be first */
3174 	unsigned int flags_changed;
3175 };
3176 
3177 struct netdev_notifier_changeupper_info {
3178 	struct netdev_notifier_info info; /* must be first */
3179 	struct net_device *upper_dev; /* new upper dev */
3180 	bool master; /* is upper dev master */
3181 	bool linking; /* is the notification for link or unlink */
3182 	void *upper_info; /* upper dev info */
3183 };
3184 
3185 struct netdev_notifier_changelowerstate_info {
3186 	struct netdev_notifier_info info; /* must be first */
3187 	void *lower_state_info; /* lower dev state */
3188 };
3189 
3190 struct netdev_notifier_pre_changeaddr_info {
3191 	struct netdev_notifier_info info; /* must be first */
3192 	const unsigned char *dev_addr;
3193 };
3194 
3195 enum netdev_offload_xstats_type {
3196 	NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
3197 };
3198 
3199 struct netdev_notifier_offload_xstats_info {
3200 	struct netdev_notifier_info info; /* must be first */
3201 	enum netdev_offload_xstats_type type;
3202 
3203 	union {
3204 		/* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
3205 		struct netdev_notifier_offload_xstats_rd *report_delta;
3206 		/* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
3207 		struct netdev_notifier_offload_xstats_ru *report_used;
3208 	};
3209 };
3210 
3211 int netdev_offload_xstats_enable(struct net_device *dev,
3212 				 enum netdev_offload_xstats_type type,
3213 				 struct netlink_ext_ack *extack);
3214 int netdev_offload_xstats_disable(struct net_device *dev,
3215 				  enum netdev_offload_xstats_type type);
3216 bool netdev_offload_xstats_enabled(const struct net_device *dev,
3217 				   enum netdev_offload_xstats_type type);
3218 int netdev_offload_xstats_get(struct net_device *dev,
3219 			      enum netdev_offload_xstats_type type,
3220 			      struct rtnl_hw_stats64 *stats, bool *used,
3221 			      struct netlink_ext_ack *extack);
3222 void
3223 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
3224 				   const struct rtnl_hw_stats64 *stats);
3225 void
3226 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
3227 void netdev_offload_xstats_push_delta(struct net_device *dev,
3228 				      enum netdev_offload_xstats_type type,
3229 				      const struct rtnl_hw_stats64 *stats);
3230 
3231 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
3232 					     struct net_device *dev)
3233 {
3234 	info->dev = dev;
3235 	info->extack = NULL;
3236 }
3237 
3238 static inline struct net_device *
3239 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
3240 {
3241 	return info->dev;
3242 }
3243 
3244 static inline struct netlink_ext_ack *
3245 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
3246 {
3247 	return info->extack;
3248 }
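/*
 * Illustrative sketch: a hypothetical subsystem watching device events.
 * The callback recovers the net_device from the opaque pointer with
 * netdev_notifier_info_to_dev(); "my_enable"/"my_quiesce" are made up.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			my_enable(dev);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			my_quiesce(dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
 *	err = register_netdevice_notifier(&my_nb);
 */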
3249 
3250 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
3251 int call_netdevice_notifiers_info(unsigned long val,
3252 				  struct netdev_notifier_info *info);
3253 
3254 #define for_each_netdev(net, d)		\
3255 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
3256 #define for_each_netdev_reverse(net, d)	\
3257 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
3258 #define for_each_netdev_rcu(net, d)		\
3259 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
3260 #define for_each_netdev_safe(net, d, n)	\
3261 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
3262 #define for_each_netdev_continue(net, d)		\
3263 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
3264 #define for_each_netdev_continue_reverse(net, d)		\
3265 		list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
3266 						     dev_list)
3267 #define for_each_netdev_continue_rcu(net, d)		\
3268 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
3269 #define for_each_netdev_in_bond_rcu(bond, slave)	\
3270 		for_each_netdev_rcu(&init_net, slave)	\
3271 			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
3272 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
3273 
3274 #define for_each_netdev_dump(net, d, ifindex)				\
3275 	for (; (d = xa_find(&(net)->dev_by_index, &ifindex,		\
3276 			    ULONG_MAX, XA_PRESENT)); ifindex++)
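/*
 * Illustrative sketch of the iterators' locking rules: the plain flavour
 * must run with RTNL held, the _rcu flavour under rcu_read_lock() and
 * without sleeping ("my_inspect" is a made-up, non-sleeping helper):
 *
 *	rtnl_lock();
 *	for_each_netdev(net, dev)
 *		pr_info("%s\n", dev->name);
 *	rtnl_unlock();
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		my_inspect(dev);
 *	rcu_read_unlock();
 */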
3277 
3278 static inline struct net_device *next_net_device(struct net_device *dev)
3279 {
3280 	struct list_head *lh;
3281 	struct net *net;
3282 
3283 	net = dev_net(dev);
3284 	lh = dev->dev_list.next;
3285 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3286 }
3287 
3288 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
3289 {
3290 	struct list_head *lh;
3291 	struct net *net;
3292 
3293 	net = dev_net(dev);
3294 	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
3295 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3296 }
3297 
3298 static inline struct net_device *first_net_device(struct net *net)
3299 {
3300 	return list_empty(&net->dev_base_head) ? NULL :
3301 		net_device_entry(net->dev_base_head.next);
3302 }
3303 
3304 static inline struct net_device *first_net_device_rcu(struct net *net)
3305 {
3306 	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
3307 
3308 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3309 }
3310 
3311 int netdev_boot_setup_check(struct net_device *dev);
3312 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
3313 				   const char *hwaddr);
3314 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
3315 				       const char *hwaddr);
3316 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
3317 void dev_add_pack(struct packet_type *pt);
3318 void dev_remove_pack(struct packet_type *pt);
3319 void __dev_remove_pack(struct packet_type *pt);
3320 void dev_add_offload(struct packet_offload *po);
3321 void dev_remove_offload(struct packet_offload *po);
3322 
3323 int dev_get_iflink(const struct net_device *dev);
3324 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
3325 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3326 			  struct net_device_path_stack *stack);
3327 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
3328 				      unsigned short mask);
3329 struct net_device *dev_get_by_name(struct net *net, const char *name);
3330 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
3331 struct net_device *__dev_get_by_name(struct net *net, const char *name);
3332 bool netdev_name_in_use(struct net *net, const char *name);
3333 int dev_alloc_name(struct net_device *dev, const char *name);
3334 int netif_open(struct net_device *dev, struct netlink_ext_ack *extack);
3335 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
3336 void netif_close(struct net_device *dev);
3337 void dev_close(struct net_device *dev);
3338 void dev_close_many(struct list_head *head, bool unlink);
3339 void netif_disable_lro(struct net_device *dev);
3340 void dev_disable_lro(struct net_device *dev);
3341 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
3342 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3343 		     struct net_device *sb_dev);
3344 
3345 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
3346 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
3347 
3348 static inline int dev_queue_xmit(struct sk_buff *skb)
3349 {
3350 	return __dev_queue_xmit(skb, NULL);
3351 }
3352 
3353 static inline int dev_queue_xmit_accel(struct sk_buff *skb,
3354 				       struct net_device *sb_dev)
3355 {
3356 	return __dev_queue_xmit(skb, sb_dev);
3357 }
3358 
3359 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3360 {
3361 	int ret;
3362 
3363 	ret = __dev_direct_xmit(skb, queue_id);
3364 	if (!dev_xmit_complete(ret))
3365 		kfree_skb(skb);
3366 	return ret;
3367 }
3368 
3369 int register_netdevice(struct net_device *dev);
3370 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
3371 void unregister_netdevice_many(struct list_head *head);
3372 static inline void unregister_netdevice(struct net_device *dev)
3373 {
3374 	unregister_netdevice_queue(dev, NULL);
3375 }
3376 
3377 int netdev_refcnt_read(const struct net_device *dev);
3378 void free_netdev(struct net_device *dev);
3379 
3380 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
3381 					 struct sk_buff *skb,
3382 					 bool all_slaves);
3383 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
3384 					    struct sock *sk);
3385 struct net_device *dev_get_by_index(struct net *net, int ifindex);
3386 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
3387 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
3388 				       netdevice_tracker *tracker, gfp_t gfp);
3389 struct net_device *netdev_get_by_name(struct net *net, const char *name,
3390 				      netdevice_tracker *tracker, gfp_t gfp);
3391 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
3392 void netdev_copy_name(struct net_device *dev, char *name);
3393 
3394 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3395 				  unsigned short type,
3396 				  const void *daddr, const void *saddr,
3397 				  unsigned int len)
3398 {
3399 	if (!dev->header_ops || !dev->header_ops->create)
3400 		return 0;
3401 
3402 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3403 }
3404 
3405 static inline int dev_parse_header(const struct sk_buff *skb,
3406 				   unsigned char *haddr)
3407 {
3408 	const struct net_device *dev = skb->dev;
3409 
3410 	if (!dev->header_ops || !dev->header_ops->parse)
3411 		return 0;
3412 	return dev->header_ops->parse(skb, haddr);
3413 }
3414 
3415 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3416 {
3417 	const struct net_device *dev = skb->dev;
3418 
3419 	if (!dev->header_ops || !dev->header_ops->parse_protocol)
3420 		return 0;
3421 	return dev->header_ops->parse_protocol(skb);
3422 }
3423 
3424 /* ll_header must have at least hard_header_len allocated */
3425 static inline bool dev_validate_header(const struct net_device *dev,
3426 				       char *ll_header, int len)
3427 {
3428 	if (likely(len >= dev->hard_header_len))
3429 		return true;
3430 	if (len < dev->min_header_len)
3431 		return false;
3432 
3433 	if (capable(CAP_SYS_RAWIO)) {
3434 		memset(ll_header + len, 0, dev->hard_header_len - len);
3435 		return true;
3436 	}
3437 
3438 	if (dev->header_ops && dev->header_ops->validate)
3439 		return dev->header_ops->validate(ll_header, len);
3440 
3441 	return false;
3442 }
3443 
3444 static inline bool dev_has_header(const struct net_device *dev)
3445 {
3446 	return dev->header_ops && dev->header_ops->create;
3447 }
3448 
3449 /*
3450  * Incoming packets are placed on per-CPU queues
3451  */
3452 struct softnet_data {
3453 	struct list_head	poll_list;
3454 	struct sk_buff_head	process_queue;
3455 	local_lock_t		process_queue_bh_lock;
3456 
3457 	/* stats */
3458 	unsigned int		processed;
3459 	unsigned int		time_squeeze;
3460 #ifdef CONFIG_RPS
3461 	struct softnet_data	*rps_ipi_list;
3462 #endif
3463 
3464 	unsigned int		received_rps;
3465 	bool			in_net_rx_action;
3466 	bool			in_napi_threaded_poll;
3467 
3468 #ifdef CONFIG_NET_FLOW_LIMIT
3469 	struct sd_flow_limit __rcu *flow_limit;
3470 #endif
3471 	struct Qdisc		*output_queue;
3472 	struct Qdisc		**output_queue_tailp;
3473 	struct sk_buff		*completion_queue;
3474 #ifdef CONFIG_XFRM_OFFLOAD
3475 	struct sk_buff_head	xfrm_backlog;
3476 #endif
3477 	/* written and read only by owning cpu: */
3478 	struct netdev_xmit xmit;
3479 #ifdef CONFIG_RPS
3480 	/* input_queue_head should be written by cpu owning this struct,
3481 	 * and only read by other cpus. Worth using a cache line.
3482 	 */
3483 	unsigned int		input_queue_head ____cacheline_aligned_in_smp;
3484 
3485 	/* Elements below can be accessed between CPUs for RPS/RFS */
3486 	call_single_data_t	csd ____cacheline_aligned_in_smp;
3487 	struct softnet_data	*rps_ipi_next;
3488 	unsigned int		cpu;
3489 	unsigned int		input_queue_tail;
3490 #endif
3491 	struct sk_buff_head	input_pkt_queue;
3492 	struct napi_struct	backlog;
3493 
3494 	atomic_t		dropped ____cacheline_aligned_in_smp;
3495 
3496 	/* Another possibly contended cache line */
3497 	spinlock_t		defer_lock ____cacheline_aligned_in_smp;
3498 	int			defer_count;
3499 	int			defer_ipi_scheduled;
3500 	struct sk_buff		*defer_list;
3501 	call_single_data_t	defer_csd;
3502 };
3503 
3504 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3505 DECLARE_PER_CPU(struct page_pool *, system_page_pool);
3506 
3507 #ifndef CONFIG_PREEMPT_RT
3508 static inline int dev_recursion_level(void)
3509 {
3510 	return this_cpu_read(softnet_data.xmit.recursion);
3511 }
3512 #else
3513 static inline int dev_recursion_level(void)
3514 {
3515 	return current->net_xmit.recursion;
3516 }
3517 
3518 #endif
3519 
3520 void __netif_schedule(struct Qdisc *q);
3521 void netif_schedule_queue(struct netdev_queue *txq);
3522 
3523 static inline void netif_tx_schedule_all(struct net_device *dev)
3524 {
3525 	unsigned int i;
3526 
3527 	for (i = 0; i < dev->num_tx_queues; i++)
3528 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
3529 }
3530 
3531 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3532 {
3533 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3534 }
3535 
3536 /**
3537  *	netif_start_queue - allow transmit
3538  *	@dev: network device
3539  *
3540  *	Allow upper layers to call the device hard_start_xmit routine.
3541  */
3542 static inline void netif_start_queue(struct net_device *dev)
3543 {
3544 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3545 }
3546 
3547 static inline void netif_tx_start_all_queues(struct net_device *dev)
3548 {
3549 	unsigned int i;
3550 
3551 	for (i = 0; i < dev->num_tx_queues; i++) {
3552 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3553 		netif_tx_start_queue(txq);
3554 	}
3555 }
3556 
3557 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3558 
3559 /**
3560  *	netif_wake_queue - restart transmit
3561  *	@dev: network device
3562  *
3563  *	Allow upper layers to call the device hard_start_xmit routine.
3564  *	Used for flow control when transmit resources are available.
3565  */
3566 static inline void netif_wake_queue(struct net_device *dev)
3567 {
3568 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3569 }
3570 
3571 static inline void netif_tx_wake_all_queues(struct net_device *dev)
3572 {
3573 	unsigned int i;
3574 
3575 	for (i = 0; i < dev->num_tx_queues; i++) {
3576 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3577 		netif_tx_wake_queue(txq);
3578 	}
3579 }
3580 
3581 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3582 {
3583 	/* Paired with READ_ONCE() from dev_watchdog() */
3584 	WRITE_ONCE(dev_queue->trans_start, jiffies);
3585 
3586 	/* This barrier is paired with smp_mb() from dev_watchdog() */
3587 	smp_mb__before_atomic();
3588 
3589 	/* Must be an atomic op see netif_txq_try_stop() */
3590 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3591 }
3592 
3593 /**
3594  *	netif_stop_queue - stop the transmit queue
3595  *	@dev: network device
3596  *
3597  *	Stop upper layers calling the device hard_start_xmit routine.
3598  *	Used for flow control when transmit resources are unavailable.
3599  */
3600 static inline void netif_stop_queue(struct net_device *dev)
3601 {
3602 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3603 }
3604 
3605 void netif_tx_stop_all_queues(struct net_device *dev);
3606 
3607 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3608 {
3609 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3610 }
3611 
3612 /**
3613  *	netif_queue_stopped - test if transmit queue is flowblocked
3614  *	@dev: network device
3615  *
3616  *	Test if transmit queue on device is currently unable to send.
3617  */
3618 static inline bool netif_queue_stopped(const struct net_device *dev)
3619 {
3620 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3621 }
3622 
3623 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3624 {
3625 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3626 }
3627 
3628 static inline bool
3629 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3630 {
3631 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3632 }
3633 
3634 static inline bool
3635 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3636 {
3637 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3638 }
3639 
3640 /**
3641  *	netdev_queue_set_dql_min_limit - set dql minimum limit
3642  *	@dev_queue: pointer to transmit queue
3643  *	@min_limit: dql minimum limit
3644  *
3645  * Forces xmit_more() to return true until the minimum threshold
3646  * defined by @min_limit is reached (or until the tx queue is
3647  * empty). Warning: use with care, as misuse will increase
3648  * latency.
3649  */
3650 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
3651 						  unsigned int min_limit)
3652 {
3653 #ifdef CONFIG_BQL
3654 	dev_queue->dql.min_limit = min_limit;
3655 #endif
3656 }
3657 
3658 static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
3659 {
3660 #ifdef CONFIG_BQL
3661 	/* Drivers not migrated to BQL will also return 0. */
3662 	return dql_avail(&txq->dql);
3663 #else
3664 	return 0;
3665 #endif
3666 }
3667 
3668 /**
3669  *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3670  *	@dev_queue: pointer to transmit queue
3671  *
3672  * BQL-enabled drivers might use this helper in their ndo_start_xmit(),
3673  * to give an appropriate hint to the CPU.
3674  */
3675 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3676 {
3677 #ifdef CONFIG_BQL
3678 	prefetchw(&dev_queue->dql.num_queued);
3679 #endif
3680 }
3681 
3682 /**
3683  *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3684  *	@dev_queue: pointer to transmit queue
3685  *
3686  * BQL-enabled drivers might use this helper in their TX completion path,
3687  * to give an appropriate hint to the CPU.
3688  */
3689 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3690 {
3691 #ifdef CONFIG_BQL
3692 	prefetchw(&dev_queue->dql.limit);
3693 #endif
3694 }
3695 
3696 /**
3697  *	netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3698  *	@dev_queue: network device queue
3699  *	@bytes: number of bytes queued to the device queue
3700  *
3701  *	Report the number of bytes queued for sending/completion to the network
3702  *	device hardware queue. @bytes should closely approximate the frame size
3703  *	and must exactly match the @bytes later passed to netdev_tx_completed_queue().
3704  *	This is typically called once per packet, from ndo_start_xmit().
3705  */
3706 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3707 					unsigned int bytes)
3708 {
3709 #ifdef CONFIG_BQL
3710 	dql_queued(&dev_queue->dql, bytes);
3711 
3712 	if (likely(dql_avail(&dev_queue->dql) >= 0))
3713 		return;
3714 
3715 	/* Paired with READ_ONCE() from dev_watchdog() */
3716 	WRITE_ONCE(dev_queue->trans_start, jiffies);
3717 
3718 	/* This barrier is paired with smp_mb() from dev_watchdog() */
3719 	smp_mb__before_atomic();
3720 
3721 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3722 
3723 	/*
3724 	 * The XOFF flag must be set before checking the dql_avail below,
3725 	 * because in netdev_tx_completed_queue we update the dql_completed
3726 	 * before checking the XOFF flag.
3727 	 */
3728 	smp_mb__after_atomic();
3729 
3730 	/* check again in case another CPU has just made room avail */
3731 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3732 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3733 #endif
3734 }
3735 
3736 /* Variant of netdev_tx_sent_queue() for drivers that are aware
3737  * that they should not test BQL status themselves.
3738  * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3739  * skb of a batch.
3740  * Returns true if the doorbell must be used to kick the NIC.
3741  */
3742 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3743 					  unsigned int bytes,
3744 					  bool xmit_more)
3745 {
3746 	if (xmit_more) {
3747 #ifdef CONFIG_BQL
3748 		dql_queued(&dev_queue->dql, bytes);
3749 #endif
3750 		return netif_tx_queue_stopped(dev_queue);
3751 	}
3752 	netdev_tx_sent_queue(dev_queue, bytes);
3753 	return true;
3754 }
3755 
3756 /**
3757  *	netdev_sent_queue - report the number of bytes queued to hardware
3758  *	@dev: network device
3759  *	@bytes: number of bytes queued to the hardware device queue
3760  *
3761  *	Report the number of bytes queued for sending/completion to the network
3762  *	device hardware queue #0. @bytes should closely approximate the frame size
3763  *	and must exactly match the @bytes later passed to netdev_completed_queue().
3764  *	This is typically called once per packet, from ndo_start_xmit().
3765  */
3766 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3767 {
3768 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3769 }
3770 
3771 static inline bool __netdev_sent_queue(struct net_device *dev,
3772 				       unsigned int bytes,
3773 				       bool xmit_more)
3774 {
3775 	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3776 				      xmit_more);
3777 }
3778 
3779 /**
3780  *	netdev_tx_completed_queue - report number of packets/bytes at TX completion.
3781  *	@dev_queue: network device queue
3782  *	@pkts: number of packets (currently ignored)
3783  *	@bytes: number of bytes dequeued from the device queue
3784  *
3785  *	Must be called at most once per TX completion round (and not per
3786  *	individual packet), so that BQL can adjust its limits appropriately.
3787  */
3788 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3789 					     unsigned int pkts, unsigned int bytes)
3790 {
3791 #ifdef CONFIG_BQL
3792 	if (unlikely(!bytes))
3793 		return;
3794 
3795 	dql_completed(&dev_queue->dql, bytes);
3796 
3797 	/*
3798 	 * Without the memory barrier there is a small possibility that
3799 	 * netdev_tx_sent_queue will miss the update and cause the queue to
3800 	 * be stopped forever
3801 	 */
3802 	smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
3803 
3804 	if (unlikely(dql_avail(&dev_queue->dql) < 0))
3805 		return;
3806 
3807 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3808 		netif_schedule_queue(dev_queue);
3809 #endif
3810 }
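/*
 * Illustrative sketch of BQL pairing in a hypothetical driver: every byte
 * reported at transmit time must later be reported back at completion
 * time on the same queue, or the BQL limit never adapts:
 *
 *	In ndo_start_xmit(), after posting the descriptor:
 *		netdev_tx_sent_queue(txq, skb->len);
 *
 *	In the TX completion handler, once per completion round:
 *		netdev_tx_completed_queue(txq, pkts, bytes);
 *
 *	On a ring reset, with the queue already stopped:
 *		netdev_tx_reset_queue(txq);
 */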
3811 
3812 /**
3813  * 	netdev_completed_queue - report bytes and packets completed by device
3814  * 	@dev: network device
3815  * 	@pkts: actual number of packets sent over the medium
3816  * 	@bytes: actual number of bytes sent over the medium
3817  *
3818  * 	Report the number of bytes and packets transmitted by the network device
3819  * 	hardware queue over the physical medium; @bytes must exactly match the
3820  * 	@bytes amount passed to netdev_sent_queue().
3821  */
3822 static inline void netdev_completed_queue(struct net_device *dev,
3823 					  unsigned int pkts, unsigned int bytes)
3824 {
3825 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3826 }
3827 
3828 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3829 {
3830 #ifdef CONFIG_BQL
3831 	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3832 	dql_reset(&q->dql);
3833 #endif
3834 }
3835 
3836 /**
3837  * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
3838  * @dev: network device
3839  * @qid: stack index of the queue to reset
3840  */
3841 static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
3842 					    u32 qid)
3843 {
3844 	netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
3845 }
3846 
3847 /**
3848  * 	netdev_reset_queue - reset the packets and bytes count of a network device
3849  * 	@dev_queue: network device
3850  *
3851  * 	Reset the bytes and packet count of a network device and clear the
3852  * 	software flow control OFF bit for this network device
3853  */
3854 static inline void netdev_reset_queue(struct net_device *dev_queue)
3855 {
3856 	netdev_tx_reset_subqueue(dev_queue, 0);
3857 }
3858 
3859 /**
3860  * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
3861  * 	@dev: network device
3862  * 	@queue_index: given tx queue index
3863  *
3864  * 	Returns 0 if given tx queue index >= number of device tx queues,
3865  * 	otherwise returns the originally passed tx queue index.
3866  */
3867 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3868 {
3869 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3870 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3871 				     dev->name, queue_index,
3872 				     dev->real_num_tx_queues);
3873 		return 0;
3874 	}
3875 
3876 	return queue_index;
3877 }
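/*
 * Illustrative sketch: a hypothetical ndo_select_queue() implementation
 * clamping a stored mapping with netdev_cap_txqueue(), so that a stale
 * value can never index past real_num_tx_queues ("my_lookup_queue" is
 * made up):
 *
 *	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
 *				   struct net_device *sb_dev)
 *	{
 *		return netdev_cap_txqueue(dev, my_lookup_queue(skb));
 *	}
 */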
3878 
3879 /**
3880  *	netif_running - test if up
3881  *	@dev: network device
3882  *
3883  *	Test if the device has been brought up.
3884  */
3885 static inline bool netif_running(const struct net_device *dev)
3886 {
3887 	return test_bit(__LINK_STATE_START, &dev->state);
3888 }
3889 
3890 /*
3891  * Routines to manage the subqueues on a device.  We only need start,
3892  * stop, and a check if it's stopped.  All other device management is
3893  * done at the overall netdevice level.
3894  * There is also a test for whether the device is multiqueue.
3895  */
3896 
3897 /**
3898  *	netif_start_subqueue - allow sending packets on subqueue
3899  *	@dev: network device
3900  *	@queue_index: sub queue index
3901  *
3902  * Start individual transmit queue of a device with multiple transmit queues.
3903  */
3904 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3905 {
3906 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3907 
3908 	netif_tx_start_queue(txq);
3909 }
3910 
3911 /**
3912  *	netif_stop_subqueue - stop sending packets on subqueue
3913  *	@dev: network device
3914  *	@queue_index: sub queue index
3915  *
3916  * Stop individual transmit queue of a device with multiple transmit queues.
3917  */
3918 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3919 {
3920 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3921 	netif_tx_stop_queue(txq);
3922 }
3923 
3924 /**
3925  *	__netif_subqueue_stopped - test status of subqueue
3926  *	@dev: network device
3927  *	@queue_index: sub queue index
3928  *
3929  * Check individual transmit queue of a device with multiple transmit queues.
3930  */
3931 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3932 					    u16 queue_index)
3933 {
3934 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3935 
3936 	return netif_tx_queue_stopped(txq);
3937 }
3938 
3939 /**
3940  *	netif_subqueue_stopped - test status of subqueue
3941  *	@dev: network device
3942  *	@skb: sub queue buffer pointer
3943  *
3944  * Check individual transmit queue of a device with multiple transmit queues.
3945  */
3946 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3947 					  struct sk_buff *skb)
3948 {
3949 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3950 }
3951 
3952 /**
3953  *	netif_wake_subqueue - allow sending packets on subqueue
3954  *	@dev: network device
3955  *	@queue_index: sub queue index
3956  *
3957  * Resume individual transmit queue of a device with multiple transmit queues.
3958  */
3959 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3960 {
3961 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3962 
3963 	netif_tx_wake_queue(txq);
3964 }
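/*
 * Illustrative sketch of per-subqueue flow control in a hypothetical
 * multiqueue driver ("my_ring_has_room" is made up):
 *
 *	In ndo_start_xmit(), when ring "qid" cannot take another frame:
 *		netif_stop_subqueue(dev, qid);
 *
 *	In the TX completion path, after reclaiming descriptors:
 *		if (__netif_subqueue_stopped(dev, qid) && my_ring_has_room(q))
 *			netif_wake_subqueue(dev, qid);
 */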
3965 
3966 #ifdef CONFIG_XPS
3967 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3968 			u16 index);
3969 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3970 			  u16 index, enum xps_map_type type);
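/*
 * Illustrative sketch: a common XPS setup pins each TX queue to the CPU
 * that services its interrupt. The one-queue-per-CPU layout below is an
 * assumption of the example, not a requirement of the API:
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i), i);
 */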
3971 
3972 /**
3973  *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3974  *	@j: CPU/Rx queue index
3975  *	@mask: bitmask of all cpus/rx queues
3976  *	@nr_bits: number of bits in the bitmask
3977  *
3978  * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3979  */
3980 static inline bool netif_attr_test_mask(unsigned long j,
3981 					const unsigned long *mask,
3982 					unsigned int nr_bits)
3983 {
3984 	cpu_max_bits_warn(j, nr_bits);
3985 	return test_bit(j, mask);
3986 }
3987 
3988 /**
3989  *	netif_attr_test_online - Test for online CPU/Rx queue
3990  *	@j: CPU/Rx queue index
3991  *	@online_mask: bitmask for CPUs/Rx queues that are online
3992  *	@nr_bits: number of bits in the bitmask
3993  *
3994  * Returns: true if a CPU/Rx queue is online.
3995  */
3996 static inline bool netif_attr_test_online(unsigned long j,
3997 					  const unsigned long *online_mask,
3998 					  unsigned int nr_bits)
3999 {
4000 	cpu_max_bits_warn(j, nr_bits);
4001 
4002 	if (online_mask)
4003 		return test_bit(j, online_mask);
4004 
4005 	return (j < nr_bits);
4006 }
4007 
4008 /**
4009  *	netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
4010  *	@n: CPU/Rx queue index
4011  *	@srcp: the cpumask/Rx queue mask pointer
4012  *	@nr_bits: number of bits in the bitmask
4013  *
4014  * Returns: next (after n) CPU/Rx queue index in the mask;
4015  * >= nr_bits if no further CPUs/Rx queues set.
4016  */
4017 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
4018 					       unsigned int nr_bits)
4019 {
4020 	/* -1 is a legal arg here. */
4021 	if (n != -1)
4022 		cpu_max_bits_warn(n, nr_bits);
4023 
4024 	if (srcp)
4025 		return find_next_bit(srcp, nr_bits, n + 1);
4026 
4027 	return n + 1;
4028 }
4029 
4030 /**
4031  *	netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
4032  *	@n: CPU/Rx queue index
4033  *	@src1p: the first CPUs/Rx queues mask pointer
4034  *	@src2p: the second CPUs/Rx queues mask pointer
4035  *	@nr_bits: number of bits in the bitmask
4036  *
4037  * Returns: next (after n) CPU/Rx queue index set in both masks;
4038  * >= nr_bits if no further CPUs/Rx queues set in both.
4039  */
4040 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
4041 					  const unsigned long *src2p,
4042 					  unsigned int nr_bits)
4043 {
4044 	/* -1 is a legal arg here. */
4045 	if (n != -1)
4046 		cpu_max_bits_warn(n, nr_bits);
4047 
4048 	if (src1p && src2p)
4049 		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
4050 	else if (src1p)
4051 		return find_next_bit(src1p, nr_bits, n + 1);
4052 	else if (src2p)
4053 		return find_next_bit(src2p, nr_bits, n + 1);
4054 
4055 	return n + 1;
4056 }
4057 #else
4058 static inline int netif_set_xps_queue(struct net_device *dev,
4059 				      const struct cpumask *mask,
4060 				      u16 index)
4061 {
4062 	return 0;
4063 }
4064 
4065 static inline int __netif_set_xps_queue(struct net_device *dev,
4066 					const unsigned long *mask,
4067 					u16 index, enum xps_map_type type)
4068 {
4069 	return 0;
4070 }
4071 #endif
4072 
4073 /**
4074  *	netif_is_multiqueue - test if device has multiple transmit queues
4075  *	@dev: network device
4076  *
4077  * Check if device has multiple transmit queues
4078  */
4079 static inline bool netif_is_multiqueue(const struct net_device *dev)
4080 {
4081 	return dev->num_tx_queues > 1;
4082 }
4083 
4084 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
4085 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
4086 int netif_set_real_num_queues(struct net_device *dev,
4087 			      unsigned int txq, unsigned int rxq);
4088 
4089 int netif_get_num_default_rss_queues(void);
4090 
4091 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
4092 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
4093 
4094 /*
4095  * It is not allowed to call kfree_skb() or consume_skb() from hardware
4096  * interrupt context or with hardware interrupts being disabled.
4097  * (in_hardirq() || irqs_disabled())
4098  *
4099  * We provide four helpers that can be used in the following contexts:
4100  *
4101  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
4102  *  replacing kfree_skb(skb)
4103  *
4104  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
4105  *  Typically used in place of consume_skb(skb) in TX completion path
4106  *
4107  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
4108  *  replacing kfree_skb(skb)
4109  *
4110  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
4111  *  and consumed a packet. Used in place of consume_skb(skb)
4112  */
4113 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
4114 {
4115 	dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4116 }
4117 
4118 static inline void dev_consume_skb_irq(struct sk_buff *skb)
4119 {
4120 	dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
4121 }
4122 
4123 static inline void dev_kfree_skb_any(struct sk_buff *skb)
4124 {
4125 	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4126 }
4127 
4128 static inline void dev_consume_skb_any(struct sk_buff *skb)
4129 {
4130 	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
4131 }
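/*
 * Illustrative sketch: a hypothetical driver dropping a frame after a
 * mapping failure in ndo_start_xmit() (context not known for certain, so
 * use the _any variant), and consuming a frame from a TX completion
 * handler known to run in hardirq context ("my_map_frame" is made up):
 *
 *	if (my_map_frame(skb) < 0) {
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	dev_consume_skb_irq(skb);
 */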
4132 
4133 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4134 			     const struct bpf_prog *xdp_prog);
4135 void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
4136 int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
4137 int netif_rx(struct sk_buff *skb);
4138 int __netif_rx(struct sk_buff *skb);
4139 
4140 int netif_receive_skb(struct sk_buff *skb);
4141 int netif_receive_skb_core(struct sk_buff *skb);
4142 void netif_receive_skb_list_internal(struct list_head *head);
4143 void netif_receive_skb_list(struct list_head *head);
4144 gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb);
4145 
4146 static inline gro_result_t napi_gro_receive(struct napi_struct *napi,
4147 					    struct sk_buff *skb)
4148 {
4149 	return gro_receive_skb(&napi->gro, skb);
4150 }
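/*
 * Illustrative sketch of the usual NAPI RX handoff: build the skb inside
 * the poll loop, set its protocol, then let GRO coalesce it (the copy or
 * page-attach step is elided, as it is device specific):
 *
 *	skb = napi_alloc_skb(napi, len);
 *	if (!skb)
 *		goto drop;
 *	...fill in the frame data...
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 */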
4151 
4152 struct sk_buff *napi_get_frags(struct napi_struct *napi);
4153 gro_result_t napi_gro_frags(struct napi_struct *napi);
4154 
4155 static inline void napi_free_frags(struct napi_struct *napi)
4156 {
4157 	kfree_skb(napi->skb);
4158 	napi->skb = NULL;
4159 }
4160 
4161 bool netdev_is_rx_handler_busy(struct net_device *dev);
4162 int netdev_rx_handler_register(struct net_device *dev,
4163 			       rx_handler_func_t *rx_handler,
4164 			       void *rx_handler_data);
4165 void netdev_rx_handler_unregister(struct net_device *dev);
4166 
4167 bool dev_valid_name(const char *name);
4168 static inline bool is_socket_ioctl_cmd(unsigned int cmd)
4169 {
4170 	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
4171 }
4172 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
4173 int put_user_ifreq(struct ifreq *ifr, void __user *arg);
4174 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
4175 		void __user *data, bool *need_copyout);
4176 int dev_ifconf(struct net *net, struct ifconf __user *ifc);
4177 int dev_eth_ioctl(struct net_device *dev,
4178 		  struct ifreq *ifr, unsigned int cmd);
4179 int generic_hwtstamp_get_lower(struct net_device *dev,
4180 			       struct kernel_hwtstamp_config *kernel_cfg);
4181 int generic_hwtstamp_set_lower(struct net_device *dev,
4182 			       struct kernel_hwtstamp_config *kernel_cfg,
4183 			       struct netlink_ext_ack *extack);
4184 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
4185 unsigned int dev_get_flags(const struct net_device *);
4186 int __dev_change_flags(struct net_device *dev, unsigned int flags,
4187 		       struct netlink_ext_ack *extack);
4188 int netif_change_flags(struct net_device *dev, unsigned int flags,
4189 		       struct netlink_ext_ack *extack);
4190 int dev_change_flags(struct net_device *dev, unsigned int flags,
4191 		     struct netlink_ext_ack *extack);
4192 int netif_set_alias(struct net_device *dev, const char *alias, size_t len);
4193 int dev_set_alias(struct net_device *, const char *, size_t);
4194 int dev_get_alias(const struct net_device *, char *, size_t);
4195 int netif_change_net_namespace(struct net_device *dev, struct net *net,
4196 			       const char *pat, int new_ifindex,
4197 			       struct netlink_ext_ack *extack);
4198 int dev_change_net_namespace(struct net_device *dev, struct net *net,
4199 			     const char *pat);
4200 int __dev_set_mtu(struct net_device *, int);
4201 int netif_set_mtu(struct net_device *dev, int new_mtu);
4202 int dev_set_mtu(struct net_device *, int);
4203 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
4204 			      struct netlink_ext_ack *extack);
4205 int netif_set_mac_address(struct net_device *dev, struct sockaddr *sa,
4206 			  struct netlink_ext_ack *extack);
4207 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
4208 			struct netlink_ext_ack *extack);
4209 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
4210 			     struct netlink_ext_ack *extack);
4211 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
4212 int dev_get_port_parent_id(struct net_device *dev,
4213 			   struct netdev_phys_item_id *ppid, bool recurse);
4214 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
4215 
4216 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
4217 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
4218 				    struct netdev_queue *txq, int *ret);
4219 
4220 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
4221 u8 dev_xdp_prog_count(struct net_device *dev);
4222 int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
4223 int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
4224 u8 dev_xdp_sb_prog_count(struct net_device *dev);
4225 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
4226 
4227 u32 dev_get_min_mp_channel_count(const struct net_device *dev);
4228 
4229 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4230 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4231 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
4232 bool is_skb_forwardable(const struct net_device *dev,
4233 			const struct sk_buff *skb);
4234 
4235 static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
4236 						 const struct sk_buff *skb,
4237 						 const bool check_mtu)
4238 {
4239 	const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
4240 	unsigned int len;
4241 
4242 	if (!(dev->flags & IFF_UP))
4243 		return false;
4244 
4245 	if (!check_mtu)
4246 		return true;
4247 
4248 	len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
4249 	if (skb->len <= len)
4250 		return true;
4251 
4252 	/* if TSO is enabled, we don't care about the length as the packet
4253 	 * could be forwarded without having been segmented first
4254 	 */
4255 	if (skb_is_gso(skb))
4256 		return true;
4257 
4258 	return false;
4259 }
4260 
4261 void netdev_core_stats_inc(struct net_device *dev, u32 offset);
4262 
4263 #define DEV_CORE_STATS_INC(FIELD)						\
4264 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)		\
4265 {										\
4266 	netdev_core_stats_inc(dev,						\
4267 			offsetof(struct net_device_core_stats, FIELD));		\
4268 }
4269 DEV_CORE_STATS_INC(rx_dropped)
4270 DEV_CORE_STATS_INC(tx_dropped)
4271 DEV_CORE_STATS_INC(rx_nohandler)
4272 DEV_CORE_STATS_INC(rx_otherhost_dropped)
4273 #undef DEV_CORE_STATS_INC
4274 
4275 static __always_inline int ____dev_forward_skb(struct net_device *dev,
4276 					       struct sk_buff *skb,
4277 					       const bool check_mtu)
4278 {
4279 	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
4280 	    unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
4281 		dev_core_stats_rx_dropped_inc(dev);
4282 		kfree_skb(skb);
4283 		return NET_RX_DROP;
4284 	}
4285 
4286 	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
4287 	skb->priority = 0;
4288 	return 0;
4289 }
4290 
4291 bool dev_nit_active_rcu(const struct net_device *dev);
4292 static inline bool dev_nit_active(const struct net_device *dev)
4293 {
4294 	bool ret;
4295 
4296 	rcu_read_lock();
4297 	ret = dev_nit_active_rcu(dev);
4298 	rcu_read_unlock();
4299 	return ret;
4300 }
4301 
4302 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
4303 
4304 static inline void __dev_put(struct net_device *dev)
4305 {
4306 	if (dev) {
4307 #ifdef CONFIG_PCPU_DEV_REFCNT
4308 		this_cpu_dec(*dev->pcpu_refcnt);
4309 #else
4310 		refcount_dec(&dev->dev_refcnt);
4311 #endif
4312 	}
4313 }
4314 
4315 static inline void __dev_hold(struct net_device *dev)
4316 {
4317 	if (dev) {
4318 #ifdef CONFIG_PCPU_DEV_REFCNT
4319 		this_cpu_inc(*dev->pcpu_refcnt);
4320 #else
4321 		refcount_inc(&dev->dev_refcnt);
4322 #endif
4323 	}
4324 }
4325 
4326 static inline void __netdev_tracker_alloc(struct net_device *dev,
4327 					  netdevice_tracker *tracker,
4328 					  gfp_t gfp)
4329 {
4330 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4331 	ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
4332 #endif
4333 }
4334 
4335 /* netdev_tracker_alloc() can upgrade a prior untracked reference
4336  * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
4337  */
4338 static inline void netdev_tracker_alloc(struct net_device *dev,
4339 					netdevice_tracker *tracker, gfp_t gfp)
4340 {
4341 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4342 	refcount_dec(&dev->refcnt_tracker.no_tracker);
4343 	__netdev_tracker_alloc(dev, tracker, gfp);
4344 #endif
4345 }
4346 
4347 static inline void netdev_tracker_free(struct net_device *dev,
4348 				       netdevice_tracker *tracker)
4349 {
4350 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4351 	ref_tracker_free(&dev->refcnt_tracker, tracker);
4352 #endif
4353 }
4354 
4355 static inline void netdev_hold(struct net_device *dev,
4356 			       netdevice_tracker *tracker, gfp_t gfp)
4357 {
4358 	if (dev) {
4359 		__dev_hold(dev);
4360 		__netdev_tracker_alloc(dev, tracker, gfp);
4361 	}
4362 }
4363 
4364 static inline void netdev_put(struct net_device *dev,
4365 			      netdevice_tracker *tracker)
4366 {
4367 	if (dev) {
4368 		netdev_tracker_free(dev, tracker);
4369 		__dev_put(dev);
4370 	}
4371 }
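/*
 * Illustrative sketch: a hypothetical subsystem keeping a long-lived
 * reference stores a netdevice_tracker next to the pointer, so leaks can
 * be attributed when CONFIG_NET_DEV_REFCNT_TRACKER is enabled:
 *
 *	struct my_binding {
 *		struct net_device	*dev;
 *		netdevice_tracker	tracker;
 *	};
 *
 *	b->dev = netdev_get_by_index(net, ifindex, &b->tracker, GFP_KERNEL);
 *	if (!b->dev)
 *		return -ENODEV;
 *
 *	netdev_put(b->dev, &b->tracker);
 */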
4372 
4373 /**
4374  *	dev_hold - get reference to device
4375  *	@dev: network device
4376  *
4377  * Hold reference to device to keep it from being freed.
4378  * Try using netdev_hold() instead.
4379  */
4380 static inline void dev_hold(struct net_device *dev)
4381 {
4382 	netdev_hold(dev, NULL, GFP_ATOMIC);
4383 }
4384 
4385 /**
4386  *	dev_put - release reference to device
4387  *	@dev: network device
4388  *
4389  * Release reference to device to allow it to be freed.
4390  * Try using netdev_put() instead.
4391  */
4392 static inline void dev_put(struct net_device *dev)
4393 {
4394 	netdev_put(dev, NULL);
4395 }
4396 
4397 DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
4398 
4399 static inline void netdev_ref_replace(struct net_device *odev,
4400 				      struct net_device *ndev,
4401 				      netdevice_tracker *tracker,
4402 				      gfp_t gfp)
4403 {
4404 	if (odev)
4405 		netdev_tracker_free(odev, tracker);
4406 
4407 	__dev_hold(ndev);
4408 	__dev_put(odev);
4409 
4410 	if (ndev)
4411 		__netdev_tracker_alloc(ndev, tracker, gfp);
4412 }
4413 
4414 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
4415  * and _off may be called from IRQ context, but it is the caller
4416  * who is responsible for serializing these calls.
4417  *
4418  * The name carrier is inappropriate, these functions should really be
4419  * called netif_lowerlayer_*() because they represent the state of any
4420  * kind of lower layer not just hardware media.
4421  */
4422 void linkwatch_fire_event(struct net_device *dev);
4423 
4424 /**
4425  * linkwatch_sync_dev - sync linkwatch for the given device
4426  * @dev: network device to sync linkwatch for
4427  *
4428  * Sync linkwatch for the given device, removing it from the
4429  * pending work list (if queued).
4430  */
4431 void linkwatch_sync_dev(struct net_device *dev);
4432 
4433 /**
4434  *	netif_carrier_ok - test if carrier present
4435  *	@dev: network device
4436  *
4437  * Check if carrier is present on device
4438  */
4439 static inline bool netif_carrier_ok(const struct net_device *dev)
4440 {
4441 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4442 }
4443 
4444 unsigned long dev_trans_start(struct net_device *dev);
4445 
4446 void netdev_watchdog_up(struct net_device *dev);
4447 
4448 void netif_carrier_on(struct net_device *dev);
4449 void netif_carrier_off(struct net_device *dev);
4450 void netif_carrier_event(struct net_device *dev);
4451 
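/* Illustrative sketch: a driver's link-change handler propagating the
 * current link state to the stack.  How "link_up" is obtained is an
 * assumption; only the netif_carrier_*() calls come from this header.
 */
static inline void example_link_change(struct net_device *dev, bool link_up)
{
	if (link_up)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
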
4452 /**
4453  *	netif_dormant_on - mark device as dormant.
4454  *	@dev: network device
4455  *
4456  * Mark device as dormant (as per RFC2863).
4457  *
4458  * The dormant state indicates that the relevant interface is not
4459  * actually in a condition to pass packets (i.e., it is not 'up') but is
4460  * in a "pending" state, waiting for some external event.  For "on-
4461  * demand" interfaces, this new state identifies the situation where the
4462  * interface is waiting for events to place it in the up state.
4463  */
4464 static inline void netif_dormant_on(struct net_device *dev)
4465 {
4466 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4467 		linkwatch_fire_event(dev);
4468 }
4469 
4470 /**
4471  *	netif_dormant_off - set device as not dormant.
4472  *	@dev: network device
4473  *
4474  * Device is not in dormant state.
4475  */
4476 static inline void netif_dormant_off(struct net_device *dev)
4477 {
4478 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4479 		linkwatch_fire_event(dev);
4480 }
4481 
4482 /**
4483  *	netif_dormant - test if device is dormant
4484  *	@dev: network device
4485  *
4486  * Check if device is dormant.
4487  */
4488 static inline bool netif_dormant(const struct net_device *dev)
4489 {
4490 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
4491 }
4492 
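/* Illustrative sketch: an "on-demand" style driver marking the interface
 * dormant while it waits for an external event (say, authentication), and
 * clearing the state once traffic can flow.  The helper name is assumed.
 */
static inline void example_auth_done(struct net_device *dev, bool authenticated)
{
	if (authenticated)
		netif_dormant_off(dev);
	else
		netif_dormant_on(dev);
}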
4493 
4494 /**
4495  *	netif_testing_on - mark device as under test.
4496  *	@dev: network device
4497  *
4498  * Mark device as under test (as per RFC2863).
4499  *
4500  * The testing state indicates that some test(s) must be performed on
4501  * the interface. After completion of the test, the interface state
4502  * will change to up, dormant, or down, as appropriate.
4503  */
4504 static inline void netif_testing_on(struct net_device *dev)
4505 {
4506 	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4507 		linkwatch_fire_event(dev);
4508 }
4509 
4510 /**
4511  *	netif_testing_off - set device as not under test.
4512  *	@dev: network device
4513  *
4514  * Device is not in testing state.
4515  */
4516 static inline void netif_testing_off(struct net_device *dev)
4517 {
4518 	if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4519 		linkwatch_fire_event(dev);
4520 }
4521 
4522 /**
4523  *	netif_testing - test if device is under test
4524  *	@dev: network device
4525  *
4526  * Check if device is under test
4527  */
4528 static inline bool netif_testing(const struct net_device *dev)
4529 {
4530 	return test_bit(__LINK_STATE_TESTING, &dev->state);
4531 }
4532 
4533 
4534 /**
4535  *	netif_oper_up - test if device is operational
4536  *	@dev: network device
4537  *
4538  * Check if carrier is operational
4539  */
4540 static inline bool netif_oper_up(const struct net_device *dev)
4541 {
4542 	unsigned int operstate = READ_ONCE(dev->operstate);
4543 
4544 	return	operstate == IF_OPER_UP ||
4545 		operstate == IF_OPER_UNKNOWN /* backward compat */;
4546 }
4547 
4548 /**
4549  *	netif_device_present - is device available or removed
4550  *	@dev: network device
4551  *
4552  * Check if device has not been removed from system.
4553  */
4554 static inline bool netif_device_present(const struct net_device *dev)
4555 {
4556 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
4557 }
4558 
4559 void netif_device_detach(struct net_device *dev);
4560 
4561 void netif_device_attach(struct net_device *dev);
4562 
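/* Illustrative sketch: the common suspend/resume pairing.  The surrounding
 * power-management plumbing is assumed; only netif_device_detach() and
 * netif_device_attach() come from this header.
 */
static inline int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stop queues, mark device absent */
	/* ... power the hardware down ... */
	return 0;
}

static inline int example_resume(struct net_device *dev)
{
	/* ... power the hardware back up ... */
	netif_device_attach(dev);	/* mark present, restart queues */
	return 0;
}
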
4563 /*
4564  * Network interface message level settings
4565  */
4566 
4567 enum {
4568 	NETIF_MSG_DRV_BIT,
4569 	NETIF_MSG_PROBE_BIT,
4570 	NETIF_MSG_LINK_BIT,
4571 	NETIF_MSG_TIMER_BIT,
4572 	NETIF_MSG_IFDOWN_BIT,
4573 	NETIF_MSG_IFUP_BIT,
4574 	NETIF_MSG_RX_ERR_BIT,
4575 	NETIF_MSG_TX_ERR_BIT,
4576 	NETIF_MSG_TX_QUEUED_BIT,
4577 	NETIF_MSG_INTR_BIT,
4578 	NETIF_MSG_TX_DONE_BIT,
4579 	NETIF_MSG_RX_STATUS_BIT,
4580 	NETIF_MSG_PKTDATA_BIT,
4581 	NETIF_MSG_HW_BIT,
4582 	NETIF_MSG_WOL_BIT,
4583 
4584 	/* When you add a new bit above, update netif_msg_class_names array
4585 	 * in net/ethtool/common.c
4586 	 */
4587 	NETIF_MSG_CLASS_COUNT,
4588 };
4589 /* Both ethtool_ops interface and internal driver implementation use u32 */
4590 static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4591 
4592 #define __NETIF_MSG_BIT(bit)	((u32)1 << (bit))
4593 #define __NETIF_MSG(name)	__NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4594 
4595 #define NETIF_MSG_DRV		__NETIF_MSG(DRV)
4596 #define NETIF_MSG_PROBE		__NETIF_MSG(PROBE)
4597 #define NETIF_MSG_LINK		__NETIF_MSG(LINK)
4598 #define NETIF_MSG_TIMER		__NETIF_MSG(TIMER)
4599 #define NETIF_MSG_IFDOWN	__NETIF_MSG(IFDOWN)
4600 #define NETIF_MSG_IFUP		__NETIF_MSG(IFUP)
4601 #define NETIF_MSG_RX_ERR	__NETIF_MSG(RX_ERR)
4602 #define NETIF_MSG_TX_ERR	__NETIF_MSG(TX_ERR)
4603 #define NETIF_MSG_TX_QUEUED	__NETIF_MSG(TX_QUEUED)
4604 #define NETIF_MSG_INTR		__NETIF_MSG(INTR)
4605 #define NETIF_MSG_TX_DONE	__NETIF_MSG(TX_DONE)
4606 #define NETIF_MSG_RX_STATUS	__NETIF_MSG(RX_STATUS)
4607 #define NETIF_MSG_PKTDATA	__NETIF_MSG(PKTDATA)
4608 #define NETIF_MSG_HW		__NETIF_MSG(HW)
4609 #define NETIF_MSG_WOL		__NETIF_MSG(WOL)
4610 
4611 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
4612 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
4613 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
4614 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
4615 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
4616 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
4617 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
4618 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
4619 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4620 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
4621 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
4622 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
4623 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
4624 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
4625 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
4626 
4627 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4628 {
4629 	/* use default */
4630 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4631 		return default_msg_enable_bits;
4632 	if (debug_value == 0)	/* no output */
4633 		return 0;
4634 	/* set low N bits */
4635 	return (1U << debug_value) - 1;
4636 }
4637 
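/* Illustrative sketch: the usual probe-time use of netif_msg_init().
 * "example_debug" would typically be a module parameter; "example_msg_priv"
 * is an assumed driver-private structure with the conventional msg_enable
 * field.
 */
static int example_debug = -1;	/* -1 means "use the driver default" */

struct example_msg_priv {
	u32 msg_enable;
	struct net_device *dev;
};

static inline void example_init_msg(struct example_msg_priv *priv)
{
	priv->msg_enable = netif_msg_init(example_debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	if (netif_msg_link(priv))
		netdev_info(priv->dev, "link messages enabled\n");
}
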
4638 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4639 {
4640 	spin_lock(&txq->_xmit_lock);
4641 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4642 	WRITE_ONCE(txq->xmit_lock_owner, cpu);
4643 }
4644 
4645 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4646 {
4647 	__acquire(&txq->_xmit_lock);
4648 	return true;
4649 }
4650 
4651 static inline void __netif_tx_release(struct netdev_queue *txq)
4652 {
4653 	__release(&txq->_xmit_lock);
4654 }
4655 
4656 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4657 {
4658 	spin_lock_bh(&txq->_xmit_lock);
4659 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4660 	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4661 }
4662 
4663 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4664 {
4665 	bool ok = spin_trylock(&txq->_xmit_lock);
4666 
4667 	if (likely(ok)) {
4668 		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4669 		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4670 	}
4671 	return ok;
4672 }
4673 
4674 static inline void __netif_tx_unlock(struct netdev_queue *txq)
4675 {
4676 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4677 	WRITE_ONCE(txq->xmit_lock_owner, -1);
4678 	spin_unlock(&txq->_xmit_lock);
4679 }
4680 
4681 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4682 {
4683 	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4684 	WRITE_ONCE(txq->xmit_lock_owner, -1);
4685 	spin_unlock_bh(&txq->_xmit_lock);
4686 }
4687 
4688 /*
4689  * txq->trans_start can be read locklessly from dev_watchdog()
4690  */
4691 static inline void txq_trans_update(struct netdev_queue *txq)
4692 {
4693 	if (txq->xmit_lock_owner != -1)
4694 		WRITE_ONCE(txq->trans_start, jiffies);
4695 }
4696 
4697 static inline void txq_trans_cond_update(struct netdev_queue *txq)
4698 {
4699 	unsigned long now = jiffies;
4700 
4701 	if (READ_ONCE(txq->trans_start) != now)
4702 		WRITE_ONCE(txq->trans_start, now);
4703 }
4704 
4705 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4706 static inline void netif_trans_update(struct net_device *dev)
4707 {
4708 	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4709 
4710 	txq_trans_cond_update(txq);
4711 }
4712 
4713 /**
4714  *	netif_tx_lock - grab network device transmit lock
4715  *	@dev: network device
4716  *
4717  * Get network device transmit lock
4718  */
4719 void netif_tx_lock(struct net_device *dev);
4720 
4721 static inline void netif_tx_lock_bh(struct net_device *dev)
4722 {
4723 	local_bh_disable();
4724 	netif_tx_lock(dev);
4725 }
4726 
4727 void netif_tx_unlock(struct net_device *dev);
4728 
4729 static inline void netif_tx_unlock_bh(struct net_device *dev)
4730 {
4731 	netif_tx_unlock(dev);
4732 	local_bh_enable();
4733 }
4734 
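/* Illustrative sketch: serializing a reconfiguration against the transmit
 * path from process context.  What the critical section protects is an
 * assumption.
 */
static inline void example_reconfigure(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	/* ... update state that ndo_start_xmit() also reads ... */
	netif_tx_unlock_bh(dev);
}
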
4735 #define HARD_TX_LOCK(dev, txq, cpu) {			\
4736 	if (!(dev)->lltx) {				\
4737 		__netif_tx_lock(txq, cpu);		\
4738 	} else {					\
4739 		__netif_tx_acquire(txq);		\
4740 	}						\
4741 }
4742 
4743 #define HARD_TX_TRYLOCK(dev, txq)			\
4744 	(!(dev)->lltx ?					\
4745 		__netif_tx_trylock(txq) :		\
4746 		__netif_tx_acquire(txq))
4747 
4748 #define HARD_TX_UNLOCK(dev, txq) {			\
4749 	if (!(dev)->lltx) {				\
4750 		__netif_tx_unlock(txq);			\
4751 	} else {					\
4752 		__netif_tx_release(txq);		\
4753 	}						\
4754 }
4755 
4756 static inline void netif_tx_disable(struct net_device *dev)
4757 {
4758 	unsigned int i;
4759 	int cpu;
4760 
4761 	local_bh_disable();
4762 	cpu = smp_processor_id();
4763 	spin_lock(&dev->tx_global_lock);
4764 	for (i = 0; i < dev->num_tx_queues; i++) {
4765 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4766 
4767 		__netif_tx_lock(txq, cpu);
4768 		netif_tx_stop_queue(txq);
4769 		__netif_tx_unlock(txq);
4770 	}
4771 	spin_unlock(&dev->tx_global_lock);
4772 	local_bh_enable();
4773 }
4774 
4775 static inline void netif_addr_lock(struct net_device *dev)
4776 {
4777 	unsigned char nest_level = 0;
4778 
4779 #ifdef CONFIG_LOCKDEP
4780 	nest_level = dev->nested_level;
4781 #endif
4782 	spin_lock_nested(&dev->addr_list_lock, nest_level);
4783 }
4784 
4785 static inline void netif_addr_lock_bh(struct net_device *dev)
4786 {
4787 	unsigned char nest_level = 0;
4788 
4789 #ifdef CONFIG_LOCKDEP
4790 	nest_level = dev->nested_level;
4791 #endif
4792 	local_bh_disable();
4793 	spin_lock_nested(&dev->addr_list_lock, nest_level);
4794 }
4795 
4796 static inline void netif_addr_unlock(struct net_device *dev)
4797 {
4798 	spin_unlock(&dev->addr_list_lock);
4799 }
4800 
4801 static inline void netif_addr_unlock_bh(struct net_device *dev)
4802 {
4803 	spin_unlock_bh(&dev->addr_list_lock);
4804 }
4805 
4806 /*
4807  * dev_addrs walker. Should be used only for read access. Call with
4808  * rcu_read_lock() held.
4809  */
4810 #define for_each_dev_addr(dev, ha) \
4811 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4812 
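/* Illustrative sketch: a read-only walk of the device address list under
 * the RCU read lock, as required by the comment above.  The printout is
 * for illustration only.
 */
static inline void example_dump_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	rcu_read_lock();
	for_each_dev_addr(dev, ha)
		netdev_info(dev, "addr %pM type %u\n", ha->addr, ha->type);
	rcu_read_unlock();
}
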
4813 /* These functions live elsewhere (drivers/net/net_init.c) but are related. */
4814 
4815 void ether_setup(struct net_device *dev);
4816 
4817 /* Allocate dummy net_device */
4818 struct net_device *alloc_netdev_dummy(int sizeof_priv);
4819 
4820 /* Support for loadable net-drivers */
4821 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4822 				    unsigned char name_assign_type,
4823 				    void (*setup)(struct net_device *),
4824 				    unsigned int txqs, unsigned int rxqs);
4825 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4826 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4827 
4828 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4829 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4830 			 count)
4831 
4832 int register_netdev(struct net_device *dev);
4833 void unregister_netdev(struct net_device *dev);
4834 
4835 int devm_register_netdev(struct device *dev, struct net_device *ndev);
4836 
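/* Illustrative sketch: the classic allocate/register/teardown sequence for
 * a software Ethernet-like device.  ether_setup() is declared above;
 * free_netdev() is declared elsewhere in this header.  The name template
 * and zero-sized private area are assumptions.
 */
static inline struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "exm%d", NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;	/* tear down with unregister_netdev() + free_netdev() */
}
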
4837 /* General hardware address lists handling functions */
4838 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4839 		   struct netdev_hw_addr_list *from_list, int addr_len);
4840 int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
4841 			    struct netdev_hw_addr_list *from_list,
4842 			    int addr_len);
4843 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4844 		      struct netdev_hw_addr_list *from_list, int addr_len);
4845 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4846 		       struct net_device *dev,
4847 		       int (*sync)(struct net_device *, const unsigned char *),
4848 		       int (*unsync)(struct net_device *,
4849 				     const unsigned char *));
4850 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4851 			   struct net_device *dev,
4852 			   int (*sync)(struct net_device *,
4853 				       const unsigned char *, int),
4854 			   int (*unsync)(struct net_device *,
4855 					 const unsigned char *, int));
4856 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4857 			      struct net_device *dev,
4858 			      int (*unsync)(struct net_device *,
4859 					    const unsigned char *, int));
4860 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4861 			  struct net_device *dev,
4862 			  int (*unsync)(struct net_device *,
4863 					const unsigned char *));
4864 void __hw_addr_init(struct netdev_hw_addr_list *list);
4865 
4866 /* Functions used for device addresses handling */
4867 void dev_addr_mod(struct net_device *dev, unsigned int offset,
4868 		  const void *addr, size_t len);
4869 
4870 static inline void
4871 __dev_addr_set(struct net_device *dev, const void *addr, size_t len)
4872 {
4873 	dev_addr_mod(dev, 0, addr, len);
4874 }
4875 
4876 static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
4877 {
4878 	__dev_addr_set(dev, addr, dev->addr_len);
4879 }
4880 
4881 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4882 		 unsigned char addr_type);
4883 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4884 		 unsigned char addr_type);
4885 
4886 /* Functions used for unicast addresses handling */
4887 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4888 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4889 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4890 int dev_uc_sync(struct net_device *to, struct net_device *from);
4891 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4892 void dev_uc_unsync(struct net_device *to, struct net_device *from);
4893 void dev_uc_flush(struct net_device *dev);
4894 void dev_uc_init(struct net_device *dev);
4895 
4896 /**
4897  *  __dev_uc_sync - Synchronize device's unicast list
4898  *  @dev:  device to sync
4899  *  @sync: function to call if address should be added
4900  *  @unsync: function to call if address should be removed
4901  *
4902  *  Add newly added addresses to the interface, and release
4903  *  addresses that have been deleted.
4904  */
4905 static inline int __dev_uc_sync(struct net_device *dev,
4906 				int (*sync)(struct net_device *,
4907 					    const unsigned char *),
4908 				int (*unsync)(struct net_device *,
4909 					      const unsigned char *))
4910 {
4911 	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4912 }
4913 
4914 /**
4915  *  __dev_uc_unsync - Remove synchronized addresses from device
4916  *  @dev:  device to sync
4917  *  @unsync: function to call if address should be removed
4918  *
4919  *  Remove all addresses that were added to the device by dev_uc_sync().
4920  */
4921 static inline void __dev_uc_unsync(struct net_device *dev,
4922 				   int (*unsync)(struct net_device *,
4923 						 const unsigned char *))
4924 {
4925 	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
4926 }
4927 
4928 /* Functions used for multicast addresses handling */
4929 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4930 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4931 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4932 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4933 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4934 int dev_mc_sync(struct net_device *to, struct net_device *from);
4935 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4936 void dev_mc_unsync(struct net_device *to, struct net_device *from);
4937 void dev_mc_flush(struct net_device *dev);
4938 void dev_mc_init(struct net_device *dev);
4939 
4940 /**
4941  *  __dev_mc_sync - Synchronize device's multicast list
4942  *  @dev:  device to sync
4943  *  @sync: function to call if address should be added
4944  *  @unsync: function to call if address should be removed
4945  *
4946  *  Add newly added addresses to the interface, and release
4947  *  addresses that have been deleted.
4948  */
4949 static inline int __dev_mc_sync(struct net_device *dev,
4950 				int (*sync)(struct net_device *,
4951 					    const unsigned char *),
4952 				int (*unsync)(struct net_device *,
4953 					      const unsigned char *))
4954 {
4955 	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4956 }
4957 
4958 /**
4959  *  __dev_mc_unsync - Remove synchronized addresses from device
4960  *  @dev:  device to sync
4961  *  @unsync: function to call if address should be removed
4962  *
4963  *  Remove all addresses that were added to the device by dev_mc_sync().
4964  */
4965 static inline void __dev_mc_unsync(struct net_device *dev,
4966 				   int (*unsync)(struct net_device *,
4967 						 const unsigned char *))
4968 {
4969 	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
4970 }
4971 
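/* Illustrative sketch: the canonical ndo_set_rx_mode() shape, pushing
 * unicast and multicast filter changes to hardware through the sync
 * helpers above.  example_add_filter()/example_del_filter() stand in for
 * real hardware accessors and are assumptions.
 */
static inline int example_add_filter(struct net_device *dev,
				     const unsigned char *addr)
{
	/* program 'addr' into the hardware filter table */
	return 0;
}

static inline int example_del_filter(struct net_device *dev,
				     const unsigned char *addr)
{
	/* remove 'addr' from the hardware filter table */
	return 0;
}

static inline void example_set_rx_mode(struct net_device *dev)
{
	__dev_uc_sync(dev, example_add_filter, example_del_filter);
	__dev_mc_sync(dev, example_add_filter, example_del_filter);
}
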
4972 /* Functions used for secondary unicast and multicast support */
4973 void dev_set_rx_mode(struct net_device *dev);
4974 int dev_set_promiscuity(struct net_device *dev, int inc);
4975 int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
4976 int dev_set_allmulti(struct net_device *dev, int inc);
4977 void netdev_state_change(struct net_device *dev);
4978 void __netdev_notify_peers(struct net_device *dev);
4979 void netdev_notify_peers(struct net_device *dev);
4980 void netdev_features_change(struct net_device *dev);
4981 /* Load a device via the kmod */
4982 void dev_load(struct net *net, const char *name);
4983 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4984 					struct rtnl_link_stats64 *storage);
4985 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4986 			     const struct net_device_stats *netdev_stats);
4987 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4988 			   const struct pcpu_sw_netstats __percpu *netstats);
4989 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
4990 
4991 enum {
4992 	NESTED_SYNC_IMM_BIT,
4993 	NESTED_SYNC_TODO_BIT,
4994 };
4995 
4996 #define __NESTED_SYNC_BIT(bit)	((u32)1 << (bit))
4997 #define __NESTED_SYNC(name)	__NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
4998 
4999 #define NESTED_SYNC_IMM		__NESTED_SYNC(IMM)
5000 #define NESTED_SYNC_TODO	__NESTED_SYNC(TODO)
5001 
5002 struct netdev_nested_priv {
5003 	unsigned char flags;
5004 	void *data;
5005 };
5006 
5007 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
5008 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5009 						     struct list_head **iter);
5010 
5011 /* iterate through upper list, must be called under RCU read lock */
5012 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
5013 	for (iter = &(dev)->adj_list.upper, \
5014 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
5015 	     updev; \
5016 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
5017 
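/* Illustrative sketch: listing a device's directly stacked upper devices
 * (e.g. a bond or bridge on top of it) under the RCU read lock, as the
 * comment above requires.
 */
static inline void example_list_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		netdev_info(dev, "upper: %s\n", netdev_name(upper));
	rcu_read_unlock();
}
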
5018 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
5019 				  int (*fn)(struct net_device *upper_dev,
5020 					    struct netdev_nested_priv *priv),
5021 				  struct netdev_nested_priv *priv);
5022 
5023 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5024 				  struct net_device *upper_dev);
5025 
5026 bool netdev_has_any_upper_dev(struct net_device *dev);
5027 
5028 void *netdev_lower_get_next_private(struct net_device *dev,
5029 				    struct list_head **iter);
5030 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5031 					struct list_head **iter);
5032 
5033 #define netdev_for_each_lower_private(dev, priv, iter) \
5034 	for (iter = (dev)->adj_list.lower.next, \
5035 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
5036 	     priv; \
5037 	     priv = netdev_lower_get_next_private(dev, &(iter)))
5038 
5039 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
5040 	for (iter = &(dev)->adj_list.lower, \
5041 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
5042 	     priv; \
5043 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
5044 
5045 void *netdev_lower_get_next(struct net_device *dev,
5046 				struct list_head **iter);
5047 
5048 #define netdev_for_each_lower_dev(dev, ldev, iter) \
5049 	for (iter = (dev)->adj_list.lower.next, \
5050 	     ldev = netdev_lower_get_next(dev, &(iter)); \
5051 	     ldev; \
5052 	     ldev = netdev_lower_get_next(dev, &(iter)))
5053 
5054 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
5055 					     struct list_head **iter);
5056 int netdev_walk_all_lower_dev(struct net_device *dev,
5057 			      int (*fn)(struct net_device *lower_dev,
5058 					struct netdev_nested_priv *priv),
5059 			      struct netdev_nested_priv *priv);
5060 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
5061 				  int (*fn)(struct net_device *lower_dev,
5062 					    struct netdev_nested_priv *priv),
5063 				  struct netdev_nested_priv *priv);
5064 
5065 void *netdev_adjacent_get_private(struct list_head *adj_list);
5066 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
5067 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
5068 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
5069 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
5070 			  struct netlink_ext_ack *extack);
5071 int netdev_master_upper_dev_link(struct net_device *dev,
5072 				 struct net_device *upper_dev,
5073 				 void *upper_priv, void *upper_info,
5074 				 struct netlink_ext_ack *extack);
5075 void netdev_upper_dev_unlink(struct net_device *dev,
5076 			     struct net_device *upper_dev);
5077 int netdev_adjacent_change_prepare(struct net_device *old_dev,
5078 				   struct net_device *new_dev,
5079 				   struct net_device *dev,
5080 				   struct netlink_ext_ack *extack);
5081 void netdev_adjacent_change_commit(struct net_device *old_dev,
5082 				   struct net_device *new_dev,
5083 				   struct net_device *dev);
5084 void netdev_adjacent_change_abort(struct net_device *old_dev,
5085 				  struct net_device *new_dev,
5086 				  struct net_device *dev);
5087 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
5088 void *netdev_lower_dev_get_private(struct net_device *dev,
5089 				   struct net_device *lower_dev);
5090 void netdev_lower_state_changed(struct net_device *lower_dev,
5091 				void *lower_state_info);
5092 
5093 /* RSS keys are 40 or 52 bytes long */
5094 #define NETDEV_RSS_KEY_LEN 52
5095 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
5096 void netdev_rss_key_fill(void *buffer, size_t len);
5097 
5098 int skb_checksum_help(struct sk_buff *skb);
5099 int skb_crc32c_csum_help(struct sk_buff *skb);
5100 int skb_csum_hwoffload_help(struct sk_buff *skb,
5101 			    const netdev_features_t features);
5102 
5103 struct netdev_bonding_info {
5104 	ifslave	slave;
5105 	ifbond	master;
5106 };
5107 
5108 struct netdev_notifier_bonding_info {
5109 	struct netdev_notifier_info info; /* must be first */
5110 	struct netdev_bonding_info  bonding_info;
5111 };
5112 
5113 void netdev_bonding_info_change(struct net_device *dev,
5114 				struct netdev_bonding_info *bonding_info);
5115 
5116 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
5117 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
5118 #else
5119 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
5120 				  const void *data)
5121 {
5122 }
5123 #endif
5124 
5125 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
5126 
5127 static inline bool can_checksum_protocol(netdev_features_t features,
5128 					 __be16 protocol)
5129 {
5130 	if (protocol == htons(ETH_P_FCOE))
5131 		return !!(features & NETIF_F_FCOE_CRC);
5132 
5133 	/* Assume this is an IP checksum (not SCTP CRC) */
5134 
5135 	if (features & NETIF_F_HW_CSUM) {
5136 		/* Can checksum everything */
5137 		return true;
5138 	}
5139 
5140 	switch (protocol) {
5141 	case htons(ETH_P_IP):
5142 		return !!(features & NETIF_F_IP_CSUM);
5143 	case htons(ETH_P_IPV6):
5144 		return !!(features & NETIF_F_IPV6_CSUM);
5145 	default:
5146 		return false;
5147 	}
5148 }
5149 
5150 #ifdef CONFIG_BUG
5151 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
5152 #else
5153 static inline void netdev_rx_csum_fault(struct net_device *dev,
5154 					struct sk_buff *skb)
5155 {
5156 }
5157 #endif
5158 /* rx skb timestamps */
5159 void net_enable_timestamp(void);
5160 void net_disable_timestamp(void);
5161 
5162 static inline ktime_t netdev_get_tstamp(struct net_device *dev,
5163 					const struct skb_shared_hwtstamps *hwtstamps,
5164 					bool cycles)
5165 {
5166 	const struct net_device_ops *ops = dev->netdev_ops;
5167 
5168 	if (ops->ndo_get_tstamp)
5169 		return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
5170 
5171 	return hwtstamps->hwtstamp;
5172 }
5173 
5174 #ifndef CONFIG_PREEMPT_RT
5175 static inline void netdev_xmit_set_more(bool more)
5176 {
5177 	__this_cpu_write(softnet_data.xmit.more, more);
5178 }
5179 
5180 static inline bool netdev_xmit_more(void)
5181 {
5182 	return __this_cpu_read(softnet_data.xmit.more);
5183 }
5184 #else
5185 static inline void netdev_xmit_set_more(bool more)
5186 {
5187 	current->net_xmit.more = more;
5188 }
5189 
5190 static inline bool netdev_xmit_more(void)
5191 {
5192 	return current->net_xmit.more;
5193 }
5194 #endif
5195 
5196 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
5197 					      struct sk_buff *skb, struct net_device *dev,
5198 					      bool more)
5199 {
5200 	netdev_xmit_set_more(more);
5201 	return ops->ndo_start_xmit(skb, dev);
5202 }
5203 
5204 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
5205 					    struct netdev_queue *txq, bool more)
5206 {
5207 	const struct net_device_ops *ops = dev->netdev_ops;
5208 	netdev_tx_t rc;
5209 
5210 	rc = __netdev_start_xmit(ops, skb, dev, more);
5211 	if (rc == NETDEV_TX_OK)
5212 		txq_trans_update(txq);
5213 
5214 	return rc;
5215 }
5216 
5217 int netdev_class_create_file_ns(const struct class_attribute *class_attr,
5218 				const void *ns);
5219 void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
5220 				 const void *ns);
5221 
5222 extern const struct kobj_ns_type_operations net_ns_type_operations;
5223 
5224 const char *netdev_drivername(const struct net_device *dev);
5225 
5226 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
5227 							  netdev_features_t f2)
5228 {
5229 	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
5230 		if (f1 & NETIF_F_HW_CSUM)
5231 			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5232 		else
5233 			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5234 	}
5235 
5236 	return f1 & f2;
5237 }
5238 
5239 static inline netdev_features_t netdev_get_wanted_features(
5240 	struct net_device *dev)
5241 {
5242 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
5243 }
5244 netdev_features_t netdev_increment_features(netdev_features_t all,
5245 	netdev_features_t one, netdev_features_t mask);
5246 
5247 /* Allow TSO to be used on stacked devices:
5248  * performing the GSO segmentation before the last device
5249  * is a performance improvement.
5250  */
5251 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
5252 							netdev_features_t mask)
5253 {
5254 	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
5255 }
5256 
5257 int __netdev_update_features(struct net_device *dev);
5258 void netdev_update_features(struct net_device *dev);
5259 void netdev_change_features(struct net_device *dev);
5260 
5261 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5262 					struct net_device *dev);
5263 
5264 netdev_features_t passthru_features_check(struct sk_buff *skb,
5265 					  struct net_device *dev,
5266 					  netdev_features_t features);
5267 netdev_features_t netif_skb_features(struct sk_buff *skb);
5268 void skb_warn_bad_offload(const struct sk_buff *skb);
5269 
5270 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
5271 {
5272 	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
5273 
5274 	/* check flags correspondence */
5275 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
5276 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
5277 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
5278 	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
5279 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
5280 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
5281 	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
5282 	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
5283 	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
5284 	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
5285 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
5286 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
5287 	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
5288 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
5289 	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
5290 	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
5291 	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
5292 	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
5293 	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
5294 	BUILD_BUG_ON(SKB_GSO_TCP_ACCECN !=
5295 		     (NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT));
5296 
5297 	return (features & feature) == feature;
5298 }
5299 
5300 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
5301 {
5302 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
5303 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
5304 }
5305 
5306 static inline bool netif_needs_gso(struct sk_buff *skb,
5307 				   netdev_features_t features)
5308 {
5309 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
5310 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
5311 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
5312 }
5313 
5314 void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
5315 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
5316 void netif_inherit_tso_max(struct net_device *to,
5317 			   const struct net_device *from);
5318 
5319 static inline unsigned int
5320 netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
5321 {
5322 	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
5323 	return skb->protocol == htons(ETH_P_IPV6) ?
5324 	       READ_ONCE(dev->gro_max_size) :
5325 	       READ_ONCE(dev->gro_ipv4_max_size);
5326 }
5327 
5328 static inline unsigned int
5329 netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
5330 {
5331 	/* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
5332 	return skb->protocol == htons(ETH_P_IPV6) ?
5333 	       READ_ONCE(dev->gso_max_size) :
5334 	       READ_ONCE(dev->gso_ipv4_max_size);
5335 }
5336 
5337 static inline bool netif_is_macsec(const struct net_device *dev)
5338 {
5339 	return dev->priv_flags & IFF_MACSEC;
5340 }
5341 
5342 static inline bool netif_is_macvlan(const struct net_device *dev)
5343 {
5344 	return dev->priv_flags & IFF_MACVLAN;
5345 }
5346 
5347 static inline bool netif_is_macvlan_port(const struct net_device *dev)
5348 {
5349 	return dev->priv_flags & IFF_MACVLAN_PORT;
5350 }
5351 
5352 static inline bool netif_is_bond_master(const struct net_device *dev)
5353 {
5354 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
5355 }
5356 
5357 static inline bool netif_is_bond_slave(const struct net_device *dev)
5358 {
5359 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
5360 }
5361 
5362 static inline bool netif_supports_nofcs(struct net_device *dev)
5363 {
5364 	return dev->priv_flags & IFF_SUPP_NOFCS;
5365 }
5366 
5367 static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
5368 {
5369 	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
5370 }
5371 
5372 static inline bool netif_is_l3_master(const struct net_device *dev)
5373 {
5374 	return dev->priv_flags & IFF_L3MDEV_MASTER;
5375 }
5376 
5377 static inline bool netif_is_l3_slave(const struct net_device *dev)
5378 {
5379 	return dev->priv_flags & IFF_L3MDEV_SLAVE;
5380 }
5381 
5382 static inline int dev_sdif(const struct net_device *dev)
5383 {
5384 #ifdef CONFIG_NET_L3_MASTER_DEV
5385 	if (netif_is_l3_slave(dev))
5386 		return dev->ifindex;
5387 #endif
5388 	return 0;
5389 }
5390 
5391 static inline bool netif_is_bridge_master(const struct net_device *dev)
5392 {
5393 	return dev->priv_flags & IFF_EBRIDGE;
5394 }
5395 
5396 static inline bool netif_is_bridge_port(const struct net_device *dev)
5397 {
5398 	return dev->priv_flags & IFF_BRIDGE_PORT;
5399 }
5400 
5401 static inline bool netif_is_ovs_master(const struct net_device *dev)
5402 {
5403 	return dev->priv_flags & IFF_OPENVSWITCH;
5404 }
5405 
5406 static inline bool netif_is_ovs_port(const struct net_device *dev)
5407 {
5408 	return dev->priv_flags & IFF_OVS_DATAPATH;
5409 }
5410 
5411 static inline bool netif_is_any_bridge_master(const struct net_device *dev)
5412 {
5413 	return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
5414 }
5415 
5416 static inline bool netif_is_any_bridge_port(const struct net_device *dev)
5417 {
5418 	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
5419 }
5420 
5421 static inline bool netif_is_team_master(const struct net_device *dev)
5422 {
5423 	return dev->priv_flags & IFF_TEAM;
5424 }
5425 
5426 static inline bool netif_is_team_port(const struct net_device *dev)
5427 {
5428 	return dev->priv_flags & IFF_TEAM_PORT;
5429 }
5430 
5431 static inline bool netif_is_lag_master(const struct net_device *dev)
5432 {
5433 	return netif_is_bond_master(dev) || netif_is_team_master(dev);
5434 }
5435 
5436 static inline bool netif_is_lag_port(const struct net_device *dev)
5437 {
5438 	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
5439 }
5440 
5441 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
5442 {
5443 	return dev->priv_flags & IFF_RXFH_CONFIGURED;
5444 }
5445 
5446 static inline bool netif_is_failover(const struct net_device *dev)
5447 {
5448 	return dev->priv_flags & IFF_FAILOVER;
5449 }
5450 
5451 static inline bool netif_is_failover_slave(const struct net_device *dev)
5452 {
5453 	return dev->priv_flags & IFF_FAILOVER_SLAVE;
5454 }
5455 
5456 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
5457 static inline void netif_keep_dst(struct net_device *dev)
5458 {
5459 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
5460 }
5461 
5462 /* Return true if dev can't cope with MTU-sized frames that need VLAN tag insertion. */
5463 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
5464 {
5465 	/* TODO: reserve and use an additional IFF bit, if we get more users */
5466 	return netif_is_macsec(dev);
5467 }
5468 
5469 extern struct pernet_operations __net_initdata loopback_net_ops;
5470 
5471 /* Logging, debugging and troubleshooting/diagnostic helpers. */
5472 
5473 /* netdev_printk helpers, similar to dev_printk */
5474 
5475 static inline const char *netdev_name(const struct net_device *dev)
5476 {
5477 	if (!dev->name[0] || strchr(dev->name, '%'))
5478 		return "(unnamed net_device)";
5479 	return dev->name;
5480 }
5481 
5482 static inline const char *netdev_reg_state(const struct net_device *dev)
5483 {
5484 	u8 reg_state = READ_ONCE(dev->reg_state);
5485 
5486 	switch (reg_state) {
5487 	case NETREG_UNINITIALIZED: return " (uninitialized)";
5488 	case NETREG_REGISTERED: return "";
5489 	case NETREG_UNREGISTERING: return " (unregistering)";
5490 	case NETREG_UNREGISTERED: return " (unregistered)";
5491 	case NETREG_RELEASED: return " (released)";
5492 	case NETREG_DUMMY: return " (dummy)";
5493 	}
5494 
5495 	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
5496 	return " (unknown)";
5497 }
5498 
5499 #define MODULE_ALIAS_NETDEV(device) \
5500 	MODULE_ALIAS("netdev-" device)
5501 
5502 /*
5503  * netdev_WARN() acts like dev_printk(), but with the key difference
5504  * of using a WARN/WARN_ON to get the message out, including the
5505  * file/line information and a backtrace.
5506  */
5507 #define netdev_WARN(dev, format, args...)			\
5508 	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
5509 	     netdev_reg_state(dev), ##args)
5510 
5511 #define netdev_WARN_ONCE(dev, format, args...)				\
5512 	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
5513 		  netdev_reg_state(dev), ##args)
5514 
5515 /*
5516  *	The list of packet types we will receive (as opposed to discard)
5517  *	and the routines to invoke.
5518  *
5519  *	Why 16? Because with 16, the only overlap we get on a hash of the
5520  *	low nibble of the protocol value is RARP/SNAP/X.25.
5521  *
5522  *		0800	IP
5523  *		0001	802.3
5524  *		0002	AX.25
5525  *		0004	802.2
5526  *		8035	RARP
5527  *		0005	SNAP
5528  *		0805	X.25
5529  *		0806	ARP
5530  *		8137	IPX
5531  *		0009	Localtalk
5532  *		86DD	IPv6
5533  */
5534 #define PTYPE_HASH_SIZE	(16)
5535 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
5536 
5537 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
5538 
5539 extern struct net_device *blackhole_netdev;
5540 
5541 /* Note: Avoid these macros in the fast path; prefer per-cpu or per-queue counters. */
5542 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5543 #define DEV_STATS_ADD(DEV, FIELD, VAL) 	\
5544 		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
5545 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
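
/* Illustrative sketch: bumping a slow-path counter, e.g. when dropping a
 * packet outside the hot path.  The field name comes from
 * struct net_device_stats.
 */
static inline void example_count_drop(struct net_device *dev)
{
	DEV_STATS_INC(dev, rx_dropped);
}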
5546 
5547 #endif	/* _LINUX_NETDEVICE_H */
5548