/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
#include <net/neighbour_tables.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_config;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
struct phy_link_topology;
struct hwtstamp_provider;

typedef u32 xdp_features_t;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
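
/*
 * Illustrative sketch (not part of this header; the "foo_" names are
 * hypothetical): a virtual or tunnel device transmit path commonly treats
 * NET_XMIT_CN as success for accounting purposes, which is exactly the
 * mapping net_xmit_eval() encodes:
 *
 *	static void foo_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		int err = dev_queue_xmit(skb);
 *
 *		if (net_xmit_eval(err) == 0)
 *			dev->stats.tx_packets++;
 *		else
 *			dev->stats.tx_errors++;
 *	}
 */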
129
130 /* Driver transmit return codes */
131 #define NETDEV_TX_MASK 0xf0
132
133 enum netdev_tx {
134 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
135 NETDEV_TX_OK = 0x00, /* driver took care of packet */
136 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
137 };
138 typedef enum netdev_tx netdev_tx_t;
139
140 /*
141 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
142 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
143 */
dev_xmit_complete(int rc)144 static inline bool dev_xmit_complete(int rc)
145 {
146 /*
147 * Positive cases with an skb consumed by a driver:
148 * - successful transmission (rc == NETDEV_TX_OK)
149 * - error while transmitting (rc < 0)
150 * - error while queueing to a different device (rc & NET_XMIT_MASK)
151 */
152 if (likely(rc < NET_XMIT_MASK))
153 return true;
154
155 return false;
156 }
157
158 /*
159 * Compute the worst-case header length according to the protocols
160 * used.
161 */
162
163 #if defined(CONFIG_HYPERV_NET)
164 # define LL_MAX_HEADER 128
165 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
166 # if defined(CONFIG_MAC80211_MESH)
167 # define LL_MAX_HEADER 128
168 # else
169 # define LL_MAX_HEADER 96
170 # endif
171 #else
172 # define LL_MAX_HEADER 32
173 #endif
174
175 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
176 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
177 #define MAX_HEADER LL_MAX_HEADER
178 #else
179 #define MAX_HEADER (LL_MAX_HEADER + 48)
180 #endif
181
182 /*
183 * Old network device statistics. Fields are native words
184 * (unsigned long) so they can be read and written atomically.
185 */
186
187 #define NET_DEV_STAT(FIELD) \
188 union { \
189 unsigned long FIELD; \
190 atomic_long_t __##FIELD; \
191 }
192
193 struct net_device_stats {
194 NET_DEV_STAT(rx_packets);
195 NET_DEV_STAT(tx_packets);
196 NET_DEV_STAT(rx_bytes);
197 NET_DEV_STAT(tx_bytes);
198 NET_DEV_STAT(rx_errors);
199 NET_DEV_STAT(tx_errors);
200 NET_DEV_STAT(rx_dropped);
201 NET_DEV_STAT(tx_dropped);
202 NET_DEV_STAT(multicast);
203 NET_DEV_STAT(collisions);
204 NET_DEV_STAT(rx_length_errors);
205 NET_DEV_STAT(rx_over_errors);
206 NET_DEV_STAT(rx_crc_errors);
207 NET_DEV_STAT(rx_frame_errors);
208 NET_DEV_STAT(rx_fifo_errors);
209 NET_DEV_STAT(rx_missed_errors);
210 NET_DEV_STAT(tx_aborted_errors);
211 NET_DEV_STAT(tx_carrier_errors);
212 NET_DEV_STAT(tx_fifo_errors);
213 NET_DEV_STAT(tx_heartbeat_errors);
214 NET_DEV_STAT(tx_window_errors);
215 NET_DEV_STAT(rx_compressed);
216 NET_DEV_STAT(tx_compressed);
217 };
218 #undef NET_DEV_STAT
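
/*
 * The union above lets the common case keep reading and writing the fields
 * as plain unsigned longs, while paths that may race update the very same
 * storage through the atomic_long_t alias. A minimal sketch of the writer
 * side (this is the pattern behind the kernel's DEV_STATS_INC()-style
 * helpers):
 *
 *	atomic_long_inc(&dev->stats.__rx_dropped);
 */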
219
220 /* per-cpu stats, allocated on demand.
221 * Try to fit them in a single cache line, for dev_get_stats() sake.
222 */
223 struct net_device_core_stats {
224 unsigned long rx_dropped;
225 unsigned long tx_dropped;
226 unsigned long rx_nohandler;
227 unsigned long rx_otherhost_dropped;
228 } __aligned(4 * sizeof(unsigned long));
229
230 #include <linux/cache.h>
231 #include <linux/skbuff.h>
232
233 struct neighbour;
234 struct neigh_parms;
235 struct sk_buff;
236
237 struct netdev_hw_addr {
238 struct list_head list;
239 struct rb_node node;
240 unsigned char addr[MAX_ADDR_LEN];
241 unsigned char type;
242 #define NETDEV_HW_ADDR_T_LAN 1
243 #define NETDEV_HW_ADDR_T_SAN 2
244 #define NETDEV_HW_ADDR_T_UNICAST 3
245 #define NETDEV_HW_ADDR_T_MULTICAST 4
246 bool global_use;
247 int sync_cnt;
248 int refcount;
249 int synced;
250 struct rcu_head rcu_head;
251 };
252
253 struct netdev_hw_addr_list {
254 struct list_head list;
255 int count;
256
257 /* Auxiliary tree for faster lookup on addition and deletion */
258 struct rb_root tree;
259 };
260
261 #define netdev_hw_addr_list_count(l) ((l)->count)
262 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
263 #define netdev_hw_addr_list_for_each(ha, l) \
264 list_for_each_entry(ha, &(l)->list, list)
265
266 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
267 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
268 #define netdev_for_each_uc_addr(ha, dev) \
269 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
270 #define netdev_for_each_synced_uc_addr(_ha, _dev) \
271 netdev_for_each_uc_addr((_ha), (_dev)) \
272 if ((_ha)->sync_cnt)
273
274 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
275 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
276 #define netdev_for_each_mc_addr(ha, dev) \
277 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
278 #define netdev_for_each_synced_mc_addr(_ha, _dev) \
279 netdev_for_each_mc_addr((_ha), (_dev)) \
280 if ((_ha)->sync_cnt)
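
/*
 * Illustrative sketch ("foo_" names are hypothetical): a driver's
 * ndo_set_rx_mode() typically walks the multicast list with the iterator
 * above to program its hardware filters:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_mc_filter(dev, ha->addr);
 *	}
 */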

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *	dev->hard_header_len ? (dev->hard_header_len +
 *				(HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
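
/*
 * Illustrative sketch: code that builds packets by hand reserves
 * LL_RESERVED_SPACE() of headroom up front, so the link-layer header can
 * later be pushed without a reallocation ("len" is a hypothetical payload
 * size):
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */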

struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for per-NAPI config
 */
struct napi_config {
	u64		gro_flush_timeout;
	u64		irq_suspend_timeout;
	u32		defer_hard_irqs;
	unsigned int	napi_id;
};

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	u32			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	/* CPU actively polling if netpoll is configured */
	int			poll_owner;
#endif
	/* CPU on which NAPI has been scheduled for processing */
	int			list_owner;
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list;	/* Pending GRO_NORMAL skbs */
	int			rx_count;	/* length of rx_list */
	unsigned int		napi_id;	/* protected by netdev_lock */
	struct hrtimer		timer;
	/* all fields past this point are write-protected by netdev_lock */
	struct task_struct	*thread;
	unsigned long		gro_flush_timeout;
	unsigned long		irq_suspend_timeout;
	u32			defer_hard_irqs;
	/* control-path-only fields follow */
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	int			irq;
	int			index;
	struct napi_config	*config;
};
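
/*
 * Illustrative sketch ("foo_" names are hypothetical): drivers embed a
 * napi_struct in their per-device private state and register it with
 * netif_napi_add() (declared further down in this header; in recent kernels
 * it takes the device, the napi_struct and the poll callback) before
 * enabling it:
 *
 *	struct foo_priv {
 *		struct napi_struct napi;
 *	};
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll);
 *	napi_enable(&priv->napi);
 */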

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
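
/*
 * Illustrative sketch ("foo_handle_frame", "foo_deliver" and "port" are
 * hypothetical): an upper device such as a bridge claims a lower device's
 * traffic by registering a handler under RTNL with
 * netdev_rx_handler_register():
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (foo_deliver(skb))
 *			return RX_HANDLER_CONSUMED;
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, port);
 */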

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

/**
 * napi_is_scheduled - test if NAPI is scheduled
 * @n: NAPI context
 *
 * This check is "best-effort". With no locking implemented,
 * a NAPI can be scheduled or terminated right after this check,
 * so the result may be imprecise.
 *
 * NAPI_STATE_SCHED is an internal state, napi_is_scheduled
 * should not be used normally and napi_schedule should be
 * used instead.
 *
 * Use only if the driver really needs to check if a NAPI
 * is scheduled, for example in the context of a delayed timer
 * that can be skipped if a NAPI is already scheduled.
 *
 * Return: True if NAPI is scheduled, False otherwise.
 */
static inline bool napi_is_scheduled(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_SCHED, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 * Return: true if we schedule a NAPI or false if not.
 * Refer to napi_schedule_prep() for additional reasons why
 * a NAPI might not be scheduled.
 */
static inline bool napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n)) {
		__napi_schedule(n);
		return true;
	}

	return false;
}
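
/*
 * Illustrative sketch ("foo_" names are hypothetical): the canonical
 * interrupt handler masks the device's interrupts and defers all work to
 * NAPI context via napi_schedule():
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_device_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */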

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/**
 * napi_complete_done - NAPI processing complete
 * @n: NAPI context
 * @work_done: number of packets processed
 *
 * Mark NAPI processing as complete. Should only be called if poll budget
 * has not been completely consumed.
 * Prefer over napi_complete().
 * Return: false if device should avoid rearming interrupts.
 */
bool napi_complete_done(struct napi_struct *n, int work_done);

static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
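
/*
 * Illustrative sketch of the matching poll routine ("foo_" names are
 * hypothetical): process at most @budget packets, and only complete and
 * re-enable device interrupts when the work is done and
 * napi_complete_done() agrees:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int done = foo_clean_rx(priv, budget);
 *
 *		if (done < budget && napi_complete_done(napi, done))
 *			foo_enable_device_irqs(priv);
 *		return done;
 *	}
 */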

int dev_set_threaded(struct net_device *dev, bool threaded);

void napi_disable(struct napi_struct *n);
void napi_disable_locked(struct napi_struct *n);

void napi_enable(struct napi_struct *n);
void napi_enable_locked(struct napi_struct *n);

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 * napi_if_scheduled_mark_missed - if napi is running, set the
 * NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	val = READ_ONCE(n->state);
	do {
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN	(QUEUE_STATE_ANY_XOFF | \
					 QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN	(QUEUE_STATE_DRV_XOFF | \
					 QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
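
/*
 * Illustrative sketch ("foo_" names and the ring-state helpers are
 * hypothetical): the driver xmit path sets __QUEUE_STATE_DRV_XOFF via
 * netif_tx_stop_queue() when its ring fills, and the TX completion path
 * wakes the queue again (these helpers are defined further down in this
 * header):
 *
 *	if (unlikely(foo_tx_ring_full(priv)))
 *		netif_tx_stop_queue(txq);
 *
 * and later, while reaping TX completions:
 *
 *	if (netif_tx_queue_stopped(txq) && foo_tx_ring_has_room(priv))
 *		netif_tx_wake_queue(txq);
 */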

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
	struct Qdisc __rcu	*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	atomic_long_t		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif

/*
 * write-mostly part
 */
#ifdef CONFIG_BQL
	struct dql		dql;
#endif
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

/*
 * slow- / control-path part
 */
	/* NAPI instance for the queue
	 * Readers and writers must hold RTNL
	 */
	struct napi_struct	*napi;

#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif

/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int	len;
	unsigned int	alloc_len;
	struct rcu_head	rcu;
	u16		queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
		- sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 *
 * We keep track of the number of cpus/rxqs used when the struct is allocated,
 * in nr_ids. This guards against out-of-bounds memory accesses.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This will be used to navigate the maps, to ensure we're
 * not crossing its upper bound, as the original dev->num_tc can be updated in
 * the meantime.
 */
struct xps_dev_maps {
	struct rcu_head	rcu;
	unsigned int	nr_ids;
	s16		num_tc;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
	DEV_PATH_MTK_WDMA,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device		*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int		port;
			u16		proto;
		} dsa;
		struct {
			u8		wdma_idx;
			u8		queue;
			u16		wcid;
			u8		bss;
			u8		amsdu;
		} mtk_wdma;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	u8			daddr[ETH_ALEN];

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};

enum tc_setup_type {
	TC_QUERY_CAPS,
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
	TC_SETUP_ACT,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};
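
/*
 * Illustrative sketch ("foo_xdp_setup" is hypothetical): an ndo_bpf()
 * implementation dispatches on the command and rejects anything it does
 * not support:
 *
 *	static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(dev, bpf->prog, bpf->extack);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */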

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add)(struct xfrm_state *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_state_delete)(struct xfrm_state *x);
	void	(*xdo_dev_state_free)(struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok)(struct sk_buff *skb,
				      struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn)(struct xfrm_state *x);
	void	(*xdo_dev_state_update_stats)(struct xfrm_state *x);
	int	(*xdo_dev_policy_add)(struct xfrm_policy *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_policy_delete)(struct xfrm_policy *x);
	void	(*xdo_dev_policy_free)(struct xfrm_policy *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *                                         struct net_device *dev,
 *                                         netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If driver handles unicast address filtering, it should set
 *	IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Old-style ioctl entry point. This is used internally by the
 *	ieee802154 subsystem but is no longer called by the device
 *	ioctl handler.
 *
 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Used by the bonding driver for its device specific ioctls:
 *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
 *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
 *
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
 *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set a network device's bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *                              void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *                        u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *                        int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *                          int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *                        struct nlattr *port[]);
 *
 *	Enable or disable the VF ability to query its RSS Redirection Table and
 *	Hash Key. This is needed since on some devices VFs share this information
 *	with the PF and querying it may introduce a theoretical security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *                     void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *                           struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *                            struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *                             struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information(FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *                          u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *                                          struct sk_buff *skb,
 *                                          bool all_slaves);
 *	Get the xmit slave of master device. If all_slaves is true, the function
 *	assumes all the slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *                                       netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid, u16 flags,
 *                    bool *notified, struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid,
 *                    bool *notified, struct netlink_ext_ack *extack);
 *	Deletes the FDB entry from dev corresponding to addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
 *                         struct netlink_ext_ack *extack);
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *                     struct net_device *dev, struct net_device *filter_dev,
 *                     int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
 *                    u16 nlmsg_flags, struct netlink_ext_ack *extack);
 *	Adds an MDB entry to dev.
 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
 *                    struct netlink_ext_ack *extack);
 *	Deletes the MDB entry from dev.
 * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
 *                         struct netlink_ext_ack *extack);
 *	Bulk deletes MDB entries from dev.
 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
 *                     struct netlink_callback *cb);
 *	Dumps MDB entries from dev. The first argument (marker) in the netlink
 *	callback is used by core rtnetlink code.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *                           struct net_device *dev, u32 filter_mask,
 *                           int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *                             struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on a single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *                               struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *                               struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *                           int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling packet.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting an
 *	appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *                     u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns number of frames successfully transmitted, frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error invoking the ndo; no
 *	frames were transmitted and the caller will free all frames.
 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
 *                                              struct xdp_buff *xdp);
 *	Get the xmit slave of master device based on the xdp_buff.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *	This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only Tx, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
 *                       int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must be under RCU read context.
 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
 *	Get the forwarding path to reach the real device from the HW destination address.
 * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
 *                           const struct skb_shared_hwtstamps *hwtstamps,
 *                           bool cycles);
 *	Get hardware timestamp based on normal/adjustable time or free running
 *	cycle counter. This function is required if physical clock supports a
 *	free running cycle counter.
 *
 * int (*ndo_hwtstamp_get)(struct net_device *dev,
 *                         struct kernel_hwtstamp_config *kernel_config);
 *	Get the currently configured hardware timestamping parameters for the
 *	NIC device.
 *
 * int (*ndo_hwtstamp_set)(struct net_device *dev,
 *                         struct kernel_hwtstamp_config *kernel_config,
 *                         struct netlink_ext_ack *extack);
 *	Change the hardware timestamping parameters for NIC device.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev,
						  unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
							struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
						    struct net_device *dev,
						    struct netlink_ext_ack *extack);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_add)(struct net_device *dev,
					       struct nlattr *tb[],
					       u16 nlmsg_flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del)(struct net_device *dev,
					       struct nlattr *tb[],
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
						    struct nlattr *tb[],
						    struct netlink_ext_ack *extack);
	int			(*ndo_mdb_dump)(struct net_device *dev,
						struct sk_buff *skb,
						struct netlink_callback *cb);
	int			(*ndo_mdb_get)(struct net_device *dev,
					       struct nlattr *tb[], u32 portid,
					       u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
							  struct xdp_buff *xdp);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	int			(*ndo_tunnel_ctl)(struct net_device *dev,
						  struct ip_tunnel_parm_kern *p,
						  int cmd);
	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
	int			(*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
							 struct net_device_path *path);
	ktime_t			(*ndo_get_tstamp)(struct net_device *dev,
						  const struct skb_shared_hwtstamps *hwtstamps,
						  bool cycles);
	int			(*ndo_hwtstamp_get)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config);
	int			(*ndo_hwtstamp_set)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config,
						    struct netlink_ext_ack *extack);

#if IS_ENABLED(CONFIG_NET_SHAPER)
	/**
	 * @net_shaper_ops: Device shaping offload operations
	 * see include/net/net_shapers.h
	 */
	const struct net_shaper_ops *net_shaper_ops;
#endif
};
1636
1637 /**
1638 * enum netdev_priv_flags - &struct net_device priv_flags
1639 *
1640 * These are the &struct net_device priv_flags; they are only set
1641 * internally by drivers and used in the kernel. These flags are
1642 * invisible to userspace, which means that their order can change
1643 * during any kernel release.
1644 *
1645 * You should add bitfield booleans after either net_device::priv_flags
1646 * (hotpath) or ::threaded (slowpath) instead of extending these flags.
1647 *
1648 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1649 * @IFF_EBRIDGE: Ethernet bridging device
1650 * @IFF_BONDING: bonding master or slave
1651 * @IFF_ISATAP: ISATAP interface (RFC4214)
1652 * @IFF_WAN_HDLC: WAN HDLC device
1653 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1654 * release skb->dst
1655 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1656 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1657 * @IFF_MACVLAN_PORT: device used as macvlan port
1658 * @IFF_BRIDGE_PORT: device used as bridge port
1659 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1660 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1661 * @IFF_UNICAST_FLT: Supports unicast filtering
1662 * @IFF_TEAM_PORT: device used as team port
1663 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1664 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1665 * change when it's running
1666 * @IFF_MACVLAN: Macvlan device
1667 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1668 * underlying stacked devices
1669 * @IFF_L3MDEV_MASTER: device is an L3 master device
1670 * @IFF_NO_QUEUE: device can run without qdisc attached
1671 * @IFF_OPENVSWITCH: device is an Open vSwitch master
1672 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1673 * @IFF_TEAM: device is a team device
1674 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1675 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1676 * entity (i.e. the master device for bridged veth)
1677 * @IFF_MACSEC: device is a MACsec device
1678 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1679 * @IFF_FAILOVER: device is a failover master device
1680 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1681 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1682 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
1683 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
1684 * skb_headlen(skb) == 0 (data starts from frag0)
1685 */
1686 enum netdev_priv_flags {
1687 IFF_802_1Q_VLAN = 1<<0,
1688 IFF_EBRIDGE = 1<<1,
1689 IFF_BONDING = 1<<2,
1690 IFF_ISATAP = 1<<3,
1691 IFF_WAN_HDLC = 1<<4,
1692 IFF_XMIT_DST_RELEASE = 1<<5,
1693 IFF_DONT_BRIDGE = 1<<6,
1694 IFF_DISABLE_NETPOLL = 1<<7,
1695 IFF_MACVLAN_PORT = 1<<8,
1696 IFF_BRIDGE_PORT = 1<<9,
1697 IFF_OVS_DATAPATH = 1<<10,
1698 IFF_TX_SKB_SHARING = 1<<11,
1699 IFF_UNICAST_FLT = 1<<12,
1700 IFF_TEAM_PORT = 1<<13,
1701 IFF_SUPP_NOFCS = 1<<14,
1702 IFF_LIVE_ADDR_CHANGE = 1<<15,
1703 IFF_MACVLAN = 1<<16,
1704 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1705 IFF_L3MDEV_MASTER = 1<<18,
1706 IFF_NO_QUEUE = 1<<19,
1707 IFF_OPENVSWITCH = 1<<20,
1708 IFF_L3MDEV_SLAVE = 1<<21,
1709 IFF_TEAM = 1<<22,
1710 IFF_RXFH_CONFIGURED = 1<<23,
1711 IFF_PHONY_HEADROOM = 1<<24,
1712 IFF_MACSEC = 1<<25,
1713 IFF_NO_RX_HANDLER = 1<<26,
1714 IFF_FAILOVER = 1<<27,
1715 IFF_FAILOVER_SLAVE = 1<<28,
1716 IFF_L3MDEV_RX_HANDLER = 1<<29,
1717 IFF_NO_ADDRCONF = BIT_ULL(30),
1718 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
1719 };
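/* Example (sketch only, not kernel API): helpers in this header wrap
 * individual priv_flags bits so that callers never open-code the masks;
 * a bridge-port test, for instance, boils down to:
 *
 *	return dev->priv_flags & IFF_BRIDGE_PORT;
 *
 * Note that &struct net_device is defined further below, so such helpers
 * must appear after the structure definition.
 */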
1720
1721 /* Specifies the type of the struct net_device::ml_priv pointer */
1722 enum netdev_ml_priv_type {
1723 ML_PRIV_NONE,
1724 ML_PRIV_CAN,
1725 };
1726
1727 enum netdev_stat_type {
1728 NETDEV_PCPU_STAT_NONE,
1729 NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
1730 NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
1731 NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
1732 };
1733
1734 enum netdev_reg_state {
1735 NETREG_UNINITIALIZED = 0,
1736 NETREG_REGISTERED, /* completed register_netdevice */
1737 NETREG_UNREGISTERING, /* called unregister_netdevice */
1738 NETREG_UNREGISTERED, /* completed unregister todo */
1739 NETREG_RELEASED, /* called free_netdev */
1740 NETREG_DUMMY, /* dummy device for NAPI poll */
1741 };
1742
1743 /**
1744 * struct net_device - The DEVICE structure.
1745 *
1746 * Actually, this whole structure is a big mistake. It mixes I/O
1747 * data with strictly "high-level" data, and it has to know about
1748 * almost every data structure used in the INET module.
1749 *
1750 * @priv_flags: flags invisible to userspace defined as bits, see
1751 * enum netdev_priv_flags for the definitions
1752 * @lltx: device supports lockless Tx. Deprecated for real HW
1753 * drivers. Mainly used by logical interfaces, such as
1754 * bonding and tunnels
1755 *
1756 * @name: This is the first field of the "visible" part of this structure
1757 * (i.e. as seen by users in the "Space.c" file). It is the name
1758 * of the interface.
1759 *
1760 * @name_node: Name hashlist node
1761 * @ifalias: SNMP alias
1762 * @mem_end: Shared memory end
1763 * @mem_start: Shared memory start
1764 * @base_addr: Device I/O address
1765 * @irq: Device IRQ number
1766 *
1767 * @state: Generic network queuing layer state, see netdev_state_t
1768 * @dev_list: The global list of network devices
1769 * @napi_list: List entry used for polling NAPI devices
1770 * @unreg_list: List entry when we are unregistering the
1771 * device; see the function unregister_netdev
1772 * @close_list: List entry used when we are closing the device
1773 * @ptype_all: Device-specific packet handlers for all protocols
1774 * @ptype_specific: Device-specific, protocol-specific packet handlers
1775 *
1776 * @adj_list: Directly linked devices, like slaves for bonding
1777 * @features: Currently active device features
1778 * @hw_features: User-changeable features
1779 *
1780 * @wanted_features: User-requested features
1781 * @vlan_features: Mask of features inheritable by VLAN devices
1782 *
1783 * @hw_enc_features: Mask of features inherited by encapsulating devices
1784 * This field indicates what encapsulation
1785 * offloads the hardware is capable of doing,
1786 * and drivers will need to set them appropriately.
1787 *
1788 * @mpls_features: Mask of features inheritable by MPLS
1789 * @gso_partial_features: value(s) from NETIF_F_GSO\*
1790 *
1791 * @ifindex: interface index
1792 * @group: The group the device belongs to
1793 *
1794 * @stats: Statistics struct, which was left as a legacy, use
1795 * rtnl_link_stats64 instead
1796 *
1797 * @core_stats: core networking counters,
1798 * do not use this in drivers
1799 * @carrier_up_count: Number of times the carrier has been up
1800 * @carrier_down_count: Number of times the carrier has been down
1801 *
1802 * @wireless_handlers: List of functions to handle Wireless Extensions
1803 * (instead of ioctl);
1804 * see <net/iw_handler.h> for details.
1805 *
1806 * @netdev_ops: Includes several pointers to callbacks,
1807 * if one wants to override the ndo_*() functions
1808 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
1809 * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
1810 * @ethtool_ops: Management operations
1811 * @l3mdev_ops: Layer 3 master device operations
1812 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1813 * discovery handling. Necessary for e.g. 6LoWPAN.
1814 * @xfrmdev_ops: Transformation offload operations
1815 * @tlsdev_ops: Transport Layer Security offload operations
1816 * @header_ops: Includes callbacks for creating, parsing, caching, etc.
1817 * of Layer 2 headers.
1818 *
1819 * @flags: Interface flags (a la BSD)
1820 * @xdp_features: XDP capability supported by the device
1821 * @gflags: Global flags (kept as legacy)
1822 * @priv_len: Size of the ->priv flexible array
1823 * @priv: Flexible array containing private data
1824 * @operstate: RFC2863 operstate
1825 * @link_mode: Mapping policy to operstate
1826 * @if_port: Selectable AUI, TP, ...
1827 * @dma: DMA channel
1828 * @mtu: Interface MTU value
1829 * @min_mtu: Interface Minimum MTU value
1830 * @max_mtu: Interface Maximum MTU value
1831 * @type: Interface hardware type
1832 * @hard_header_len: Maximum hardware header length.
1833 * @min_header_len: Minimum hardware header length
1834 *
1835 * @needed_headroom: Extra headroom the hardware may need, but not in all
1836 * cases can this be guaranteed
1837 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1838 * cases can this be guaranteed. Some cases also use
1839 * LL_MAX_HEADER instead to allocate the skb
1840 *
1841 * interface address info:
1842 *
1843 * @perm_addr: Permanent hw address
1844 * @addr_assign_type: Hw address assignment type
1845 * @addr_len: Hardware address length
1846 * @upper_level: Maximum depth level of upper devices.
1847 * @lower_level: Maximum depth level of lower devices.
1848 * @neigh_priv_len: Used in neigh_alloc()
1849 * @dev_id: Used to differentiate devices that share
1850 * the same link layer address
1851 * @dev_port: Used to differentiate devices that share
1852 * the same function
1853 * @addr_list_lock: XXX: need comments on this one
1854 * @name_assign_type: network interface name assignment type
1855 * @uc_promisc: Flag indicating that promiscuous mode
1856 * has been enabled due to the need to listen to
1857 * additional unicast addresses in a device that
1858 * does not implement ndo_set_rx_mode()
1859 * @uc: unicast mac addresses
1860 * @mc: multicast mac addresses
1861 * @dev_addrs: list of device hw addresses
1862 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1863 * @promiscuity: Number of times the NIC is told to work in
1864 * promiscuous mode; if it becomes 0 the NIC will
1865 * exit promiscuous mode
1866 * @allmulti: Counter, enables or disables allmulticast mode
1867 *
1868 * @vlan_info: VLAN info
1869 * @dsa_ptr: dsa specific data
1870 * @tipc_ptr: TIPC specific data
1871 * @atalk_ptr: AppleTalk link
1872 * @ip_ptr: IPv4 specific data
1873 * @ip6_ptr: IPv6 specific data
1874 * @ax25_ptr: AX.25 specific data
1875 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1876 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1877 * device struct
1878 * @mpls_ptr: mpls_dev struct pointer
1879 * @mctp_ptr: MCTP specific data
1880 *
1881 * @dev_addr: Hw address (before bcast,
1882 * because most packets are unicast)
1883 *
1884 * @_rx: Array of RX queues
1885 * @num_rx_queues: Number of RX queues
1886 * allocated at register_netdev() time
1887 * @real_num_rx_queues: Number of RX queues currently active in device
1888 * @xdp_prog: XDP sockets filter program pointer
1889 *
1890 * @rx_handler: handler for received packets
1891 * @rx_handler_data: XXX: need comments on this one
1892 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing
1893 * @ingress_queue: XXX: need comments on this one
1894 * @nf_hooks_ingress: netfilter hooks executed for ingress packets
1895 * @broadcast: hw bcast address
1896 *
1897 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1898 * indexed by RX queue number. Assigned by driver.
1899 * This must only be set if the ndo_rx_flow_steer
1900 * operation is defined
1901 * @index_hlist: Device index hash chain
1902 *
1903 * @_tx: Array of TX queues
1904 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1905 * @real_num_tx_queues: Number of TX queues currently active in device
1906 * @qdisc: Root qdisc from userspace point of view
1907 * @tx_queue_len: Max frames per queue allowed
1908 * @tx_global_lock: XXX: need comments on this one
1909 * @xdp_bulkq: XDP device bulk queue
1910 * @xps_maps: all CPUs/RXQs maps for XPS device
1911 *
1913 * @tcx_egress: BPF & clsact qdisc specific data for egress processing
1914 * @nf_hooks_egress: netfilter hooks executed for egress packets
1915 * @qdisc_hash: qdisc hash table
1916 * @watchdog_timeo: Represents the timeout that is used by
1917 * the watchdog (see dev_watchdog())
1918 * @watchdog_timer: List of timers
1919 *
1920 * @proto_down_reason: reason a netdev interface is held down
1921 * @pcpu_refcnt: Number of references to this device
1922 * @dev_refcnt: Number of references to this device
1923 * @refcnt_tracker: Tracker directory for tracked references to this device
1924 * @todo_list: Delayed register/unregister
1925 * @link_watch_list: XXX: need comments on this one
1926 *
1927 * @reg_state: Register/unregister state machine
1928 * @dismantle: Device is going to be freed
1929 * @rtnl_link_state: This enum represents the phases of creating
1930 * a new link
1931 *
1932 * @needs_free_netdev: Should unregister perform free_netdev?
1933 * @priv_destructor: Called from unregister
1934 * @npinfo: XXX: need comments on this one
1935 * @nd_net: Network namespace this network device is inside
1936 *
1937 * @ml_priv: Mid-layer private
1938 * @ml_priv_type: Mid-layer private type
1939 *
1940 * @pcpu_stat_type: Type of device statistics which the core should
1941 * allocate/free: none, lstats, tstats, dstats. none
1942 * means the driver is handling statistics allocation/
1943 * freeing internally.
1944 * @lstats: Loopback statistics: packets, bytes
1945 * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
1946 * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
1947 *
1948 * @garp_port: GARP
1949 * @mrp_port: MRP
1950 *
1951 * @dm_private: Drop monitor private
1952 *
1953 * @dev: Class/net/name entry
1954 * @sysfs_groups: Space for optional device, statistics and wireless
1955 * sysfs groups
1956 *
1957 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1958 * @rtnl_link_ops: Rtnl_link_ops
1959 * @stat_ops: Optional ops for queue-aware statistics
1960 * @queue_mgmt_ops: Optional ops for queue management
1961 *
1962 * @gso_max_size: Maximum size of generic segmentation offload
1963 * @tso_max_size: Device (as in HW) limit on the max TSO request size
1964 * @gso_max_segs: Maximum number of segments that can be passed to the
1965 * NIC for GSO
1966 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
1967 * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
1968 * for IPv4.
1969 *
1970 * @dcbnl_ops: Data Center Bridging netlink ops
1971 * @num_tc: Number of traffic classes in the net device
1972 * @tc_to_txq: XXX: need comments on this one
1973 * @prio_tc_map: XXX: need comments on this one
1974 *
1975 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1976 *
1977 * @priomap: XXX: need comments on this one
1978 * @link_topo: Physical link topology tracking attached PHYs
1979 * @phydev: Physical device may attach itself
1980 * for hardware timestamping
1981 * @sfp_bus: attached &struct sfp_bus structure.
1982 *
1983 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1984 *
1985 * @proto_down: protocol port state information can be sent to the
1986 * switch driver and used to set the phys state of the
1987 * switch port.
1988 *
1989 * @threaded: napi threaded mode is enabled
1990 *
1991 * @see_all_hwtstamp_requests: device wants to see calls to
1992 * ndo_hwtstamp_set() for all timestamp requests
1993 * regardless of source, even if those aren't
1994 * HWTSTAMP_SOURCE_NETDEV
1995 * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
1996 * @netns_local: interface can't change network namespaces
1997 * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes
1998 *
1999 * @net_notifier_list: List of per-net netdev notifier block
2000 * that follow this device when it is moved
2001 * to another network namespace.
2002 *
2003 * @macsec_ops: MACsec offloading ops
2004 *
2005 * @udp_tunnel_nic_info: static structure describing the UDP tunnel
2006 * offload capabilities of the device
2007 * @udp_tunnel_nic: UDP tunnel offload state
2008 * @ethtool: ethtool related state
2009 * @xdp_state: stores info on attached XDP BPF programs
2010 *
2011 * @nested_level: Used as a parameter of spin_lock_nested() of
2012 * dev->addr_list_lock.
2013 * @unlink_list: As netif_addr_lock() can be called recursively,
2014 * keep a list of interfaces to be deleted.
2015 * @gro_max_size: Maximum size of aggregated packet in generic
2016 * receive offload (GRO)
2017 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
2018 * receive offload (GRO), for IPv4.
2019 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP
2020 * zero copy driver
2021 *
2022 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
2023 * @linkwatch_dev_tracker: refcount tracker used by linkwatch.
2024 * @watchdog_dev_tracker: refcount tracker used by watchdog.
2025 * @dev_registered_tracker: tracker for reference held while
2026 * registered
2027 * @offload_xstats_l3: L3 HW stats for this netdevice.
2028 *
2029 * @devlink_port: Pointer to related devlink port structure.
2030 * Assigned by a driver before netdev registration using
2031 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static
2032 * during the time netdevice is registered.
2033 *
2034 * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
2035 * where the clock is recovered.
2036 *
2037 * @max_pacing_offload_horizon: max EDT offload horizon in nsec.
2038 * @napi_config: An array of napi_config structures containing per-NAPI
2039 * settings.
2040 * @gro_flush_timeout: timeout for GRO layer in NAPI
2041 * @napi_defer_hard_irqs: If not zero, provides a counter that
2042 * allows avoiding NIC hard IRQs on busy queues.
2043 *
2044 * @neighbours: List heads pointing to this device's neighbours'
2045 * dev_list, one per address-family.
2046 * @hwprov: Tracks which PTP performs hardware packet time stamping.
2047 *
2048 * FIXME: cleanup struct net_device such that network protocol info
2049 * moves out.
2050 */
2051
2052 struct net_device {
2053 /* Cacheline organization can be found documented in
2054 * Documentation/networking/net_cachelines/net_device.rst.
2055 * Please update the document when adding new fields.
2056 */
2057
2058 /* TX read-mostly hotpath */
2059 __cacheline_group_begin(net_device_read_tx);
2060 struct_group(priv_flags_fast,
2061 unsigned long priv_flags:32;
2062 unsigned long lltx:1;
2063 );
2064 const struct net_device_ops *netdev_ops;
2065 const struct header_ops *header_ops;
2066 struct netdev_queue *_tx;
2067 netdev_features_t gso_partial_features;
2068 unsigned int real_num_tx_queues;
2069 unsigned int gso_max_size;
2070 unsigned int gso_ipv4_max_size;
2071 u16 gso_max_segs;
2072 s16 num_tc;
2073 /* Note : dev->mtu is often read without holding a lock.
2074 * Writers usually hold RTNL.
2075 * It is recommended to use READ_ONCE() to annotate the reads,
2076 * and to use WRITE_ONCE() to annotate the writes.
2077 */
2078 unsigned int mtu;
2079 unsigned short needed_headroom;
2080 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2081 #ifdef CONFIG_XPS
2082 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
2083 #endif
2084 #ifdef CONFIG_NETFILTER_EGRESS
2085 struct nf_hook_entries __rcu *nf_hooks_egress;
2086 #endif
2087 #ifdef CONFIG_NET_XGRESS
2088 struct bpf_mprog_entry __rcu *tcx_egress;
2089 #endif
2090 __cacheline_group_end(net_device_read_tx);
2091
2092 /* TXRX read-mostly hotpath */
2093 __cacheline_group_begin(net_device_read_txrx);
2094 union {
2095 struct pcpu_lstats __percpu *lstats;
2096 struct pcpu_sw_netstats __percpu *tstats;
2097 struct pcpu_dstats __percpu *dstats;
2098 };
2099 unsigned long state;
2100 unsigned int flags;
2101 unsigned short hard_header_len;
2102 netdev_features_t features;
2103 struct inet6_dev __rcu *ip6_ptr;
2104 __cacheline_group_end(net_device_read_txrx);
2105
2106 /* RX read-mostly hotpath */
2107 __cacheline_group_begin(net_device_read_rx);
2108 struct bpf_prog __rcu *xdp_prog;
2109 struct list_head ptype_specific;
2110 int ifindex;
2111 unsigned int real_num_rx_queues;
2112 struct netdev_rx_queue *_rx;
2113 unsigned int gro_max_size;
2114 unsigned int gro_ipv4_max_size;
2115 rx_handler_func_t __rcu *rx_handler;
2116 void __rcu *rx_handler_data;
2117 possible_net_t nd_net;
2118 #ifdef CONFIG_NETPOLL
2119 struct netpoll_info __rcu *npinfo;
2120 #endif
2121 #ifdef CONFIG_NET_XGRESS
2122 struct bpf_mprog_entry __rcu *tcx_ingress;
2123 #endif
2124 __cacheline_group_end(net_device_read_rx);
2125
2126 char name[IFNAMSIZ];
2127 struct netdev_name_node *name_node;
2128 struct dev_ifalias __rcu *ifalias;
2129 /*
2130 * I/O specific fields
2131 * FIXME: Merge these and struct ifmap into one
2132 */
2133 unsigned long mem_end;
2134 unsigned long mem_start;
2135 unsigned long base_addr;
2136
2137 /*
2138 * Some hardware also needs these fields (state,dev_list,
2139 * napi_list,unreg_list,close_list) but they are not
2140 * part of the usual set specified in Space.c.
2141 */
2142
2143
2144 struct list_head dev_list;
2145 struct list_head napi_list;
2146 struct list_head unreg_list;
2147 struct list_head close_list;
2148 struct list_head ptype_all;
2149
2150 struct {
2151 struct list_head upper;
2152 struct list_head lower;
2153 } adj_list;
2154
2155 /* Read-mostly cache-line for fast-path access */
2156 xdp_features_t xdp_features;
2157 const struct xdp_metadata_ops *xdp_metadata_ops;
2158 const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
2159 unsigned short gflags;
2160
2161 unsigned short needed_tailroom;
2162
2163 netdev_features_t hw_features;
2164 netdev_features_t wanted_features;
2165 netdev_features_t vlan_features;
2166 netdev_features_t hw_enc_features;
2167 netdev_features_t mpls_features;
2168
2169 unsigned int min_mtu;
2170 unsigned int max_mtu;
2171 unsigned short type;
2172 unsigned char min_header_len;
2173 unsigned char name_assign_type;
2174
2175 int group;
2176
2177 struct net_device_stats stats; /* not used by modern drivers */
2178
2179 struct net_device_core_stats __percpu *core_stats;
2180
2181 /* Stats to monitor link on/off, flapping */
2182 atomic_t carrier_up_count;
2183 atomic_t carrier_down_count;
2184
2185 #ifdef CONFIG_WIRELESS_EXT
2186 const struct iw_handler_def *wireless_handlers;
2187 #endif
2188 const struct ethtool_ops *ethtool_ops;
2189 #ifdef CONFIG_NET_L3_MASTER_DEV
2190 const struct l3mdev_ops *l3mdev_ops;
2191 #endif
2192 #if IS_ENABLED(CONFIG_IPV6)
2193 const struct ndisc_ops *ndisc_ops;
2194 #endif
2195
2196 #ifdef CONFIG_XFRM_OFFLOAD
2197 const struct xfrmdev_ops *xfrmdev_ops;
2198 #endif
2199
2200 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2201 const struct tlsdev_ops *tlsdev_ops;
2202 #endif
2203
2204 unsigned int operstate;
2205 unsigned char link_mode;
2206
2207 unsigned char if_port;
2208 unsigned char dma;
2209
2210 /* Interface address info. */
2211 unsigned char perm_addr[MAX_ADDR_LEN];
2212 unsigned char addr_assign_type;
2213 unsigned char addr_len;
2214 unsigned char upper_level;
2215 unsigned char lower_level;
2216
2217 unsigned short neigh_priv_len;
2218 unsigned short dev_id;
2219 unsigned short dev_port;
2220 int irq;
2221 u32 priv_len;
2222
2223 spinlock_t addr_list_lock;
2224
2225 struct netdev_hw_addr_list uc;
2226 struct netdev_hw_addr_list mc;
2227 struct netdev_hw_addr_list dev_addrs;
2228
2229 #ifdef CONFIG_SYSFS
2230 struct kset *queues_kset;
2231 #endif
2232 #ifdef CONFIG_LOCKDEP
2233 struct list_head unlink_list;
2234 #endif
2235 unsigned int promiscuity;
2236 unsigned int allmulti;
2237 bool uc_promisc;
2238 #ifdef CONFIG_LOCKDEP
2239 unsigned char nested_level;
2240 #endif
2241
2242
2243 /* Protocol-specific pointers */
2244 struct in_device __rcu *ip_ptr;
2245 /** @fib_nh_head: nexthops associated with this netdev */
2246 struct hlist_head fib_nh_head;
2247
2248 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2249 struct vlan_info __rcu *vlan_info;
2250 #endif
2251 #if IS_ENABLED(CONFIG_NET_DSA)
2252 struct dsa_port *dsa_ptr;
2253 #endif
2254 #if IS_ENABLED(CONFIG_TIPC)
2255 struct tipc_bearer __rcu *tipc_ptr;
2256 #endif
2257 #if IS_ENABLED(CONFIG_ATALK)
2258 void *atalk_ptr;
2259 #endif
2260 #if IS_ENABLED(CONFIG_AX25)
2261 struct ax25_dev __rcu *ax25_ptr;
2262 #endif
2263 #if IS_ENABLED(CONFIG_CFG80211)
2264 struct wireless_dev *ieee80211_ptr;
2265 #endif
2266 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
2267 struct wpan_dev *ieee802154_ptr;
2268 #endif
2269 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
2270 struct mpls_dev __rcu *mpls_ptr;
2271 #endif
2272 #if IS_ENABLED(CONFIG_MCTP)
2273 struct mctp_dev __rcu *mctp_ptr;
2274 #endif
2275
2276 /*
2277 * Cache lines mostly used on receive path (including eth_type_trans())
2278 */
2279 /* Interface address info used in eth_type_trans() */
2280 const unsigned char *dev_addr;
2281
2282 unsigned int num_rx_queues;
2283 #define GRO_LEGACY_MAX_SIZE 65536u
2284 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2285 * and shinfo->gso_segs is a 16bit field.
2286 */
2287 #define GRO_MAX_SIZE (8 * 65535u)
2288 unsigned int xdp_zc_max_segs;
2289 struct netdev_queue __rcu *ingress_queue;
2290 #ifdef CONFIG_NETFILTER_INGRESS
2291 struct nf_hook_entries __rcu *nf_hooks_ingress;
2292 #endif
2293
2294 unsigned char broadcast[MAX_ADDR_LEN];
2295 #ifdef CONFIG_RFS_ACCEL
2296 struct cpu_rmap *rx_cpu_rmap;
2297 #endif
2298 struct hlist_node index_hlist;
2299
2300 /*
2301 * Cache lines mostly used on transmit path
2302 */
2303 unsigned int num_tx_queues;
2304 struct Qdisc __rcu *qdisc;
2305 unsigned int tx_queue_len;
2306 spinlock_t tx_global_lock;
2307
2308 struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2309
2310 #ifdef CONFIG_NET_SCHED
2311 DECLARE_HASHTABLE(qdisc_hash, 4);
2312 #endif
2313 /* These may be needed for future network-power-down code. */
2314 struct timer_list watchdog_timer;
2315 int watchdog_timeo;
2316
2317 u32 proto_down_reason;
2318
2319 struct list_head todo_list;
2320
2321 #ifdef CONFIG_PCPU_DEV_REFCNT
2322 int __percpu *pcpu_refcnt;
2323 #else
2324 refcount_t dev_refcnt;
2325 #endif
2326 struct ref_tracker_dir refcnt_tracker;
2327
2328 struct list_head link_watch_list;
2329
2330 u8 reg_state;
2331
2332 bool dismantle;
2333
2334 enum {
2335 RTNL_LINK_INITIALIZED,
2336 RTNL_LINK_INITIALIZING,
2337 } rtnl_link_state:16;
2338
2339 bool needs_free_netdev;
2340 void (*priv_destructor)(struct net_device *dev);
2341
2342 /* mid-layer private */
2343 void *ml_priv;
2344 enum netdev_ml_priv_type ml_priv_type;
2345
2346 enum netdev_stat_type pcpu_stat_type:8;
2347
2348 #if IS_ENABLED(CONFIG_GARP)
2349 struct garp_port __rcu *garp_port;
2350 #endif
2351 #if IS_ENABLED(CONFIG_MRP)
2352 struct mrp_port __rcu *mrp_port;
2353 #endif
2354 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
2355 struct dm_hw_stat_delta __rcu *dm_private;
2356 #endif
2357 struct device dev;
2358 const struct attribute_group *sysfs_groups[4];
2359 const struct attribute_group *sysfs_rx_queue_group;
2360
2361 const struct rtnl_link_ops *rtnl_link_ops;
2362
2363 const struct netdev_stat_ops *stat_ops;
2364
2365 const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
2366
2367 /* for setting kernel sock attribute on TCP connection setup */
2368 #define GSO_MAX_SEGS 65535u
2369 #define GSO_LEGACY_MAX_SIZE 65536u
2370 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2371 * and shinfo->gso_segs is a 16bit field.
2372 */
2373 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
2374
2375 #define TSO_LEGACY_MAX_SIZE 65536
2376 #define TSO_MAX_SIZE UINT_MAX
2377 unsigned int tso_max_size;
2378 #define TSO_MAX_SEGS U16_MAX
2379 u16 tso_max_segs;
2380
2381 #ifdef CONFIG_DCB
2382 const struct dcbnl_rtnl_ops *dcbnl_ops;
2383 #endif
2384 u8 prio_tc_map[TC_BITMASK + 1];
2385
2386 #if IS_ENABLED(CONFIG_FCOE)
2387 unsigned int fcoe_ddp_xid;
2388 #endif
2389 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2390 struct netprio_map __rcu *priomap;
2391 #endif
2392 struct phy_link_topology *link_topo;
2393 struct phy_device *phydev;
2394 struct sfp_bus *sfp_bus;
2395 struct lock_class_key *qdisc_tx_busylock;
2396 bool proto_down;
2397 bool threaded;
2398
2399 /* priv_flags_slow, ungrouped to save space */
2400 unsigned long see_all_hwtstamp_requests:1;
2401 unsigned long change_proto_down:1;
2402 unsigned long netns_local:1;
2403 unsigned long fcoe_mtu:1;
2404
2405 struct list_head net_notifier_list;
2406
2407 #if IS_ENABLED(CONFIG_MACSEC)
2408 /* MACsec management functions */
2409 const struct macsec_ops *macsec_ops;
2410 #endif
2411 const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
2412 struct udp_tunnel_nic *udp_tunnel_nic;
2413
2414 /** @cfg: net_device queue-related configuration */
2415 struct netdev_config *cfg;
2416 /**
2417 * @cfg_pending: same as @cfg but when device is being actively
2418 * reconfigured includes any changes to the configuration
2419 * requested by the user, but which may or may not be rejected.
2420 */
2421 struct netdev_config *cfg_pending;
2422 struct ethtool_netdev_state *ethtool;
2423
2424 /* protected by rtnl_lock */
2425 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
2426
2427 u8 dev_addr_shadow[MAX_ADDR_LEN];
2428 netdevice_tracker linkwatch_dev_tracker;
2429 netdevice_tracker watchdog_dev_tracker;
2430 netdevice_tracker dev_registered_tracker;
2431 struct rtnl_hw_stats64 *offload_xstats_l3;
2432
2433 struct devlink_port *devlink_port;
2434
2435 #if IS_ENABLED(CONFIG_DPLL)
2436 struct dpll_pin __rcu *dpll_pin;
2437 #endif
2438 #if IS_ENABLED(CONFIG_PAGE_POOL)
2439 /** @page_pools: page pools created for this netdevice */
2440 struct hlist_head page_pools;
2441 #endif
2442
2443 /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
2444 struct dim_irq_moder *irq_moder;
2445
2446 u64 max_pacing_offload_horizon;
2447 struct napi_config *napi_config;
2448 unsigned long gro_flush_timeout;
2449 u32 napi_defer_hard_irqs;
2450
2451 /**
2452 * @up: copy of @state's IFF_UP, but safe to read with just @lock.
2453 * May report false negatives while the device is being opened
2454 * or closed (@lock does not protect .ndo_open or .ndo_close).
2455 */
2456 bool up;
2457
2458 /**
2459 * @lock: netdev-scope lock, protects a small selection of fields.
2460 * Should always be taken using netdev_lock() / netdev_unlock() helpers.
2461 * Drivers are free to use it for other protection.
2462 *
2463 * Protects:
2464 * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
2465 * @net_shaper_hierarchy, @reg_state, @threaded
2466 *
2467 * Partially protects (writers must hold both @lock and rtnl_lock):
2468 * @up
2469 *
2470 * Also protects some fields in struct napi_struct.
2471 *
2472 * Ordering: take after rtnl_lock.
2473 */
2474 struct mutex lock;
2475
2476 #if IS_ENABLED(CONFIG_NET_SHAPER)
2477 /**
2478 * @net_shaper_hierarchy: data tracking the current shaper status
2479 * see include/net/net_shapers.h
2480 */
2481 struct net_shaper_hierarchy *net_shaper_hierarchy;
2482 #endif
2483
2484 struct hlist_head neighbours[NEIGH_NR_TABLES];
2485
2486 struct hwtstamp_provider __rcu *hwprov;
2487
2488 u8 priv[] ____cacheline_aligned
2489 __counted_by(priv_len);
2490 } ____cacheline_aligned;
2491 #define to_net_dev(d) container_of(d, struct net_device, dev)
2492
2493 /*
2494 * Driver should use this to assign devlink port instance to a netdevice
2495 * before it registers the netdevice. Therefore devlink_port is static
2496 * during the netdev lifetime after it is registered.
2497 */
2498 #define SET_NETDEV_DEVLINK_PORT(dev, port) \
2499 ({ \
2500 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
2501 ((dev)->devlink_port = (port)); \
2502 })
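/* Usage sketch (hypothetical driver; "priv" and "devlink_port" are
 * illustrative names): the port must be assigned while reg_state is
 * still NETREG_UNINITIALIZED, i.e. before register_netdev():
 *
 *	SET_NETDEV_DEVLINK_PORT(netdev, &priv->devlink_port);
 *	err = register_netdev(netdev);
 */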
2503
2504 static inline bool netif_elide_gro(const struct net_device *dev)
2505 {
2506 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2507 return true;
2508 return false;
2509 }
2510
2511 #define NETDEV_ALIGN 32
2512
2513 static inline
2514 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2515 {
2516 return dev->prio_tc_map[prio & TC_BITMASK];
2517 }
2518
2519 static inline
2520 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2521 {
2522 if (tc >= dev->num_tc)
2523 return -EINVAL;
2524
2525 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2526 return 0;
2527 }
2528
2529 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2530 void netdev_reset_tc(struct net_device *dev);
2531 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2532 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2533
2534 static inline
2535 int netdev_get_num_tc(struct net_device *dev)
2536 {
2537 return dev->num_tc;
2538 }
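/* Configuration sketch for the traffic-class helpers above (hypothetical
 * layout: 2 TCs over 8 TX queues; error handling elided):
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0 uses queues 0-3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1 uses queues 4-7)
 *	netdev_set_prio_tc_map(dev, 0, 0);	(prio 0 maps to TC0)
 *	netdev_set_prio_tc_map(dev, 5, 1);	(prio 5 maps to TC1)
 */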
2539
2540 static inline void net_prefetch(void *p)
2541 {
2542 prefetch(p);
2543 #if L1_CACHE_BYTES < 128
2544 prefetch((u8 *)p + L1_CACHE_BYTES);
2545 #endif
2546 }
2547
2548 static inline void net_prefetchw(void *p)
2549 {
2550 prefetchw(p);
2551 #if L1_CACHE_BYTES < 128
2552 prefetchw((u8 *)p + L1_CACHE_BYTES);
2553 #endif
2554 }
2555
2556 void netdev_unbind_sb_channel(struct net_device *dev,
2557 struct net_device *sb_dev);
2558 int netdev_bind_sb_channel_queue(struct net_device *dev,
2559 struct net_device *sb_dev,
2560 u8 tc, u16 count, u16 offset);
2561 int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2562 static inline int netdev_get_sb_channel(struct net_device *dev)
2563 {
2564 return max_t(int, -dev->num_tc, 0);
2565 }
2566
2567 static inline
2568 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2569 unsigned int index)
2570 {
2571 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
2572 return &dev->_tx[index];
2573 }
2574
2575 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2576 const struct sk_buff *skb)
2577 {
2578 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2579 }
2580
2581 static inline void netdev_for_each_tx_queue(struct net_device *dev,
2582 void (*f)(struct net_device *,
2583 struct netdev_queue *,
2584 void *),
2585 void *arg)
2586 {
2587 unsigned int i;
2588
2589 for (i = 0; i < dev->num_tx_queues; i++)
2590 f(dev, &dev->_tx[i], arg);
2591 }
2592
2593 #define netdev_lockdep_set_classes(dev) \
2594 { \
2595 static struct lock_class_key qdisc_tx_busylock_key; \
2596 static struct lock_class_key qdisc_xmit_lock_key; \
2597 static struct lock_class_key dev_addr_list_lock_key; \
2598 unsigned int i; \
2599 \
2600 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2601 lockdep_set_class(&(dev)->addr_list_lock, \
2602 &dev_addr_list_lock_key); \
2603 for (i = 0; i < (dev)->num_tx_queues; i++) \
2604 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2605 &qdisc_xmit_lock_key); \
2606 }
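/* Usage sketch: stacked devices (bonding, team, and similar) call this
 * from their setup path so that their nested xmit and address-list locks
 * get lockdep classes distinct from those of their lower devices:
 *
 *	static void hypothetical_setup(struct net_device *dev)
 *	{
 *		...
 *		netdev_lockdep_set_classes(dev);
 *	}
 */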
2607
2608 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2609 struct net_device *sb_dev);
2610 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2611 struct sk_buff *skb,
2612 struct net_device *sb_dev);
2613
2614 /* returns the headroom that the master device needs to take into account
2615 * when forwarding to this dev
2616 */
2617 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2618 {
2619 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2620 }
2621
2622 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2623 {
2624 if (dev->netdev_ops->ndo_set_rx_headroom)
2625 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2626 }
2627
2628 /* set the device rx headroom to the dev's default */
2629 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2630 {
2631 netdev_set_rx_headroom(dev, -1);
2632 }
2633
2634 static inline void *netdev_get_ml_priv(struct net_device *dev,
2635 enum netdev_ml_priv_type type)
2636 {
2637 if (dev->ml_priv_type != type)
2638 return NULL;
2639
2640 return dev->ml_priv;
2641 }
2642
2643 static inline void netdev_set_ml_priv(struct net_device *dev,
2644 void *ml_priv,
2645 enum netdev_ml_priv_type type)
2646 {
2647 WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2648 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2649 dev->ml_priv_type, type);
2650 WARN(!dev->ml_priv_type && dev->ml_priv,
2651 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2652
2653 dev->ml_priv = ml_priv;
2654 dev->ml_priv_type = type;
2655 }
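/* Usage sketch (hypothetical CAN driver): the type tag lets consumers
 * reject an ml_priv that belongs to a different layer instead of
 * dereferencing it blindly:
 *
 *	netdev_set_ml_priv(dev, priv, ML_PRIV_CAN);
 *	...
 *	struct can_ml_priv *ml = netdev_get_ml_priv(dev, ML_PRIV_CAN);
 *	if (!ml)
 *		return;		(ml_priv is not CAN data)
 */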
2656
2657 /*
2658 * Net namespace inlines
2659 */
2660 static inline
2661 struct net *dev_net(const struct net_device *dev)
2662 {
2663 return read_pnet(&dev->nd_net);
2664 }
2665
2666 static inline
2667 struct net *dev_net_rcu(const struct net_device *dev)
2668 {
2669 return read_pnet_rcu(&dev->nd_net);
2670 }
2671
2672 static inline
2673 void dev_net_set(struct net_device *dev, struct net *net)
2674 {
2675 write_pnet(&dev->nd_net, net);
2676 }
2677
2678 /**
2679 * netdev_priv - access network device private data
2680 * @dev: network device
2681 *
2682 * Get network device private data
2683 */
2684 static inline void *netdev_priv(const struct net_device *dev)
2685 {
2686 return (void *)dev->priv;
2687 }
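/* Usage sketch: private space is reserved at allocation time and
 * retrieved with netdev_priv(); "struct hypothetical_priv" stands in
 * for a real driver's private structure:
 *
 *	dev = alloc_etherdev(sizeof(struct hypothetical_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */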
2688
2689 /* Set the sysfs physical device reference for the network logical device;
2690 * if set prior to registration, a symlink will be created during initialization.
2691 */
2692 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2693
2694 /* Set the sysfs device type for the network logical device to allow
2695 * fine-grained identification of different network device types. For
2696 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2697 */
2698 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2699
2700 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
2701 enum netdev_queue_type type,
2702 struct napi_struct *napi);
2703
2704 static inline void netdev_lock(struct net_device *dev)
2705 {
2706 mutex_lock(&dev->lock);
2707 }
2708
2709 static inline void netdev_unlock(struct net_device *dev)
2710 {
2711 mutex_unlock(&dev->lock);
2712 }
2713
2714 static inline void netdev_assert_locked(struct net_device *dev)
2715 {
2716 lockdep_assert_held(&dev->lock);
2717 }
2718
2719 static inline void netdev_assert_locked_or_invisible(struct net_device *dev)
2720 {
2721 if (dev->reg_state == NETREG_REGISTERED ||
2722 dev->reg_state == NETREG_UNREGISTERING)
2723 netdev_assert_locked(dev);
2724 }
2725
2726 static inline void netif_napi_set_irq_locked(struct napi_struct *napi, int irq)
2727 {
2728 napi->irq = irq;
2729 }
2730
2731 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
2732 {
2733 netdev_lock(napi->dev);
2734 netif_napi_set_irq_locked(napi, irq);
2735 netdev_unlock(napi->dev);
2736 }
2737
2738 /* Default NAPI poll() weight
2739 * Device drivers are strongly advised not to use a bigger value.
2740 */
2741 #define NAPI_POLL_WEIGHT 64
2742
2743 void netif_napi_add_weight_locked(struct net_device *dev,
2744 struct napi_struct *napi,
2745 int (*poll)(struct napi_struct *, int),
2746 int weight);
2747
2748 static inline void
2749 netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
2750 int (*poll)(struct napi_struct *, int), int weight)
2751 {
2752 netdev_lock(dev);
2753 netif_napi_add_weight_locked(dev, napi, poll, weight);
2754 netdev_unlock(dev);
2755 }
2756
2757 /**
2758 * netif_napi_add() - initialize a NAPI context
2759 * @dev: network device
2760 * @napi: NAPI context
2761 * @poll: polling function
2762 *
2763 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2764 * *any* of the other NAPI-related functions.
2765 */
2766 static inline void
2767 netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2768 int (*poll)(struct napi_struct *, int))
2769 {
2770 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2771 }
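/* Minimal NAPI usage sketch (hypothetical driver): register the context
 * once at probe time and re-arm device interrupts only after
 * napi_complete_done() accepts the completion:
 *
 *	static int hypothetical_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = hypothetical_clean_rx(napi, budget);
 *
 *		if (done < budget && napi_complete_done(napi, done))
 *			hypothetical_enable_irq(napi->dev);
 *		return done;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, hypothetical_poll);
 */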
2772
2773 static inline void
2774 netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
2775 int (*poll)(struct napi_struct *, int))
2776 {
2777 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2778 }
2779
2780 static inline void
2781 netif_napi_add_tx_weight(struct net_device *dev,
2782 struct napi_struct *napi,
2783 int (*poll)(struct napi_struct *, int),
2784 int weight)
2785 {
2786 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2787 netif_napi_add_weight(dev, napi, poll, weight);
2788 }
2789
2790 static inline void
2791 netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
2792 int (*poll)(struct napi_struct *, int), int index)
2793 {
2794 napi->index = index;
2795 napi->config = &dev->napi_config[index];
2796 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2797 }
2798
2799 /**
2800 * netif_napi_add_config - initialize a NAPI context with persistent config
2801 * @dev: network device
2802 * @napi: NAPI context
2803 * @poll: polling function
2804 * @index: the NAPI index
2805 */
2806 static inline void
2807 netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
2808 int (*poll)(struct napi_struct *, int), int index)
2809 {
2810 netdev_lock(dev);
2811 netif_napi_add_config_locked(dev, napi, poll, index);
2812 netdev_unlock(dev);
2813 }
2814
2815 /**
2816 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2817 * @dev: network device
2818 * @napi: NAPI context
2819 * @poll: polling function
2820 *
2821 * This variant of netif_napi_add() should be used from drivers using NAPI
2822 * to exclusively poll a TX queue.
2823 * This avoids adding it into napi_hash[], thus polluting that hash table.
2824 */
2825 static inline void netif_napi_add_tx(struct net_device *dev,
2826 struct napi_struct *napi,
2827 int (*poll)(struct napi_struct *, int))
2828 {
2829 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2830 }
2831
2832 void __netif_napi_del_locked(struct napi_struct *napi);
2833
2834 /**
2835 * __netif_napi_del - remove a NAPI context
2836 * @napi: NAPI context
2837 *
2838 * Warning: caller must observe RCU grace period before freeing memory
2839 * containing @napi. Drivers might want to call this helper to combine
2840 * all the needed RCU grace periods into a single one.
2841 */
2842 static inline void __netif_napi_del(struct napi_struct *napi)
2843 {
2844 netdev_lock(napi->dev);
2845 __netif_napi_del_locked(napi);
2846 netdev_unlock(napi->dev);
2847 }
2848
2849 static inline void netif_napi_del_locked(struct napi_struct *napi)
2850 {
2851 __netif_napi_del_locked(napi);
2852 synchronize_net();
2853 }
2854
2855 /**
2856 * netif_napi_del - remove a NAPI context
2857 * @napi: NAPI context
2858 *
2859 * netif_napi_del() removes a NAPI context from the network device NAPI list
2860 */
2861 static inline void netif_napi_del(struct napi_struct *napi)
2862 {
2863 __netif_napi_del(napi);
2864 synchronize_net();
2865 }
2866
2867 struct packet_type {
2868 __be16 type; /* This is really htons(ether_type). */
2869 bool ignore_outgoing;
2870 struct net_device *dev; /* NULL is wildcarded here */
2871 netdevice_tracker dev_tracker;
2872 int (*func) (struct sk_buff *,
2873 struct net_device *,
2874 struct packet_type *,
2875 struct net_device *);
2876 void (*list_func) (struct list_head *,
2877 struct packet_type *,
2878 struct net_device *);
2879 bool (*id_match)(struct packet_type *ptype,
2880 struct sock *sk);
2881 struct net *af_packet_net;
2882 void *af_packet_priv;
2883 struct list_head list;
2884 };
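/* Registration sketch: a protocol tap fills in a packet_type and hands
 * it to dev_add_pack() (declared elsewhere in this header); ETH_P_ALL
 * and the handler are illustrative:
 *
 *	static struct packet_type hypothetical_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = hypothetical_rcv,
 *	};
 *
 *	dev_add_pack(&hypothetical_pt);
 */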
2885
2886 struct offload_callbacks {
2887 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2888 netdev_features_t features);
2889 struct sk_buff *(*gro_receive)(struct list_head *head,
2890 struct sk_buff *skb);
2891 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2892 };
2893
2894 struct packet_offload {
2895 __be16 type; /* This is really htons(ether_type). */
2896 u16 priority;
2897 struct offload_callbacks callbacks;
2898 struct list_head list;
2899 };
2900
2901 /* often modified stats are per-CPU, others are shared (netdev->stats) */
2902 struct pcpu_sw_netstats {
2903 u64_stats_t rx_packets;
2904 u64_stats_t rx_bytes;
2905 u64_stats_t tx_packets;
2906 u64_stats_t tx_bytes;
2907 struct u64_stats_sync syncp;
2908 } __aligned(4 * sizeof(u64));
2909
2910 struct pcpu_dstats {
2911 u64_stats_t rx_packets;
2912 u64_stats_t rx_bytes;
2913 u64_stats_t tx_packets;
2914 u64_stats_t tx_bytes;
2915 u64_stats_t rx_drops;
2916 u64_stats_t tx_drops;
2917 struct u64_stats_sync syncp;
2918 } __aligned(8 * sizeof(u64));
2919
2920 struct pcpu_lstats {
2921 u64_stats_t packets;
2922 u64_stats_t bytes;
2923 struct u64_stats_sync syncp;
2924 } __aligned(2 * sizeof(u64));
2925
2926 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2927
2928 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2929 {
2930 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2931
2932 u64_stats_update_begin(&tstats->syncp);
2933 u64_stats_add(&tstats->rx_bytes, len);
2934 u64_stats_inc(&tstats->rx_packets);
2935 u64_stats_update_end(&tstats->syncp);
2936 }
2937
2938 static inline void dev_sw_netstats_tx_add(struct net_device *dev,
2939 unsigned int packets,
2940 unsigned int len)
2941 {
2942 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2943
2944 u64_stats_update_begin(&tstats->syncp);
2945 u64_stats_add(&tstats->tx_bytes, len);
2946 u64_stats_add(&tstats->tx_packets, packets);
2947 u64_stats_update_end(&tstats->syncp);
2948 }
2949
2950 static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2951 {
2952 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2953
2954 u64_stats_update_begin(&lstats->syncp);
2955 u64_stats_add(&lstats->bytes, len);
2956 u64_stats_inc(&lstats->packets);
2957 u64_stats_update_end(&lstats->syncp);
2958 }
2959
2960 static inline void dev_dstats_rx_add(struct net_device *dev,
2961 unsigned int len)
2962 {
2963 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
2964
2965 u64_stats_update_begin(&dstats->syncp);
2966 u64_stats_inc(&dstats->rx_packets);
2967 u64_stats_add(&dstats->rx_bytes, len);
2968 u64_stats_update_end(&dstats->syncp);
2969 }
2970
2971 static inline void dev_dstats_rx_dropped(struct net_device *dev)
2972 {
2973 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
2974
2975 u64_stats_update_begin(&dstats->syncp);
2976 u64_stats_inc(&dstats->rx_drops);
2977 u64_stats_update_end(&dstats->syncp);
2978 }
2979
2980 static inline void dev_dstats_tx_add(struct net_device *dev,
2981 unsigned int len)
2982 {
2983 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
2984
2985 u64_stats_update_begin(&dstats->syncp);
2986 u64_stats_inc(&dstats->tx_packets);
2987 u64_stats_add(&dstats->tx_bytes, len);
2988 u64_stats_update_end(&dstats->syncp);
2989 }
2990
2991 static inline void dev_dstats_tx_dropped(struct net_device *dev)
2992 {
2993 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
2994
2995 u64_stats_update_begin(&dstats->syncp);
2996 u64_stats_inc(&dstats->tx_drops);
2997 u64_stats_update_end(&dstats->syncp);
2998 }
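/* RX path sketch for a hypothetical virtual driver using DSTATS
 * accounting (pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS); the length is
 * recorded before the skb is handed off:
 *
 *	if (unlikely(hypothetical_should_drop(skb))) {
 *		dev_dstats_rx_dropped(dev);
 *		kfree_skb(skb);
 *	} else {
 *		dev_dstats_rx_add(dev, skb->len);
 *		netif_rx(skb);
 *	}
 */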
2999
3000 #define __netdev_alloc_pcpu_stats(type, gfp) \
3001 ({ \
3002 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
3003 if (pcpu_stats) { \
3004 int __cpu; \
3005 for_each_possible_cpu(__cpu) { \
3006 typeof(type) *stat; \
3007 stat = per_cpu_ptr(pcpu_stats, __cpu); \
3008 u64_stats_init(&stat->syncp); \
3009 } \
3010 } \
3011 pcpu_stats; \
3012 })
3013
3014 #define netdev_alloc_pcpu_stats(type) \
3015 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
3016
3017 #define devm_netdev_alloc_pcpu_stats(dev, type) \
3018 ({ \
3019 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
3020 if (pcpu_stats) { \
3021 int __cpu; \
3022 for_each_possible_cpu(__cpu) { \
3023 typeof(type) *stat; \
3024 stat = per_cpu_ptr(pcpu_stats, __cpu); \
3025 u64_stats_init(&stat->syncp); \
3026 } \
3027 } \
3028 pcpu_stats; \
3029 })
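/* Allocation sketch: a driver either sets pcpu_stat_type before
 * registration and lets the core allocate matching storage, or
 * allocates it explicitly; both forms assume TSTATS-style accounting:
 *
 *	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *
 * or:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 */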
3030
3031 enum netdev_lag_tx_type {
3032 NETDEV_LAG_TX_TYPE_UNKNOWN,
3033 NETDEV_LAG_TX_TYPE_RANDOM,
3034 NETDEV_LAG_TX_TYPE_BROADCAST,
3035 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
3036 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
3037 NETDEV_LAG_TX_TYPE_HASH,
3038 };
3039
3040 enum netdev_lag_hash {
3041 NETDEV_LAG_HASH_NONE,
3042 NETDEV_LAG_HASH_L2,
3043 NETDEV_LAG_HASH_L34,
3044 NETDEV_LAG_HASH_L23,
3045 NETDEV_LAG_HASH_E23,
3046 NETDEV_LAG_HASH_E34,
3047 NETDEV_LAG_HASH_VLAN_SRCMAC,
3048 NETDEV_LAG_HASH_UNKNOWN,
3049 };
3050
3051 struct netdev_lag_upper_info {
3052 enum netdev_lag_tx_type tx_type;
3053 enum netdev_lag_hash hash_type;
3054 };
3055
3056 struct netdev_lag_lower_state_info {
3057 u8 link_up : 1,
3058 tx_enabled : 1;
3059 };
3060
3061 #include <linux/notifier.h>
3062
3063 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
3064 * and the rtnetlink notification exclusion list in rtnetlink_event() when
3065 * adding new types.
3066 */
3067 enum netdev_cmd {
3068 NETDEV_UP = 1, /* For now you can't veto a device up/down */
3069 NETDEV_DOWN,
3070 NETDEV_REBOOT, /* Tell a protocol stack a network interface
3071 detected a hardware crash and restarted
3072 - we can use this eg to kick tcp sessions
3073 once done */
3074 NETDEV_CHANGE, /* Notify device state change */
3075 NETDEV_REGISTER,
3076 NETDEV_UNREGISTER,
3077 NETDEV_CHANGEMTU, /* notify after mtu change happened */
3078 NETDEV_CHANGEADDR, /* notify after the address change */
3079 NETDEV_PRE_CHANGEADDR, /* notify before the address change */
3080 NETDEV_GOING_DOWN,
3081 NETDEV_CHANGENAME,
3082 NETDEV_FEAT_CHANGE,
3083 NETDEV_BONDING_FAILOVER,
3084 NETDEV_PRE_UP,
3085 NETDEV_PRE_TYPE_CHANGE,
3086 NETDEV_POST_TYPE_CHANGE,
3087 NETDEV_POST_INIT,
3088 NETDEV_PRE_UNINIT,
3089 NETDEV_RELEASE,
3090 NETDEV_NOTIFY_PEERS,
3091 NETDEV_JOIN,
3092 NETDEV_CHANGEUPPER,
3093 NETDEV_RESEND_IGMP,
3094 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
3095 NETDEV_CHANGEINFODATA,
3096 NETDEV_BONDING_INFO,
3097 NETDEV_PRECHANGEUPPER,
3098 NETDEV_CHANGELOWERSTATE,
3099 NETDEV_UDP_TUNNEL_PUSH_INFO,
3100 NETDEV_UDP_TUNNEL_DROP_INFO,
3101 NETDEV_CHANGE_TX_QUEUE_LEN,
3102 NETDEV_CVLAN_FILTER_PUSH_INFO,
3103 NETDEV_CVLAN_FILTER_DROP_INFO,
3104 NETDEV_SVLAN_FILTER_PUSH_INFO,
3105 NETDEV_SVLAN_FILTER_DROP_INFO,
3106 NETDEV_OFFLOAD_XSTATS_ENABLE,
3107 NETDEV_OFFLOAD_XSTATS_DISABLE,
3108 NETDEV_OFFLOAD_XSTATS_REPORT_USED,
3109 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
3110 NETDEV_XDP_FEAT_CHANGE,
3111 };
3112 const char *netdev_cmd_to_name(enum netdev_cmd cmd);
3113
3114 int register_netdevice_notifier(struct notifier_block *nb);
3115 int unregister_netdevice_notifier(struct notifier_block *nb);
3116 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
3117 int unregister_netdevice_notifier_net(struct net *net,
3118 struct notifier_block *nb);
3119 int register_netdevice_notifier_dev_net(struct net_device *dev,
3120 struct notifier_block *nb,
3121 struct netdev_net_notifier *nn);
3122 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
3123 struct notifier_block *nb,
3124 struct netdev_net_notifier *nn);
3125
3126 struct netdev_notifier_info {
3127 struct net_device *dev;
3128 struct netlink_ext_ack *extack;
3129 };
3130
3131 struct netdev_notifier_info_ext {
3132 struct netdev_notifier_info info; /* must be first */
3133 union {
3134 u32 mtu;
3135 } ext;
3136 };
3137
3138 struct netdev_notifier_change_info {
3139 struct netdev_notifier_info info; /* must be first */
3140 unsigned int flags_changed;
3141 };
3142
3143 struct netdev_notifier_changeupper_info {
3144 struct netdev_notifier_info info; /* must be first */
3145 struct net_device *upper_dev; /* new upper dev */
3146 bool master; /* is upper dev master */
3147 bool linking; /* is the notification for link or unlink */
3148 void *upper_info; /* upper dev info */
3149 };
3150
3151 struct netdev_notifier_changelowerstate_info {
3152 struct netdev_notifier_info info; /* must be first */
3153 void *lower_state_info; /* is lower dev state */
3154 };
3155
3156 struct netdev_notifier_pre_changeaddr_info {
3157 struct netdev_notifier_info info; /* must be first */
3158 const unsigned char *dev_addr;
3159 };
3160
3161 enum netdev_offload_xstats_type {
3162 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
3163 };
3164
3165 struct netdev_notifier_offload_xstats_info {
3166 struct netdev_notifier_info info; /* must be first */
3167 enum netdev_offload_xstats_type type;
3168
3169 union {
3170 /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
3171 struct netdev_notifier_offload_xstats_rd *report_delta;
3172 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
3173 struct netdev_notifier_offload_xstats_ru *report_used;
3174 };
3175 };
3176
3177 int netdev_offload_xstats_enable(struct net_device *dev,
3178 enum netdev_offload_xstats_type type,
3179 struct netlink_ext_ack *extack);
3180 int netdev_offload_xstats_disable(struct net_device *dev,
3181 enum netdev_offload_xstats_type type);
3182 bool netdev_offload_xstats_enabled(const struct net_device *dev,
3183 enum netdev_offload_xstats_type type);
3184 int netdev_offload_xstats_get(struct net_device *dev,
3185 enum netdev_offload_xstats_type type,
3186 struct rtnl_hw_stats64 *stats, bool *used,
3187 struct netlink_ext_ack *extack);
3188 void
3189 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
3190 const struct rtnl_hw_stats64 *stats);
3191 void
3192 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
3193 void netdev_offload_xstats_push_delta(struct net_device *dev,
3194 enum netdev_offload_xstats_type type,
3195 const struct rtnl_hw_stats64 *stats);
3196
3197 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
3198 struct net_device *dev)
3199 {
3200 info->dev = dev;
3201 info->extack = NULL;
3202 }
3203
3204 static inline struct net_device *
3205 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
3206 {
3207 return info->dev;
3208 }
3209
3210 static inline struct netlink_ext_ack *
3211 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
3212 {
3213 return info->extack;
3214 }
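/*
 * Example (hedged sketch): a module can watch device lifecycle events by
 * registering a notifier_block; all "mymod_*" names are hypothetical:
 *
 *	static int mymod_netdev_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_GOING_DOWN:
 *			...react to dev changing state...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block mymod_nb = {
 *		.notifier_call = mymod_netdev_event,
 *	};
 *
 * Pair register_netdevice_notifier(&mymod_nb) at module init with
 * unregister_netdevice_notifier(&mymod_nb) at module exit.
 */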
3215
3216 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
3217 int call_netdevice_notifiers_info(unsigned long val,
3218 struct netdev_notifier_info *info);
3219
3220 #define for_each_netdev(net, d) \
3221 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
3222 #define for_each_netdev_reverse(net, d) \
3223 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
3224 #define for_each_netdev_rcu(net, d) \
3225 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
3226 #define for_each_netdev_safe(net, d, n) \
3227 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
3228 #define for_each_netdev_continue(net, d) \
3229 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
3230 #define for_each_netdev_continue_reverse(net, d) \
3231 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
3232 dev_list)
3233 #define for_each_netdev_continue_rcu(net, d) \
3234 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
3235 #define for_each_netdev_in_bond_rcu(bond, slave) \
3236 for_each_netdev_rcu(&init_net, slave) \
3237 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
3238 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
3239
3240 #define for_each_netdev_dump(net, d, ifindex) \
3241 for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \
3242 ULONG_MAX, XA_PRESENT)); ifindex++)
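/*
 * Example (illustrative sketch): iterating over all devices in a namespace.
 * for_each_netdev() must run under RTNL; the _rcu variant may be used under
 * rcu_read_lock() instead:
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_debug("%s: ifindex %d\n", dev->name, dev->ifindex);
 *	rcu_read_unlock();
 */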
3243
3244 static inline struct net_device *next_net_device(struct net_device *dev)
3245 {
3246 struct list_head *lh;
3247 struct net *net;
3248
3249 net = dev_net(dev);
3250 lh = dev->dev_list.next;
3251 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3252 }
3253
3254 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
3255 {
3256 struct list_head *lh;
3257 struct net *net;
3258
3259 net = dev_net(dev);
3260 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
3261 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3262 }
3263
3264 static inline struct net_device *first_net_device(struct net *net)
3265 {
3266 return list_empty(&net->dev_base_head) ? NULL :
3267 net_device_entry(net->dev_base_head.next);
3268 }
3269
3270 static inline struct net_device *first_net_device_rcu(struct net *net)
3271 {
3272 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
3273
3274 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
3275 }
3276
3277 int netdev_boot_setup_check(struct net_device *dev);
3278 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
3279 const char *hwaddr);
3280 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
3281 const char *hwaddr);
3282 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
3283 void dev_add_pack(struct packet_type *pt);
3284 void dev_remove_pack(struct packet_type *pt);
3285 void __dev_remove_pack(struct packet_type *pt);
3286 void dev_add_offload(struct packet_offload *po);
3287 void dev_remove_offload(struct packet_offload *po);
3288
3289 int dev_get_iflink(const struct net_device *dev);
3290 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
3291 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3292 struct net_device_path_stack *stack);
3293 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
3294 unsigned short mask);
3295 struct net_device *dev_get_by_name(struct net *net, const char *name);
3296 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
3297 struct net_device *__dev_get_by_name(struct net *net, const char *name);
3298 bool netdev_name_in_use(struct net *net, const char *name);
3299 int dev_alloc_name(struct net_device *dev, const char *name);
3300 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
3301 void dev_close(struct net_device *dev);
3302 void dev_close_many(struct list_head *head, bool unlink);
3303 void dev_disable_lro(struct net_device *dev);
3304 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
3305 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3306 struct net_device *sb_dev);
3307
3308 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
3309 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
3310
3311 static inline int dev_queue_xmit(struct sk_buff *skb)
3312 {
3313 return __dev_queue_xmit(skb, NULL);
3314 }
3315
3316 static inline int dev_queue_xmit_accel(struct sk_buff *skb,
3317 struct net_device *sb_dev)
3318 {
3319 return __dev_queue_xmit(skb, sb_dev);
3320 }
3321
3322 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3323 {
3324 int ret;
3325
3326 ret = __dev_direct_xmit(skb, queue_id);
3327 if (!dev_xmit_complete(ret))
3328 kfree_skb(skb);
3329 return ret;
3330 }
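/*
 * Example (hedged sketch): a virtual device handing a fully built skb to
 * the egress path. dev_queue_xmit() consumes the skb on both success and
 * failure, so the caller must not touch it afterwards:
 *
 *	skb->dev = target_dev;
 *	if (net_xmit_eval(dev_queue_xmit(skb)))
 *		...the packet was dropped; the skb is already freed...
 */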
3331
3332 int register_netdevice(struct net_device *dev);
3333 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
3334 void unregister_netdevice_many(struct list_head *head);
3335 static inline void unregister_netdevice(struct net_device *dev)
3336 {
3337 unregister_netdevice_queue(dev, NULL);
3338 }
3339
3340 int netdev_refcnt_read(const struct net_device *dev);
3341 void free_netdev(struct net_device *dev);
3342
3343 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
3344 struct sk_buff *skb,
3345 bool all_slaves);
3346 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
3347 struct sock *sk);
3348 struct net_device *dev_get_by_index(struct net *net, int ifindex);
3349 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
3350 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
3351 netdevice_tracker *tracker, gfp_t gfp);
3352 struct net_device *netdev_get_by_name(struct net *net, const char *name,
3353 netdevice_tracker *tracker, gfp_t gfp);
3354 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
3355 void netdev_copy_name(struct net_device *dev, char *name);
3356
3357 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3358 unsigned short type,
3359 const void *daddr, const void *saddr,
3360 unsigned int len)
3361 {
3362 if (!dev->header_ops || !dev->header_ops->create)
3363 return 0;
3364
3365 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3366 }
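/*
 * Example (illustrative sketch): raw-socket style code pushes a link-layer
 * header before transmission; on Ethernet this resolves to eth_header()
 * through dev->header_ops:
 *
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	...fill in payload...
 *	err = dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len);
 *	if (err < 0)
 *		goto drop;
 */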
3367
3368 static inline int dev_parse_header(const struct sk_buff *skb,
3369 unsigned char *haddr)
3370 {
3371 const struct net_device *dev = skb->dev;
3372
3373 if (!dev->header_ops || !dev->header_ops->parse)
3374 return 0;
3375 return dev->header_ops->parse(skb, haddr);
3376 }
3377
3378 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3379 {
3380 const struct net_device *dev = skb->dev;
3381
3382 if (!dev->header_ops || !dev->header_ops->parse_protocol)
3383 return 0;
3384 return dev->header_ops->parse_protocol(skb);
3385 }
3386
3387 /* ll_header must have at least hard_header_len allocated */
3388 static inline bool dev_validate_header(const struct net_device *dev,
3389 char *ll_header, int len)
3390 {
3391 if (likely(len >= dev->hard_header_len))
3392 return true;
3393 if (len < dev->min_header_len)
3394 return false;
3395
3396 if (capable(CAP_SYS_RAWIO)) {
3397 memset(ll_header + len, 0, dev->hard_header_len - len);
3398 return true;
3399 }
3400
3401 if (dev->header_ops && dev->header_ops->validate)
3402 return dev->header_ops->validate(ll_header, len);
3403
3404 return false;
3405 }
3406
3407 static inline bool dev_has_header(const struct net_device *dev)
3408 {
3409 return dev->header_ops && dev->header_ops->create;
3410 }
3411
3412 /*
3413 * Incoming packets are placed on per-CPU queues
3414 */
3415 struct softnet_data {
3416 struct list_head poll_list;
3417 struct sk_buff_head process_queue;
3418 local_lock_t process_queue_bh_lock;
3419
3420 /* stats */
3421 unsigned int processed;
3422 unsigned int time_squeeze;
3423 #ifdef CONFIG_RPS
3424 struct softnet_data *rps_ipi_list;
3425 #endif
3426
3427 unsigned int received_rps;
3428 bool in_net_rx_action;
3429 bool in_napi_threaded_poll;
3430
3431 #ifdef CONFIG_NET_FLOW_LIMIT
3432 struct sd_flow_limit __rcu *flow_limit;
3433 #endif
3434 struct Qdisc *output_queue;
3435 struct Qdisc **output_queue_tailp;
3436 struct sk_buff *completion_queue;
3437 #ifdef CONFIG_XFRM_OFFLOAD
3438 struct sk_buff_head xfrm_backlog;
3439 #endif
3440 /* written and read only by owning cpu: */
3441 struct netdev_xmit xmit;
3442 #ifdef CONFIG_RPS
3443 /* input_queue_head should be written by cpu owning this struct,
3444 * and only read by other cpus. Worth using a cache line.
3445 */
3446 unsigned int input_queue_head ____cacheline_aligned_in_smp;
3447
3448 /* Elements below can be accessed between CPUs for RPS/RFS */
3449 call_single_data_t csd ____cacheline_aligned_in_smp;
3450 struct softnet_data *rps_ipi_next;
3451 unsigned int cpu;
3452 unsigned int input_queue_tail;
3453 #endif
3454 struct sk_buff_head input_pkt_queue;
3455 struct napi_struct backlog;
3456
3457 atomic_t dropped ____cacheline_aligned_in_smp;
3458
3459 /* Another possibly contended cache line */
3460 spinlock_t defer_lock ____cacheline_aligned_in_smp;
3461 int defer_count;
3462 int defer_ipi_scheduled;
3463 struct sk_buff *defer_list;
3464 call_single_data_t defer_csd;
3465 };
3466
3467 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3468 DECLARE_PER_CPU(struct page_pool *, system_page_pool);
3469
3470 #ifndef CONFIG_PREEMPT_RT
3471 static inline int dev_recursion_level(void)
3472 {
3473 return this_cpu_read(softnet_data.xmit.recursion);
3474 }
3475 #else
3476 static inline int dev_recursion_level(void)
3477 {
3478 return current->net_xmit.recursion;
3479 }
3480
3481 #endif
3482
3483 void __netif_schedule(struct Qdisc *q);
3484 void netif_schedule_queue(struct netdev_queue *txq);
3485
3486 static inline void netif_tx_schedule_all(struct net_device *dev)
3487 {
3488 unsigned int i;
3489
3490 for (i = 0; i < dev->num_tx_queues; i++)
3491 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3492 }
3493
3494 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3495 {
3496 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3497 }
3498
3499 /**
3500 * netif_start_queue - allow transmit
3501 * @dev: network device
3502 *
3503 * Allow upper layers to call the device hard_start_xmit routine.
3504 */
3505 static inline void netif_start_queue(struct net_device *dev)
3506 {
3507 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3508 }
3509
3510 static inline void netif_tx_start_all_queues(struct net_device *dev)
3511 {
3512 unsigned int i;
3513
3514 for (i = 0; i < dev->num_tx_queues; i++) {
3515 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3516 netif_tx_start_queue(txq);
3517 }
3518 }
3519
3520 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3521
3522 /**
3523 * netif_wake_queue - restart transmit
3524 * @dev: network device
3525 *
3526 * Allow upper layers to call the device hard_start_xmit routine.
3527 * Used for flow control when transmit resources are available.
3528 */
3529 static inline void netif_wake_queue(struct net_device *dev)
3530 {
3531 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3532 }
3533
3534 static inline void netif_tx_wake_all_queues(struct net_device *dev)
3535 {
3536 unsigned int i;
3537
3538 for (i = 0; i < dev->num_tx_queues; i++) {
3539 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3540 netif_tx_wake_queue(txq);
3541 }
3542 }
3543
3544 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3545 {
3546 /* Paired with READ_ONCE() from dev_watchdog() */
3547 WRITE_ONCE(dev_queue->trans_start, jiffies);
3548
3549 /* This barrier is paired with smp_mb() from dev_watchdog() */
3550 smp_mb__before_atomic();
3551
3552 /* Must be an atomic op see netif_txq_try_stop() */
3553 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3554 }
3555
3556 /**
3557  * netif_stop_queue - stop the transmit queue
3558 * @dev: network device
3559 *
3560 * Stop upper layers calling the device hard_start_xmit routine.
3561 * Used for flow control when transmit resources are unavailable.
3562 */
3563 static inline void netif_stop_queue(struct net_device *dev)
3564 {
3565 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3566 }
3567
3568 void netif_tx_stop_all_queues(struct net_device *dev);
3569
3570 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3571 {
3572 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3573 }
3574
3575 /**
3576 * netif_queue_stopped - test if transmit queue is flowblocked
3577 * @dev: network device
3578 *
3579 * Test if transmit queue on device is currently unable to send.
3580 */
3581 static inline bool netif_queue_stopped(const struct net_device *dev)
3582 {
3583 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3584 }
3585
3586 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3587 {
3588 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3589 }
3590
3591 static inline bool
3592 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3593 {
3594 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3595 }
3596
3597 static inline bool
3598 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3599 {
3600 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3601 }
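/*
 * Example (hedged sketch): the classic single-queue flow-control pattern.
 * ndo_start_xmit() stops the queue when the TX ring fills up, and the TX
 * completion handler wakes it once descriptors are reclaimed; the
 * "mydrv_*" ring helpers are hypothetical:
 *
 *	static netdev_tx_t mydrv_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		...post skb to the hardware ring...
 *		if (mydrv_ring_full(dev))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * and in the TX completion path, after reclaiming descriptors:
 *
 *	if (netif_queue_stopped(dev) && mydrv_ring_has_room(dev))
 *		netif_wake_queue(dev);
 */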
3602
3603 /**
3604 * netdev_queue_set_dql_min_limit - set dql minimum limit
3605 * @dev_queue: pointer to transmit queue
3606 * @min_limit: dql minimum limit
3607 *
3608 * Forces xmit_more() to return true until the minimum threshold
3609 * defined by @min_limit is reached (or until the tx queue is
3610  * empty). Warning: to be used with care; misuse will impact
3611  * latency.
3612 */
3613 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
3614 unsigned int min_limit)
3615 {
3616 #ifdef CONFIG_BQL
3617 dev_queue->dql.min_limit = min_limit;
3618 #endif
3619 }
3620
3621 static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
3622 {
3623 #ifdef CONFIG_BQL
3624 /* Non-BQL migrated drivers will return 0, too. */
3625 return dql_avail(&txq->dql);
3626 #else
3627 return 0;
3628 #endif
3629 }
3630
3631 /**
3632 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3633 * @dev_queue: pointer to transmit queue
3634 *
3635 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
3636 * to give appropriate hint to the CPU.
3637 */
3638 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3639 {
3640 #ifdef CONFIG_BQL
3641 prefetchw(&dev_queue->dql.num_queued);
3642 #endif
3643 }
3644
3645 /**
3646 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3647 * @dev_queue: pointer to transmit queue
3648 *
3649 * BQL enabled drivers might use this helper in their TX completion path,
3650 * to give appropriate hint to the CPU.
3651 */
3652 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3653 {
3654 #ifdef CONFIG_BQL
3655 prefetchw(&dev_queue->dql.limit);
3656 #endif
3657 }
3658
3659 /**
3660 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3661 * @dev_queue: network device queue
3662 * @bytes: number of bytes queued to the device queue
3663 *
3664 * Report the number of bytes queued for sending/completion to the network
3665 * device hardware queue. @bytes should be a good approximation and should
3666 * exactly match netdev_completed_queue() @bytes.
3667 * This is typically called once per packet, from ndo_start_xmit().
3668 */
3669 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3670 unsigned int bytes)
3671 {
3672 #ifdef CONFIG_BQL
3673 dql_queued(&dev_queue->dql, bytes);
3674
3675 if (likely(dql_avail(&dev_queue->dql) >= 0))
3676 return;
3677
3678 /* Paired with READ_ONCE() from dev_watchdog() */
3679 WRITE_ONCE(dev_queue->trans_start, jiffies);
3680
3681 /* This barrier is paired with smp_mb() from dev_watchdog() */
3682 smp_mb__before_atomic();
3683
3684 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3685
3686 /*
3687 * The XOFF flag must be set before checking the dql_avail below,
3688 * because in netdev_tx_completed_queue we update the dql_completed
3689 * before checking the XOFF flag.
3690 */
3691 smp_mb__after_atomic();
3692
3693 /* check again in case another CPU has just made room avail */
3694 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3695 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3696 #endif
3697 }
3698
3699 /* Variant of netdev_tx_sent_queue() for drivers that are aware
3700 * that they should not test BQL status themselves.
3701 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3702 * skb of a batch.
3703 * Returns true if the doorbell must be used to kick the NIC.
3704 */
3705 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3706 unsigned int bytes,
3707 bool xmit_more)
3708 {
3709 if (xmit_more) {
3710 #ifdef CONFIG_BQL
3711 dql_queued(&dev_queue->dql, bytes);
3712 #endif
3713 return netif_tx_queue_stopped(dev_queue);
3714 }
3715 netdev_tx_sent_queue(dev_queue, bytes);
3716 return true;
3717 }
3718
3719 /**
3720 * netdev_sent_queue - report the number of bytes queued to hardware
3721 * @dev: network device
3722 * @bytes: number of bytes queued to the hardware device queue
3723 *
3724 * Report the number of bytes queued for sending/completion to the network
3725 * device hardware queue#0. @bytes should be a good approximation and should
3726 * exactly match netdev_completed_queue() @bytes.
3727 * This is typically called once per packet, from ndo_start_xmit().
3728 */
3729 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3730 {
3731 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3732 }
3733
3734 static inline bool __netdev_sent_queue(struct net_device *dev,
3735 unsigned int bytes,
3736 bool xmit_more)
3737 {
3738 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3739 xmit_more);
3740 }
3741
3742 /**
3743 * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
3744 * @dev_queue: network device queue
3745 * @pkts: number of packets (currently ignored)
3746 * @bytes: number of bytes dequeued from the device queue
3747 *
3748 * Must be called at most once per TX completion round (and not per
3749 * individual packet), so that BQL can adjust its limits appropriately.
3750 */
3751 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3752 unsigned int pkts, unsigned int bytes)
3753 {
3754 #ifdef CONFIG_BQL
3755 if (unlikely(!bytes))
3756 return;
3757
3758 dql_completed(&dev_queue->dql, bytes);
3759
3760 /*
3761 * Without the memory barrier there is a small possibility that
3762 * netdev_tx_sent_queue will miss the update and cause the queue to
3763 * be stopped forever
3764 */
3765 smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
3766
3767 if (unlikely(dql_avail(&dev_queue->dql) < 0))
3768 return;
3769
3770 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3771 netif_schedule_queue(dev_queue);
3772 #endif
3773 }
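/*
 * Example (illustrative sketch): how the BQL hooks pair up in a driver.
 * In ndo_start_xmit(), once per packet:
 *
 *	netdev_tx_sent_queue(netdev_get_tx_queue(dev, qid), skb->len);
 *
 * and in the TX completion handler, once per reclaim round:
 *
 *	netdev_tx_completed_queue(netdev_get_tx_queue(dev, qid),
 *				  pkts_done, bytes_done);
 *
 * On a ring reset (e.g. channel reconfiguration) the driver must also call
 * netdev_tx_reset_queue() so the DQL state does not go stale.
 */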
3774
3775 /**
3776 * netdev_completed_queue - report bytes and packets completed by device
3777 * @dev: network device
3778 * @pkts: actual number of packets sent over the medium
3779 * @bytes: actual number of bytes sent over the medium
3780 *
3781 * Report the number of bytes and packets transmitted by the network device
3782  * hardware queue over the physical medium; @bytes must exactly match the
3783  * @bytes amount passed to netdev_sent_queue().
3784 */
3785 static inline void netdev_completed_queue(struct net_device *dev,
3786 unsigned int pkts, unsigned int bytes)
3787 {
3788 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3789 }
3790
3791 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3792 {
3793 #ifdef CONFIG_BQL
3794 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3795 dql_reset(&q->dql);
3796 #endif
3797 }
3798
3799 /**
3800 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
3801 * @dev: network device
3802 * @qid: stack index of the queue to reset
3803 */
3804 static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
3805 u32 qid)
3806 {
3807 netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
3808 }
3809
3810 /**
3811 * netdev_reset_queue - reset the packets and bytes count of a network device
3812 * @dev_queue: network device
3813 *
3814 * Reset the bytes and packet count of a network device and clear the
3815 * software flow control OFF bit for this network device
3816 */
3817 static inline void netdev_reset_queue(struct net_device *dev_queue)
3818 {
3819 netdev_tx_reset_subqueue(dev_queue, 0);
3820 }
3821
3822 /**
3823 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3824 * @dev: network device
3825 * @queue_index: given tx queue index
3826 *
3827 * Returns 0 if given tx queue index >= number of device tx queues,
3828 * otherwise returns the originally passed tx queue index.
3829 */
3830 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3831 {
3832 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3833 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3834 dev->name, queue_index,
3835 dev->real_num_tx_queues);
3836 return 0;
3837 }
3838
3839 return queue_index;
3840 }
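/*
 * Example (hedged sketch): an ndo_select_queue() implementation can clamp
 * an externally derived index with netdev_cap_txqueue(); the hash helper
 * is hypothetical:
 *
 *	static u16 mydrv_select_queue(struct net_device *dev,
 *				      struct sk_buff *skb,
 *				      struct net_device *sb_dev)
 *	{
 *		u16 qid = mydrv_hash_to_queue(skb);
 *
 *		return netdev_cap_txqueue(dev, qid);
 *	}
 */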
3841
3842 /**
3843 * netif_running - test if up
3844 * @dev: network device
3845 *
3846 * Test if the device has been brought up.
3847 */
3848 static inline bool netif_running(const struct net_device *dev)
3849 {
3850 return test_bit(__LINK_STATE_START, &dev->state);
3851 }
3852
3853 /*
3854 * Routines to manage the subqueues on a device. We only need start,
3855 * stop, and a check if it's stopped. All other device management is
3856 * done at the overall netdevice level.
3857  * There is also a test for whether the device is multiqueue.
3858 */
3859
3860 /**
3861 * netif_start_subqueue - allow sending packets on subqueue
3862 * @dev: network device
3863 * @queue_index: sub queue index
3864 *
3865 * Start individual transmit queue of a device with multiple transmit queues.
3866 */
3867 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3868 {
3869 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3870
3871 netif_tx_start_queue(txq);
3872 }
3873
3874 /**
3875 * netif_stop_subqueue - stop sending packets on subqueue
3876 * @dev: network device
3877 * @queue_index: sub queue index
3878 *
3879 * Stop individual transmit queue of a device with multiple transmit queues.
3880 */
3881 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3882 {
3883 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3884 netif_tx_stop_queue(txq);
3885 }
3886
3887 /**
3888 * __netif_subqueue_stopped - test status of subqueue
3889 * @dev: network device
3890 * @queue_index: sub queue index
3891 *
3892 * Check individual transmit queue of a device with multiple transmit queues.
3893 */
3894 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3895 u16 queue_index)
3896 {
3897 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3898
3899 return netif_tx_queue_stopped(txq);
3900 }
3901
3902 /**
3903 * netif_subqueue_stopped - test status of subqueue
3904 * @dev: network device
3905 * @skb: sub queue buffer pointer
3906 *
3907 * Check individual transmit queue of a device with multiple transmit queues.
3908 */
3909 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3910 struct sk_buff *skb)
3911 {
3912 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3913 }
3914
3915 /**
3916 * netif_wake_subqueue - allow sending packets on subqueue
3917 * @dev: network device
3918 * @queue_index: sub queue index
3919 *
3920 * Resume individual transmit queue of a device with multiple transmit queues.
3921 */
3922 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3923 {
3924 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3925
3926 netif_tx_wake_queue(txq);
3927 }
3928
3929 #ifdef CONFIG_XPS
3930 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3931 u16 index);
3932 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3933 u16 index, enum xps_map_type type);
3934
3935 /**
3936 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3937 * @j: CPU/Rx queue index
3938 * @mask: bitmask of all cpus/rx queues
3939 * @nr_bits: number of bits in the bitmask
3940 *
3941 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3942 */
3943 static inline bool netif_attr_test_mask(unsigned long j,
3944 const unsigned long *mask,
3945 unsigned int nr_bits)
3946 {
3947 cpu_max_bits_warn(j, nr_bits);
3948 return test_bit(j, mask);
3949 }
3950
3951 /**
3952 * netif_attr_test_online - Test for online CPU/Rx queue
3953 * @j: CPU/Rx queue index
3954 * @online_mask: bitmask for CPUs/Rx queues that are online
3955 * @nr_bits: number of bits in the bitmask
3956 *
3957 * Returns: true if a CPU/Rx queue is online.
3958 */
3959 static inline bool netif_attr_test_online(unsigned long j,
3960 const unsigned long *online_mask,
3961 unsigned int nr_bits)
3962 {
3963 cpu_max_bits_warn(j, nr_bits);
3964
3965 if (online_mask)
3966 return test_bit(j, online_mask);
3967
3968 return (j < nr_bits);
3969 }
3970
3971 /**
3972 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3973 * @n: CPU/Rx queue index
3974 * @srcp: the cpumask/Rx queue mask pointer
3975 * @nr_bits: number of bits in the bitmask
3976 *
3977 * Returns: next (after n) CPU/Rx queue index in the mask;
3978 * >= nr_bits if no further CPUs/Rx queues set.
3979 */
3980 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3981 unsigned int nr_bits)
3982 {
3983 /* -1 is a legal arg here. */
3984 if (n != -1)
3985 cpu_max_bits_warn(n, nr_bits);
3986
3987 if (srcp)
3988 return find_next_bit(srcp, nr_bits, n + 1);
3989
3990 return n + 1;
3991 }
3992
3993 /**
3994 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3995 * @n: CPU/Rx queue index
3996 * @src1p: the first CPUs/Rx queues mask pointer
3997 * @src2p: the second CPUs/Rx queues mask pointer
3998 * @nr_bits: number of bits in the bitmask
3999 *
4000 * Returns: next (after n) CPU/Rx queue index set in both masks;
4001 * >= nr_bits if no further CPUs/Rx queues set in both.
4002 */
4003 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
4004 const unsigned long *src2p,
4005 unsigned int nr_bits)
4006 {
4007 /* -1 is a legal arg here. */
4008 if (n != -1)
4009 cpu_max_bits_warn(n, nr_bits);
4010
4011 if (src1p && src2p)
4012 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
4013 else if (src1p)
4014 return find_next_bit(src1p, nr_bits, n + 1);
4015 else if (src2p)
4016 return find_next_bit(src2p, nr_bits, n + 1);
4017
4018 return n + 1;
4019 }
4020 #else
4021 static inline int netif_set_xps_queue(struct net_device *dev,
4022 const struct cpumask *mask,
4023 u16 index)
4024 {
4025 return 0;
4026 }
4027
4028 static inline int __netif_set_xps_queue(struct net_device *dev,
4029 const unsigned long *mask,
4030 u16 index, enum xps_map_type type)
4031 {
4032 return 0;
4033 }
4034 #endif
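/*
 * Example (illustrative sketch): a driver with one TX queue per CPU might
 * set an XPS affinity during setup so each CPU transmits on "its" queue.
 * This assumes real_num_tx_queues does not exceed the number of CPUs:
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i), i);
 */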
4035
4036 /**
4037 * netif_is_multiqueue - test if device has multiple transmit queues
4038 * @dev: network device
4039 *
4040 * Check if device has multiple transmit queues
4041 */
4042 static inline bool netif_is_multiqueue(const struct net_device *dev)
4043 {
4044 return dev->num_tx_queues > 1;
4045 }
4046
4047 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
4048
4049 #ifdef CONFIG_SYSFS
4050 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
4051 #else
4052 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
4053 unsigned int rxqs)
4054 {
4055 dev->real_num_rx_queues = rxqs;
4056 return 0;
4057 }
4058 #endif
4059 int netif_set_real_num_queues(struct net_device *dev,
4060 unsigned int txq, unsigned int rxq);
4061
4062 int netif_get_num_default_rss_queues(void);
4063
4064 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
4065 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
4066
4067 /*
4068 * It is not allowed to call kfree_skb() or consume_skb() from hardware
4069 * interrupt context or with hardware interrupts being disabled.
4070 * (in_hardirq() || irqs_disabled())
4071 *
4072  * We provide four helpers that can be used in the following contexts:
4073 *
4074 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
4075 * replacing kfree_skb(skb)
4076 *
4077 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
4078 * Typically used in place of consume_skb(skb) in TX completion path
4079 *
4080 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
4081 * replacing kfree_skb(skb)
4082 *
4083 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
4084 * and consumed a packet. Used in place of consume_skb(skb)
4085 */
4086 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
4087 {
4088 dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4089 }
4090
4091 static inline void dev_consume_skb_irq(struct sk_buff *skb)
4092 {
4093 dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
4094 }
4095
4096 static inline void dev_kfree_skb_any(struct sk_buff *skb)
4097 {
4098 dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4099 }
4100
4101 static inline void dev_consume_skb_any(struct sk_buff *skb)
4102 {
4103 dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
4104 }
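/*
 * Example (hedged sketch): a TX completion handler running in hard-IRQ
 * context frees successfully sent skbs with the _irq variant; the reclaim
 * helper is hypothetical:
 *
 *	while ((skb = mydrv_reclaim_tx_desc(ring)) != NULL)
 *		dev_consume_skb_irq(skb);
 *
 * Code that may run in either IRQ or process context should use the _any
 * variants instead.
 */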
4105
4106 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4107 const struct bpf_prog *xdp_prog);
4108 void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
4109 int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
4110 int netif_rx(struct sk_buff *skb);
4111 int __netif_rx(struct sk_buff *skb);
4112
4113 int netif_receive_skb(struct sk_buff *skb);
4114 int netif_receive_skb_core(struct sk_buff *skb);
4115 void netif_receive_skb_list_internal(struct list_head *head);
4116 void netif_receive_skb_list(struct list_head *head);
4117 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
4118 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
4119 struct sk_buff *napi_get_frags(struct napi_struct *napi);
4120 gro_result_t napi_gro_frags(struct napi_struct *napi);
4121
4122 static inline void napi_free_frags(struct napi_struct *napi)
4123 {
4124 kfree_skb(napi->skb);
4125 napi->skb = NULL;
4126 }
4127
4128 bool netdev_is_rx_handler_busy(struct net_device *dev);
4129 int netdev_rx_handler_register(struct net_device *dev,
4130 rx_handler_func_t *rx_handler,
4131 void *rx_handler_data);
4132 void netdev_rx_handler_unregister(struct net_device *dev);
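/*
 * Example (illustrative sketch): bridge/bonding-like code attaches an
 * rx_handler under RTNL to intercept packets arriving on a lower device:
 *
 *	static rx_handler_result_t mymod_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		...inspect, mangle or steal skb...
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	err = netdev_rx_handler_register(lower_dev, mymod_handle_frame, priv);
 */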
4133
4134 bool dev_valid_name(const char *name);
4135 static inline bool is_socket_ioctl_cmd(unsigned int cmd)
4136 {
4137 return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
4138 }
4139 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
4140 int put_user_ifreq(struct ifreq *ifr, void __user *arg);
4141 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
4142 void __user *data, bool *need_copyout);
4143 int dev_ifconf(struct net *net, struct ifconf __user *ifc);
4144 int generic_hwtstamp_get_lower(struct net_device *dev,
4145 struct kernel_hwtstamp_config *kernel_cfg);
4146 int generic_hwtstamp_set_lower(struct net_device *dev,
4147 struct kernel_hwtstamp_config *kernel_cfg,
4148 struct netlink_ext_ack *extack);
4149 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
4150 unsigned int dev_get_flags(const struct net_device *);
4151 int __dev_change_flags(struct net_device *dev, unsigned int flags,
4152 struct netlink_ext_ack *extack);
4153 int dev_change_flags(struct net_device *dev, unsigned int flags,
4154 struct netlink_ext_ack *extack);
4155 int dev_set_alias(struct net_device *, const char *, size_t);
4156 int dev_get_alias(const struct net_device *, char *, size_t);
4157 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
4158 const char *pat, int new_ifindex);
4159 static inline
4160 int dev_change_net_namespace(struct net_device *dev, struct net *net,
4161 const char *pat)
4162 {
4163 return __dev_change_net_namespace(dev, net, pat, 0);
4164 }
4165 int __dev_set_mtu(struct net_device *, int);
4166 int dev_set_mtu(struct net_device *, int);
4167 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
4168 struct netlink_ext_ack *extack);
4169 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
4170 struct netlink_ext_ack *extack);
4171 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
4172 struct netlink_ext_ack *extack);
4173 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
4174 int dev_get_port_parent_id(struct net_device *dev,
4175 struct netdev_phys_item_id *ppid, bool recurse);
4176 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
4177
4178 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
4179 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
4180 struct netdev_queue *txq, int *ret);
4181
4182 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
4183 u8 dev_xdp_prog_count(struct net_device *dev);
4184 int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
4185 u8 dev_xdp_sb_prog_count(struct net_device *dev);
4186 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
4187
4188 u32 dev_get_min_mp_channel_count(const struct net_device *dev);
4189
4190 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4191 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4192 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
4193 bool is_skb_forwardable(const struct net_device *dev,
4194 const struct sk_buff *skb);
4195
4196 static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
4197 const struct sk_buff *skb,
4198 const bool check_mtu)
4199 {
4200 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
4201 unsigned int len;
4202
4203 if (!(dev->flags & IFF_UP))
4204 return false;
4205
4206 if (!check_mtu)
4207 return true;
4208
4209 len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
4210 if (skb->len <= len)
4211 return true;
4212
4213 /* if TSO is enabled, we don't care about the length as the packet
4214 * could be forwarded without being segmented before
4215 */
4216 if (skb_is_gso(skb))
4217 return true;
4218
4219 return false;
4220 }
4221
4222 void netdev_core_stats_inc(struct net_device *dev, u32 offset);
4223
4224 #define DEV_CORE_STATS_INC(FIELD) \
4225 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
4226 { \
4227 netdev_core_stats_inc(dev, \
4228 offsetof(struct net_device_core_stats, FIELD)); \
4229 }
4230 DEV_CORE_STATS_INC(rx_dropped)
4231 DEV_CORE_STATS_INC(tx_dropped)
4232 DEV_CORE_STATS_INC(rx_nohandler)
4233 DEV_CORE_STATS_INC(rx_otherhost_dropped)
4234 #undef DEV_CORE_STATS_INC
4235
4236 static __always_inline int ____dev_forward_skb(struct net_device *dev,
4237 struct sk_buff *skb,
4238 const bool check_mtu)
4239 {
4240 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
4241 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
4242 dev_core_stats_rx_dropped_inc(dev);
4243 kfree_skb(skb);
4244 return NET_RX_DROP;
4245 }
4246
4247 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
4248 skb->priority = 0;
4249 return 0;
4250 }
4251
4252 bool dev_nit_active(struct net_device *dev);
4253 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
4254
4255 static inline void __dev_put(struct net_device *dev)
4256 {
4257 if (dev) {
4258 #ifdef CONFIG_PCPU_DEV_REFCNT
4259 this_cpu_dec(*dev->pcpu_refcnt);
4260 #else
4261 refcount_dec(&dev->dev_refcnt);
4262 #endif
4263 }
4264 }
4265
4266 static inline void __dev_hold(struct net_device *dev)
4267 {
4268 if (dev) {
4269 #ifdef CONFIG_PCPU_DEV_REFCNT
4270 this_cpu_inc(*dev->pcpu_refcnt);
4271 #else
4272 refcount_inc(&dev->dev_refcnt);
4273 #endif
4274 }
4275 }
4276
4277 static inline void __netdev_tracker_alloc(struct net_device *dev,
4278 netdevice_tracker *tracker,
4279 gfp_t gfp)
4280 {
4281 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4282 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
4283 #endif
4284 }
4285
4286 /* netdev_tracker_alloc() can upgrade a prior untracked reference
4287 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
4288 */
4289 static inline void netdev_tracker_alloc(struct net_device *dev,
4290 netdevice_tracker *tracker, gfp_t gfp)
4291 {
4292 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4293 refcount_dec(&dev->refcnt_tracker.no_tracker);
4294 __netdev_tracker_alloc(dev, tracker, gfp);
4295 #endif
4296 }
4297
4298 static inline void netdev_tracker_free(struct net_device *dev,
4299 netdevice_tracker *tracker)
4300 {
4301 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4302 ref_tracker_free(&dev->refcnt_tracker, tracker);
4303 #endif
4304 }
4305
4306 static inline void netdev_hold(struct net_device *dev,
4307 netdevice_tracker *tracker, gfp_t gfp)
4308 {
4309 if (dev) {
4310 __dev_hold(dev);
4311 __netdev_tracker_alloc(dev, tracker, gfp);
4312 }
4313 }
4314
4315 static inline void netdev_put(struct net_device *dev,
4316 netdevice_tracker *tracker)
4317 {
4318 if (dev) {
4319 netdev_tracker_free(dev, tracker);
4320 __dev_put(dev);
4321 }
4322 }
4323
4324 /**
4325 * dev_hold - get reference to device
4326 * @dev: network device
4327 *
4328 * Hold reference to device to keep it from being freed.
4329 * Try using netdev_hold() instead.
4330 */
4331 static inline void dev_hold(struct net_device *dev)
4332 {
4333 netdev_hold(dev, NULL, GFP_ATOMIC);
4334 }
4335
4336 /**
4337 * dev_put - release reference to device
4338 * @dev: network device
4339 *
4340 * Release reference to device to allow it to be freed.
4341 * Try using netdev_put() instead.
4342 */
4343 static inline void dev_put(struct net_device *dev)
4344 {
4345 netdev_put(dev, NULL);
4346 }
4347
4348 DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
4349
4350 static inline void netdev_ref_replace(struct net_device *odev,
4351 struct net_device *ndev,
4352 netdevice_tracker *tracker,
4353 gfp_t gfp)
4354 {
4355 if (odev)
4356 netdev_tracker_free(odev, tracker);
4357
4358 __dev_hold(ndev);
4359 __dev_put(odev);
4360
4361 if (ndev)
4362 __netdev_tracker_alloc(ndev, tracker, gfp);
4363 }
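/*
 * Example (hedged sketch): long-lived references should prefer the tracked
 * variants so CONFIG_NET_DEV_REFCNT_TRACKER can attribute leaks; "priv" is
 * a hypothetical structure holding a netdevice_tracker member:
 *
 *	netdev_hold(dev, &priv->dev_tracker, GFP_KERNEL);
 *	...
 *	netdev_put(dev, &priv->dev_tracker);
 */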
4364
4365 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
4366  * and _off may be called from IRQ context, but it is the caller
4367  * who is responsible for serializing these calls.
4368  *
4369  * The name carrier is inappropriate; these functions should really be
4370 * called netif_lowerlayer_*() because they represent the state of any
4371 * kind of lower layer not just hardware media.
4372 */
4373 void linkwatch_fire_event(struct net_device *dev);
4374
4375 /**
4376 * linkwatch_sync_dev - sync linkwatch for the given device
4377 * @dev: network device to sync linkwatch for
4378 *
4379 * Sync linkwatch for the given device, removing it from the
4380 * pending work list (if queued).
4381 */
4382 void linkwatch_sync_dev(struct net_device *dev);
4383
4384 /**
4385 * netif_carrier_ok - test if carrier present
4386 * @dev: network device
4387 *
4388 * Check if carrier is present on device
4389 */
4390 static inline bool netif_carrier_ok(const struct net_device *dev)
4391 {
4392 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4393 }
4394
4395 unsigned long dev_trans_start(struct net_device *dev);
4396
4397 void netdev_watchdog_up(struct net_device *dev);
4398
4399 void netif_carrier_on(struct net_device *dev);
4400 void netif_carrier_off(struct net_device *dev);
4401 void netif_carrier_event(struct net_device *dev);
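/*
 * Example (illustrative sketch): a driver's link-status interrupt usually
 * feeds these helpers, which drive the linkwatch machinery; the PHY query
 * is hypothetical:
 *
 *	if (mydrv_phy_link_up(priv))
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */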
4402
4403 /**
4404 * netif_dormant_on - mark device as dormant.
4405 * @dev: network device
4406 *
4407 * Mark device as dormant (as per RFC2863).
4408 *
4409 * The dormant state indicates that the relevant interface is not
4410 * actually in a condition to pass packets (i.e., it is not 'up') but is
4411 * in a "pending" state, waiting for some external event. For "on-
4412 * demand" interfaces, this new state identifies the situation where the
4413 * interface is waiting for events to place it in the up state.
4414 */
4415 static inline void netif_dormant_on(struct net_device *dev)
4416 {
4417 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4418 linkwatch_fire_event(dev);
4419 }
4420
4421 /**
4422 * netif_dormant_off - set device as not dormant.
4423 * @dev: network device
4424 *
4425 * Device is not in dormant state.
4426 */
4427 static inline void netif_dormant_off(struct net_device *dev)
4428 {
4429 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4430 linkwatch_fire_event(dev);
4431 }
4432
4433 /**
4434 * netif_dormant - test if device is dormant
4435 * @dev: network device
4436 *
4437 * Check if device is dormant.
4438 */
4439 static inline bool netif_dormant(const struct net_device *dev)
4440 {
4441 return test_bit(__LINK_STATE_DORMANT, &dev->state);
4442 }
4443
4444
4445 /**
4446 * netif_testing_on - mark device as under test.
4447 * @dev: network device
4448 *
4449 * Mark device as under test (as per RFC2863).
4450 *
4451 * The testing state indicates that some test(s) must be performed on
4452  * the interface. After completion of the test, the interface state
4453 * will change to up, dormant, or down, as appropriate.
4454 */
4455 static inline void netif_testing_on(struct net_device *dev)
4456 {
4457 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4458 linkwatch_fire_event(dev);
4459 }
4460
4461 /**
4462 * netif_testing_off - set device as not under test.
4463 * @dev: network device
4464 *
4465 * Device is not in testing state.
4466 */
4467 static inline void netif_testing_off(struct net_device *dev)
4468 {
4469 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4470 linkwatch_fire_event(dev);
4471 }
4472
4473 /**
4474 * netif_testing - test if device is under test
4475 * @dev: network device
4476 *
4477 * Check if device is under test
4478 */
4479 static inline bool netif_testing(const struct net_device *dev)
4480 {
4481 return test_bit(__LINK_STATE_TESTING, &dev->state);
4482 }
4483
4484
4485 /**
4486 * netif_oper_up - test if device is operational
4487 * @dev: network device
4488 *
4489 * Check if carrier is operational
4490 */
4491 static inline bool netif_oper_up(const struct net_device *dev)
4492 {
4493 unsigned int operstate = READ_ONCE(dev->operstate);
4494
4495 return operstate == IF_OPER_UP ||
4496 operstate == IF_OPER_UNKNOWN /* backward compat */;
4497 }
4498
4499 /**
4500 * netif_device_present - is device available or removed
4501 * @dev: network device
4502 *
4503 * Check if device has not been removed from system.
4504 */
4505 static inline bool netif_device_present(const struct net_device *dev)
4506 {
4507 return test_bit(__LINK_STATE_PRESENT, &dev->state);
4508 }
4509
4510 void netif_device_detach(struct net_device *dev);
4511
4512 void netif_device_attach(struct net_device *dev);
4513
4514 /*
4515 * Network interface message level settings
4516 */
4517
4518 enum {
4519 NETIF_MSG_DRV_BIT,
4520 NETIF_MSG_PROBE_BIT,
4521 NETIF_MSG_LINK_BIT,
4522 NETIF_MSG_TIMER_BIT,
4523 NETIF_MSG_IFDOWN_BIT,
4524 NETIF_MSG_IFUP_BIT,
4525 NETIF_MSG_RX_ERR_BIT,
4526 NETIF_MSG_TX_ERR_BIT,
4527 NETIF_MSG_TX_QUEUED_BIT,
4528 NETIF_MSG_INTR_BIT,
4529 NETIF_MSG_TX_DONE_BIT,
4530 NETIF_MSG_RX_STATUS_BIT,
4531 NETIF_MSG_PKTDATA_BIT,
4532 NETIF_MSG_HW_BIT,
4533 NETIF_MSG_WOL_BIT,
4534
4535 /* When you add a new bit above, update netif_msg_class_names array
4536 * in net/ethtool/common.c
4537 */
4538 NETIF_MSG_CLASS_COUNT,
4539 };
4540 /* Both ethtool_ops interface and internal driver implementation use u32 */
4541 static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4542
4543 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
4544 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4545
4546 #define NETIF_MSG_DRV __NETIF_MSG(DRV)
4547 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
4548 #define NETIF_MSG_LINK __NETIF_MSG(LINK)
4549 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
4550 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
4551 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
4552 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
4553 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
4554 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
4555 #define NETIF_MSG_INTR __NETIF_MSG(INTR)
4556 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
4557 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
4558 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
4559 #define NETIF_MSG_HW __NETIF_MSG(HW)
4560 #define NETIF_MSG_WOL __NETIF_MSG(WOL)
4561
4562 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4563 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4564 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4565 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4566 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4567 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4568 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4569 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4570 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4571 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4572 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4573 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4574 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4575 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4576 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4577
4578 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4579 {
4580 /* use default */
4581 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4582 return default_msg_enable_bits;
4583 if (debug_value == 0) /* no output */
4584 return 0;
4585 /* set low N bits */
4586 return (1U << debug_value) - 1;
4587 }
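/*
 * Example (hedged sketch): drivers typically seed msg_enable from a module
 * parameter in probe and then gate their logging on the class bits; -1
 * selects the driver's defaults:
 *
 *	static int debug = -1;
 *	module_param(debug, int, 0644);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */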
4588
4589 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4590 {
4591 spin_lock(&txq->_xmit_lock);
4592 /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4593 WRITE_ONCE(txq->xmit_lock_owner, cpu);
4594 }
4595
4596 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4597 {
4598 __acquire(&txq->_xmit_lock);
4599 return true;
4600 }
4601
4602 static inline void __netif_tx_release(struct netdev_queue *txq)
4603 {
4604 __release(&txq->_xmit_lock);
4605 }
4606
4607 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4608 {
4609 spin_lock_bh(&txq->_xmit_lock);
4610 /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4611 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4612 }
4613
4614 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4615 {
4616 bool ok = spin_trylock(&txq->_xmit_lock);
4617
4618 if (likely(ok)) {
4619 /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4620 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4621 }
4622 return ok;
4623 }
4624
4625 static inline void __netif_tx_unlock(struct netdev_queue *txq)
4626 {
4627 /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4628 WRITE_ONCE(txq->xmit_lock_owner, -1);
4629 spin_unlock(&txq->_xmit_lock);
4630 }
4631
4632 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4633 {
4634 /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4635 WRITE_ONCE(txq->xmit_lock_owner, -1);
4636 spin_unlock_bh(&txq->_xmit_lock);
4637 }
4638
4639 /*
4640 * txq->trans_start can be read locklessly from dev_watchdog()
4641 */
4642 static inline void txq_trans_update(struct netdev_queue *txq)
4643 {
4644 if (txq->xmit_lock_owner != -1)
4645 WRITE_ONCE(txq->trans_start, jiffies);
4646 }
4647
4648 static inline void txq_trans_cond_update(struct netdev_queue *txq)
4649 {
4650 unsigned long now = jiffies;
4651
4652 if (READ_ONCE(txq->trans_start) != now)
4653 WRITE_ONCE(txq->trans_start, now);
4654 }
4655
4656 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4657 static inline void netif_trans_update(struct net_device *dev)
4658 {
4659 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4660
4661 txq_trans_cond_update(txq);
4662 }
4663
4664 /**
4665 * netif_tx_lock - grab network device transmit lock
4666 * @dev: network device
4667 *
4668 * Get network device transmit lock
4669 */
4670 void netif_tx_lock(struct net_device *dev);
4671
4672 static inline void netif_tx_lock_bh(struct net_device *dev)
4673 {
4674 local_bh_disable();
4675 netif_tx_lock(dev);
4676 }
4677
4678 void netif_tx_unlock(struct net_device *dev);
4679
4680 static inline void netif_tx_unlock_bh(struct net_device *dev)
4681 {
4682 netif_tx_unlock(dev);
4683 local_bh_enable();
4684 }
4685
4686 #define HARD_TX_LOCK(dev, txq, cpu) { \
4687 if (!(dev)->lltx) { \
4688 __netif_tx_lock(txq, cpu); \
4689 } else { \
4690 __netif_tx_acquire(txq); \
4691 } \
4692 }
4693
4694 #define HARD_TX_TRYLOCK(dev, txq) \
4695 (!(dev)->lltx ? \
4696 __netif_tx_trylock(txq) : \
4697 __netif_tx_acquire(txq))
4698
4699 #define HARD_TX_UNLOCK(dev, txq) { \
4700 if (!(dev)->lltx) { \
4701 __netif_tx_unlock(txq); \
4702 } else { \
4703 __netif_tx_release(txq); \
4704 } \
4705 }
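
/*
 * Example (simplified sketch of how the core transmit path uses these
 * macros; the real logic lives in net/core/dev.c and handles far more).
 * netdev_start_xmit() is defined later in this header.
 */
#if 0	/* example only, never compiled */
static netdev_tx_t my_xmit_one(struct sk_buff *skb, struct net_device *dev,
			       struct netdev_queue *txq)
{
	netdev_tx_t rc = NETDEV_TX_BUSY;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		rc = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	return rc;
}
#endif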
4706
4707 static inline void netif_tx_disable(struct net_device *dev)
4708 {
4709 unsigned int i;
4710 int cpu;
4711
4712 local_bh_disable();
4713 cpu = smp_processor_id();
4714 spin_lock(&dev->tx_global_lock);
4715 for (i = 0; i < dev->num_tx_queues; i++) {
4716 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4717
4718 __netif_tx_lock(txq, cpu);
4719 netif_tx_stop_queue(txq);
4720 __netif_tx_unlock(txq);
4721 }
4722 spin_unlock(&dev->tx_global_lock);
4723 local_bh_enable();
4724 }
4725
4726 static inline void netif_addr_lock(struct net_device *dev)
4727 {
4728 unsigned char nest_level = 0;
4729
4730 #ifdef CONFIG_LOCKDEP
4731 nest_level = dev->nested_level;
4732 #endif
4733 spin_lock_nested(&dev->addr_list_lock, nest_level);
4734 }
4735
4736 static inline void netif_addr_lock_bh(struct net_device *dev)
4737 {
4738 unsigned char nest_level = 0;
4739
4740 #ifdef CONFIG_LOCKDEP
4741 nest_level = dev->nested_level;
4742 #endif
4743 local_bh_disable();
4744 spin_lock_nested(&dev->addr_list_lock, nest_level);
4745 }
4746
4747 static inline void netif_addr_unlock(struct net_device *dev)
4748 {
4749 spin_unlock(&dev->addr_list_lock);
4750 }
4751
4752 static inline void netif_addr_unlock_bh(struct net_device *dev)
4753 {
4754 spin_unlock_bh(&dev->addr_list_lock);
4755 }
4756
4757 /*
4758 * dev_addrs walker. Should be used only for read access. Call with
4759 * rcu_read_lock held.
4760 */
4761 #define for_each_dev_addr(dev, ha) \
4762 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
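
/*
 * Example (sketch): counting a device's hardware addresses with the
 * read-only walker above, under the required RCU read lock.
 */
#if 0	/* example only, never compiled */
static unsigned int my_count_dev_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned int n = 0;

	rcu_read_lock();
	for_each_dev_addr(dev, ha)
		n++;
	rcu_read_unlock();

	return n;
}
#endif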
4763
4764 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
4765
4766 void ether_setup(struct net_device *dev);
4767
4768 /* Allocate dummy net_device */
4769 struct net_device *alloc_netdev_dummy(int sizeof_priv);
4770
4771 /* Support for loadable net-drivers */
4772 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4773 unsigned char name_assign_type,
4774 void (*setup)(struct net_device *),
4775 unsigned int txqs, unsigned int rxqs);
4776 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4777 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4778
4779 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4780 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4781 count)
4782
4783 int register_netdev(struct net_device *dev);
4784 void unregister_netdev(struct net_device *dev);
4785
4786 int devm_register_netdev(struct device *dev, struct net_device *ndev);
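
/*
 * Example (sketch): the usual allocate/register pairing for an Ethernet
 * device; error handling is trimmed and "struct my_priv" is hypothetical.
 */
#if 0	/* example only, never compiled */
static int my_create_netdev(void)
{
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(struct my_priv), "my%d",
			    NET_NAME_ENUM, ether_setup);
	if (!ndev)
		return -ENOMEM;

	err = register_netdev(ndev);
	if (err)
		free_netdev(ndev);
	return err;
}
#endif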
4787
4788 /* General hardware address lists handling functions */
4789 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4790 struct netdev_hw_addr_list *from_list, int addr_len);
4791 int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
4792 struct netdev_hw_addr_list *from_list,
4793 int addr_len);
4794 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4795 struct netdev_hw_addr_list *from_list, int addr_len);
4796 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4797 struct net_device *dev,
4798 int (*sync)(struct net_device *, const unsigned char *),
4799 int (*unsync)(struct net_device *,
4800 const unsigned char *));
4801 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4802 struct net_device *dev,
4803 int (*sync)(struct net_device *,
4804 const unsigned char *, int),
4805 int (*unsync)(struct net_device *,
4806 const unsigned char *, int));
4807 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4808 struct net_device *dev,
4809 int (*unsync)(struct net_device *,
4810 const unsigned char *, int));
4811 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4812 struct net_device *dev,
4813 int (*unsync)(struct net_device *,
4814 const unsigned char *));
4815 void __hw_addr_init(struct netdev_hw_addr_list *list);
4816
4817 /* Functions used for device addresses handling */
4818 void dev_addr_mod(struct net_device *dev, unsigned int offset,
4819 const void *addr, size_t len);
4820
4821 static inline void
4822 __dev_addr_set(struct net_device *dev, const void *addr, size_t len)
4823 {
4824 dev_addr_mod(dev, 0, addr, len);
4825 }
4826
4827 static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
4828 {
4829 __dev_addr_set(dev, addr, dev->addr_len);
4830 }
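
/*
 * Example (sketch): installing a MAC address read from hardware during
 * probe. dev->dev_addr is const, so it must only change through these
 * helpers; the validation/fallback helpers live in <linux/etherdevice.h>.
 */
#if 0	/* example only, never compiled */
static void my_set_hw_mac(struct net_device *dev, const u8 hw_mac[ETH_ALEN])
{
	if (is_valid_ether_addr(hw_mac))
		dev_addr_set(dev, hw_mac);
	else
		eth_hw_addr_random(dev);	/* fall back to a random MAC */
}
#endif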
4831
4832 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4833 unsigned char addr_type);
4834 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4835 unsigned char addr_type);
4836
4837 /* Functions used for unicast addresses handling */
4838 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4839 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4840 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4841 int dev_uc_sync(struct net_device *to, struct net_device *from);
4842 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4843 void dev_uc_unsync(struct net_device *to, struct net_device *from);
4844 void dev_uc_flush(struct net_device *dev);
4845 void dev_uc_init(struct net_device *dev);
4846
4847 /**
4848 * __dev_uc_sync - Synchronize device's unicast list
4849 * @dev: device to sync
4850 * @sync: function to call if address should be added
4851 * @unsync: function to call if address should be removed
4852 *
4853 * Add newly added addresses to the interface, and release
4854 * addresses that have been deleted.
4855 */
4856 static inline int __dev_uc_sync(struct net_device *dev,
4857 int (*sync)(struct net_device *,
4858 const unsigned char *),
4859 int (*unsync)(struct net_device *,
4860 const unsigned char *))
4861 {
4862 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4863 }
4864
4865 /**
4866 * __dev_uc_unsync - Remove synchronized addresses from device
4867 * @dev: device to sync
4868 * @unsync: function to call if address should be removed
4869 *
4870 * Remove all addresses that were added to the device by dev_uc_sync().
4871 */
4872 static inline void __dev_uc_unsync(struct net_device *dev,
4873 int (*unsync)(struct net_device *,
4874 const unsigned char *))
4875 {
4876 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
4877 }
4878
4879 /* Functions used for multicast addresses handling */
4880 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4881 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4882 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4883 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4884 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4885 int dev_mc_sync(struct net_device *to, struct net_device *from);
4886 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4887 void dev_mc_unsync(struct net_device *to, struct net_device *from);
4888 void dev_mc_flush(struct net_device *dev);
4889 void dev_mc_init(struct net_device *dev);
4890
4891 /**
4892 * __dev_mc_sync - Synchronize device's multicast list
4893 * @dev: device to sync
4894 * @sync: function to call if address should be added
4895 * @unsync: function to call if address should be removed
4896 *
4897 * Add newly added addresses to the interface, and release
4898 * addresses that have been deleted.
4899 */
4900 static inline int __dev_mc_sync(struct net_device *dev,
4901 int (*sync)(struct net_device *,
4902 const unsigned char *),
4903 int (*unsync)(struct net_device *,
4904 const unsigned char *))
4905 {
4906 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4907 }
4908
4909 /**
4910 * __dev_mc_unsync - Remove synchronized addresses from device
4911 * @dev: device to sync
4912 * @unsync: function to call if address should be removed
4913 *
4914 * Remove all addresses that were added to the device by dev_mc_sync().
4915 */
4916 static inline void __dev_mc_unsync(struct net_device *dev,
4917 int (*unsync)(struct net_device *,
4918 const unsigned char *))
4919 {
4920 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
4921 }
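
/*
 * Example (sketch): a typical ndo_set_rx_mode() pushing the UC/MC lists
 * down to hardware filters. my_hw_filter_add()/my_hw_filter_del() are
 * hypothetical driver internals.
 */
#if 0	/* example only, never compiled */
static int my_sync(struct net_device *dev, const unsigned char *addr)
{
	return my_hw_filter_add(netdev_priv(dev), addr);
}

static int my_unsync(struct net_device *dev, const unsigned char *addr)
{
	my_hw_filter_del(netdev_priv(dev), addr);
	return 0;
}

static void my_set_rx_mode(struct net_device *dev)
{
	/* The core calls this with netif_addr_lock_bh() held. */
	__dev_uc_sync(dev, my_sync, my_unsync);
	__dev_mc_sync(dev, my_sync, my_unsync);
}
#endif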
4922
4923 /* Functions used for secondary unicast and multicast support */
4924 void dev_set_rx_mode(struct net_device *dev);
4925 int dev_set_promiscuity(struct net_device *dev, int inc);
4926 int dev_set_allmulti(struct net_device *dev, int inc);
4927 void netdev_state_change(struct net_device *dev);
4928 void __netdev_notify_peers(struct net_device *dev);
4929 void netdev_notify_peers(struct net_device *dev);
4930 void netdev_features_change(struct net_device *dev);
4931 /* Load a device via the kmod */
4932 void dev_load(struct net *net, const char *name);
4933 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4934 struct rtnl_link_stats64 *storage);
4935 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4936 const struct net_device_stats *netdev_stats);
4937 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4938 const struct pcpu_sw_netstats __percpu *netstats);
4939 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
4940
4941 enum {
4942 NESTED_SYNC_IMM_BIT,
4943 NESTED_SYNC_TODO_BIT,
4944 };
4945
4946 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
4947 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
4948
4949 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
4950 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
4951
4952 struct netdev_nested_priv {
4953 unsigned char flags;
4954 void *data;
4955 };
4956
4957 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4958 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4959 struct list_head **iter);
4960
4961 /* iterate through upper list, must be called under RCU read lock */
4962 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4963 for (iter = &(dev)->adj_list.upper, \
4964 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4965 updev; \
4966 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
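
/*
 * Example (sketch): testing whether any immediate upper device is a
 * bridge master, under the RCU read lock the iterator requires.
 * netif_is_bridge_master() is defined later in this header.
 */
#if 0	/* example only, never compiled */
static bool my_has_bridge_upper(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;
	bool ret = false;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		if (netif_is_bridge_master(upper)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
#endif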
4967
4968 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4969 int (*fn)(struct net_device *upper_dev,
4970 struct netdev_nested_priv *priv),
4971 struct netdev_nested_priv *priv);
4972
4973 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4974 struct net_device *upper_dev);
4975
4976 bool netdev_has_any_upper_dev(struct net_device *dev);
4977
4978 void *netdev_lower_get_next_private(struct net_device *dev,
4979 struct list_head **iter);
4980 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4981 struct list_head **iter);
4982
4983 #define netdev_for_each_lower_private(dev, priv, iter) \
4984 for (iter = (dev)->adj_list.lower.next, \
4985 priv = netdev_lower_get_next_private(dev, &(iter)); \
4986 priv; \
4987 priv = netdev_lower_get_next_private(dev, &(iter)))
4988
4989 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4990 for (iter = &(dev)->adj_list.lower, \
4991 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4992 priv; \
4993 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4994
4995 void *netdev_lower_get_next(struct net_device *dev,
4996 struct list_head **iter);
4997
4998 #define netdev_for_each_lower_dev(dev, ldev, iter) \
4999 for (iter = (dev)->adj_list.lower.next, \
5000 ldev = netdev_lower_get_next(dev, &(iter)); \
5001 ldev; \
5002 ldev = netdev_lower_get_next(dev, &(iter)))
5003
5004 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
5005 struct list_head **iter);
5006 int netdev_walk_all_lower_dev(struct net_device *dev,
5007 int (*fn)(struct net_device *lower_dev,
5008 struct netdev_nested_priv *priv),
5009 struct netdev_nested_priv *priv);
5010 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
5011 int (*fn)(struct net_device *lower_dev,
5012 struct netdev_nested_priv *priv),
5013 struct netdev_nested_priv *priv);
5014
5015 void *netdev_adjacent_get_private(struct list_head *adj_list);
5016 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
5017 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
5018 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
5019 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
5020 struct netlink_ext_ack *extack);
5021 int netdev_master_upper_dev_link(struct net_device *dev,
5022 struct net_device *upper_dev,
5023 void *upper_priv, void *upper_info,
5024 struct netlink_ext_ack *extack);
5025 void netdev_upper_dev_unlink(struct net_device *dev,
5026 struct net_device *upper_dev);
5027 int netdev_adjacent_change_prepare(struct net_device *old_dev,
5028 struct net_device *new_dev,
5029 struct net_device *dev,
5030 struct netlink_ext_ack *extack);
5031 void netdev_adjacent_change_commit(struct net_device *old_dev,
5032 struct net_device *new_dev,
5033 struct net_device *dev);
5034 void netdev_adjacent_change_abort(struct net_device *old_dev,
5035 struct net_device *new_dev,
5036 struct net_device *dev);
5037 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
5038 void *netdev_lower_dev_get_private(struct net_device *dev,
5039 struct net_device *lower_dev);
5040 void netdev_lower_state_changed(struct net_device *lower_dev,
5041 void *lower_state_info);
5042
5043 /* RSS keys are 40 or 52 bytes long */
5044 #define NETDEV_RSS_KEY_LEN 52
5045 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
5046 void netdev_rss_key_fill(void *buffer, size_t len);
5047
5048 int skb_checksum_help(struct sk_buff *skb);
5049 int skb_crc32c_csum_help(struct sk_buff *skb);
5050 int skb_csum_hwoffload_help(struct sk_buff *skb,
5051 const netdev_features_t features);
5052
5053 struct netdev_bonding_info {
5054 ifslave slave;
5055 ifbond master;
5056 };
5057
5058 struct netdev_notifier_bonding_info {
5059 struct netdev_notifier_info info; /* must be first */
5060 struct netdev_bonding_info bonding_info;
5061 };
5062
5063 void netdev_bonding_info_change(struct net_device *dev,
5064 struct netdev_bonding_info *bonding_info);
5065
5066 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
5067 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
5068 #else
5069 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
5070 const void *data)
5071 {
5072 }
5073 #endif
5074
5075 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
5076
5077 static inline bool can_checksum_protocol(netdev_features_t features,
5078 __be16 protocol)
5079 {
5080 if (protocol == htons(ETH_P_FCOE))
5081 return !!(features & NETIF_F_FCOE_CRC);
5082
5083 /* Assume this is an IP checksum (not SCTP CRC) */
5084
5085 if (features & NETIF_F_HW_CSUM) {
5086 /* Can checksum everything */
5087 return true;
5088 }
5089
5090 switch (protocol) {
5091 case htons(ETH_P_IP):
5092 return !!(features & NETIF_F_IP_CSUM);
5093 case htons(ETH_P_IPV6):
5094 return !!(features & NETIF_F_IPV6_CSUM);
5095 default:
5096 return false;
5097 }
5098 }
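
/*
 * Example (sketch): clearing the checksum-offload bits for a packet the
 * device cannot checksum, so the stack falls back to software checksums.
 */
#if 0	/* example only, never compiled */
static netdev_features_t
my_fix_csum_features(struct sk_buff *skb, netdev_features_t features)
{
	int depth;
	__be16 proto = skb_network_protocol(skb, &depth);

	if (!can_checksum_protocol(features, proto))
		features &= ~NETIF_F_CSUM_MASK;
	return features;
}
#endif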
5099
5100 #ifdef CONFIG_BUG
5101 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
5102 #else
5103 static inline void netdev_rx_csum_fault(struct net_device *dev,
5104 struct sk_buff *skb)
5105 {
5106 }
5107 #endif
5108 /* rx skb timestamps */
5109 void net_enable_timestamp(void);
5110 void net_disable_timestamp(void);
5111
5112 static inline ktime_t netdev_get_tstamp(struct net_device *dev,
5113 const struct skb_shared_hwtstamps *hwtstamps,
5114 bool cycles)
5115 {
5116 const struct net_device_ops *ops = dev->netdev_ops;
5117
5118 if (ops->ndo_get_tstamp)
5119 return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
5120
5121 return hwtstamps->hwtstamp;
5122 }
5123
5124 #ifndef CONFIG_PREEMPT_RT
5125 static inline void netdev_xmit_set_more(bool more)
5126 {
5127 __this_cpu_write(softnet_data.xmit.more, more);
5128 }
5129
5130 static inline bool netdev_xmit_more(void)
5131 {
5132 return __this_cpu_read(softnet_data.xmit.more);
5133 }
5134 #else
5135 static inline void netdev_xmit_set_more(bool more)
5136 {
5137 current->net_xmit.more = more;
5138 }
5139
5140 static inline bool netdev_xmit_more(void)
5141 {
5142 return current->net_xmit.more;
5143 }
5144 #endif
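
/*
 * Example (sketch): the common ndo_start_xmit() pattern that batches
 * doorbell writes, only kicking the hardware once the stack signals no
 * further skb is queued behind this one. my_post_descriptor() and
 * my_ring_doorbell() are hypothetical driver internals.
 */
#if 0	/* example only, never compiled */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	my_post_descriptor(priv, skb);

	if (!netdev_xmit_more() ||
	    netif_xmit_stopped(skb_get_tx_queue(dev, skb)))
		my_ring_doorbell(priv);	/* flush batched descriptors */

	return NETDEV_TX_OK;
}
#endif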
5145
5146 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
5147 struct sk_buff *skb, struct net_device *dev,
5148 bool more)
5149 {
5150 netdev_xmit_set_more(more);
5151 return ops->ndo_start_xmit(skb, dev);
5152 }
5153
5154 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
5155 struct netdev_queue *txq, bool more)
5156 {
5157 const struct net_device_ops *ops = dev->netdev_ops;
5158 netdev_tx_t rc;
5159
5160 rc = __netdev_start_xmit(ops, skb, dev, more);
5161 if (rc == NETDEV_TX_OK)
5162 txq_trans_update(txq);
5163
5164 return rc;
5165 }
5166
5167 int netdev_class_create_file_ns(const struct class_attribute *class_attr,
5168 const void *ns);
5169 void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
5170 const void *ns);
5171
5172 extern const struct kobj_ns_type_operations net_ns_type_operations;
5173
5174 const char *netdev_drivername(const struct net_device *dev);
5175
5176 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
5177 netdev_features_t f2)
5178 {
5179 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
5180 if (f1 & NETIF_F_HW_CSUM)
5181 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5182 else
5183 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5184 }
5185
5186 return f1 & f2;
5187 }
5188
5189 static inline netdev_features_t netdev_get_wanted_features(
5190 struct net_device *dev)
5191 {
5192 return (dev->features & ~dev->hw_features) | dev->wanted_features;
5193 }
5194 netdev_features_t netdev_increment_features(netdev_features_t all,
5195 netdev_features_t one, netdev_features_t mask);
5196
5197 /* Allow TSO to be used on stacked devices:
5198 * performing the GSO segmentation before the last device
5199 * is a performance improvement.
5200 */
5201 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
5202 netdev_features_t mask)
5203 {
5204 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
5205 }
5206
5207 int __netdev_update_features(struct net_device *dev);
5208 void netdev_update_features(struct net_device *dev);
5209 void netdev_change_features(struct net_device *dev);
5210
5211 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5212 struct net_device *dev);
5213
5214 netdev_features_t passthru_features_check(struct sk_buff *skb,
5215 struct net_device *dev,
5216 netdev_features_t features);
5217 netdev_features_t netif_skb_features(struct sk_buff *skb);
5218 void skb_warn_bad_offload(const struct sk_buff *skb);
5219
5220 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
5221 {
5222 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
5223
5224 /* check flags correspondence */
5225 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
5226 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
5227 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
5228 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
5229 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
5230 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
5231 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
5232 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
5233 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
5234 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
5235 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
5236 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
5237 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
5238 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
5239 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
5240 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
5241 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
5242 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
5243 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
5244
5245 return (features & feature) == feature;
5246 }
5247
5248 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
5249 {
5250 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
5251 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
5252 }
5253
5254 static inline bool netif_needs_gso(struct sk_buff *skb,
5255 netdev_features_t features)
5256 {
5257 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
5258 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
5259 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
5260 }
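
/*
 * Example (sketch): deciding in a transmit path whether a GSO skb must
 * be segmented in software first. skb_gso_segment() lives in
 * <net/gso.h>; error handling is trimmed for brevity.
 */
#if 0	/* example only, never compiled */
static struct sk_buff *my_maybe_segment(struct sk_buff *skb)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features))
		return skb_gso_segment(skb, features);	/* may be ERR_PTR */
	return skb;
}
#endif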
5261
5262 void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
5263 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
5264 void netif_inherit_tso_max(struct net_device *to,
5265 const struct net_device *from);
5266
5267 static inline unsigned int
5268 netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
5269 {
5270 /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
5271 return skb->protocol == htons(ETH_P_IPV6) ?
5272 READ_ONCE(dev->gro_max_size) :
5273 READ_ONCE(dev->gro_ipv4_max_size);
5274 }
5275
5276 static inline unsigned int
5277 netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
5278 {
5279 /* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
5280 return skb->protocol == htons(ETH_P_IPV6) ?
5281 READ_ONCE(dev->gso_max_size) :
5282 READ_ONCE(dev->gso_ipv4_max_size);
5283 }
5284
5285 static inline bool netif_is_macsec(const struct net_device *dev)
5286 {
5287 return dev->priv_flags & IFF_MACSEC;
5288 }
5289
5290 static inline bool netif_is_macvlan(const struct net_device *dev)
5291 {
5292 return dev->priv_flags & IFF_MACVLAN;
5293 }
5294
5295 static inline bool netif_is_macvlan_port(const struct net_device *dev)
5296 {
5297 return dev->priv_flags & IFF_MACVLAN_PORT;
5298 }
5299
5300 static inline bool netif_is_bond_master(const struct net_device *dev)
5301 {
5302 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
5303 }
5304
5305 static inline bool netif_is_bond_slave(const struct net_device *dev)
5306 {
5307 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
5308 }
5309
5310 static inline bool netif_supports_nofcs(struct net_device *dev)
5311 {
5312 return dev->priv_flags & IFF_SUPP_NOFCS;
5313 }
5314
5315 static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
5316 {
5317 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
5318 }
5319
5320 static inline bool netif_is_l3_master(const struct net_device *dev)
5321 {
5322 return dev->priv_flags & IFF_L3MDEV_MASTER;
5323 }
5324
5325 static inline bool netif_is_l3_slave(const struct net_device *dev)
5326 {
5327 return dev->priv_flags & IFF_L3MDEV_SLAVE;
5328 }
5329
5330 static inline int dev_sdif(const struct net_device *dev)
5331 {
5332 #ifdef CONFIG_NET_L3_MASTER_DEV
5333 if (netif_is_l3_slave(dev))
5334 return dev->ifindex;
5335 #endif
5336 return 0;
5337 }
5338
5339 static inline bool netif_is_bridge_master(const struct net_device *dev)
5340 {
5341 return dev->priv_flags & IFF_EBRIDGE;
5342 }
5343
5344 static inline bool netif_is_bridge_port(const struct net_device *dev)
5345 {
5346 return dev->priv_flags & IFF_BRIDGE_PORT;
5347 }
5348
5349 static inline bool netif_is_ovs_master(const struct net_device *dev)
5350 {
5351 return dev->priv_flags & IFF_OPENVSWITCH;
5352 }
5353
5354 static inline bool netif_is_ovs_port(const struct net_device *dev)
5355 {
5356 return dev->priv_flags & IFF_OVS_DATAPATH;
5357 }
5358
5359 static inline bool netif_is_any_bridge_master(const struct net_device *dev)
5360 {
5361 return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
5362 }
5363
5364 static inline bool netif_is_any_bridge_port(const struct net_device *dev)
5365 {
5366 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
5367 }
5368
5369 static inline bool netif_is_team_master(const struct net_device *dev)
5370 {
5371 return dev->priv_flags & IFF_TEAM;
5372 }
5373
5374 static inline bool netif_is_team_port(const struct net_device *dev)
5375 {
5376 return dev->priv_flags & IFF_TEAM_PORT;
5377 }
5378
5379 static inline bool netif_is_lag_master(const struct net_device *dev)
5380 {
5381 return netif_is_bond_master(dev) || netif_is_team_master(dev);
5382 }
5383
5384 static inline bool netif_is_lag_port(const struct net_device *dev)
5385 {
5386 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
5387 }
5388
5389 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
5390 {
5391 return dev->priv_flags & IFF_RXFH_CONFIGURED;
5392 }
5393
5394 static inline bool netif_is_failover(const struct net_device *dev)
5395 {
5396 return dev->priv_flags & IFF_FAILOVER;
5397 }
5398
5399 static inline bool netif_is_failover_slave(const struct net_device *dev)
5400 {
5401 return dev->priv_flags & IFF_FAILOVER_SLAVE;
5402 }
5403
5404 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
5405 static inline void netif_keep_dst(struct net_device *dev)
5406 {
5407 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
5408 }
5409
5410 /* Return true if dev can't cope with MTU-sized frames that need VLAN tag insertion. */
5411 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
5412 {
5413 /* TODO: reserve and use an additional IFF bit, if we get more users */
5414 return netif_is_macsec(dev);
5415 }
5416
5417 extern struct pernet_operations __net_initdata loopback_net_ops;
5418
5419 /* Logging, debugging and troubleshooting/diagnostic helpers. */
5420
5421 /* netdev_printk helpers, similar to dev_printk */
5422
5423 static inline const char *netdev_name(const struct net_device *dev)
5424 {
5425 if (!dev->name[0] || strchr(dev->name, '%'))
5426 return "(unnamed net_device)";
5427 return dev->name;
5428 }
5429
5430 static inline const char *netdev_reg_state(const struct net_device *dev)
5431 {
5432 u8 reg_state = READ_ONCE(dev->reg_state);
5433
5434 switch (reg_state) {
5435 case NETREG_UNINITIALIZED: return " (uninitialized)";
5436 case NETREG_REGISTERED: return "";
5437 case NETREG_UNREGISTERING: return " (unregistering)";
5438 case NETREG_UNREGISTERED: return " (unregistered)";
5439 case NETREG_RELEASED: return " (released)";
5440 case NETREG_DUMMY: return " (dummy)";
5441 }
5442
5443 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
5444 return " (unknown)";
5445 }
5446
5447 #define MODULE_ALIAS_NETDEV(device) \
5448 MODULE_ALIAS("netdev-" device)
5449
5450 /*
5451 * netdev_WARN() acts like dev_printk(), but with the key difference
5452 * of using a WARN/WARN_ON to get the message out, including the
5453 * file/line information and a backtrace.
5454 */
5455 #define netdev_WARN(dev, format, args...) \
5456 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
5457 netdev_reg_state(dev), ##args)
5458
5459 #define netdev_WARN_ONCE(dev, format, args...) \
5460 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
5461 netdev_reg_state(dev), ##args)
5462
5463 /*
5464 * The list of packet types we will receive (as opposed to discard)
5465 * and the routines to invoke.
5466 *
5467 * Why 16? Because with 16 the only overlap we get on a hash of the
5468 * low nibble of the protocol value is RARP/SNAP/X.25.
5469 *
5470 * 0800 IP
5471 * 0001 802.3
5472 * 0002 AX.25
5473 * 0004 802.2
5474 * 8035 RARP
5475 * 0005 SNAP
5476 * 0805 X.25
5477 * 0806 ARP
5478 * 8137 IPX
5479 * 0009 Localtalk
5480 * 86DD IPv6
5481 */
5482 #define PTYPE_HASH_SIZE (16)
5483 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
5484
5485 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
5486
5487 extern struct net_device *blackhole_netdev;
5488
5489 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
5490 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5491 #define DEV_STATS_ADD(DEV, FIELD, VAL) \
5492 atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
5493 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
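
/*
 * Example (sketch): accounting a drop on a slow error path with the
 * atomic helpers above; as the note says, fast paths should prefer
 * per-CPU or per-queue counters instead.
 */
#if 0	/* example only, never compiled */
static void my_rx_error(struct net_device *dev, struct sk_buff *skb)
{
	DEV_STATS_INC(dev, rx_dropped);
	kfree_skb(skb);
}
#endif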
5494
5495 #endif /* _LINUX_NETDEVICE_H */