xref: /linux/net/core/dev.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *      NET3    Protocol independent device support routines.
4   *
5   *	Derived from the non IP parts of dev.c 1.0.19
6   *              Authors:	Ross Biro
7   *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8   *				Mark Evans, <evansmp@uhura.aston.ac.uk>
9   *
10   *	Additional Authors:
11   *		Florian la Roche <rzsfl@rz.uni-sb.de>
12   *		Alan Cox <gw4pts@gw4pts.ampr.org>
13   *		David Hinds <dahinds@users.sourceforge.net>
14   *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15   *		Adam Sulmicki <adam@cfar.umd.edu>
16   *              Pekka Riikonen <priikone@poesidon.pspt.fi>
17   *
18   *	Changes:
19   *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
20   *                                      to 2 if register_netdev gets called
21   *                                      before net_dev_init & also removed a
22   *                                      few lines of code in the process.
23   *		Alan Cox	:	device private ioctl copies fields back.
24   *		Alan Cox	:	Transmit queue code does relevant
25   *					stunts to keep the queue safe.
26   *		Alan Cox	:	Fixed double lock.
27   *		Alan Cox	:	Fixed promisc NULL pointer trap
28   *		????????	:	Support the full private ioctl range
29   *		Alan Cox	:	Moved ioctl permission check into
30   *					drivers
31   *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
32   *		Alan Cox	:	100 backlog just doesn't cut it when
33   *					you start doing multicast video 8)
34   *		Alan Cox	:	Rewrote net_bh and list manager.
35   *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
36   *		Alan Cox	:	Took out transmit every packet pass
37   *					Saved a few bytes in the ioctl handler
38   *		Alan Cox	:	Network driver sets packet type before
39   *					calling netif_rx. Saves a function
40   *					call a packet.
41   *		Alan Cox	:	Hashed net_bh()
42   *		Richard Kooijman:	Timestamp fixes.
43   *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
44   *		Alan Cox	:	Device lock protection.
45   *              Alan Cox        :       Fixed nasty side effect of device close
46   *					changes.
47   *		Rudi Cilibrasi	:	Pass the right thing to
48   *					set_mac_address()
49   *		Dave Miller	:	32bit quantity for the device lock to
50   *					make it work out on a Sparc.
51   *		Bjorn Ekwall	:	Added KERNELD hack.
52   *		Alan Cox	:	Cleaned up the backlog initialise.
53   *		Craig Metz	:	SIOCGIFCONF fix if space for under
54   *					1 device.
55   *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
56   *					is no device open function.
57   *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
58   *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
59   *		Cyrus Durgin	:	Cleaned for KMOD
60   *		Adam Sulmicki   :	Bug Fix : Network Device Unload
61   *					A network device unload needs to purge
62   *					the backlog queue.
63   *	Paul Rusty Russell	:	SIOCSIFNAME
64   *              Pekka Riikonen  :	Netdev boot-time settings code
65   *              Andrew Morton   :       Make unregister_netdevice wait
66   *                                      indefinitely on dev->refcnt
67   *              J Hadi Salim    :       - Backlog queue sampling
68   *				        - netif_rx() feedback
69   */
70  
71  #include <linux/uaccess.h>
72  #include <linux/bitops.h>
73  #include <linux/capability.h>
74  #include <linux/cpu.h>
75  #include <linux/types.h>
76  #include <linux/kernel.h>
77  #include <linux/hash.h>
78  #include <linux/slab.h>
79  #include <linux/sched.h>
80  #include <linux/sched/mm.h>
81  #include <linux/mutex.h>
82  #include <linux/string.h>
83  #include <linux/mm.h>
84  #include <linux/socket.h>
85  #include <linux/sockios.h>
86  #include <linux/errno.h>
87  #include <linux/interrupt.h>
88  #include <linux/if_ether.h>
89  #include <linux/netdevice.h>
90  #include <linux/etherdevice.h>
91  #include <linux/ethtool.h>
92  #include <linux/skbuff.h>
93  #include <linux/bpf.h>
94  #include <linux/bpf_trace.h>
95  #include <net/net_namespace.h>
96  #include <net/sock.h>
97  #include <net/busy_poll.h>
98  #include <linux/rtnetlink.h>
99  #include <linux/stat.h>
100  #include <net/dst.h>
101  #include <net/dst_metadata.h>
102  #include <net/pkt_sched.h>
103  #include <net/pkt_cls.h>
104  #include <net/checksum.h>
105  #include <net/xfrm.h>
106  #include <linux/highmem.h>
107  #include <linux/init.h>
108  #include <linux/module.h>
109  #include <linux/netpoll.h>
110  #include <linux/rcupdate.h>
111  #include <linux/delay.h>
112  #include <net/iw_handler.h>
113  #include <asm/current.h>
114  #include <linux/audit.h>
115  #include <linux/dmaengine.h>
116  #include <linux/err.h>
117  #include <linux/ctype.h>
118  #include <linux/if_arp.h>
119  #include <linux/if_vlan.h>
120  #include <linux/ip.h>
121  #include <net/ip.h>
122  #include <net/mpls.h>
123  #include <linux/ipv6.h>
124  #include <linux/in.h>
125  #include <linux/jhash.h>
126  #include <linux/random.h>
127  #include <trace/events/napi.h>
128  #include <trace/events/net.h>
129  #include <trace/events/skb.h>
130  #include <linux/inetdevice.h>
131  #include <linux/cpu_rmap.h>
132  #include <linux/static_key.h>
133  #include <linux/hashtable.h>
134  #include <linux/vmalloc.h>
135  #include <linux/if_macvlan.h>
136  #include <linux/errqueue.h>
137  #include <linux/hrtimer.h>
138  #include <linux/netfilter_ingress.h>
139  #include <linux/crash_dump.h>
140  #include <linux/sctp.h>
141  #include <net/udp_tunnel.h>
142  #include <linux/net_namespace.h>
143  #include <linux/indirect_call_wrapper.h>
144  #include <net/devlink.h>
145  
146  #include "net-sysfs.h"
147  
148  #define MAX_GRO_SKBS 8
149  #define MAX_NEST_DEV 8
150  
151  /* This should be increased if a protocol with a bigger head is added. */
152  #define GRO_MAX_HEAD (MAX_HEADER + 128)
153  
154  static DEFINE_SPINLOCK(ptype_lock);
155  static DEFINE_SPINLOCK(offload_lock);
156  struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
157  struct list_head ptype_all __read_mostly;	/* Taps */
158  static struct list_head offload_base __read_mostly;
159  
160  static int netif_rx_internal(struct sk_buff *skb);
161  static int call_netdevice_notifiers_info(unsigned long val,
162  					 struct netdev_notifier_info *info);
163  static int call_netdevice_notifiers_extack(unsigned long val,
164  					   struct net_device *dev,
165  					   struct netlink_ext_ack *extack);
166  static struct napi_struct *napi_by_id(unsigned int napi_id);
167  
168  /*
169   * The @dev_base_head list is protected by @dev_base_lock and the rtnl
170   * semaphore.
171   *
172   * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
173   *
174   * Writers must hold the rtnl semaphore while they loop through the
175   * dev_base_head list, and hold dev_base_lock for writing when they do the
176   * actual updates.  This allows pure readers to access the list even
177   * while a writer is preparing to update it.
178   *
179   * To put it another way, dev_base_lock is held for writing only to
180   * protect against pure readers; the rtnl semaphore provides the
181   * protection against other writers.
182   *
183   * See, for example usages, register_netdevice() and
184   * unregister_netdevice(), which must be called with the rtnl
185   * semaphore held.
186   */
187  DEFINE_RWLOCK(dev_base_lock);
188  EXPORT_SYMBOL(dev_base_lock);
189  
190  static DEFINE_MUTEX(ifalias_mutex);
191  
192  /* protects napi_hash addition/deletion and napi_gen_id */
193  static DEFINE_SPINLOCK(napi_hash_lock);
194  
195  static unsigned int napi_gen_id = NR_CPUS;
196  static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
197  
198  static seqcount_t devnet_rename_seq;
199  
200  static inline void dev_base_seq_inc(struct net *net)
201  {
202  	while (++net->dev_base_seq == 0)
203  		;
204  }
205  
206  static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
207  {
208  	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
209  
210  	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
211  }
212  
213  static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
214  {
215  	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
216  }
217  
218  static inline void rps_lock(struct softnet_data *sd)
219  {
220  #ifdef CONFIG_RPS
221  	spin_lock(&sd->input_pkt_queue.lock);
222  #endif
223  }
224  
225  static inline void rps_unlock(struct softnet_data *sd)
226  {
227  #ifdef CONFIG_RPS
228  	spin_unlock(&sd->input_pkt_queue.lock);
229  #endif
230  }
231  
232  static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
233  						       const char *name)
234  {
235  	struct netdev_name_node *name_node;
236  
237  	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
238  	if (!name_node)
239  		return NULL;
240  	INIT_HLIST_NODE(&name_node->hlist);
241  	name_node->dev = dev;
242  	name_node->name = name;
243  	return name_node;
244  }
245  
246  static struct netdev_name_node *
247  netdev_name_node_head_alloc(struct net_device *dev)
248  {
249  	struct netdev_name_node *name_node;
250  
251  	name_node = netdev_name_node_alloc(dev, dev->name);
252  	if (!name_node)
253  		return NULL;
254  	INIT_LIST_HEAD(&name_node->list);
255  	return name_node;
256  }
257  
258  static void netdev_name_node_free(struct netdev_name_node *name_node)
259  {
260  	kfree(name_node);
261  }
262  
263  static void netdev_name_node_add(struct net *net,
264  				 struct netdev_name_node *name_node)
265  {
266  	hlist_add_head_rcu(&name_node->hlist,
267  			   dev_name_hash(net, name_node->name));
268  }
269  
270  static void netdev_name_node_del(struct netdev_name_node *name_node)
271  {
272  	hlist_del_rcu(&name_node->hlist);
273  }
274  
275  static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
276  							const char *name)
277  {
278  	struct hlist_head *head = dev_name_hash(net, name);
279  	struct netdev_name_node *name_node;
280  
281  	hlist_for_each_entry(name_node, head, hlist)
282  		if (!strcmp(name_node->name, name))
283  			return name_node;
284  	return NULL;
285  }
286  
287  static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
288  							    const char *name)
289  {
290  	struct hlist_head *head = dev_name_hash(net, name);
291  	struct netdev_name_node *name_node;
292  
293  	hlist_for_each_entry_rcu(name_node, head, hlist)
294  		if (!strcmp(name_node->name, name))
295  			return name_node;
296  	return NULL;
297  }
298  
299  int netdev_name_node_alt_create(struct net_device *dev, const char *name)
300  {
301  	struct netdev_name_node *name_node;
302  	struct net *net = dev_net(dev);
303  
304  	name_node = netdev_name_node_lookup(net, name);
305  	if (name_node)
306  		return -EEXIST;
307  	name_node = netdev_name_node_alloc(dev, name);
308  	if (!name_node)
309  		return -ENOMEM;
310  	netdev_name_node_add(net, name_node);
311  	/* The node that holds dev->name acts as the head of the per-device list. */
312  	list_add_tail(&name_node->list, &dev->name_node->list);
313  
314  	return 0;
315  }
316  EXPORT_SYMBOL(netdev_name_node_alt_create);
317  
318  static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
319  {
320  	list_del(&name_node->list);
321  	netdev_name_node_del(name_node);
322  	kfree(name_node->name);
323  	netdev_name_node_free(name_node);
324  }
325  
326  int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
327  {
328  	struct netdev_name_node *name_node;
329  	struct net *net = dev_net(dev);
330  
331  	name_node = netdev_name_node_lookup(net, name);
332  	if (!name_node)
333  		return -ENOENT;
334  	__netdev_name_node_alt_destroy(name_node);
335  
336  	return 0;
337  }
338  EXPORT_SYMBOL(netdev_name_node_alt_destroy);
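
/* Illustrative sketch (not part of this file): adding and removing an
 * alternative interface name under RTNL, roughly as the rtnetlink code
 * does. Note that netdev_name_node_alt_create() stores the string
 * pointer and netdev_name_node_alt_destroy() kfrees it, so a heap copy
 * is handed in. my_dev and "uplink0" are hypothetical.
 *
 *	char *altname = kstrdup("uplink0", GFP_KERNEL);
 *
 *	rtnl_lock();
 *	err = netdev_name_node_alt_create(my_dev, altname);
 *	if (err)
 *		kfree(altname);	// on success the name node owns the string
 *	...
 *	err = netdev_name_node_alt_destroy(my_dev, "uplink0");
 *	rtnl_unlock();
 */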
339  
340  static void netdev_name_node_alt_flush(struct net_device *dev)
341  {
342  	struct netdev_name_node *name_node, *tmp;
343  
344  	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
345  		__netdev_name_node_alt_destroy(name_node);
346  }
347  
348  /* Device list insertion */
349  static void list_netdevice(struct net_device *dev)
350  {
351  	struct net *net = dev_net(dev);
352  
353  	ASSERT_RTNL();
354  
355  	write_lock_bh(&dev_base_lock);
356  	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
357  	netdev_name_node_add(net, dev->name_node);
358  	hlist_add_head_rcu(&dev->index_hlist,
359  			   dev_index_hash(net, dev->ifindex));
360  	write_unlock_bh(&dev_base_lock);
361  
362  	dev_base_seq_inc(net);
363  }
364  
365  /* Device list removal
366   * caller must respect an RCU grace period before freeing/reusing dev
367   */
368  static void unlist_netdevice(struct net_device *dev)
369  {
370  	ASSERT_RTNL();
371  
372  	/* Unlink dev from the device chain */
373  	write_lock_bh(&dev_base_lock);
374  	list_del_rcu(&dev->dev_list);
375  	netdev_name_node_del(dev->name_node);
376  	hlist_del_rcu(&dev->index_hlist);
377  	write_unlock_bh(&dev_base_lock);
378  
379  	dev_base_seq_inc(dev_net(dev));
380  }
381  
382  /*
383   *	Our notifier list
384   */
385  
386  static RAW_NOTIFIER_HEAD(netdev_chain);
387  
388  /*
389   *	Device drivers call our routines to queue packets here. We empty the
390   *	queue in the local softnet handler.
391   */
392  
393  DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
394  EXPORT_PER_CPU_SYMBOL(softnet_data);
395  
396  /*******************************************************************************
397   *
398   *		Protocol management and registration routines
399   *
400   *******************************************************************************/
401  
402  
403  /*
404   *	Add a protocol ID to the list. Now that the input handler is
405   *	smarter we can dispense with all the messy stuff that used to be
406   *	here.
407   *
408   *	BEWARE!!! Protocol handlers that mangle input packets
409   *	MUST BE last in the hash buckets, and checking of protocol handlers
410   *	MUST start from the promiscuous ptype_all chain in net_bh.
411   *	This is true today; do not change it.
412   *	Explanation: if a packet-mangling protocol handler were first on
413   *	the list, it could not detect that the packet is cloned and should
414   *	be copied-on-write, so it would modify it and subsequent readers
415   *	would get a broken packet.
416   *							--ANK (980803)
417   */
418  
419  static inline struct list_head *ptype_head(const struct packet_type *pt)
420  {
421  	if (pt->type == htons(ETH_P_ALL))
422  		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
423  	else
424  		return pt->dev ? &pt->dev->ptype_specific :
425  				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
426  }
427  
428  /**
429   *	dev_add_pack - add packet handler
430   *	@pt: packet type declaration
431   *
432   *	Add a protocol handler to the networking stack. The passed &packet_type
433   *	is linked into kernel lists and may not be freed until it has been
434   *	removed from the kernel lists.
435   *
436   *	This call does not sleep, therefore it cannot
437   *	guarantee that all CPUs that are in the middle of receiving packets
438   *	will see the new packet type (until the next received packet).
439   */
440  
441  void dev_add_pack(struct packet_type *pt)
442  {
443  	struct list_head *head = ptype_head(pt);
444  
445  	spin_lock(&ptype_lock);
446  	list_add_rcu(&pt->list, head);
447  	spin_unlock(&ptype_lock);
448  }
449  EXPORT_SYMBOL(dev_add_pack);
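
/* Illustrative sketch (not from this file): registering a tap that sees
 * every protocol. my_rcv and my_pt are hypothetical names; the handler
 * owns the skb reference it is given and must release it.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// inspect skb here, then drop our reference
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// or a specific ethertype
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);	// sleeps via synchronize_net()
 */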
450  
451  /**
452   *	__dev_remove_pack	 - remove packet handler
453   *	@pt: packet type declaration
454   *
455   *	Remove a protocol handler that was previously added to the kernel
456   *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
457   *	from the kernel lists and can be freed or reused once this function
458   *	returns.
459   *
460   *      The packet type might still be in use by receivers
461   *	and must not be freed until after all the CPUs have gone
462   *	through a quiescent state.
463   */
464  void __dev_remove_pack(struct packet_type *pt)
465  {
466  	struct list_head *head = ptype_head(pt);
467  	struct packet_type *pt1;
468  
469  	spin_lock(&ptype_lock);
470  
471  	list_for_each_entry(pt1, head, list) {
472  		if (pt == pt1) {
473  			list_del_rcu(&pt->list);
474  			goto out;
475  		}
476  	}
477  
478  	pr_warn("dev_remove_pack: %p not found\n", pt);
479  out:
480  	spin_unlock(&ptype_lock);
481  }
482  EXPORT_SYMBOL(__dev_remove_pack);
483  
484  /**
485   *	dev_remove_pack	 - remove packet handler
486   *	@pt: packet type declaration
487   *
488   *	Remove a protocol handler that was previously added to the kernel
489   *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
490   *	from the kernel lists and can be freed or reused once this function
491   *	returns.
492   *
493   *	This call sleeps to guarantee that no CPU is looking at the packet
494   *	type after return.
495   */
496  void dev_remove_pack(struct packet_type *pt)
497  {
498  	__dev_remove_pack(pt);
499  
500  	synchronize_net();
501  }
502  EXPORT_SYMBOL(dev_remove_pack);
503  
504  
505  /**
506   *	dev_add_offload - register offload handlers
507   *	@po: protocol offload declaration
508   *
509   *	Add protocol offload handlers to the networking stack. The passed
510   *	&proto_offload is linked into kernel lists and may not be freed until
511   *	it has been removed from the kernel lists.
512   *
513   *	This call does not sleep, therefore it cannot
514   *	guarantee that all CPUs that are in the middle of receiving packets
515   *	will see the new offload handlers (until the next received packet).
516   */
517  void dev_add_offload(struct packet_offload *po)
518  {
519  	struct packet_offload *elem;
520  
521  	spin_lock(&offload_lock);
522  	list_for_each_entry(elem, &offload_base, list) {
523  		if (po->priority < elem->priority)
524  			break;
525  	}
526  	list_add_rcu(&po->list, elem->list.prev);
527  	spin_unlock(&offload_lock);
528  }
529  EXPORT_SYMBOL(dev_add_offload);
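
/* Illustrative sketch (not from this file): a protocol offload entry of
 * the kind registered by the IPv4/IPv6 code. The callback names are
 * hypothetical; the struct layout follows include/linux/netdevice.h.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.priority = 0,			// lower value = earlier in list
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *	...
 *	dev_remove_offload(&my_offload);	// sleeps via synchronize_net()
 */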
530  
531  /**
532   *	__dev_remove_offload	 - remove offload handler
533   *	@po: packet offload declaration
534   *
535   *	Remove a protocol offload handler that was previously added to the
536   *	kernel offload handlers by dev_add_offload(). The passed &offload_type
537   *	is removed from the kernel lists and can be freed or reused once this
538   *	function returns.
539   *
540   *      The packet type might still be in use by receivers
541   *	and must not be freed until after all the CPUs have gone
542   *	through a quiescent state.
543   */
544  static void __dev_remove_offload(struct packet_offload *po)
545  {
546  	struct list_head *head = &offload_base;
547  	struct packet_offload *po1;
548  
549  	spin_lock(&offload_lock);
550  
551  	list_for_each_entry(po1, head, list) {
552  		if (po == po1) {
553  			list_del_rcu(&po->list);
554  			goto out;
555  		}
556  	}
557  
558  	pr_warn("dev_remove_offload: %p not found\n", po);
559  out:
560  	spin_unlock(&offload_lock);
561  }
562  
563  /**
564   *	dev_remove_offload	 - remove packet offload handler
565   *	@po: packet offload declaration
566   *
567   *	Remove a packet offload handler that was previously added to the kernel
568   *	offload handlers by dev_add_offload(). The passed &offload_type is
569   *	removed from the kernel lists and can be freed or reused once this
570   *	function returns.
571   *
572   *	This call sleeps to guarantee that no CPU is looking at the packet
573   *	type after return.
574   */
575  void dev_remove_offload(struct packet_offload *po)
576  {
577  	__dev_remove_offload(po);
578  
579  	synchronize_net();
580  }
581  EXPORT_SYMBOL(dev_remove_offload);
582  
583  /******************************************************************************
584   *
585   *		      Device Boot-time Settings Routines
586   *
587   ******************************************************************************/
588  
589  /* Boot time configuration table */
590  static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
591  
592  /**
593   *	netdev_boot_setup_add	- add new setup entry
594   *	@name: name of the device
595   *	@map: configured settings for the device
596   *
597   *	Adds a new setup entry to the dev_boot_setup list.  The function
598   *	returns 0 on error and 1 on success.  This is a generic routine for
599   *	all netdevices.
600   */
601  static int netdev_boot_setup_add(char *name, struct ifmap *map)
602  {
603  	struct netdev_boot_setup *s;
604  	int i;
605  
606  	s = dev_boot_setup;
607  	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
608  		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
609  			memset(s[i].name, 0, sizeof(s[i].name));
610  			strlcpy(s[i].name, name, IFNAMSIZ);
611  			memcpy(&s[i].map, map, sizeof(s[i].map));
612  			break;
613  		}
614  	}
615  
616  	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
617  }
618  
619  /**
620   * netdev_boot_setup_check	- check boot time settings
621   * @dev: the netdevice
622   *
623   * Check boot time settings for the device.
624   * The found settings are set for the device to be used
625   * later in the device probing.
626   * Returns 0 if no settings are found, 1 if they are.
627   */
628  int netdev_boot_setup_check(struct net_device *dev)
629  {
630  	struct netdev_boot_setup *s = dev_boot_setup;
631  	int i;
632  
633  	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
634  		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
635  		    !strcmp(dev->name, s[i].name)) {
636  			dev->irq = s[i].map.irq;
637  			dev->base_addr = s[i].map.base_addr;
638  			dev->mem_start = s[i].map.mem_start;
639  			dev->mem_end = s[i].map.mem_end;
640  			return 1;
641  		}
642  	}
643  	return 0;
644  }
645  EXPORT_SYMBOL(netdev_boot_setup_check);
646  
647  
648  /**
649   * netdev_boot_base	- get address from boot time settings
650   * @prefix: prefix for network device
651   * @unit: id for network device
652   *
653   * Check boot time settings for the base address of the device.
654   * The found settings are set for the device to be used
655   * later in the device probing.
656   * Returns 0 if no settings found.
657   */
658  unsigned long netdev_boot_base(const char *prefix, int unit)
659  {
660  	const struct netdev_boot_setup *s = dev_boot_setup;
661  	char name[IFNAMSIZ];
662  	int i;
663  
664  	sprintf(name, "%s%d", prefix, unit);
665  
666  	/*
667  	 * If device already registered then return base of 1
668  	 * to indicate not to probe for this interface
669  	 */
670  	if (__dev_get_by_name(&init_net, name))
671  		return 1;
672  
673  	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
674  		if (!strcmp(name, s[i].name))
675  			return s[i].map.base_addr;
676  	return 0;
677  }
678  
679  /*
680   * Saves the settings configured at boot time for any netdevice.
681   */
682  int __init netdev_boot_setup(char *str)
683  {
684  	int ints[5];
685  	struct ifmap map;
686  
687  	str = get_options(str, ARRAY_SIZE(ints), ints);
688  	if (!str || !*str)
689  		return 0;
690  
691  	/* Save settings */
692  	memset(&map, 0, sizeof(map));
693  	if (ints[0] > 0)
694  		map.irq = ints[1];
695  	if (ints[0] > 1)
696  		map.base_addr = ints[2];
697  	if (ints[0] > 2)
698  		map.mem_start = ints[3];
699  	if (ints[0] > 3)
700  		map.mem_end = ints[4];
701  
702  	/* Add new entry to the list */
703  	return netdev_boot_setup_add(str, &map);
704  }
705  
706  __setup("netdev=", netdev_boot_setup);
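
/* Illustrative note: given the parsing above, the boot parameter takes
 * the form
 *	netdev=<irq>,<io>,<mem_start>,<mem_end>,<name>
 * For example, "netdev=9,0x300,0,0,eth0" (values illustrative only) would
 * record irq 9 and I/O base 0x300 for eth0, to be picked up later by
 * netdev_boot_setup_check() during device probing.
 */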
707  
708  /*******************************************************************************
709   *
710   *			    Device Interface Subroutines
711   *
712   *******************************************************************************/
713  
714  /**
715   *	dev_get_iflink	- get 'iflink' value of an interface
716   *	@dev: targeted interface
717   *
718   *	Indicates the ifindex the interface is linked to.
719   *	Physical interfaces have the same 'ifindex' and 'iflink' values.
720   */
721  
722  int dev_get_iflink(const struct net_device *dev)
723  {
724  	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
725  		return dev->netdev_ops->ndo_get_iflink(dev);
726  
727  	return dev->ifindex;
728  }
729  EXPORT_SYMBOL(dev_get_iflink);
730  
731  /**
732   *	dev_fill_metadata_dst - Retrieve tunnel egress information.
733   *	@dev: targeted interface
734   *	@skb: The packet.
735   *
736   *	For better visibility of tunnel traffic, OVS needs to retrieve
737   *	egress tunnel information for a packet. The following API allows
738   *	the user to get this info.
739   */
740  int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
741  {
742  	struct ip_tunnel_info *info;
743  
744  	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
745  		return -EINVAL;
746  
747  	info = skb_tunnel_info_unclone(skb);
748  	if (!info)
749  		return -ENOMEM;
750  	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
751  		return -EINVAL;
752  
753  	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
754  }
755  EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
756  
757  /**
758   *	__dev_get_by_name	- find a device by its name
759   *	@net: the applicable net namespace
760   *	@name: name to find
761   *
762   *	Find an interface by name. Must be called under RTNL semaphore
763   *	or @dev_base_lock. If the name is found a pointer to the device
764   *	is returned. If the name is not found then %NULL is returned. The
765   *	reference counters are not incremented so the caller must be
766   *	careful with locks.
767   */
768  
769  struct net_device *__dev_get_by_name(struct net *net, const char *name)
770  {
771  	struct netdev_name_node *node_name;
772  
773  	node_name = netdev_name_node_lookup(net, name);
774  	return node_name ? node_name->dev : NULL;
775  }
776  EXPORT_SYMBOL(__dev_get_by_name);
777  
778  /**
779   * dev_get_by_name_rcu	- find a device by its name
780   * @net: the applicable net namespace
781   * @name: name to find
782   *
783   * Find an interface by name.
784   * If the name is found a pointer to the device is returned.
785   * If the name is not found then %NULL is returned.
786   * The reference counters are not incremented so the caller must be
787   * careful with locks. The caller must hold RCU lock.
788   */
789  
790  struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
791  {
792  	struct netdev_name_node *node_name;
793  
794  	node_name = netdev_name_node_lookup_rcu(net, name);
795  	return node_name ? node_name->dev : NULL;
796  }
797  EXPORT_SYMBOL(dev_get_by_name_rcu);
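
/* Illustrative sketch (not from this file): an RCU-protected lookup.
 * The device pointer is only valid inside the read-side section unless
 * a reference is taken with dev_hold(); "eth0" is just an example name.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		ifindex = dev->ifindex;	// use dev only under rcu_read_lock
 *	rcu_read_unlock();
 */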
798  
799  /**
800   *	dev_get_by_name		- find a device by its name
801   *	@net: the applicable net namespace
802   *	@name: name to find
803   *
804   *	Find an interface by name. This can be called from any
805   *	context and does its own locking. The returned handle has
806   *	the usage count incremented and the caller must use dev_put() to
807   *	release it when it is no longer needed. %NULL is returned if no
808   *	matching device is found.
809   */
810  
811  struct net_device *dev_get_by_name(struct net *net, const char *name)
812  {
813  	struct net_device *dev;
814  
815  	rcu_read_lock();
816  	dev = dev_get_by_name_rcu(net, name);
817  	if (dev)
818  		dev_hold(dev);
819  	rcu_read_unlock();
820  	return dev;
821  }
822  EXPORT_SYMBOL(dev_get_by_name);
823  
824  /**
825   *	__dev_get_by_index - find a device by its ifindex
826   *	@net: the applicable net namespace
827   *	@ifindex: index of device
828   *
829   *	Search for an interface by index. Returns a pointer to the device,
830   *	or %NULL if the device is not found. The device has not
831   *	had its reference counter increased so the caller must be careful
832   *	about locking. The caller must hold either the RTNL semaphore
833   *	or @dev_base_lock.
834   */
835  
836  struct net_device *__dev_get_by_index(struct net *net, int ifindex)
837  {
838  	struct net_device *dev;
839  	struct hlist_head *head = dev_index_hash(net, ifindex);
840  
841  	hlist_for_each_entry(dev, head, index_hlist)
842  		if (dev->ifindex == ifindex)
843  			return dev;
844  
845  	return NULL;
846  }
847  EXPORT_SYMBOL(__dev_get_by_index);
848  
849  /**
850   *	dev_get_by_index_rcu - find a device by its ifindex
851   *	@net: the applicable net namespace
852   *	@ifindex: index of device
853   *
854   *	Search for an interface by index. Returns a pointer to the device,
855   *	or %NULL if the device is not found. The device has not
856   *	had its reference counter increased so the caller must be careful
857   *	about locking. The caller must hold RCU lock.
858   */
859  
860  struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
861  {
862  	struct net_device *dev;
863  	struct hlist_head *head = dev_index_hash(net, ifindex);
864  
865  	hlist_for_each_entry_rcu(dev, head, index_hlist)
866  		if (dev->ifindex == ifindex)
867  			return dev;
868  
869  	return NULL;
870  }
871  EXPORT_SYMBOL(dev_get_by_index_rcu);
872  
873  
874  /**
875   *	dev_get_by_index - find a device by its ifindex
876   *	@net: the applicable net namespace
877   *	@ifindex: index of device
878   *
879   *	Search for an interface by index. Returns a pointer to the device,
880   *	or %NULL if the device is not found. The device returned has
881   *	had a reference added and the pointer is safe until the user calls
882   *	dev_put to indicate they have finished with it.
883   */
884  
885  struct net_device *dev_get_by_index(struct net *net, int ifindex)
886  {
887  	struct net_device *dev;
888  
889  	rcu_read_lock();
890  	dev = dev_get_by_index_rcu(net, ifindex);
891  	if (dev)
892  		dev_hold(dev);
893  	rcu_read_unlock();
894  	return dev;
895  }
896  EXPORT_SYMBOL(dev_get_by_index);
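
/* Illustrative sketch (not from this file): a refcounted lookup by
 * ifindex; the reference must be dropped with dev_put() when done.
 *
 *	struct net_device *dev = dev_get_by_index(net, ifindex);
 *
 *	if (dev) {
 *		// ... use dev; it cannot go away while the ref is held
 *		dev_put(dev);
 *	}
 */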
897  
898  /**
899   *	dev_get_by_napi_id - find a device by napi_id
900   *	@napi_id: ID of the NAPI struct
901   *
902   *	Search for an interface by NAPI ID. Returns a pointer to the device,
903   *	or %NULL if the device is not found. The device has not had
904   *	its reference counter increased so the caller must be careful
905   *	about locking. The caller must hold RCU lock.
906   */
907  
908  struct net_device *dev_get_by_napi_id(unsigned int napi_id)
909  {
910  	struct napi_struct *napi;
911  
912  	WARN_ON_ONCE(!rcu_read_lock_held());
913  
914  	if (napi_id < MIN_NAPI_ID)
915  		return NULL;
916  
917  	napi = napi_by_id(napi_id);
918  
919  	return napi ? napi->dev : NULL;
920  }
921  EXPORT_SYMBOL(dev_get_by_napi_id);
922  
923  /**
924   *	netdev_get_name - get a netdevice name, knowing its ifindex.
925   *	@net: network namespace
926   *	@name: a pointer to the buffer where the name will be stored.
927   *	@ifindex: the ifindex of the interface to get the name from.
928   *
929   *	The use of raw_seqcount_begin() and cond_resched() before
930   *	retrying is required as we want to give the writers a chance
931   *	to complete when CONFIG_PREEMPTION is not set.
932   */
933  int netdev_get_name(struct net *net, char *name, int ifindex)
934  {
935  	struct net_device *dev;
936  	unsigned int seq;
937  
938  retry:
939  	seq = raw_seqcount_begin(&devnet_rename_seq);
940  	rcu_read_lock();
941  	dev = dev_get_by_index_rcu(net, ifindex);
942  	if (!dev) {
943  		rcu_read_unlock();
944  		return -ENODEV;
945  	}
946  
947  	strcpy(name, dev->name);
948  	rcu_read_unlock();
949  	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
950  		cond_resched();
951  		goto retry;
952  	}
953  
954  	return 0;
955  }
956  
957  /**
958   *	dev_getbyhwaddr_rcu - find a device by its hardware address
959   *	@net: the applicable net namespace
960   *	@type: media type of device
961   *	@ha: hardware address
962   *
963   *	Search for an interface by MAC address. Returns a pointer to the
964   *	device, or %NULL if the device is not found.
965   *	The caller must hold RCU or RTNL.
966   *	The returned device has not had its ref count increased
967   *	and the caller must therefore be careful about locking.
968   *
969   */
970  
971  struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
972  				       const char *ha)
973  {
974  	struct net_device *dev;
975  
976  	for_each_netdev_rcu(net, dev)
977  		if (dev->type == type &&
978  		    !memcmp(dev->dev_addr, ha, dev->addr_len))
979  			return dev;
980  
981  	return NULL;
982  }
983  EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
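
/* Illustrative sketch (not from this file): looking up an Ethernet
 * device by MAC address under RCU. "mac" is a hypothetical 6-byte array.
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		netdev_info(dev, "owns this MAC\n");
 *	rcu_read_unlock();
 */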
984  
985  struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
986  {
987  	struct net_device *dev;
988  
989  	ASSERT_RTNL();
990  	for_each_netdev(net, dev)
991  		if (dev->type == type)
992  			return dev;
993  
994  	return NULL;
995  }
996  EXPORT_SYMBOL(__dev_getfirstbyhwtype);
997  
998  struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
999  {
1000  	struct net_device *dev, *ret = NULL;
1001  
1002  	rcu_read_lock();
1003  	for_each_netdev_rcu(net, dev)
1004  		if (dev->type == type) {
1005  			dev_hold(dev);
1006  			ret = dev;
1007  			break;
1008  		}
1009  	rcu_read_unlock();
1010  	return ret;
1011  }
1012  EXPORT_SYMBOL(dev_getfirstbyhwtype);
1013  
1014  /**
1015   *	__dev_get_by_flags - find any device with given flags
1016   *	@net: the applicable net namespace
1017   *	@if_flags: IFF_* values
1018   *	@mask: bitmask of bits in if_flags to check
1019   *
1020   *	Search for any interface with the given flags. Returns a pointer to
1021   *	the device, or %NULL if no device is found. Must be called inside
1022   *	rtnl_lock(), and the result's refcount is unchanged.
1023   */
1024  
1025  struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1026  				      unsigned short mask)
1027  {
1028  	struct net_device *dev, *ret;
1029  
1030  	ASSERT_RTNL();
1031  
1032  	ret = NULL;
1033  	for_each_netdev(net, dev) {
1034  		if (((dev->flags ^ if_flags) & mask) == 0) {
1035  			ret = dev;
1036  			break;
1037  		}
1038  	}
1039  	return ret;
1040  }
1041  EXPORT_SYMBOL(__dev_get_by_flags);
1042  
1043  /**
1044   *	dev_valid_name - check if name is okay for network device
1045   *	@name: name string
1046   *
1047   *	Network device names need to be valid file names
1048   *	to allow sysfs to work.  We also disallow any kind of
1049   *	whitespace.
1050   */
1051  bool dev_valid_name(const char *name)
1052  {
1053  	if (*name == '\0')
1054  		return false;
1055  	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1056  		return false;
1057  	if (!strcmp(name, ".") || !strcmp(name, ".."))
1058  		return false;
1059  
1060  	while (*name) {
1061  		if (*name == '/' || *name == ':' || isspace(*name))
1062  			return false;
1063  		name++;
1064  	}
1065  	return true;
1066  }
1067  EXPORT_SYMBOL(dev_valid_name);
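
/* Illustrative examples: dev_valid_name("eth0") and dev_valid_name("lan%d")
 * return true, while "", ".", ".." and names containing '/', ':',
 * whitespace, or 16 or more characters are rejected.
 */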
1068  
1069  /**
1070   *	__dev_alloc_name - allocate a name for a device
1071   *	@net: network namespace to allocate the device name in
1072   *	@name: name format string
1073   *	@buf:  scratch buffer and result name string
1074   *
1075   *	Passed a format string - eg "lt%d" - it will try to find a suitable
1076   *	id. It scans the list of devices to build up a free map, then chooses
1077   *	the first empty slot. The caller must hold the dev_base or rtnl lock
1078   *	while allocating the name and adding the device in order to avoid
1079   *	duplicates.
1080   *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1081   *	Returns the number of the unit assigned or a negative errno code.
1082   */
1083  
1084  static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1085  {
1086  	int i = 0;
1087  	const char *p;
1088  	const int max_netdevices = 8*PAGE_SIZE;
1089  	unsigned long *inuse;
1090  	struct net_device *d;
1091  
1092  	if (!dev_valid_name(name))
1093  		return -EINVAL;
1094  
1095  	p = strchr(name, '%');
1096  	if (p) {
1097  		/*
1098  		 * Verify the string as this thing may have come from
1099  		 * the user.  There must be either one "%d" and no other "%"
1100  		 * characters.
1101  		 */
1102  		if (p[1] != 'd' || strchr(p + 2, '%'))
1103  			return -EINVAL;
1104  
1105  		/* Use one page as a bit array of possible slots */
1106  		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1107  		if (!inuse)
1108  			return -ENOMEM;
1109  
1110  		for_each_netdev(net, d) {
1111  			if (!sscanf(d->name, name, &i))
1112  				continue;
1113  			if (i < 0 || i >= max_netdevices)
1114  				continue;
1115  
1116  			/*  avoid cases where sscanf is not exact inverse of printf */
1117  			snprintf(buf, IFNAMSIZ, name, i);
1118  			if (!strncmp(buf, d->name, IFNAMSIZ))
1119  				set_bit(i, inuse);
1120  		}
1121  
1122  		i = find_first_zero_bit(inuse, max_netdevices);
1123  		free_page((unsigned long) inuse);
1124  	}
1125  
1126  	snprintf(buf, IFNAMSIZ, name, i);
1127  	if (!__dev_get_by_name(net, buf))
1128  		return i;
1129  
1130  	/* It is possible to run out of possible slots
1131  	 * when the name is long and there isn't enough space left
1132  	 * for the digits, or if all bits are used.
1133  	 */
1134  	return -ENFILE;
1135  }
1136  
1137  static int dev_alloc_name_ns(struct net *net,
1138  			     struct net_device *dev,
1139  			     const char *name)
1140  {
1141  	char buf[IFNAMSIZ];
1142  	int ret;
1143  
1144  	BUG_ON(!net);
1145  	ret = __dev_alloc_name(net, name, buf);
1146  	if (ret >= 0)
1147  		strlcpy(dev->name, buf, IFNAMSIZ);
1148  	return ret;
1149  }
1150  
1151  /**
1152   *	dev_alloc_name - allocate a name for a device
1153   *	@dev: device
1154   *	@name: name format string
1155   *
1156   *	Passed a format string - eg "lt%d" - it will try to find a suitable
1157   *	id. It scans the list of devices to build up a free map, then chooses
1158   *	the first empty slot. The caller must hold the dev_base or rtnl lock
1159   *	while allocating the name and adding the device in order to avoid
1160   *	duplicates.
1161   *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1162   *	Returns the number of the unit assigned or a negative errno code.
1163   */
1164  
1165  int dev_alloc_name(struct net_device *dev, const char *name)
1166  {
1167  	return dev_alloc_name_ns(dev_net(dev), dev, name);
1168  }
1169  EXPORT_SYMBOL(dev_alloc_name);
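
/* Illustrative sketch (not from this file): a driver asking for the next
 * free unit of a hypothetical "mydev%d" namespace before registration;
 * per the comment above this must run with the rtnl (or dev_base) lock
 * held across allocation and registration.
 *
 *	err = dev_alloc_name(dev, "mydev%d");
 *	if (err < 0)
 *		goto out;	// e.g. -EINVAL or -ENFILE
 *	// dev->name is now e.g. "mydev0"; err holds the unit number
 */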
1170  
1171  static int dev_get_valid_name(struct net *net, struct net_device *dev,
1172  			      const char *name)
1173  {
1174  	BUG_ON(!net);
1175  
1176  	if (!dev_valid_name(name))
1177  		return -EINVAL;
1178  
1179  	if (strchr(name, '%'))
1180  		return dev_alloc_name_ns(net, dev, name);
1181  	else if (__dev_get_by_name(net, name))
1182  		return -EEXIST;
1183  	else if (dev->name != name)
1184  		strlcpy(dev->name, name, IFNAMSIZ);
1185  
1186  	return 0;
1187  }
1188  
1189  /**
1190   *	dev_change_name - change name of a device
1191   *	@dev: device
1192   *	@newname: name (or format string) must be at least IFNAMSIZ
1193   *
1194   *	Change the name of a device. A format string such as "eth%d"
1195   *	can be passed for wildcarding.
1196   */
1197  int dev_change_name(struct net_device *dev, const char *newname)
1198  {
1199  	unsigned char old_assign_type;
1200  	char oldname[IFNAMSIZ];
1201  	int err = 0;
1202  	int ret;
1203  	struct net *net;
1204  
1205  	ASSERT_RTNL();
1206  	BUG_ON(!dev_net(dev));
1207  
1208  	net = dev_net(dev);
1209  
1210  	/* Some auto-enslaved devices e.g. failover slaves are
1211  	 * special, as userspace might rename the device after
1212  	 * the interface had been brought up and running since
1213  	 * the point the kernel initiated auto-enslavement. Allow
1214  	 * live name change even when these slave devices are
1215  	 * up and running.
1216  	 *
1217  	 * Typically, users of these auto-enslaving devices
1218  	 * don't actually care about slave name change, as
1219  	 * they are supposed to operate on master interface
1220  	 * directly.
1221  	 */
1222  	if (dev->flags & IFF_UP &&
1223  	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
1224  		return -EBUSY;
1225  
1226  	write_seqcount_begin(&devnet_rename_seq);
1227  
1228  	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1229  		write_seqcount_end(&devnet_rename_seq);
1230  		return 0;
1231  	}
1232  
1233  	memcpy(oldname, dev->name, IFNAMSIZ);
1234  
1235  	err = dev_get_valid_name(net, dev, newname);
1236  	if (err < 0) {
1237  		write_seqcount_end(&devnet_rename_seq);
1238  		return err;
1239  	}
1240  
1241  	if (oldname[0] && !strchr(oldname, '%'))
1242  		netdev_info(dev, "renamed from %s\n", oldname);
1243  
1244  	old_assign_type = dev->name_assign_type;
1245  	dev->name_assign_type = NET_NAME_RENAMED;
1246  
1247  rollback:
1248  	ret = device_rename(&dev->dev, dev->name);
1249  	if (ret) {
1250  		memcpy(dev->name, oldname, IFNAMSIZ);
1251  		dev->name_assign_type = old_assign_type;
1252  		write_seqcount_end(&devnet_rename_seq);
1253  		return ret;
1254  	}
1255  
1256  	write_seqcount_end(&devnet_rename_seq);
1257  
1258  	netdev_adjacent_rename_links(dev, oldname);
1259  
1260  	write_lock_bh(&dev_base_lock);
1261  	netdev_name_node_del(dev->name_node);
1262  	write_unlock_bh(&dev_base_lock);
1263  
1264  	synchronize_rcu();
1265  
1266  	write_lock_bh(&dev_base_lock);
1267  	netdev_name_node_add(net, dev->name_node);
1268  	write_unlock_bh(&dev_base_lock);
1269  
1270  	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1271  	ret = notifier_to_errno(ret);
1272  
1273  	if (ret) {
1274  		/* err >= 0 after dev_alloc_name() or stores the first errno */
1275  		if (err >= 0) {
1276  			err = ret;
1277  			write_seqcount_begin(&devnet_rename_seq);
1278  			memcpy(dev->name, oldname, IFNAMSIZ);
1279  			memcpy(oldname, newname, IFNAMSIZ);
1280  			dev->name_assign_type = old_assign_type;
1281  			old_assign_type = NET_NAME_RENAMED;
1282  			goto rollback;
1283  		} else {
1284  			pr_err("%s: name change rollback failed: %d\n",
1285  			       dev->name, ret);
1286  		}
1287  	}
1288  
1289  	return err;
1290  }
1291  
1292  /**
1293   *	dev_set_alias - change ifalias of a device
1294   *	@dev: device
1295   *	@alias: name up to IFALIASZ
1296   *	@len: limit of bytes to copy from @alias
1297   *
1298   *	Set ifalias for a device.
1299   */
1300  int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1301  {
1302  	struct dev_ifalias *new_alias = NULL;
1303  
1304  	if (len >= IFALIASZ)
1305  		return -EINVAL;
1306  
1307  	if (len) {
1308  		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1309  		if (!new_alias)
1310  			return -ENOMEM;
1311  
1312  		memcpy(new_alias->ifalias, alias, len);
1313  		new_alias->ifalias[len] = 0;
1314  	}
1315  
1316  	mutex_lock(&ifalias_mutex);
1317  	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1318  					mutex_is_locked(&ifalias_mutex));
1319  	mutex_unlock(&ifalias_mutex);
1320  
1321  	if (new_alias)
1322  		kfree_rcu(new_alias, rcuhead);
1323  
1324  	return len;
1325  }
1326  EXPORT_SYMBOL(dev_set_alias);
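
/* Illustrative sketch (not from this file): setting and reading back an
 * interface alias; "uplink to core switch" is an arbitrary string.
 *
 *	char buf[IFALIASZ];
 *
 *	ret = dev_set_alias(dev, "uplink to core switch",
 *			    strlen("uplink to core switch"));
 *	// ret is the stored length on success, or a negative errno
 *	ret = dev_get_alias(dev, buf, sizeof(buf));
 */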
1327  
1328  /**
1329   *	dev_get_alias - get ifalias of a device
1330   *	@dev: device
1331   *	@name: buffer to store name of ifalias
1332   *	@len: size of buffer
1333   *
1334   *	Get ifalias for a device.  The caller must make sure dev cannot go
1335   *	away, e.g. by holding an RCU read lock or a reference count on the device.
1336   */
1337  int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1338  {
1339  	const struct dev_ifalias *alias;
1340  	int ret = 0;
1341  
1342  	rcu_read_lock();
1343  	alias = rcu_dereference(dev->ifalias);
1344  	if (alias)
1345  		ret = snprintf(name, len, "%s", alias->ifalias);
1346  	rcu_read_unlock();
1347  
1348  	return ret;
1349  }
1350  
1351  /**
1352   *	netdev_features_change - device changes features
1353   *	@dev: device to cause notification
1354   *
1355   *	Called to indicate a device has changed features.
1356   */
1357  void netdev_features_change(struct net_device *dev)
1358  {
1359  	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1360  }
1361  EXPORT_SYMBOL(netdev_features_change);
1362  
1363  /**
1364   *	netdev_state_change - device changes state
1365   *	@dev: device to cause notification
1366   *
1367   *	Called to indicate a device has changed state. This function calls
1368   *	the notifier chains for netdev_chain and sends a NEWLINK message
1369   *	to the routing socket.
1370   */
1371  void netdev_state_change(struct net_device *dev)
1372  {
1373  	if (dev->flags & IFF_UP) {
1374  		struct netdev_notifier_change_info change_info = {
1375  			.info.dev = dev,
1376  		};
1377  
1378  		call_netdevice_notifiers_info(NETDEV_CHANGE,
1379  					      &change_info.info);
1380  		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1381  	}
1382  }
1383  EXPORT_SYMBOL(netdev_state_change);
1384  
1385  /**
1386   * netdev_notify_peers - notify network peers about existence of @dev
1387   * @dev: network device
1388   *
1389   * Generate traffic such that interested network peers are aware of
1390   * @dev, such as by generating a gratuitous ARP. This may be used when
1391   * a device wants to inform the rest of the network about some sort of
1392   * reconfiguration such as a failover event or virtual machine
1393   * migration.
1394   */
1395  void netdev_notify_peers(struct net_device *dev)
1396  {
1397  	rtnl_lock();
1398  	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1399  	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1400  	rtnl_unlock();
1401  }
1402  EXPORT_SYMBOL(netdev_notify_peers);
1403  
1404  static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1405  {
1406  	const struct net_device_ops *ops = dev->netdev_ops;
1407  	int ret;
1408  
1409  	ASSERT_RTNL();
1410  
1411  	if (!netif_device_present(dev))
1412  		return -ENODEV;
1413  
1414  	/* Block netpoll from trying to do any rx path servicing.
1415  	 * If we don't do this there is a chance ndo_poll_controller
1416  	 * or ndo_poll may be running while we open the device
1417  	 */
1418  	netpoll_poll_disable(dev);
1419  
1420  	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1421  	ret = notifier_to_errno(ret);
1422  	if (ret)
1423  		return ret;
1424  
1425  	set_bit(__LINK_STATE_START, &dev->state);
1426  
1427  	if (ops->ndo_validate_addr)
1428  		ret = ops->ndo_validate_addr(dev);
1429  
1430  	if (!ret && ops->ndo_open)
1431  		ret = ops->ndo_open(dev);
1432  
1433  	netpoll_poll_enable(dev);
1434  
1435  	if (ret)
1436  		clear_bit(__LINK_STATE_START, &dev->state);
1437  	else {
1438  		dev->flags |= IFF_UP;
1439  		dev_set_rx_mode(dev);
1440  		dev_activate(dev);
1441  		add_device_randomness(dev->dev_addr, dev->addr_len);
1442  	}
1443  
1444  	return ret;
1445  }
1446  
1447  /**
1448   *	dev_open	- prepare an interface for use.
1449   *	@dev: device to open
1450   *	@extack: netlink extended ack
1451   *
1452   *	Takes a device from down to up state. The device's private open
1453   *	function is invoked and then the multicast lists are loaded. Finally
1454   *	the device is moved into the up state and a %NETDEV_UP message is
1455   *	sent to the netdev notifier chain.
1456   *
1457   *	Calling this function on an active interface is a nop. On a failure
1458   *	a negative errno code is returned.
1459   */
1460  int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1461  {
1462  	int ret;
1463  
1464  	if (dev->flags & IFF_UP)
1465  		return 0;
1466  
1467  	ret = __dev_open(dev, extack);
1468  	if (ret < 0)
1469  		return ret;
1470  
1471  	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1472  	call_netdevice_notifiers(NETDEV_UP, dev);
1473  
1474  	return ret;
1475  }
1476  EXPORT_SYMBOL(dev_open);
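
/* Illustrative sketch (not from this file): bringing an interface up and
 * down from kernel code; both calls require the RTNL lock.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL: no extended ack supplied
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */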
1477  
1478  static void __dev_close_many(struct list_head *head)
1479  {
1480  	struct net_device *dev;
1481  
1482  	ASSERT_RTNL();
1483  	might_sleep();
1484  
1485  	list_for_each_entry(dev, head, close_list) {
1486  		/* Temporarily disable netpoll until the interface is down */
1487  		netpoll_poll_disable(dev);
1488  
1489  		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1490  
1491  		clear_bit(__LINK_STATE_START, &dev->state);
1492  
1493  		/* Synchronize to scheduled poll. We cannot touch poll list, it
1494  		 * can be even on different cpu. So just clear netif_running().
1495  		 *
1496  		 * dev->stop() will invoke napi_disable() on all of its
1497  		 * napi_struct instances on this device.
1498  		 */
1499  		smp_mb__after_atomic(); /* Commit netif_running(). */
1500  	}
1501  
1502  	dev_deactivate_many(head);
1503  
1504  	list_for_each_entry(dev, head, close_list) {
1505  		const struct net_device_ops *ops = dev->netdev_ops;
1506  
1507  		/*
1508  		 *	Call the device specific close. This cannot fail.
1509  		 *	Only if device is UP
1510  		 *
1511  		 *	We allow it to be called even after a DETACH hot-plug
1512  		 *	event.
1513  		 */
1514  		if (ops->ndo_stop)
1515  			ops->ndo_stop(dev);
1516  
1517  		dev->flags &= ~IFF_UP;
1518  		netpoll_poll_enable(dev);
1519  	}
1520  }
1521  
1522  static void __dev_close(struct net_device *dev)
1523  {
1524  	LIST_HEAD(single);
1525  
1526  	list_add(&dev->close_list, &single);
1527  	__dev_close_many(&single);
1528  	list_del(&single);
1529  }
1530  
1531  void dev_close_many(struct list_head *head, bool unlink)
1532  {
1533  	struct net_device *dev, *tmp;
1534  
1535  	/* Remove the devices that don't need to be closed */
1536  	list_for_each_entry_safe(dev, tmp, head, close_list)
1537  		if (!(dev->flags & IFF_UP))
1538  			list_del_init(&dev->close_list);
1539  
1540  	__dev_close_many(head);
1541  
1542  	list_for_each_entry_safe(dev, tmp, head, close_list) {
1543  		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1544  		call_netdevice_notifiers(NETDEV_DOWN, dev);
1545  		if (unlink)
1546  			list_del_init(&dev->close_list);
1547  	}
1548  }
1549  EXPORT_SYMBOL(dev_close_many);
1550  
1551  /**
1552   *	dev_close - shutdown an interface.
1553   *	@dev: device to shutdown
1554   *
1555   *	This function moves an active device into down state. A
1556   *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1557   *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1558   *	chain.
1559   */
1560  void dev_close(struct net_device *dev)
1561  {
1562  	if (dev->flags & IFF_UP) {
1563  		LIST_HEAD(single);
1564  
1565  		list_add(&dev->close_list, &single);
1566  		dev_close_many(&single, true);
1567  		list_del(&single);
1568  	}
1569  }
1570  EXPORT_SYMBOL(dev_close);
1571  
1572  
1573  /**
1574   *	dev_disable_lro - disable Large Receive Offload on a device
1575   *	@dev: device
1576   *
1577   *	Disable Large Receive Offload (LRO) on a net device.  Must be
1578   *	called under RTNL.  This is needed if received packets may be
1579   *	forwarded to another interface.
1580   */
1581  void dev_disable_lro(struct net_device *dev)
1582  {
1583  	struct net_device *lower_dev;
1584  	struct list_head *iter;
1585  
1586  	dev->wanted_features &= ~NETIF_F_LRO;
1587  	netdev_update_features(dev);
1588  
1589  	if (unlikely(dev->features & NETIF_F_LRO))
1590  		netdev_WARN(dev, "failed to disable LRO!\n");
1591  
1592  	netdev_for_each_lower_dev(dev, lower_dev, iter)
1593  		dev_disable_lro(lower_dev);
1594  }
1595  EXPORT_SYMBOL(dev_disable_lro);
1596  
1597  /**
1598   *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1599   *	@dev: device
1600   *
1601   *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1602   *	called under RTNL.  This is needed if Generic XDP is installed on
1603   *	the device.
1604   */
1605  static void dev_disable_gro_hw(struct net_device *dev)
1606  {
1607  	dev->wanted_features &= ~NETIF_F_GRO_HW;
1608  	netdev_update_features(dev);
1609  
1610  	if (unlikely(dev->features & NETIF_F_GRO_HW))
1611  		netdev_WARN(dev, "failed to disable GRO_HW!\n");
1612  }
1613  
1614  const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1615  {
1616  #define N(val) 						\
1617  	case NETDEV_##val:				\
1618  		return "NETDEV_" __stringify(val);
1619  	switch (cmd) {
1620  	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1621  	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1622  	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1623  	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1624  	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1625  	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1626  	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1627  	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1628  	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1629  	N(PRE_CHANGEADDR)
1630  	}
1631  #undef N
1632  	return "UNKNOWN_NETDEV_EVENT";
1633  }
1634  EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1635  
1636  static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1637  				   struct net_device *dev)
1638  {
1639  	struct netdev_notifier_info info = {
1640  		.dev = dev,
1641  	};
1642  
1643  	return nb->notifier_call(nb, val, &info);
1644  }
1645  
1646  static int call_netdevice_register_notifiers(struct notifier_block *nb,
1647  					     struct net_device *dev)
1648  {
1649  	int err;
1650  
1651  	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1652  	err = notifier_to_errno(err);
1653  	if (err)
1654  		return err;
1655  
1656  	if (!(dev->flags & IFF_UP))
1657  		return 0;
1658  
1659  	call_netdevice_notifier(nb, NETDEV_UP, dev);
1660  	return 0;
1661  }
1662  
1663  static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1664  						struct net_device *dev)
1665  {
1666  	if (dev->flags & IFF_UP) {
1667  		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1668  					dev);
1669  		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1670  	}
1671  	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1672  }
1673  
1674  static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1675  						 struct net *net)
1676  {
1677  	struct net_device *dev;
1678  	int err;
1679  
1680  	for_each_netdev(net, dev) {
1681  		err = call_netdevice_register_notifiers(nb, dev);
1682  		if (err)
1683  			goto rollback;
1684  	}
1685  	return 0;
1686  
1687  rollback:
1688  	for_each_netdev_continue_reverse(net, dev)
1689  		call_netdevice_unregister_notifiers(nb, dev);
1690  	return err;
1691  }
1692  
1693  static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1694  						    struct net *net)
1695  {
1696  	struct net_device *dev;
1697  
1698  	for_each_netdev(net, dev)
1699  		call_netdevice_unregister_notifiers(nb, dev);
1700  }
1701  
1702  static int dev_boot_phase = 1;
1703  
1704  /**
1705   * register_netdevice_notifier - register a network notifier block
1706   * @nb: notifier
1707   *
1708   * Register a notifier to be called when network device events occur.
1709   * The notifier passed is linked into the kernel structures and must
1710   * not be reused until it has been unregistered. A negative errno code
1711   * is returned on a failure.
1712   *
1713   * When registered, all registration and up events are replayed
1714   * to the new notifier to allow it to have a race-free
1715   * view of the network device list.
1716   */
1717  
1718  int register_netdevice_notifier(struct notifier_block *nb)
1719  {
1720  	struct net *net;
1721  	int err;
1722  
1723  	/* Close race with setup_net() and cleanup_net() */
1724  	down_write(&pernet_ops_rwsem);
1725  	rtnl_lock();
1726  	err = raw_notifier_chain_register(&netdev_chain, nb);
1727  	if (err)
1728  		goto unlock;
1729  	if (dev_boot_phase)
1730  		goto unlock;
1731  	for_each_net(net) {
1732  		err = call_netdevice_register_net_notifiers(nb, net);
1733  		if (err)
1734  			goto rollback;
1735  	}
1736  
1737  unlock:
1738  	rtnl_unlock();
1739  	up_write(&pernet_ops_rwsem);
1740  	return err;
1741  
1742  rollback:
1743  	for_each_net_continue_reverse(net)
1744  		call_netdevice_unregister_net_notifiers(nb, net);
1745  
1746  	raw_notifier_chain_unregister(&netdev_chain, nb);
1747  	goto unlock;
1748  }
1749  EXPORT_SYMBOL(register_netdevice_notifier);
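
/* Illustrative sketch (not from this file): a minimal notifier that logs
 * NETDEV_UP events. my_netdev_event and my_nb are hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "is up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */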
1750  
1751  /**
1752   * unregister_netdevice_notifier - unregister a network notifier block
1753   * @nb: notifier
1754   *
1755   * Unregister a notifier previously registered by
1756   * register_netdevice_notifier(). The notifier is unlinked from the
1757   * kernel structures and may then be reused. A negative errno code
1758   * is returned on a failure.
1759   *
1760   * After unregistering unregister and down device events are synthesized
1761   * for all devices on the device list to the removed notifier to remove
1762   * the need for special case cleanup code.
1763   */
1764  
1765  int unregister_netdevice_notifier(struct notifier_block *nb)
1766  {
1767  	struct net *net;
1768  	int err;
1769  
1770  	/* Close race with setup_net() and cleanup_net() */
1771  	down_write(&pernet_ops_rwsem);
1772  	rtnl_lock();
1773  	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1774  	if (err)
1775  		goto unlock;
1776  
1777  	for_each_net(net)
1778  		call_netdevice_unregister_net_notifiers(nb, net);
1779  
1780  unlock:
1781  	rtnl_unlock();
1782  	up_write(&pernet_ops_rwsem);
1783  	return err;
1784  }
1785  EXPORT_SYMBOL(unregister_netdevice_notifier);
1786  
1787  static int __register_netdevice_notifier_net(struct net *net,
1788  					     struct notifier_block *nb,
1789  					     bool ignore_call_fail)
1790  {
1791  	int err;
1792  
1793  	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1794  	if (err)
1795  		return err;
1796  	if (dev_boot_phase)
1797  		return 0;
1798  
1799  	err = call_netdevice_register_net_notifiers(nb, net);
1800  	if (err && !ignore_call_fail)
1801  		goto chain_unregister;
1802  
1803  	return 0;
1804  
1805  chain_unregister:
1806  	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1807  	return err;
1808  }
1809  
1810  static int __unregister_netdevice_notifier_net(struct net *net,
1811  					       struct notifier_block *nb)
1812  {
1813  	int err;
1814  
1815  	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1816  	if (err)
1817  		return err;
1818  
1819  	call_netdevice_unregister_net_notifiers(nb, net);
1820  	return 0;
1821  }
1822  
1823  /**
1824   * register_netdevice_notifier_net - register a per-netns network notifier block
1825   * @net: network namespace
1826   * @nb: notifier
1827   *
1828   * Register a notifier to be called when network device events occur.
1829   * The notifier passed is linked into the kernel structures and must
1830   * not be reused until it has been unregistered. A negative errno code
1831   * is returned on a failure.
1832   *
1833   * When registered, all registration and up events are replayed
1834   * to the new notifier to allow it to have a race-free view of the
1835   * network device list.
1836   */
1837  
1838  int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1839  {
1840  	int err;
1841  
1842  	rtnl_lock();
1843  	err = __register_netdevice_notifier_net(net, nb, false);
1844  	rtnl_unlock();
1845  	return err;
1846  }
1847  EXPORT_SYMBOL(register_netdevice_notifier_net);
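
/*
 * Example usage (illustrative sketch; my_nb is hypothetical): a caller
 * that only cares about devices in one namespace can register against
 * that namespace instead of the global chain, here &init_net.
 *
 *	err = register_netdevice_notifier_net(&init_net, &my_nb);
 *	...
 *	unregister_netdevice_notifier_net(&init_net, &my_nb);
 *
 * The callback and replay semantics are the same as for
 * register_netdevice_notifier(), but only devices in @net are reported.
 */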
1848  
1849  /**
1850   * unregister_netdevice_notifier_net - unregister a per-netns
1851   *                                     network notifier block
1852   * @net: network namespace
1853   * @nb: notifier
1854   *
1855   * Unregister a notifier previously registered by
1856   * register_netdevice_notifier_net(). The notifier is unlinked from the
1857   * kernel structures and may then be reused. A negative errno code
1858   * is returned on a failure.
1859   *
1860   * After unregistering, unregister and down device events are synthesized
1861   * for all devices on the device list and delivered to the removed notifier,
1862   * removing the need for special-case cleanup code.
1863   */
1864  
1865  int unregister_netdevice_notifier_net(struct net *net,
1866  				      struct notifier_block *nb)
1867  {
1868  	int err;
1869  
1870  	rtnl_lock();
1871  	err = __unregister_netdevice_notifier_net(net, nb);
1872  	rtnl_unlock();
1873  	return err;
1874  }
1875  EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1876  
1877  int register_netdevice_notifier_dev_net(struct net_device *dev,
1878  					struct notifier_block *nb,
1879  					struct netdev_net_notifier *nn)
1880  {
1881  	int err;
1882  
1883  	rtnl_lock();
1884  	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1885  	if (!err) {
1886  		nn->nb = nb;
1887  		list_add(&nn->list, &dev->net_notifier_list);
1888  	}
1889  	rtnl_unlock();
1890  	return err;
1891  }
1892  EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1893  
1894  int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1895  					  struct notifier_block *nb,
1896  					  struct netdev_net_notifier *nn)
1897  {
1898  	int err;
1899  
1900  	rtnl_lock();
1901  	list_del(&nn->list);
1902  	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1903  	rtnl_unlock();
1904  	return err;
1905  }
1906  EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1907  
1908  static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1909  					     struct net *net)
1910  {
1911  	struct netdev_net_notifier *nn;
1912  
1913  	list_for_each_entry(nn, &dev->net_notifier_list, list) {
1914  		__unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
1915  		__register_netdevice_notifier_net(net, nn->nb, true);
1916  	}
1917  }
1918  
1919  /**
1920   *	call_netdevice_notifiers_info - call all network notifier blocks
1921   *	@val: value passed unmodified to notifier function
1922   *	@info: notifier information data
1923   *
1924   *	Call all network notifier blocks.  Parameters and return value
1925   *	are as for raw_notifier_call_chain().
1926   */
1927  
1928  static int call_netdevice_notifiers_info(unsigned long val,
1929  					 struct netdev_notifier_info *info)
1930  {
1931  	struct net *net = dev_net(info->dev);
1932  	int ret;
1933  
1934  	ASSERT_RTNL();
1935  
1936  	/* Run per-netns notifier block chain first, then run the global one.
1937  	 * Hopefully, one day, the global one is going to be removed once
1938  	 * all notifier block registrants have been converted to be per-netns.
1939  	 */
1940  	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1941  	if (ret & NOTIFY_STOP_MASK)
1942  		return ret;
1943  	return raw_notifier_call_chain(&netdev_chain, val, info);
1944  }
1945  
1946  static int call_netdevice_notifiers_extack(unsigned long val,
1947  					   struct net_device *dev,
1948  					   struct netlink_ext_ack *extack)
1949  {
1950  	struct netdev_notifier_info info = {
1951  		.dev = dev,
1952  		.extack = extack,
1953  	};
1954  
1955  	return call_netdevice_notifiers_info(val, &info);
1956  }
1957  
1958  /**
1959   *	call_netdevice_notifiers - call all network notifier blocks
1960   *      @val: value passed unmodified to notifier function
1961   *      @dev: net_device pointer passed unmodified to notifier function
1962   *
1963   *	Call all network notifier blocks.  Parameters and return value
1964   *	are as for raw_notifier_call_chain().
1965   */
1966  
1967  int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1968  {
1969  	return call_netdevice_notifiers_extack(val, dev, NULL);
1970  }
1971  EXPORT_SYMBOL(call_netdevice_notifiers);
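
/*
 * Example usage (illustrative sketch): core code that has just changed
 * some device state notifies listeners while holding the rtnl lock, e.g.
 * after updating a hardware address:
 *
 *	ASSERT_RTNL();
 *	...change dev->dev_addr...
 *	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
 */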
1972  
1973  /**
1974   *	call_netdevice_notifiers_mtu - call all network notifier blocks
1975   *	@val: value passed unmodified to notifier function
1976   *	@dev: net_device pointer passed unmodified to notifier function
1977   *	@arg: additional u32 argument passed to the notifier function
1978   *
1979   *	Call all network notifier blocks.  Parameters and return value
1980   *	are as for raw_notifier_call_chain().
1981   */
1982  static int call_netdevice_notifiers_mtu(unsigned long val,
1983  					struct net_device *dev, u32 arg)
1984  {
1985  	struct netdev_notifier_info_ext info = {
1986  		.info.dev = dev,
1987  		.ext.mtu = arg,
1988  	};
1989  
1990  	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
1991  
1992  	return call_netdevice_notifiers_info(val, &info.info);
1993  }
1994  
1995  #ifdef CONFIG_NET_INGRESS
1996  static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
1997  
1998  void net_inc_ingress_queue(void)
1999  {
2000  	static_branch_inc(&ingress_needed_key);
2001  }
2002  EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2003  
2004  void net_dec_ingress_queue(void)
2005  {
2006  	static_branch_dec(&ingress_needed_key);
2007  }
2008  EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2009  #endif
2010  
2011  #ifdef CONFIG_NET_EGRESS
2012  static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2013  
2014  void net_inc_egress_queue(void)
2015  {
2016  	static_branch_inc(&egress_needed_key);
2017  }
2018  EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2019  
2020  void net_dec_egress_queue(void)
2021  {
2022  	static_branch_dec(&egress_needed_key);
2023  }
2024  EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2025  #endif
2026  
2027  static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2028  #ifdef CONFIG_JUMP_LABEL
2029  static atomic_t netstamp_needed_deferred;
2030  static atomic_t netstamp_wanted;
2031  static void netstamp_clear(struct work_struct *work)
2032  {
2033  	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2034  	int wanted;
2035  
2036  	wanted = atomic_add_return(deferred, &netstamp_wanted);
2037  	if (wanted > 0)
2038  		static_branch_enable(&netstamp_needed_key);
2039  	else
2040  		static_branch_disable(&netstamp_needed_key);
2041  }
2042  static DECLARE_WORK(netstamp_work, netstamp_clear);
2043  #endif
2044  
2045  void net_enable_timestamp(void)
2046  {
2047  #ifdef CONFIG_JUMP_LABEL
2048  	int wanted;
2049  
2050  	while (1) {
2051  		wanted = atomic_read(&netstamp_wanted);
2052  		if (wanted <= 0)
2053  			break;
2054  		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2055  			return;
2056  	}
2057  	atomic_inc(&netstamp_needed_deferred);
2058  	schedule_work(&netstamp_work);
2059  #else
2060  	static_branch_inc(&netstamp_needed_key);
2061  #endif
2062  }
2063  EXPORT_SYMBOL(net_enable_timestamp);
2064  
2065  void net_disable_timestamp(void)
2066  {
2067  #ifdef CONFIG_JUMP_LABEL
2068  	int wanted;
2069  
2070  	while (1) {
2071  		wanted = atomic_read(&netstamp_wanted);
2072  		if (wanted <= 1)
2073  			break;
2074  		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2075  			return;
2076  	}
2077  	atomic_dec(&netstamp_needed_deferred);
2078  	schedule_work(&netstamp_work);
2079  #else
2080  	static_branch_dec(&netstamp_needed_key);
2081  #endif
2082  }
2083  EXPORT_SYMBOL(net_disable_timestamp);
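
/*
 * Example usage (illustrative sketch): a feature that needs software RX
 * timestamps keeps the static key enabled for as long as it is active,
 * typically from its enable/disable paths:
 *
 *	net_enable_timestamp();		when the feature is switched on
 *	...
 *	net_disable_timestamp();	when it is switched off again
 *
 * While the key is enabled, net_timestamp_check()/net_timestamp_set()
 * below stamp packets with ktime_get_real().
 */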
2084  
2085  static inline void net_timestamp_set(struct sk_buff *skb)
2086  {
2087  	skb->tstamp = 0;
2088  	if (static_branch_unlikely(&netstamp_needed_key))
2089  		__net_timestamp(skb);
2090  }
2091  
2092  #define net_timestamp_check(COND, SKB)				\
2093  	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2094  		if ((COND) && !(SKB)->tstamp)			\
2095  			__net_timestamp(SKB);			\
2096  	}							\
2097  
2098  bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2099  {
2100  	unsigned int len;
2101  
2102  	if (!(dev->flags & IFF_UP))
2103  		return false;
2104  
2105  	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
2106  	if (skb->len <= len)
2107  		return true;
2108  
2109  	/* if the skb is GSO, we don't care about the length as the packet
2110  	 * could be forwarded without being segmented beforehand
2111  	 */
2112  	if (skb_is_gso(skb))
2113  		return true;
2114  
2115  	return false;
2116  }
2117  EXPORT_SYMBOL_GPL(is_skb_forwardable);
2118  
2119  int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2120  {
2121  	int ret = ____dev_forward_skb(dev, skb);
2122  
2123  	if (likely(!ret)) {
2124  		skb->protocol = eth_type_trans(skb, dev);
2125  		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2126  	}
2127  
2128  	return ret;
2129  }
2130  EXPORT_SYMBOL_GPL(__dev_forward_skb);
2131  
2132  /**
2133   * dev_forward_skb - loopback an skb to another netif
2134   *
2135   * @dev: destination network device
2136   * @skb: buffer to forward
2137   *
2138   * return values:
2139   *	NET_RX_SUCCESS	(no congestion)
2140   *	NET_RX_DROP     (packet was dropped, but freed)
2141   *
2142   * dev_forward_skb can be used for injecting an skb from the
2143   * start_xmit function of one device into the receive queue
2144   * of another device.
2145   *
2146   * The receiving device may be in another namespace, so
2147   * we have to clear all information in the skb that could
2148   * impact namespace isolation.
2149   */
2150  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2151  {
2152  	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2153  }
2154  EXPORT_SYMBOL_GPL(dev_forward_skb);
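
/*
 * Example usage (illustrative sketch; my_pair_xmit and my_get_peer are
 * hypothetical): a veth-like driver hands frames transmitted on one
 * device straight to the receive path of its paired device.
 *
 *	static netdev_tx_t my_pair_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 *
 * The skb is consumed in both cases; on NET_RX_DROP it has already been
 * freed, so the caller must not touch it again.
 */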
2155  
2156  static inline int deliver_skb(struct sk_buff *skb,
2157  			      struct packet_type *pt_prev,
2158  			      struct net_device *orig_dev)
2159  {
2160  	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2161  		return -ENOMEM;
2162  	refcount_inc(&skb->users);
2163  	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2164  }
2165  
2166  static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2167  					  struct packet_type **pt,
2168  					  struct net_device *orig_dev,
2169  					  __be16 type,
2170  					  struct list_head *ptype_list)
2171  {
2172  	struct packet_type *ptype, *pt_prev = *pt;
2173  
2174  	list_for_each_entry_rcu(ptype, ptype_list, list) {
2175  		if (ptype->type != type)
2176  			continue;
2177  		if (pt_prev)
2178  			deliver_skb(skb, pt_prev, orig_dev);
2179  		pt_prev = ptype;
2180  	}
2181  	*pt = pt_prev;
2182  }
2183  
2184  static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2185  {
2186  	if (!ptype->af_packet_priv || !skb->sk)
2187  		return false;
2188  
2189  	if (ptype->id_match)
2190  		return ptype->id_match(ptype, skb->sk);
2191  	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2192  		return true;
2193  
2194  	return false;
2195  }
2196  
2197  /**
2198   * dev_nit_active - return true if any network interface taps are in use
2199   *
2200   * @dev: network device to check for the presence of taps
2201   */
2202  bool dev_nit_active(struct net_device *dev)
2203  {
2204  	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2205  }
2206  EXPORT_SYMBOL_GPL(dev_nit_active);
2207  
2208  /*
2209   *	Support routine. Sends outgoing frames to any network
2210   *	taps currently in use.
2211   */
2212  
2213  void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2214  {
2215  	struct packet_type *ptype;
2216  	struct sk_buff *skb2 = NULL;
2217  	struct packet_type *pt_prev = NULL;
2218  	struct list_head *ptype_list = &ptype_all;
2219  
2220  	rcu_read_lock();
2221  again:
2222  	list_for_each_entry_rcu(ptype, ptype_list, list) {
2223  		if (ptype->ignore_outgoing)
2224  			continue;
2225  
2226  		/* Never send packets back to the socket
2227  		 * they originated from - MvS (miquels@drinkel.ow.org)
2228  		 */
2229  		if (skb_loop_sk(ptype, skb))
2230  			continue;
2231  
2232  		if (pt_prev) {
2233  			deliver_skb(skb2, pt_prev, skb->dev);
2234  			pt_prev = ptype;
2235  			continue;
2236  		}
2237  
2238  		/* need to clone skb, done only once */
2239  		skb2 = skb_clone(skb, GFP_ATOMIC);
2240  		if (!skb2)
2241  			goto out_unlock;
2242  
2243  		net_timestamp_set(skb2);
2244  
2245  		/* The network header should already be correctly
2246  		 * set by the sender, so the check below is just
2247  		 * protection against buggy protocols.
2248  		 */
2249  		skb_reset_mac_header(skb2);
2250  
2251  		if (skb_network_header(skb2) < skb2->data ||
2252  		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2253  			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2254  					     ntohs(skb2->protocol),
2255  					     dev->name);
2256  			skb_reset_network_header(skb2);
2257  		}
2258  
2259  		skb2->transport_header = skb2->network_header;
2260  		skb2->pkt_type = PACKET_OUTGOING;
2261  		pt_prev = ptype;
2262  	}
2263  
2264  	if (ptype_list == &ptype_all) {
2265  		ptype_list = &dev->ptype_all;
2266  		goto again;
2267  	}
2268  out_unlock:
2269  	if (pt_prev) {
2270  		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2271  			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2272  		else
2273  			kfree_skb(skb2);
2274  	}
2275  	rcu_read_unlock();
2276  }
2277  EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2278  
2279  /**
2280   * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2281   * @dev: Network device
2282   * @txq: number of queues available
2283   *
2284   * If real_num_tx_queues is changed the tc mappings may no longer be
2285   * valid. To resolve this verify the tc mapping remains valid and, if
2286   * not, NULL the mapping. With no priorities mapping to this
2287   * offset/count pair it will no longer be used. In the worst case, if
2288   * TC0 is invalid, nothing can be done, so disable priority mappings
2289   * entirely. It is expected that drivers will fix this mapping if they
2290   * can before calling netif_set_real_num_tx_queues.
2291   */
2292  static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2293  {
2294  	int i;
2295  	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2296  
2297  	/* If TC0 is invalidated disable TC mapping */
2298  	if (tc->offset + tc->count > txq) {
2299  		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2300  		dev->num_tc = 0;
2301  		return;
2302  	}
2303  
2304  	/* Invalidated prio to tc mappings set to TC0 */
2305  	for (i = 1; i < TC_BITMASK + 1; i++) {
2306  		int q = netdev_get_prio_tc_map(dev, i);
2307  
2308  		tc = &dev->tc_to_txq[q];
2309  		if (tc->offset + tc->count > txq) {
2310  			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2311  				i, q);
2312  			netdev_set_prio_tc_map(dev, i, 0);
2313  		}
2314  	}
2315  }
2316  
2317  int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2318  {
2319  	if (dev->num_tc) {
2320  		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2321  		int i;
2322  
2323  		/* walk through the TCs and see if it falls into any of them */
2324  		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2325  			if ((txq - tc->offset) < tc->count)
2326  				return i;
2327  		}
2328  
2329  		/* didn't find it, just return -1 to indicate no match */
2330  		return -1;
2331  	}
2332  
2333  	return 0;
2334  }
2335  EXPORT_SYMBOL(netdev_txq_to_tc);
2336  
2337  #ifdef CONFIG_XPS
2338  struct static_key xps_needed __read_mostly;
2339  EXPORT_SYMBOL(xps_needed);
2340  struct static_key xps_rxqs_needed __read_mostly;
2341  EXPORT_SYMBOL(xps_rxqs_needed);
2342  static DEFINE_MUTEX(xps_map_mutex);
2343  #define xmap_dereference(P)		\
2344  	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2345  
2346  static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2347  			     int tci, u16 index)
2348  {
2349  	struct xps_map *map = NULL;
2350  	int pos;
2351  
2352  	if (dev_maps)
2353  		map = xmap_dereference(dev_maps->attr_map[tci]);
2354  	if (!map)
2355  		return false;
2356  
2357  	for (pos = map->len; pos--;) {
2358  		if (map->queues[pos] != index)
2359  			continue;
2360  
2361  		if (map->len > 1) {
2362  			map->queues[pos] = map->queues[--map->len];
2363  			break;
2364  		}
2365  
2366  		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2367  		kfree_rcu(map, rcu);
2368  		return false;
2369  	}
2370  
2371  	return true;
2372  }
2373  
2374  static bool remove_xps_queue_cpu(struct net_device *dev,
2375  				 struct xps_dev_maps *dev_maps,
2376  				 int cpu, u16 offset, u16 count)
2377  {
2378  	int num_tc = dev->num_tc ? : 1;
2379  	bool active = false;
2380  	int tci;
2381  
2382  	for (tci = cpu * num_tc; num_tc--; tci++) {
2383  		int i, j;
2384  
2385  		for (i = count, j = offset; i--; j++) {
2386  			if (!remove_xps_queue(dev_maps, tci, j))
2387  				break;
2388  		}
2389  
2390  		active |= i < 0;
2391  	}
2392  
2393  	return active;
2394  }
2395  
2396  static void reset_xps_maps(struct net_device *dev,
2397  			   struct xps_dev_maps *dev_maps,
2398  			   bool is_rxqs_map)
2399  {
2400  	if (is_rxqs_map) {
2401  		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2402  		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2403  	} else {
2404  		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2405  	}
2406  	static_key_slow_dec_cpuslocked(&xps_needed);
2407  	kfree_rcu(dev_maps, rcu);
2408  }
2409  
2410  static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2411  			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2412  			   u16 offset, u16 count, bool is_rxqs_map)
2413  {
2414  	bool active = false;
2415  	int i, j;
2416  
2417  	for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2418  	     j < nr_ids;)
2419  		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2420  					       count);
2421  	if (!active)
2422  		reset_xps_maps(dev, dev_maps, is_rxqs_map);
2423  
2424  	if (!is_rxqs_map) {
2425  		for (i = offset + (count - 1); count--; i--) {
2426  			netdev_queue_numa_node_write(
2427  				netdev_get_tx_queue(dev, i),
2428  				NUMA_NO_NODE);
2429  		}
2430  	}
2431  }
2432  
2433  static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2434  				   u16 count)
2435  {
2436  	const unsigned long *possible_mask = NULL;
2437  	struct xps_dev_maps *dev_maps;
2438  	unsigned int nr_ids;
2439  
2440  	if (!static_key_false(&xps_needed))
2441  		return;
2442  
2443  	cpus_read_lock();
2444  	mutex_lock(&xps_map_mutex);
2445  
2446  	if (static_key_false(&xps_rxqs_needed)) {
2447  		dev_maps = xmap_dereference(dev->xps_rxqs_map);
2448  		if (dev_maps) {
2449  			nr_ids = dev->num_rx_queues;
2450  			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2451  				       offset, count, true);
2452  		}
2453  	}
2454  
2455  	dev_maps = xmap_dereference(dev->xps_cpus_map);
2456  	if (!dev_maps)
2457  		goto out_no_maps;
2458  
2459  	if (num_possible_cpus() > 1)
2460  		possible_mask = cpumask_bits(cpu_possible_mask);
2461  	nr_ids = nr_cpu_ids;
2462  	clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2463  		       false);
2464  
2465  out_no_maps:
2466  	mutex_unlock(&xps_map_mutex);
2467  	cpus_read_unlock();
2468  }
2469  
2470  static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2471  {
2472  	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2473  }
2474  
2475  static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2476  				      u16 index, bool is_rxqs_map)
2477  {
2478  	struct xps_map *new_map;
2479  	int alloc_len = XPS_MIN_MAP_ALLOC;
2480  	int i, pos;
2481  
2482  	for (pos = 0; map && pos < map->len; pos++) {
2483  		if (map->queues[pos] != index)
2484  			continue;
2485  		return map;
2486  	}
2487  
2488  	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2489  	if (map) {
2490  		if (pos < map->alloc_len)
2491  			return map;
2492  
2493  		alloc_len = map->alloc_len * 2;
2494  	}
2495  
2496  	/* Need to allocate a new map to store the tx-queue on this
2497  	 * CPU's/rx-queue's map
2498  	 */
2499  	if (is_rxqs_map)
2500  		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2501  	else
2502  		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2503  				       cpu_to_node(attr_index));
2504  	if (!new_map)
2505  		return NULL;
2506  
2507  	for (i = 0; i < pos; i++)
2508  		new_map->queues[i] = map->queues[i];
2509  	new_map->alloc_len = alloc_len;
2510  	new_map->len = pos;
2511  
2512  	return new_map;
2513  }
2514  
2515  /* Must be called under cpus_read_lock */
2516  int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2517  			  u16 index, bool is_rxqs_map)
2518  {
2519  	const unsigned long *online_mask = NULL, *possible_mask = NULL;
2520  	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2521  	int i, j, tci, numa_node_id = -2;
2522  	int maps_sz, num_tc = 1, tc = 0;
2523  	struct xps_map *map, *new_map;
2524  	bool active = false;
2525  	unsigned int nr_ids;
2526  
2527  	if (dev->num_tc) {
2528  		/* Do not allow XPS on subordinate device directly */
2529  		num_tc = dev->num_tc;
2530  		if (num_tc < 0)
2531  			return -EINVAL;
2532  
2533  		/* If queue belongs to subordinate dev use its map */
2534  		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2535  
2536  		tc = netdev_txq_to_tc(dev, index);
2537  		if (tc < 0)
2538  			return -EINVAL;
2539  	}
2540  
2541  	mutex_lock(&xps_map_mutex);
2542  	if (is_rxqs_map) {
2543  		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2544  		dev_maps = xmap_dereference(dev->xps_rxqs_map);
2545  		nr_ids = dev->num_rx_queues;
2546  	} else {
2547  		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2548  		if (num_possible_cpus() > 1) {
2549  			online_mask = cpumask_bits(cpu_online_mask);
2550  			possible_mask = cpumask_bits(cpu_possible_mask);
2551  		}
2552  		dev_maps = xmap_dereference(dev->xps_cpus_map);
2553  		nr_ids = nr_cpu_ids;
2554  	}
2555  
2556  	if (maps_sz < L1_CACHE_BYTES)
2557  		maps_sz = L1_CACHE_BYTES;
2558  
2559  	/* allocate memory for queue storage */
2560  	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2561  	     j < nr_ids;) {
2562  		if (!new_dev_maps)
2563  			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2564  		if (!new_dev_maps) {
2565  			mutex_unlock(&xps_map_mutex);
2566  			return -ENOMEM;
2567  		}
2568  
2569  		tci = j * num_tc + tc;
2570  		map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
2571  				 NULL;
2572  
2573  		map = expand_xps_map(map, j, index, is_rxqs_map);
2574  		if (!map)
2575  			goto error;
2576  
2577  		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2578  	}
2579  
2580  	if (!new_dev_maps)
2581  		goto out_no_new_maps;
2582  
2583  	if (!dev_maps) {
2584  		/* Increment static keys at most once per type */
2585  		static_key_slow_inc_cpuslocked(&xps_needed);
2586  		if (is_rxqs_map)
2587  			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2588  	}
2589  
2590  	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2591  	     j < nr_ids;) {
2592  		/* copy maps belonging to foreign traffic classes */
2593  		for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
2594  			/* fill in the new device map from the old device map */
2595  			map = xmap_dereference(dev_maps->attr_map[tci]);
2596  			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2597  		}
2598  
2599  		/* We need to explicitly update tci as the previous loop
2600  		 * could break out early if dev_maps is NULL.
2601  		 */
2602  		tci = j * num_tc + tc;
2603  
2604  		if (netif_attr_test_mask(j, mask, nr_ids) &&
2605  		    netif_attr_test_online(j, online_mask, nr_ids)) {
2606  			/* add tx-queue to CPU/rx-queue maps */
2607  			int pos = 0;
2608  
2609  			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2610  			while ((pos < map->len) && (map->queues[pos] != index))
2611  				pos++;
2612  
2613  			if (pos == map->len)
2614  				map->queues[map->len++] = index;
2615  #ifdef CONFIG_NUMA
2616  			if (!is_rxqs_map) {
2617  				if (numa_node_id == -2)
2618  					numa_node_id = cpu_to_node(j);
2619  				else if (numa_node_id != cpu_to_node(j))
2620  					numa_node_id = -1;
2621  			}
2622  #endif
2623  		} else if (dev_maps) {
2624  			/* fill in the new device map from the old device map */
2625  			map = xmap_dereference(dev_maps->attr_map[tci]);
2626  			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2627  		}
2628  
2629  		/* copy maps belonging to foreign traffic classes */
2630  		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2631  			/* fill in the new device map from the old device map */
2632  			map = xmap_dereference(dev_maps->attr_map[tci]);
2633  			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2634  		}
2635  	}
2636  
2637  	if (is_rxqs_map)
2638  		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2639  	else
2640  		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
2641  
2642  	/* Cleanup old maps */
2643  	if (!dev_maps)
2644  		goto out_no_old_maps;
2645  
2646  	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2647  	     j < nr_ids;) {
2648  		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2649  			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2650  			map = xmap_dereference(dev_maps->attr_map[tci]);
2651  			if (map && map != new_map)
2652  				kfree_rcu(map, rcu);
2653  		}
2654  	}
2655  
2656  	kfree_rcu(dev_maps, rcu);
2657  
2658  out_no_old_maps:
2659  	dev_maps = new_dev_maps;
2660  	active = true;
2661  
2662  out_no_new_maps:
2663  	if (!is_rxqs_map) {
2664  		/* update Tx queue numa node */
2665  		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2666  					     (numa_node_id >= 0) ?
2667  					     numa_node_id : NUMA_NO_NODE);
2668  	}
2669  
2670  	if (!dev_maps)
2671  		goto out_no_maps;
2672  
2673  	/* removes tx-queue from unused CPUs/rx-queues */
2674  	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2675  	     j < nr_ids;) {
2676  		for (i = tc, tci = j * num_tc; i--; tci++)
2677  			active |= remove_xps_queue(dev_maps, tci, index);
2678  		if (!netif_attr_test_mask(j, mask, nr_ids) ||
2679  		    !netif_attr_test_online(j, online_mask, nr_ids))
2680  			active |= remove_xps_queue(dev_maps, tci, index);
2681  		for (i = num_tc - tc, tci++; --i; tci++)
2682  			active |= remove_xps_queue(dev_maps, tci, index);
2683  	}
2684  
2685  	/* free map if not active */
2686  	if (!active)
2687  		reset_xps_maps(dev, dev_maps, is_rxqs_map);
2688  
2689  out_no_maps:
2690  	mutex_unlock(&xps_map_mutex);
2691  
2692  	return 0;
2693  error:
2694  	/* remove any maps that we added */
2695  	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2696  	     j < nr_ids;) {
2697  		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2698  			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2699  			map = dev_maps ?
2700  			      xmap_dereference(dev_maps->attr_map[tci]) :
2701  			      NULL;
2702  			if (new_map && new_map != map)
2703  				kfree(new_map);
2704  		}
2705  	}
2706  
2707  	mutex_unlock(&xps_map_mutex);
2708  
2709  	kfree(new_dev_maps);
2710  	return -ENOMEM;
2711  }
2712  EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2713  
2714  int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2715  			u16 index)
2716  {
2717  	int ret;
2718  
2719  	cpus_read_lock();
2720  	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2721  	cpus_read_unlock();
2722  
2723  	return ret;
2724  }
2725  EXPORT_SYMBOL(netif_set_xps_queue);
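
/*
 * Example usage (illustrative sketch): a multiqueue driver can give each
 * Tx queue an initial CPU affinity hint, e.g. one CPU per queue:
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);
 *
 * User space may later override these maps via the per-queue xps_cpus
 * sysfs attribute.
 */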
2726  
2727  #endif
2728  static void netdev_unbind_all_sb_channels(struct net_device *dev)
2729  {
2730  	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2731  
2732  	/* Unbind any subordinate channels */
2733  	while (txq-- != &dev->_tx[0]) {
2734  		if (txq->sb_dev)
2735  			netdev_unbind_sb_channel(dev, txq->sb_dev);
2736  	}
2737  }
2738  
2739  void netdev_reset_tc(struct net_device *dev)
2740  {
2741  #ifdef CONFIG_XPS
2742  	netif_reset_xps_queues_gt(dev, 0);
2743  #endif
2744  	netdev_unbind_all_sb_channels(dev);
2745  
2746  	/* Reset TC configuration of device */
2747  	dev->num_tc = 0;
2748  	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2749  	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2750  }
2751  EXPORT_SYMBOL(netdev_reset_tc);
2752  
2753  int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2754  {
2755  	if (tc >= dev->num_tc)
2756  		return -EINVAL;
2757  
2758  #ifdef CONFIG_XPS
2759  	netif_reset_xps_queues(dev, offset, count);
2760  #endif
2761  	dev->tc_to_txq[tc].count = count;
2762  	dev->tc_to_txq[tc].offset = offset;
2763  	return 0;
2764  }
2765  EXPORT_SYMBOL(netdev_set_tc_queue);
2766  
2767  int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2768  {
2769  	if (num_tc > TC_MAX_QUEUE)
2770  		return -EINVAL;
2771  
2772  #ifdef CONFIG_XPS
2773  	netif_reset_xps_queues_gt(dev, 0);
2774  #endif
2775  	netdev_unbind_all_sb_channels(dev);
2776  
2777  	dev->num_tc = num_tc;
2778  	return 0;
2779  }
2780  EXPORT_SYMBOL(netdev_set_num_tc);
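
/*
 * Example usage (illustrative sketch): a driver carving eight Tx queues
 * into two traffic classes of four queues each, with all priorities
 * initially mapped to TC0:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);
 *	netdev_set_tc_queue(dev, 1, 4, 4);
 *	for (prio = 0; prio <= TC_BITMASK; prio++)
 *		netdev_set_prio_tc_map(dev, prio, 0);
 *
 * netdev_set_num_tc() must come first, since netdev_set_tc_queue()
 * rejects a tc index that is >= dev->num_tc.
 */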
2781  
2782  void netdev_unbind_sb_channel(struct net_device *dev,
2783  			      struct net_device *sb_dev)
2784  {
2785  	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2786  
2787  #ifdef CONFIG_XPS
2788  	netif_reset_xps_queues_gt(sb_dev, 0);
2789  #endif
2790  	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2791  	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2792  
2793  	while (txq-- != &dev->_tx[0]) {
2794  		if (txq->sb_dev == sb_dev)
2795  			txq->sb_dev = NULL;
2796  	}
2797  }
2798  EXPORT_SYMBOL(netdev_unbind_sb_channel);
2799  
2800  int netdev_bind_sb_channel_queue(struct net_device *dev,
2801  				 struct net_device *sb_dev,
2802  				 u8 tc, u16 count, u16 offset)
2803  {
2804  	/* Make certain the sb_dev and dev are already configured */
2805  	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2806  		return -EINVAL;
2807  
2808  	/* We cannot hand out queues we don't have */
2809  	if ((offset + count) > dev->real_num_tx_queues)
2810  		return -EINVAL;
2811  
2812  	/* Record the mapping */
2813  	sb_dev->tc_to_txq[tc].count = count;
2814  	sb_dev->tc_to_txq[tc].offset = offset;
2815  
2816  	/* Provide a way for Tx queue to find the tc_to_txq map or
2817  	 * XPS map for itself.
2818  	 */
2819  	while (count--)
2820  		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2821  
2822  	return 0;
2823  }
2824  EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2825  
2826  int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2827  {
2828  	/* Do not use a multiqueue device to represent a subordinate channel */
2829  	if (netif_is_multiqueue(dev))
2830  		return -ENODEV;
2831  
2832  	/* We allow channels 1 - 32767 to be used for subordinate channels.
2833  	 * Channel 0 is meant to be "native" mode and used only to represent
2834  	 * the main root device. We allow writing 0 to reset the device back
2835  	 * to normal mode after being used as a subordinate channel.
2836  	 */
2837  	if (channel > S16_MAX)
2838  		return -EINVAL;
2839  
2840  	dev->num_tc = -channel;
2841  
2842  	return 0;
2843  }
2844  EXPORT_SYMBOL(netdev_set_sb_channel);
2845  
2846  /*
2847   * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2848   * greater than or equal to the new real_num_tx_queues, stale skbs on the qdisc must be flushed.
2849   */
2850  int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2851  {
2852  	bool disabling;
2853  	int rc;
2854  
2855  	disabling = txq < dev->real_num_tx_queues;
2856  
2857  	if (txq < 1 || txq > dev->num_tx_queues)
2858  		return -EINVAL;
2859  
2860  	if (dev->reg_state == NETREG_REGISTERED ||
2861  	    dev->reg_state == NETREG_UNREGISTERING) {
2862  		ASSERT_RTNL();
2863  
2864  		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2865  						  txq);
2866  		if (rc)
2867  			return rc;
2868  
2869  		if (dev->num_tc)
2870  			netif_setup_tc(dev, txq);
2871  
2872  		dev->real_num_tx_queues = txq;
2873  
2874  		if (disabling) {
2875  			synchronize_net();
2876  			qdisc_reset_all_tx_gt(dev, txq);
2877  #ifdef CONFIG_XPS
2878  			netif_reset_xps_queues_gt(dev, txq);
2879  #endif
2880  		}
2881  	} else {
2882  		dev->real_num_tx_queues = txq;
2883  	}
2884  
2885  	return 0;
2886  }
2887  EXPORT_SYMBOL(netif_set_real_num_tx_queues);
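
/*
 * Example usage (illustrative sketch): a driver resizing its channels,
 * e.g. from an ethtool set_channels handler where the rtnl lock is
 * already held:
 *
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *	if (err)
 *		return err;
 *	err = netif_set_real_num_rx_queues(dev, new_rxq);
 *	if (err)
 *		return err;
 *
 * Shrinking the Tx queue count flushes stale skbs and XPS maps for the
 * queues that are being disabled, as done above.
 */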
2888  
2889  #ifdef CONFIG_SYSFS
2890  /**
2891   *	netif_set_real_num_rx_queues - set actual number of RX queues used
2892   *	@dev: Network device
2893   *	@rxq: Actual number of RX queues
2894   *
2895   *	This must be called either with the rtnl_lock held or before
2896   *	registration of the net device.  Returns 0 on success, or a
2897   *	negative error code.  If called before registration, it always
2898   *	succeeds.
2899   */
2900  int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2901  {
2902  	int rc;
2903  
2904  	if (rxq < 1 || rxq > dev->num_rx_queues)
2905  		return -EINVAL;
2906  
2907  	if (dev->reg_state == NETREG_REGISTERED) {
2908  		ASSERT_RTNL();
2909  
2910  		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2911  						  rxq);
2912  		if (rc)
2913  			return rc;
2914  	}
2915  
2916  	dev->real_num_rx_queues = rxq;
2917  	return 0;
2918  }
2919  EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2920  #endif
2921  
2922  /**
2923   * netif_get_num_default_rss_queues - default number of RSS queues
2924   *
2925   * This routine should set an upper limit on the number of RSS queues
2926   * used by default by multiqueue devices.
2927   */
2928  int netif_get_num_default_rss_queues(void)
2929  {
2930  	return is_kdump_kernel() ?
2931  		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2932  }
2933  EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2934  
2935  static void __netif_reschedule(struct Qdisc *q)
2936  {
2937  	struct softnet_data *sd;
2938  	unsigned long flags;
2939  
2940  	local_irq_save(flags);
2941  	sd = this_cpu_ptr(&softnet_data);
2942  	q->next_sched = NULL;
2943  	*sd->output_queue_tailp = q;
2944  	sd->output_queue_tailp = &q->next_sched;
2945  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
2946  	local_irq_restore(flags);
2947  }
2948  
2949  void __netif_schedule(struct Qdisc *q)
2950  {
2951  	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2952  		__netif_reschedule(q);
2953  }
2954  EXPORT_SYMBOL(__netif_schedule);
2955  
2956  struct dev_kfree_skb_cb {
2957  	enum skb_free_reason reason;
2958  };
2959  
2960  static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2961  {
2962  	return (struct dev_kfree_skb_cb *)skb->cb;
2963  }
2964  
2965  void netif_schedule_queue(struct netdev_queue *txq)
2966  {
2967  	rcu_read_lock();
2968  	if (!netif_xmit_stopped(txq)) {
2969  		struct Qdisc *q = rcu_dereference(txq->qdisc);
2970  
2971  		__netif_schedule(q);
2972  	}
2973  	rcu_read_unlock();
2974  }
2975  EXPORT_SYMBOL(netif_schedule_queue);
2976  
2977  void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2978  {
2979  	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2980  		struct Qdisc *q;
2981  
2982  		rcu_read_lock();
2983  		q = rcu_dereference(dev_queue->qdisc);
2984  		__netif_schedule(q);
2985  		rcu_read_unlock();
2986  	}
2987  }
2988  EXPORT_SYMBOL(netif_tx_wake_queue);
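
/*
 * Example usage (illustrative sketch; ring_space() and MY_WAKE_THRESHOLD
 * are hypothetical): a driver's Tx completion handler restarts a queue it
 * had stopped once enough descriptors have been reclaimed.
 *
 *	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->index);
 *
 *	if (netif_tx_queue_stopped(txq) &&
 *	    ring_space(ring) > MY_WAKE_THRESHOLD)
 *		netif_tx_wake_queue(txq);
 */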
2989  
2990  void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2991  {
2992  	unsigned long flags;
2993  
2994  	if (unlikely(!skb))
2995  		return;
2996  
2997  	if (likely(refcount_read(&skb->users) == 1)) {
2998  		smp_rmb();
2999  		refcount_set(&skb->users, 0);
3000  	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3001  		return;
3002  	}
3003  	get_kfree_skb_cb(skb)->reason = reason;
3004  	local_irq_save(flags);
3005  	skb->next = __this_cpu_read(softnet_data.completion_queue);
3006  	__this_cpu_write(softnet_data.completion_queue, skb);
3007  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3008  	local_irq_restore(flags);
3009  }
3010  EXPORT_SYMBOL(__dev_kfree_skb_irq);
3011  
3012  void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3013  {
3014  	if (in_irq() || irqs_disabled())
3015  		__dev_kfree_skb_irq(skb, reason);
3016  	else
3017  		dev_kfree_skb(skb);
3018  }
3019  EXPORT_SYMBOL(__dev_kfree_skb_any);
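
/*
 * Example usage (illustrative sketch): code that may run in either hard
 * interrupt or process context frees skbs through the _any wrappers from
 * netdevice.h rather than calling kfree_skb()/consume_skb() directly:
 *
 *	dev_kfree_skb_any(skb);		dropping a packet
 *	dev_consume_skb_any(skb);	after a successful transmit
 *
 * In hard-IRQ context the skb is queued on the per-CPU completion queue
 * and freed later from the NET_TX softirq.
 */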
3020  
3021  
3022  /**
3023   * netif_device_detach - mark device as removed
3024   * @dev: network device
3025   *
3026   * Mark device as removed from system and therefore no longer available.
3027   */
3028  void netif_device_detach(struct net_device *dev)
3029  {
3030  	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3031  	    netif_running(dev)) {
3032  		netif_tx_stop_all_queues(dev);
3033  	}
3034  }
3035  EXPORT_SYMBOL(netif_device_detach);
3036  
3037  /**
3038   * netif_device_attach - mark device as attached
3039   * @dev: network device
3040   *
3041   * Mark device as attached to the system and restart if needed.
3042   */
3043  void netif_device_attach(struct net_device *dev)
3044  {
3045  	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3046  	    netif_running(dev)) {
3047  		netif_tx_wake_all_queues(dev);
3048  		__netdev_watchdog_up(dev);
3049  	}
3050  }
3051  EXPORT_SYMBOL(netif_device_attach);
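
/*
 * Example usage (illustrative sketch; my_suspend/my_resume are
 * hypothetical): a driver's PM callbacks typically bracket the low-power
 * period with detach/attach so the stack stops queueing packets to
 * hardware that is not accessible.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */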
3052  
3053  /*
3054   * Returns a Tx hash based on the given packet descriptor and the number
3055   * of Tx queues to be used as a distribution range.
3056   */
3057  static u16 skb_tx_hash(const struct net_device *dev,
3058  		       const struct net_device *sb_dev,
3059  		       struct sk_buff *skb)
3060  {
3061  	u32 hash;
3062  	u16 qoffset = 0;
3063  	u16 qcount = dev->real_num_tx_queues;
3064  
3065  	if (dev->num_tc) {
3066  		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3067  
3068  		qoffset = sb_dev->tc_to_txq[tc].offset;
3069  		qcount = sb_dev->tc_to_txq[tc].count;
3070  	}
3071  
3072  	if (skb_rx_queue_recorded(skb)) {
3073  		hash = skb_get_rx_queue(skb);
3074  		while (unlikely(hash >= qcount))
3075  			hash -= qcount;
3076  		return hash + qoffset;
3077  	}
3078  
3079  	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3080  }
3081  
3082  static void skb_warn_bad_offload(const struct sk_buff *skb)
3083  {
3084  	static const netdev_features_t null_features;
3085  	struct net_device *dev = skb->dev;
3086  	const char *name = "";
3087  
3088  	if (!net_ratelimit())
3089  		return;
3090  
3091  	if (dev) {
3092  		if (dev->dev.parent)
3093  			name = dev_driver_string(dev->dev.parent);
3094  		else
3095  			name = netdev_name(dev);
3096  	}
3097  	skb_dump(KERN_WARNING, skb, false);
3098  	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3099  	     name, dev ? &dev->features : &null_features,
3100  	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3101  }
3102  
3103  /*
3104   * Invalidate hardware checksum when packet is to be mangled, and
3105   * complete checksum manually on outgoing path.
3106   */
3107  int skb_checksum_help(struct sk_buff *skb)
3108  {
3109  	__wsum csum;
3110  	int ret = 0, offset;
3111  
3112  	if (skb->ip_summed == CHECKSUM_COMPLETE)
3113  		goto out_set_summed;
3114  
3115  	if (unlikely(skb_shinfo(skb)->gso_size)) {
3116  		skb_warn_bad_offload(skb);
3117  		return -EINVAL;
3118  	}
3119  
3120  	/* Before computing a checksum, we should make sure no frag could
3121  	 * be modified by an external entity: the checksum could be wrong.
3122  	 */
3123  	if (skb_has_shared_frag(skb)) {
3124  		ret = __skb_linearize(skb);
3125  		if (ret)
3126  			goto out;
3127  	}
3128  
3129  	offset = skb_checksum_start_offset(skb);
3130  	BUG_ON(offset >= skb_headlen(skb));
3131  	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3132  
3133  	offset += skb->csum_offset;
3134  	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3135  
3136  	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3137  	if (ret)
3138  		goto out;
3139  
3140  	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3141  out_set_summed:
3142  	skb->ip_summed = CHECKSUM_NONE;
3143  out:
3144  	return ret;
3145  }
3146  EXPORT_SYMBOL(skb_checksum_help);
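
/*
 * Example usage (illustrative sketch; the drop label is hypothetical): a
 * driver whose hardware cannot checksum a particular packet falls back to
 * computing the checksum in software before handing the skb to the NIC:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 *
 * On success the skb leaves with ip_summed set to CHECKSUM_NONE and the
 * checksum field filled in.
 */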
3147  
3148  int skb_crc32c_csum_help(struct sk_buff *skb)
3149  {
3150  	__le32 crc32c_csum;
3151  	int ret = 0, offset, start;
3152  
3153  	if (skb->ip_summed != CHECKSUM_PARTIAL)
3154  		goto out;
3155  
3156  	if (unlikely(skb_is_gso(skb)))
3157  		goto out;
3158  
3159  	/* Before computing a checksum, we should make sure no frag could
3160  	 * be modified by an external entity : checksum could be wrong.
3161  	 * be modified by an external entity: the checksum could be wrong.
3162  	if (unlikely(skb_has_shared_frag(skb))) {
3163  		ret = __skb_linearize(skb);
3164  		if (ret)
3165  			goto out;
3166  	}
3167  	start = skb_checksum_start_offset(skb);
3168  	offset = start + offsetof(struct sctphdr, checksum);
3169  	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3170  		ret = -EINVAL;
3171  		goto out;
3172  	}
3173  
3174  	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3175  	if (ret)
3176  		goto out;
3177  
3178  	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3179  						  skb->len - start, ~(__u32)0,
3180  						  crc32c_csum_stub));
3181  	*(__le32 *)(skb->data + offset) = crc32c_csum;
3182  	skb->ip_summed = CHECKSUM_NONE;
3183  	skb->csum_not_inet = 0;
3184  out:
3185  	return ret;
3186  }
3187  
3188  __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3189  {
3190  	__be16 type = skb->protocol;
3191  
3192  	/* Tunnel gso handlers can set protocol to ethernet. */
3193  	if (type == htons(ETH_P_TEB)) {
3194  		struct ethhdr *eth;
3195  
3196  		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3197  			return 0;
3198  
3199  		eth = (struct ethhdr *)skb->data;
3200  		type = eth->h_proto;
3201  	}
3202  
3203  	return __vlan_get_protocol(skb, type, depth);
3204  }
3205  
3206  /**
3207   *	skb_mac_gso_segment - mac layer segmentation handler.
3208   *	@skb: buffer to segment
3209   *	@features: features for the output path (see dev->features)
3210   */
3211  struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3212  				    netdev_features_t features)
3213  {
3214  	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3215  	struct packet_offload *ptype;
3216  	int vlan_depth = skb->mac_len;
3217  	__be16 type = skb_network_protocol(skb, &vlan_depth);
3218  
3219  	if (unlikely(!type))
3220  		return ERR_PTR(-EINVAL);
3221  
3222  	__skb_pull(skb, vlan_depth);
3223  
3224  	rcu_read_lock();
3225  	list_for_each_entry_rcu(ptype, &offload_base, list) {
3226  		if (ptype->type == type && ptype->callbacks.gso_segment) {
3227  			segs = ptype->callbacks.gso_segment(skb, features);
3228  			break;
3229  		}
3230  	}
3231  	rcu_read_unlock();
3232  
3233  	__skb_push(skb, skb->data - skb_mac_header(skb));
3234  
3235  	return segs;
3236  }
3237  EXPORT_SYMBOL(skb_mac_gso_segment);
3238  
3239  
3240  /* openvswitch calls this on rx path, so we need a different check.
3241   */
3242  static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3243  {
3244  	if (tx_path)
3245  		return skb->ip_summed != CHECKSUM_PARTIAL &&
3246  		       skb->ip_summed != CHECKSUM_UNNECESSARY;
3247  
3248  	return skb->ip_summed == CHECKSUM_NONE;
3249  }
3250  
3251  /**
3252   *	__skb_gso_segment - Perform segmentation on skb.
3253   *	@skb: buffer to segment
3254   *	@features: features for the output path (see dev->features)
3255   *	@tx_path: whether it is called in TX path
3256   *
3257   *	This function segments the given skb and returns a list of segments.
3258   *
3259   *	It may return NULL if the skb requires no segmentation.  This is
3260   *	only possible when GSO is used for verifying header integrity.
3261   *
3262   *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
3263   */
3264  struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3265  				  netdev_features_t features, bool tx_path)
3266  {
3267  	struct sk_buff *segs;
3268  
3269  	if (unlikely(skb_needs_check(skb, tx_path))) {
3270  		int err;
3271  
3272  		/* We're going to init ->check field in TCP or UDP header */
3273  		err = skb_cow_head(skb, 0);
3274  		if (err < 0)
3275  			return ERR_PTR(err);
3276  	}
3277  
3278  	/* Only report GSO partial support if it will enable us to
3279  	 * support segmentation on this frame without needing additional
3280  	 * work.
3281  	 */
3282  	if (features & NETIF_F_GSO_PARTIAL) {
3283  		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3284  		struct net_device *dev = skb->dev;
3285  
3286  		partial_features |= dev->features & dev->gso_partial_features;
3287  		if (!skb_gso_ok(skb, features | partial_features))
3288  			features &= ~NETIF_F_GSO_PARTIAL;
3289  	}
3290  
3291  	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
3292  		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3293  
3294  	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3295  	SKB_GSO_CB(skb)->encap_level = 0;
3296  
3297  	skb_reset_mac_header(skb);
3298  	skb_reset_mac_len(skb);
3299  
3300  	segs = skb_mac_gso_segment(skb, features);
3301  
3302  	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3303  		skb_warn_bad_offload(skb);
3304  
3305  	return segs;
3306  }
3307  EXPORT_SYMBOL(__skb_gso_segment);
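
/*
 * Example usage (illustrative sketch): a caller that must segment in
 * software walks the returned list and transmits each segment on its own.
 *
 *	struct sk_buff *segs, *next;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		for (skb = segs; skb; skb = next) {
 *			next = skb->next;
 *			skb_mark_not_on_list(skb);
 *			...transmit skb...
 *		}
 *	}
 *
 * A NULL return means no segmentation was needed and the original skb can
 * be sent as is; the drop label and the transmit step are placeholders.
 */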
3308  
3309  /* Take action when hardware reception checksum errors are detected. */
3310  #ifdef CONFIG_BUG
3311  void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3312  {
3313  	if (net_ratelimit()) {
3314  		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3315  		skb_dump(KERN_ERR, skb, true);
3316  		dump_stack();
3317  	}
3318  }
3319  EXPORT_SYMBOL(netdev_rx_csum_fault);
3320  #endif
3321  
3322  /* XXX: check that highmem exists at all on the given machine. */
3323  static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3324  {
3325  #ifdef CONFIG_HIGHMEM
3326  	int i;
3327  
3328  	if (!(dev->features & NETIF_F_HIGHDMA)) {
3329  		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3330  			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3331  
3332  			if (PageHighMem(skb_frag_page(frag)))
3333  				return 1;
3334  		}
3335  	}
3336  #endif
3337  	return 0;
3338  }
3339  
3340  /* If this is an MPLS offload request, verify we are testing hardware MPLS
3341   * features instead of the standard features for the netdev.
3342   */
3343  #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3344  static netdev_features_t net_mpls_features(struct sk_buff *skb,
3345  					   netdev_features_t features,
3346  					   __be16 type)
3347  {
3348  	if (eth_p_mpls(type))
3349  		features &= skb->dev->mpls_features;
3350  
3351  	return features;
3352  }
3353  #else
3354  static netdev_features_t net_mpls_features(struct sk_buff *skb,
3355  					   netdev_features_t features,
3356  					   __be16 type)
3357  {
3358  	return features;
3359  }
3360  #endif
3361  
3362  static netdev_features_t harmonize_features(struct sk_buff *skb,
3363  	netdev_features_t features)
3364  {
3365  	int tmp;
3366  	__be16 type;
3367  
3368  	type = skb_network_protocol(skb, &tmp);
3369  	features = net_mpls_features(skb, features, type);
3370  
3371  	if (skb->ip_summed != CHECKSUM_NONE &&
3372  	    !can_checksum_protocol(features, type)) {
3373  		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3374  	}
3375  	if (illegal_highdma(skb->dev, skb))
3376  		features &= ~NETIF_F_SG;
3377  
3378  	return features;
3379  }
3380  
3381  netdev_features_t passthru_features_check(struct sk_buff *skb,
3382  					  struct net_device *dev,
3383  					  netdev_features_t features)
3384  {
3385  	return features;
3386  }
3387  EXPORT_SYMBOL(passthru_features_check);
3388  
3389  static netdev_features_t dflt_features_check(struct sk_buff *skb,
3390  					     struct net_device *dev,
3391  					     netdev_features_t features)
3392  {
3393  	return vlan_features_check(skb, features);
3394  }
3395  
3396  static netdev_features_t gso_features_check(const struct sk_buff *skb,
3397  					    struct net_device *dev,
3398  					    netdev_features_t features)
3399  {
3400  	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3401  
3402  	if (gso_segs > dev->gso_max_segs)
3403  		return features & ~NETIF_F_GSO_MASK;
3404  
3405  	/* Support for GSO partial features requires software
3406  	 * intervention before we can actually process the packets
3407  	 * so we need to strip support for any partial features now
3408  	 * and we can pull them back in after we have partially
3409  	 * segmented the frame.
3410  	 */
3411  	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3412  		features &= ~dev->gso_partial_features;
3413  
3414  	/* Make sure to clear the IPv4 ID mangling feature if the
3415  	 * IPv4 header has the potential to be fragmented.
3416  	 */
3417  	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3418  		struct iphdr *iph = skb->encapsulation ?
3419  				    inner_ip_hdr(skb) : ip_hdr(skb);
3420  
3421  		if (!(iph->frag_off & htons(IP_DF)))
3422  			features &= ~NETIF_F_TSO_MANGLEID;
3423  	}
3424  
3425  	return features;
3426  }
3427  
3428  netdev_features_t netif_skb_features(struct sk_buff *skb)
3429  {
3430  	struct net_device *dev = skb->dev;
3431  	netdev_features_t features = dev->features;
3432  
3433  	if (skb_is_gso(skb))
3434  		features = gso_features_check(skb, dev, features);
3435  
3436  	/* If this is an encapsulation offload request, verify we are
3437  	 * testing hardware encapsulation features instead of the
3438  	 * standard features for the netdev
3439  	 */
3440  	if (skb->encapsulation)
3441  		features &= dev->hw_enc_features;
3442  
3443  	if (skb_vlan_tagged(skb))
3444  		features = netdev_intersect_features(features,
3445  						     dev->vlan_features |
3446  						     NETIF_F_HW_VLAN_CTAG_TX |
3447  						     NETIF_F_HW_VLAN_STAG_TX);
3448  
3449  	if (dev->netdev_ops->ndo_features_check)
3450  		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3451  								features);
3452  	else
3453  		features &= dflt_features_check(skb, dev, features);
3454  
3455  	return harmonize_features(skb, features);
3456  }
3457  EXPORT_SYMBOL(netif_skb_features);
3458  
3459  static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3460  		    struct netdev_queue *txq, bool more)
3461  {
3462  	unsigned int len;
3463  	int rc;
3464  
3465  	if (dev_nit_active(dev))
3466  		dev_queue_xmit_nit(skb, dev);
3467  
3468  	len = skb->len;
3469  	trace_net_dev_start_xmit(skb, dev);
3470  	rc = netdev_start_xmit(skb, dev, txq, more);
3471  	trace_net_dev_xmit(skb, rc, dev, len);
3472  
3473  	return rc;
3474  }
3475  
3476  struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3477  				    struct netdev_queue *txq, int *ret)
3478  {
3479  	struct sk_buff *skb = first;
3480  	int rc = NETDEV_TX_OK;
3481  
3482  	while (skb) {
3483  		struct sk_buff *next = skb->next;
3484  
3485  		skb_mark_not_on_list(skb);
3486  		rc = xmit_one(skb, dev, txq, next != NULL);
3487  		if (unlikely(!dev_xmit_complete(rc))) {
3488  			skb->next = next;
3489  			goto out;
3490  		}
3491  
3492  		skb = next;
3493  		if (netif_tx_queue_stopped(txq) && skb) {
3494  			rc = NETDEV_TX_BUSY;
3495  			break;
3496  		}
3497  	}
3498  
3499  out:
3500  	*ret = rc;
3501  	return skb;
3502  }
3503  
3504  static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3505  					  netdev_features_t features)
3506  {
3507  	if (skb_vlan_tag_present(skb) &&
3508  	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3509  		skb = __vlan_hwaccel_push_inside(skb);
3510  	return skb;
3511  }
3512  
3513  int skb_csum_hwoffload_help(struct sk_buff *skb,
3514  			    const netdev_features_t features)
3515  {
3516  	if (unlikely(skb->csum_not_inet))
3517  		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3518  			skb_crc32c_csum_help(skb);
3519  
3520  	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3521  }
3522  EXPORT_SYMBOL(skb_csum_hwoffload_help);
3523  
3524  static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3525  {
3526  	netdev_features_t features;
3527  
3528  	features = netif_skb_features(skb);
3529  	skb = validate_xmit_vlan(skb, features);
3530  	if (unlikely(!skb))
3531  		goto out_null;
3532  
3533  	skb = sk_validate_xmit_skb(skb, dev);
3534  	if (unlikely(!skb))
3535  		goto out_null;
3536  
3537  	if (netif_needs_gso(skb, features)) {
3538  		struct sk_buff *segs;
3539  
3540  		segs = skb_gso_segment(skb, features);
3541  		if (IS_ERR(segs)) {
3542  			goto out_kfree_skb;
3543  		} else if (segs) {
3544  			consume_skb(skb);
3545  			skb = segs;
3546  		}
3547  	} else {
3548  		if (skb_needs_linearize(skb, features) &&
3549  		    __skb_linearize(skb))
3550  			goto out_kfree_skb;
3551  
3552  		/* If packet is not checksummed and device does not
3553  		 * support checksumming for this protocol, complete
3554  		 * checksumming here.
3555  		 */
3556  		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3557  			if (skb->encapsulation)
3558  				skb_set_inner_transport_header(skb,
3559  							       skb_checksum_start_offset(skb));
3560  			else
3561  				skb_set_transport_header(skb,
3562  							 skb_checksum_start_offset(skb));
3563  			if (skb_csum_hwoffload_help(skb, features))
3564  				goto out_kfree_skb;
3565  		}
3566  	}
3567  
3568  	skb = validate_xmit_xfrm(skb, features, again);
3569  
3570  	return skb;
3571  
3572  out_kfree_skb:
3573  	kfree_skb(skb);
3574  out_null:
3575  	atomic_long_inc(&dev->tx_dropped);
3576  	return NULL;
3577  }
3578  
3579  struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3580  {
3581  	struct sk_buff *next, *head = NULL, *tail;
3582  
3583  	for (; skb != NULL; skb = next) {
3584  		next = skb->next;
3585  		skb_mark_not_on_list(skb);
3586  
3587  		/* in case skb won't be segmented, point to itself */
3588  		skb->prev = skb;
3589  
3590  		skb = validate_xmit_skb(skb, dev, again);
3591  		if (!skb)
3592  			continue;
3593  
3594  		if (!head)
3595  			head = skb;
3596  		else
3597  			tail->next = skb;
3598  		/* If skb was segmented, skb->prev points to
3599  		 * the last segment. If not, it still contains skb.
3600  		 */
3601  		tail = skb->prev;
3602  	}
3603  	return head;
3604  }
3605  EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3606  
3607  static void qdisc_pkt_len_init(struct sk_buff *skb)
3608  {
3609  	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3610  
3611  	qdisc_skb_cb(skb)->pkt_len = skb->len;
3612  
3613  	/* To get a more precise estimate of the bytes sent on the wire,
3614  	 * we add the header size of every segment to pkt_len.
3615  	 */
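	/* Worked example (illustrative numbers, not from the source): a TSO
	 * skb carrying 2896 bytes of TCP payload with gso_size = 1448 and
	 * 14 + 20 + 32 = 66 bytes of Ethernet/IPv4/TCP(+timestamps) headers
	 * gives:
	 *
	 *	skb->len = 66 + 2896          = 2962
	 *	hdr_len  = 34 + 32            = 66
	 *	gso_segs = 2
	 *	pkt_len  = 2962 + (2 - 1)*66  = 3028 = 2 * 1514
	 *
	 * i.e. pkt_len matches the two full-sized frames that will actually
	 * hit the wire (FCS and preamble excluded).
	 */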
3616  	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3617  		unsigned int hdr_len;
3618  		u16 gso_segs = shinfo->gso_segs;
3619  
3620  		/* mac layer + network layer */
3621  		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3622  
3623  		/* + transport layer */
3624  		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3625  			const struct tcphdr *th;
3626  			struct tcphdr _tcphdr;
3627  
3628  			th = skb_header_pointer(skb, skb_transport_offset(skb),
3629  						sizeof(_tcphdr), &_tcphdr);
3630  			if (likely(th))
3631  				hdr_len += __tcp_hdrlen(th);
3632  		} else {
3633  			struct udphdr _udphdr;
3634  
3635  			if (skb_header_pointer(skb, skb_transport_offset(skb),
3636  					       sizeof(_udphdr), &_udphdr))
3637  				hdr_len += sizeof(struct udphdr);
3638  		}
3639  
3640  		if (shinfo->gso_type & SKB_GSO_DODGY)
3641  			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3642  						shinfo->gso_size);
3643  
3644  		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3645  	}
3646  }
3647  
3648  static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3649  				 struct net_device *dev,
3650  				 struct netdev_queue *txq)
3651  {
3652  	spinlock_t *root_lock = qdisc_lock(q);
3653  	struct sk_buff *to_free = NULL;
3654  	bool contended;
3655  	int rc;
3656  
3657  	qdisc_calculate_pkt_len(skb, q);
3658  
3659  	if (q->flags & TCQ_F_NOLOCK) {
3660  		if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
3661  		    qdisc_run_begin(q)) {
3662  			if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
3663  					      &q->state))) {
3664  				__qdisc_drop(skb, &to_free);
3665  				rc = NET_XMIT_DROP;
3666  				goto end_run;
3667  			}
3668  			qdisc_bstats_cpu_update(q, skb);
3669  
3670  			rc = NET_XMIT_SUCCESS;
3671  			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
3672  				__qdisc_run(q);
3673  
3674  end_run:
3675  			qdisc_run_end(q);
3676  		} else {
3677  			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3678  			qdisc_run(q);
3679  		}
3680  
3681  		if (unlikely(to_free))
3682  			kfree_skb_list(to_free);
3683  		return rc;
3684  	}
3685  
3686  	/*
3687  	 * Heuristic to force contended enqueues to serialize on a
3688  	 * separate lock before trying to get the qdisc main lock.
3689  	 * This permits the qdisc->running owner to get the lock more
3690  	 * often and dequeue packets faster.
3691  	 */
3692  	contended = qdisc_is_running(q);
3693  	if (unlikely(contended))
3694  		spin_lock(&q->busylock);
3695  
3696  	spin_lock(root_lock);
3697  	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3698  		__qdisc_drop(skb, &to_free);
3699  		rc = NET_XMIT_DROP;
3700  	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3701  		   qdisc_run_begin(q)) {
3702  		/*
3703  		 * This is a work-conserving queue; there are no old skbs
3704  		 * waiting to be sent out; and the qdisc is not running -
3705  		 * xmit the skb directly.
3706  		 */
3707  
3708  		qdisc_bstats_update(q, skb);
3709  
3710  		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3711  			if (unlikely(contended)) {
3712  				spin_unlock(&q->busylock);
3713  				contended = false;
3714  			}
3715  			__qdisc_run(q);
3716  		}
3717  
3718  		qdisc_run_end(q);
3719  		rc = NET_XMIT_SUCCESS;
3720  	} else {
3721  		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3722  		if (qdisc_run_begin(q)) {
3723  			if (unlikely(contended)) {
3724  				spin_unlock(&q->busylock);
3725  				contended = false;
3726  			}
3727  			__qdisc_run(q);
3728  			qdisc_run_end(q);
3729  		}
3730  	}
3731  	spin_unlock(root_lock);
3732  	if (unlikely(to_free))
3733  		kfree_skb_list(to_free);
3734  	if (unlikely(contended))
3735  		spin_unlock(&q->busylock);
3736  	return rc;
3737  }
3738  
3739  #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3740  static void skb_update_prio(struct sk_buff *skb)
3741  {
3742  	const struct netprio_map *map;
3743  	const struct sock *sk;
3744  	unsigned int prioidx;
3745  
3746  	if (skb->priority)
3747  		return;
3748  	map = rcu_dereference_bh(skb->dev->priomap);
3749  	if (!map)
3750  		return;
3751  	sk = skb_to_full_sk(skb);
3752  	if (!sk)
3753  		return;
3754  
3755  	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3756  
3757  	if (prioidx < map->priomap_len)
3758  		skb->priority = map->priomap[prioidx];
3759  }
3760  #else
3761  #define skb_update_prio(skb)
3762  #endif
3763  
3764  /**
3765   *	dev_loopback_xmit - loop back @skb
3766   *	@net: network namespace this loopback is happening in
3767   *	@sk:  socket; only needed so this function can be used as a netfilter okfn
3768   *	@skb: buffer to transmit
3769   */
3770  int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3771  {
3772  	skb_reset_mac_header(skb);
3773  	__skb_pull(skb, skb_network_offset(skb));
3774  	skb->pkt_type = PACKET_LOOPBACK;
3775  	skb->ip_summed = CHECKSUM_UNNECESSARY;
3776  	WARN_ON(!skb_dst(skb));
3777  	skb_dst_force(skb);
3778  	netif_rx_ni(skb);
3779  	return 0;
3780  }
3781  EXPORT_SYMBOL(dev_loopback_xmit);
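
/* Illustrative sketch (an assumption, not lifted from the tree):
 * dev_loopback_xmit() is shaped so it can be passed as the okfn of an
 * NF_HOOK() invocation when a copy of an outgoing packet must be looped
 * back locally, roughly the way the IPv4 multicast output path does it:
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 *		net, sk, skb_copy, NULL, skb_copy->dev,
 *		dev_loopback_xmit);
 *
 * Here skb_copy stands for a hypothetical clone of the frame being sent;
 * the copy must already carry a dst, as the WARN_ON() above demands.
 */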
3782  
3783  #ifdef CONFIG_NET_EGRESS
3784  static struct sk_buff *
3785  sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3786  {
3787  	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3788  	struct tcf_result cl_res;
3789  
3790  	if (!miniq)
3791  		return skb;
3792  
3793  	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3794  	mini_qdisc_bstats_cpu_update(miniq, skb);
3795  
3796  	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3797  	case TC_ACT_OK:
3798  	case TC_ACT_RECLASSIFY:
3799  		skb->tc_index = TC_H_MIN(cl_res.classid);
3800  		break;
3801  	case TC_ACT_SHOT:
3802  		mini_qdisc_qstats_cpu_drop(miniq);
3803  		*ret = NET_XMIT_DROP;
3804  		kfree_skb(skb);
3805  		return NULL;
3806  	case TC_ACT_STOLEN:
3807  	case TC_ACT_QUEUED:
3808  	case TC_ACT_TRAP:
3809  		*ret = NET_XMIT_SUCCESS;
3810  		consume_skb(skb);
3811  		return NULL;
3812  	case TC_ACT_REDIRECT:
3813  		/* No need to push/pop skb's mac_header here on egress! */
3814  		skb_do_redirect(skb);
3815  		*ret = NET_XMIT_SUCCESS;
3816  		return NULL;
3817  	default:
3818  		break;
3819  	}
3820  
3821  	return skb;
3822  }
3823  #endif /* CONFIG_NET_EGRESS */
3824  
3825  #ifdef CONFIG_XPS
3826  static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3827  			       struct xps_dev_maps *dev_maps, unsigned int tci)
3828  {
3829  	struct xps_map *map;
3830  	int queue_index = -1;
3831  
3832  	if (dev->num_tc) {
3833  		tci *= dev->num_tc;
3834  		tci += netdev_get_prio_tc_map(dev, skb->priority);
3835  	}
3836  
3837  	map = rcu_dereference(dev_maps->attr_map[tci]);
3838  	if (map) {
3839  		if (map->len == 1)
3840  			queue_index = map->queues[0];
3841  		else
3842  			queue_index = map->queues[reciprocal_scale(
3843  						skb_get_hash(skb), map->len)];
3844  		if (unlikely(queue_index >= dev->real_num_tx_queues))
3845  			queue_index = -1;
3846  	}
3847  	return queue_index;
3848  }
3849  #endif
3850  
3851  static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3852  			 struct sk_buff *skb)
3853  {
3854  #ifdef CONFIG_XPS
3855  	struct xps_dev_maps *dev_maps;
3856  	struct sock *sk = skb->sk;
3857  	int queue_index = -1;
3858  
3859  	if (!static_key_false(&xps_needed))
3860  		return -1;
3861  
3862  	rcu_read_lock();
3863  	if (!static_key_false(&xps_rxqs_needed))
3864  		goto get_cpus_map;
3865  
3866  	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
3867  	if (dev_maps) {
3868  		int tci = sk_rx_queue_get(sk);
3869  
3870  		if (tci >= 0 && tci < dev->num_rx_queues)
3871  			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3872  							  tci);
3873  	}
3874  
3875  get_cpus_map:
3876  	if (queue_index < 0) {
3877  		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
3878  		if (dev_maps) {
3879  			unsigned int tci = skb->sender_cpu - 1;
3880  
3881  			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3882  							  tci);
3883  		}
3884  	}
3885  	rcu_read_unlock();
3886  
3887  	return queue_index;
3888  #else
3889  	return -1;
3890  #endif
3891  }
3892  
3893  u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3894  		     struct net_device *sb_dev)
3895  {
3896  	return 0;
3897  }
3898  EXPORT_SYMBOL(dev_pick_tx_zero);
3899  
3900  u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3901  		       struct net_device *sb_dev)
3902  {
3903  	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3904  }
3905  EXPORT_SYMBOL(dev_pick_tx_cpu_id);
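
/* Illustrative sketch (hypothetical driver, not from the tree): these two
 * helpers exist so that drivers with a trivial queue-selection policy can
 * plug them straight into their net_device_ops:
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_start_xmit   = my_start_xmit,
 *		.ndo_select_queue = dev_pick_tx_cpu_id,
 *	};
 *
 * my_netdev_ops and my_start_xmit are made-up names; the helper's
 * signature matches .ndo_select_queue as of this kernel version.
 */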
3906  
3907  u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3908  		     struct net_device *sb_dev)
3909  {
3910  	struct sock *sk = skb->sk;
3911  	int queue_index = sk_tx_queue_get(sk);
3912  
3913  	sb_dev = sb_dev ? : dev;
3914  
3915  	if (queue_index < 0 || skb->ooo_okay ||
3916  	    queue_index >= dev->real_num_tx_queues) {
3917  		int new_index = get_xps_queue(dev, sb_dev, skb);
3918  
3919  		if (new_index < 0)
3920  			new_index = skb_tx_hash(dev, sb_dev, skb);
3921  
3922  		if (queue_index != new_index && sk &&
3923  		    sk_fullsock(sk) &&
3924  		    rcu_access_pointer(sk->sk_dst_cache))
3925  			sk_tx_queue_set(sk, new_index);
3926  
3927  		queue_index = new_index;
3928  	}
3929  
3930  	return queue_index;
3931  }
3932  EXPORT_SYMBOL(netdev_pick_tx);
3933  
3934  struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
3935  					 struct sk_buff *skb,
3936  					 struct net_device *sb_dev)
3937  {
3938  	int queue_index = 0;
3939  
3940  #ifdef CONFIG_XPS
3941  	u32 sender_cpu = skb->sender_cpu - 1;
3942  
3943  	if (sender_cpu >= (u32)NR_CPUS)
3944  		skb->sender_cpu = raw_smp_processor_id() + 1;
3945  #endif
3946  
3947  	if (dev->real_num_tx_queues != 1) {
3948  		const struct net_device_ops *ops = dev->netdev_ops;
3949  
3950  		if (ops->ndo_select_queue)
3951  			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
3952  		else
3953  			queue_index = netdev_pick_tx(dev, skb, sb_dev);
3954  
3955  		queue_index = netdev_cap_txqueue(dev, queue_index);
3956  	}
3957  
3958  	skb_set_queue_mapping(skb, queue_index);
3959  	return netdev_get_tx_queue(dev, queue_index);
3960  }
3961  
3962  /**
3963   *	__dev_queue_xmit - transmit a buffer
3964   *	@skb: buffer to transmit
3965   *	@sb_dev: subordinate device used for L2 forwarding offload
3966   *
3967   *	Queue a buffer for transmission to a network device. The caller must
3968   *	have set the device and priority and built the buffer before calling
3969   *	this function. The function can be called from an interrupt.
3970   *
3971   *	A negative errno code is returned on a failure. A success does not
3972   *	guarantee the frame will be transmitted as it may be dropped due
3973   *	to congestion or traffic shaping.
3974   *
3975   * -----------------------------------------------------------------------------------
3976   *      Note that this method can also return errors from the queue
3977   *      disciplines, including NET_XMIT_DROP, which is a positive value;
3978   *      so errors can be positive as well.
3979   *
3980   *      Regardless of the return value, the skb is consumed, so it is currently
3981   *      difficult to retry a send to this method.  (You can bump the ref count
3982   *      before sending to hold a reference for retry if you are careful.)
3983   *
3984   *      When calling this method, interrupts MUST be enabled.  This is because
3985   *      the BH enable code must have IRQs enabled so that it will not deadlock.
3986   *          --BLG
3987   */
3988  static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
3989  {
3990  	struct net_device *dev = skb->dev;
3991  	struct netdev_queue *txq;
3992  	struct Qdisc *q;
3993  	int rc = -ENOMEM;
3994  	bool again = false;
3995  
3996  	skb_reset_mac_header(skb);
3997  
3998  	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3999  		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
4000  
4001  	/* Disable soft irqs for various locks below. Also
4002  	 * stops preemption for RCU.
4003  	 */
4004  	rcu_read_lock_bh();
4005  
4006  	skb_update_prio(skb);
4007  
4008  	qdisc_pkt_len_init(skb);
4009  #ifdef CONFIG_NET_CLS_ACT
4010  	skb->tc_at_ingress = 0;
4011  # ifdef CONFIG_NET_EGRESS
4012  	if (static_branch_unlikely(&egress_needed_key)) {
4013  		skb = sch_handle_egress(skb, &rc, dev);
4014  		if (!skb)
4015  			goto out;
4016  	}
4017  # endif
4018  #endif
4019  	/* If the device/qdisc doesn't need skb->dst, release it right now
4020  	 * while it's hot in this CPU's cache.
4021  	 */
4022  	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4023  		skb_dst_drop(skb);
4024  	else
4025  		skb_dst_force(skb);
4026  
4027  	txq = netdev_core_pick_tx(dev, skb, sb_dev);
4028  	q = rcu_dereference_bh(txq->qdisc);
4029  
4030  	trace_net_dev_queue(skb);
4031  	if (q->enqueue) {
4032  		rc = __dev_xmit_skb(skb, q, dev, txq);
4033  		goto out;
4034  	}
4035  
4036  	/* The device has no queue. Common case for software devices:
4037  	 * loopback, all the sorts of tunnels...
4038  	 *
4039  	 * Really, it is unlikely that netif_tx_lock protection is necessary
4040  	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
4041  	 * counters.)
4042  	 * However, it is possible that they rely on the protection
4043  	 * made by us here.
4044  	 *
4045  	 * Check this and take the lock. It is not prone to deadlocks.
4046  	 * Or shoot the noqueue qdisc instead, it is even simpler 8)
4047  	 */
4048  	if (dev->flags & IFF_UP) {
4049  		int cpu = smp_processor_id(); /* ok because BHs are off */
4050  
4051  		if (txq->xmit_lock_owner != cpu) {
4052  			if (dev_xmit_recursion())
4053  				goto recursion_alert;
4054  
4055  			skb = validate_xmit_skb(skb, dev, &again);
4056  			if (!skb)
4057  				goto out;
4058  
4059  			HARD_TX_LOCK(dev, txq, cpu);
4060  
4061  			if (!netif_xmit_stopped(txq)) {
4062  				dev_xmit_recursion_inc();
4063  				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4064  				dev_xmit_recursion_dec();
4065  				if (dev_xmit_complete(rc)) {
4066  					HARD_TX_UNLOCK(dev, txq);
4067  					goto out;
4068  				}
4069  			}
4070  			HARD_TX_UNLOCK(dev, txq);
4071  			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4072  					     dev->name);
4073  		} else {
4074  			/* Recursion is detected! It is possible,
4075  			 * unfortunately
4076  			 */
4077  recursion_alert:
4078  			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4079  					     dev->name);
4080  		}
4081  	}
4082  
4083  	rc = -ENETDOWN;
4084  	rcu_read_unlock_bh();
4085  
4086  	atomic_long_inc(&dev->tx_dropped);
4087  	kfree_skb_list(skb);
4088  	return rc;
4089  out:
4090  	rcu_read_unlock_bh();
4091  	return rc;
4092  }
4093  
4094  int dev_queue_xmit(struct sk_buff *skb)
4095  {
4096  	return __dev_queue_xmit(skb, NULL);
4097  }
4098  EXPORT_SYMBOL(dev_queue_xmit);
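
/* Illustrative sketch (assumption): a minimal transmit from a module that
 * has already built a complete frame. The caller must have set skb->dev
 * and, normally, skb->protocol before handing the skb over; the skb is
 * always consumed, as the comment above __dev_queue_xmit() explains.
 *
 *	static int my_send_frame(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		skb->dev = dev;
 *		skb->protocol = htons(ETH_P_IP);
 *		return dev_queue_xmit(skb);
 *	}
 *
 * my_send_frame() and the ETH_P_IP protocol choice are hypothetical; the
 * return value follows the NET_XMIT_* / negative errno convention.
 */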
4099  
4100  int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4101  {
4102  	return __dev_queue_xmit(skb, sb_dev);
4103  }
4104  EXPORT_SYMBOL(dev_queue_xmit_accel);
4105  
4106  int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4107  {
4108  	struct net_device *dev = skb->dev;
4109  	struct sk_buff *orig_skb = skb;
4110  	struct netdev_queue *txq;
4111  	int ret = NETDEV_TX_BUSY;
4112  	bool again = false;
4113  
4114  	if (unlikely(!netif_running(dev) ||
4115  		     !netif_carrier_ok(dev)))
4116  		goto drop;
4117  
4118  	skb = validate_xmit_skb_list(skb, dev, &again);
4119  	if (skb != orig_skb)
4120  		goto drop;
4121  
4122  	skb_set_queue_mapping(skb, queue_id);
4123  	txq = skb_get_tx_queue(dev, skb);
4124  
4125  	local_bh_disable();
4126  
4127  	HARD_TX_LOCK(dev, txq, smp_processor_id());
4128  	if (!netif_xmit_frozen_or_drv_stopped(txq))
4129  		ret = netdev_start_xmit(skb, dev, txq, false);
4130  	HARD_TX_UNLOCK(dev, txq);
4131  
4132  	local_bh_enable();
4133  
4134  	if (!dev_xmit_complete(ret))
4135  		kfree_skb(skb);
4136  
4137  	return ret;
4138  drop:
4139  	atomic_long_inc(&dev->tx_dropped);
4140  	kfree_skb_list(skb);
4141  	return NET_XMIT_DROP;
4142  }
4143  EXPORT_SYMBOL(dev_direct_xmit);
4144  
4145  /*************************************************************************
4146   *			Receiver routines
4147   *************************************************************************/
4148  
4149  int netdev_max_backlog __read_mostly = 1000;
4150  EXPORT_SYMBOL(netdev_max_backlog);
4151  
4152  int netdev_tstamp_prequeue __read_mostly = 1;
4153  int netdev_budget __read_mostly = 300;
4154  unsigned int __read_mostly netdev_budget_usecs = 2000;
4155  int weight_p __read_mostly = 64;           /* old backlog weight */
4156  int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4157  int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4158  int dev_rx_weight __read_mostly = 64;
4159  int dev_tx_weight __read_mostly = 64;
4160  /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
4161  int gro_normal_batch __read_mostly = 8;
4162  
4163  /* Called with irq disabled */
4164  static inline void ____napi_schedule(struct softnet_data *sd,
4165  				     struct napi_struct *napi)
4166  {
4167  	list_add_tail(&napi->poll_list, &sd->poll_list);
4168  	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4169  }
4170  
4171  #ifdef CONFIG_RPS
4172  
4173  /* One global table that all flow-based protocols share. */
4174  struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4175  EXPORT_SYMBOL(rps_sock_flow_table);
4176  u32 rps_cpu_mask __read_mostly;
4177  EXPORT_SYMBOL(rps_cpu_mask);
4178  
4179  struct static_key_false rps_needed __read_mostly;
4180  EXPORT_SYMBOL(rps_needed);
4181  struct static_key_false rfs_needed __read_mostly;
4182  EXPORT_SYMBOL(rfs_needed);
4183  
4184  static struct rps_dev_flow *
4185  set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4186  	    struct rps_dev_flow *rflow, u16 next_cpu)
4187  {
4188  	if (next_cpu < nr_cpu_ids) {
4189  #ifdef CONFIG_RFS_ACCEL
4190  		struct netdev_rx_queue *rxqueue;
4191  		struct rps_dev_flow_table *flow_table;
4192  		struct rps_dev_flow *old_rflow;
4193  		u32 flow_id;
4194  		u16 rxq_index;
4195  		int rc;
4196  
4197  		/* Should we steer this flow to a different hardware queue? */
4198  		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4199  		    !(dev->features & NETIF_F_NTUPLE))
4200  			goto out;
4201  		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4202  		if (rxq_index == skb_get_rx_queue(skb))
4203  			goto out;
4204  
4205  		rxqueue = dev->_rx + rxq_index;
4206  		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4207  		if (!flow_table)
4208  			goto out;
4209  		flow_id = skb_get_hash(skb) & flow_table->mask;
4210  		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4211  							rxq_index, flow_id);
4212  		if (rc < 0)
4213  			goto out;
4214  		old_rflow = rflow;
4215  		rflow = &flow_table->flows[flow_id];
4216  		rflow->filter = rc;
4217  		if (old_rflow->filter == rflow->filter)
4218  			old_rflow->filter = RPS_NO_FILTER;
4219  	out:
4220  #endif
4221  		rflow->last_qtail =
4222  			per_cpu(softnet_data, next_cpu).input_queue_head;
4223  	}
4224  
4225  	rflow->cpu = next_cpu;
4226  	return rflow;
4227  }
4228  
4229  /*
4230   * get_rps_cpu is called from netif_receive_skb and returns the target
4231   * CPU from the RPS map of the receiving queue for a given skb.
4232   * rcu_read_lock must be held on entry.
4233   */
4234  static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4235  		       struct rps_dev_flow **rflowp)
4236  {
4237  	const struct rps_sock_flow_table *sock_flow_table;
4238  	struct netdev_rx_queue *rxqueue = dev->_rx;
4239  	struct rps_dev_flow_table *flow_table;
4240  	struct rps_map *map;
4241  	int cpu = -1;
4242  	u32 tcpu;
4243  	u32 hash;
4244  
4245  	if (skb_rx_queue_recorded(skb)) {
4246  		u16 index = skb_get_rx_queue(skb);
4247  
4248  		if (unlikely(index >= dev->real_num_rx_queues)) {
4249  			WARN_ONCE(dev->real_num_rx_queues > 1,
4250  				  "%s received packet on queue %u, but number "
4251  				  "of RX queues is %u\n",
4252  				  dev->name, index, dev->real_num_rx_queues);
4253  			goto done;
4254  		}
4255  		rxqueue += index;
4256  	}
4257  
4258  	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4259  
4260  	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4261  	map = rcu_dereference(rxqueue->rps_map);
4262  	if (!flow_table && !map)
4263  		goto done;
4264  
4265  	skb_reset_network_header(skb);
4266  	hash = skb_get_hash(skb);
4267  	if (!hash)
4268  		goto done;
4269  
4270  	sock_flow_table = rcu_dereference(rps_sock_flow_table);
4271  	if (flow_table && sock_flow_table) {
4272  		struct rps_dev_flow *rflow;
4273  		u32 next_cpu;
4274  		u32 ident;
4275  
4276  		/* First check the global flow table for a match */
4277  		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4278  		if ((ident ^ hash) & ~rps_cpu_mask)
4279  			goto try_rps;
4280  
4281  		next_cpu = ident & rps_cpu_mask;
4282  
4283  		/* OK, now we know there is a match,
4284  		 * we can look at the local (per receive queue) flow table
4285  		 */
4286  		rflow = &flow_table->flows[hash & flow_table->mask];
4287  		tcpu = rflow->cpu;
4288  
4289  		/*
4290  		 * If the desired CPU (where last recvmsg was done) is
4291  		 * different from current CPU (one in the rx-queue flow
4292  		 * table entry), switch if one of the following holds:
4293  		 *   - Current CPU is unset (>= nr_cpu_ids).
4294  		 *   - Current CPU is offline.
4295  		 *   - The current CPU's queue tail has advanced beyond the
4296  		 *     last packet that was enqueued using this table entry.
4297  		 *     This guarantees that all previous packets for the flow
4298  		 *     have been dequeued, thus preserving in order delivery.
4299  		 */
4300  		if (unlikely(tcpu != next_cpu) &&
4301  		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4302  		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4303  		      rflow->last_qtail)) >= 0)) {
4304  			tcpu = next_cpu;
4305  			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4306  		}
4307  
4308  		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4309  			*rflowp = rflow;
4310  			cpu = tcpu;
4311  			goto done;
4312  		}
4313  	}
4314  
4315  try_rps:
4316  
4317  	if (map) {
4318  		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4319  		if (cpu_online(tcpu)) {
4320  			cpu = tcpu;
4321  			goto done;
4322  		}
4323  	}
4324  
4325  done:
4326  	return cpu;
4327  }
4328  
4329  #ifdef CONFIG_RFS_ACCEL
4330  
4331  /**
4332   * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4333   * @dev: Device on which the filter was set
4334   * @rxq_index: RX queue index
4335   * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4336   * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4337   *
4338   * Drivers that implement ndo_rx_flow_steer() should periodically call
4339   * this function for each installed filter and remove the filters for
4340   * which it returns %true.
4341   */
4342  bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4343  			 u32 flow_id, u16 filter_id)
4344  {
4345  	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4346  	struct rps_dev_flow_table *flow_table;
4347  	struct rps_dev_flow *rflow;
4348  	bool expire = true;
4349  	unsigned int cpu;
4350  
4351  	rcu_read_lock();
4352  	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4353  	if (flow_table && flow_id <= flow_table->mask) {
4354  		rflow = &flow_table->flows[flow_id];
4355  		cpu = READ_ONCE(rflow->cpu);
4356  		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4357  		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4358  			   rflow->last_qtail) <
4359  		     (int)(10 * flow_table->mask)))
4360  			expire = false;
4361  	}
4362  	rcu_read_unlock();
4363  	return expire;
4364  }
4365  EXPORT_SYMBOL(rps_may_expire_flow);
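
/* Illustrative sketch (hypothetical driver code): an aRFS-capable driver
 * typically walks its installed steering filters from a periodic worker
 * and tears down the ones the stack no longer cares about:
 *
 *	struct my_arfs_rule *rule, *tmp;
 *
 *	list_for_each_entry_safe(rule, tmp, &priv->arfs_rules, list) {
 *		if (rps_may_expire_flow(priv->netdev, rule->rxq_index,
 *					rule->flow_id, rule->filter_id))
 *			my_arfs_del_rule(priv, rule);
 *	}
 *
 * struct my_arfs_rule, priv and my_arfs_del_rule() are made-up driver
 * bookkeeping; flow_id and filter_id must be the values exchanged with
 * ndo_rx_flow_steer() as documented above.
 */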
4366  
4367  #endif /* CONFIG_RFS_ACCEL */
4368  
4369  /* Called from hardirq (IPI) context */
4370  static void rps_trigger_softirq(void *data)
4371  {
4372  	struct softnet_data *sd = data;
4373  
4374  	____napi_schedule(sd, &sd->backlog);
4375  	sd->received_rps++;
4376  }
4377  
4378  #endif /* CONFIG_RPS */
4379  
4380  /*
4381   * Check if this softnet_data structure belongs to another CPU.
4382   * If so, queue it on our IPI list and return 1.
4383   * If not, return 0.
4384   */
4385  static int rps_ipi_queued(struct softnet_data *sd)
4386  {
4387  #ifdef CONFIG_RPS
4388  	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4389  
4390  	if (sd != mysd) {
4391  		sd->rps_ipi_next = mysd->rps_ipi_list;
4392  		mysd->rps_ipi_list = sd;
4393  
4394  		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4395  		return 1;
4396  	}
4397  #endif /* CONFIG_RPS */
4398  	return 0;
4399  }
4400  
4401  #ifdef CONFIG_NET_FLOW_LIMIT
4402  int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4403  #endif
4404  
4405  static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4406  {
4407  #ifdef CONFIG_NET_FLOW_LIMIT
4408  	struct sd_flow_limit *fl;
4409  	struct softnet_data *sd;
4410  	unsigned int old_flow, new_flow;
4411  
4412  	if (qlen < (netdev_max_backlog >> 1))
4413  		return false;
4414  
4415  	sd = this_cpu_ptr(&softnet_data);
4416  
4417  	rcu_read_lock();
4418  	fl = rcu_dereference(sd->flow_limit);
4419  	if (fl) {
4420  		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4421  		old_flow = fl->history[fl->history_head];
4422  		fl->history[fl->history_head] = new_flow;
4423  
4424  		fl->history_head++;
4425  		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4426  
4427  		if (likely(fl->buckets[old_flow]))
4428  			fl->buckets[old_flow]--;
4429  
4430  		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4431  			fl->count++;
4432  			rcu_read_unlock();
4433  			return true;
4434  		}
4435  	}
4436  	rcu_read_unlock();
4437  #endif
4438  	return false;
4439  }
4440  
4441  /*
4442   * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4443   * queue (may be a remote CPU queue).
4444   */
4445  static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4446  			      unsigned int *qtail)
4447  {
4448  	struct softnet_data *sd;
4449  	unsigned long flags;
4450  	unsigned int qlen;
4451  
4452  	sd = &per_cpu(softnet_data, cpu);
4453  
4454  	local_irq_save(flags);
4455  
4456  	rps_lock(sd);
4457  	if (!netif_running(skb->dev))
4458  		goto drop;
4459  	qlen = skb_queue_len(&sd->input_pkt_queue);
4460  	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4461  		if (qlen) {
4462  enqueue:
4463  			__skb_queue_tail(&sd->input_pkt_queue, skb);
4464  			input_queue_tail_incr_save(sd, qtail);
4465  			rps_unlock(sd);
4466  			local_irq_restore(flags);
4467  			return NET_RX_SUCCESS;
4468  		}
4469  
4470  		/* Schedule NAPI for the backlog device.
4471  		 * We can use a non-atomic operation since we own the queue lock.
4472  		 */
4473  		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4474  			if (!rps_ipi_queued(sd))
4475  				____napi_schedule(sd, &sd->backlog);
4476  		}
4477  		goto enqueue;
4478  	}
4479  
4480  drop:
4481  	sd->dropped++;
4482  	rps_unlock(sd);
4483  
4484  	local_irq_restore(flags);
4485  
4486  	atomic_long_inc(&skb->dev->rx_dropped);
4487  	kfree_skb(skb);
4488  	return NET_RX_DROP;
4489  }
4490  
4491  static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4492  {
4493  	struct net_device *dev = skb->dev;
4494  	struct netdev_rx_queue *rxqueue;
4495  
4496  	rxqueue = dev->_rx;
4497  
4498  	if (skb_rx_queue_recorded(skb)) {
4499  		u16 index = skb_get_rx_queue(skb);
4500  
4501  		if (unlikely(index >= dev->real_num_rx_queues)) {
4502  			WARN_ONCE(dev->real_num_rx_queues > 1,
4503  				  "%s received packet on queue %u, but number "
4504  				  "of RX queues is %u\n",
4505  				  dev->name, index, dev->real_num_rx_queues);
4506  
4507  			return rxqueue; /* Return first rxqueue */
4508  		}
4509  		rxqueue += index;
4510  	}
4511  	return rxqueue;
4512  }
4513  
4514  static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4515  				     struct xdp_buff *xdp,
4516  				     struct bpf_prog *xdp_prog)
4517  {
4518  	struct netdev_rx_queue *rxqueue;
4519  	void *orig_data, *orig_data_end;
4520  	u32 metalen, act = XDP_DROP;
4521  	__be16 orig_eth_type;
4522  	struct ethhdr *eth;
4523  	bool orig_bcast;
4524  	int hlen, off;
4525  	u32 mac_len;
4526  
4527  	/* Reinjected packets coming from act_mirred or similar should
4528  	 * not get XDP generic processing.
4529  	 */
4530  	if (skb_is_tc_redirected(skb))
4531  		return XDP_PASS;
4532  
4533  	/* XDP packets must be linear and must have sufficient headroom
4534  	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that native
4535  	 * XDP also provides, thus we need to enforce it here as well.
4536  	 */
4537  	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4538  	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4539  		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4540  		int troom = skb->tail + skb->data_len - skb->end;
4541  
4542  		/* In case we have to go down this path and also linearize,
4543  		 * let's do the pskb_expand_head() work just once here.
4544  		 */
4545  		if (pskb_expand_head(skb,
4546  				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4547  				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4548  			goto do_drop;
4549  		if (skb_linearize(skb))
4550  			goto do_drop;
4551  	}
4552  
4553  	/* The XDP program wants to see the packet starting at the MAC
4554  	 * header.
4555  	 */
4556  	mac_len = skb->data - skb_mac_header(skb);
4557  	hlen = skb_headlen(skb) + mac_len;
4558  	xdp->data = skb->data - mac_len;
4559  	xdp->data_meta = xdp->data;
4560  	xdp->data_end = xdp->data + hlen;
4561  	xdp->data_hard_start = skb->data - skb_headroom(skb);
4562  	orig_data_end = xdp->data_end;
4563  	orig_data = xdp->data;
4564  	eth = (struct ethhdr *)xdp->data;
4565  	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4566  	orig_eth_type = eth->h_proto;
4567  
4568  	rxqueue = netif_get_rxqueue(skb);
4569  	xdp->rxq = &rxqueue->xdp_rxq;
4570  
4571  	act = bpf_prog_run_xdp(xdp_prog, xdp);
4572  
4573  	/* check if bpf_xdp_adjust_head was used */
4574  	off = xdp->data - orig_data;
4575  	if (off) {
4576  		if (off > 0)
4577  			__skb_pull(skb, off);
4578  		else if (off < 0)
4579  			__skb_push(skb, -off);
4580  
4581  		skb->mac_header += off;
4582  		skb_reset_network_header(skb);
4583  	}
4584  
4585  	/* Check if bpf_xdp_adjust_tail was used. It can only "shrink"
4586  	 * the packet.
4587  	 */
4588  	off = orig_data_end - xdp->data_end;
4589  	if (off != 0) {
4590  		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4591  		skb->len -= off;
4592  
4593  	}
4594  
4595  	/* Check if XDP changed the eth header such that the skb needs an update */
4596  	eth = (struct ethhdr *)xdp->data;
4597  	if ((orig_eth_type != eth->h_proto) ||
4598  	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4599  		__skb_push(skb, ETH_HLEN);
4600  		skb->protocol = eth_type_trans(skb, skb->dev);
4601  	}
4602  
4603  	switch (act) {
4604  	case XDP_REDIRECT:
4605  	case XDP_TX:
4606  		__skb_push(skb, mac_len);
4607  		break;
4608  	case XDP_PASS:
4609  		metalen = xdp->data - xdp->data_meta;
4610  		if (metalen)
4611  			skb_metadata_set(skb, metalen);
4612  		break;
4613  	default:
4614  		bpf_warn_invalid_xdp_action(act);
4615  		/* fall through */
4616  	case XDP_ABORTED:
4617  		trace_xdp_exception(skb->dev, xdp_prog, act);
4618  		/* fall through */
4619  	case XDP_DROP:
4620  	do_drop:
4621  		kfree_skb(skb);
4622  		break;
4623  	}
4624  
4625  	return act;
4626  }
4627  
4628  /* When doing generic XDP we have to bypass the qdisc layer and the
4629   * network taps in order to match in-driver-XDP behavior.
4630   */
4631  void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4632  {
4633  	struct net_device *dev = skb->dev;
4634  	struct netdev_queue *txq;
4635  	bool free_skb = true;
4636  	int cpu, rc;
4637  
4638  	txq = netdev_core_pick_tx(dev, skb, NULL);
4639  	cpu = smp_processor_id();
4640  	HARD_TX_LOCK(dev, txq, cpu);
4641  	if (!netif_xmit_stopped(txq)) {
4642  		rc = netdev_start_xmit(skb, dev, txq, 0);
4643  		if (dev_xmit_complete(rc))
4644  			free_skb = false;
4645  	}
4646  	HARD_TX_UNLOCK(dev, txq);
4647  	if (free_skb) {
4648  		trace_xdp_exception(dev, xdp_prog, XDP_TX);
4649  		kfree_skb(skb);
4650  	}
4651  }
4652  EXPORT_SYMBOL_GPL(generic_xdp_tx);
4653  
4654  static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4655  
4656  int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4657  {
4658  	if (xdp_prog) {
4659  		struct xdp_buff xdp;
4660  		u32 act;
4661  		int err;
4662  
4663  		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4664  		if (act != XDP_PASS) {
4665  			switch (act) {
4666  			case XDP_REDIRECT:
4667  				err = xdp_do_generic_redirect(skb->dev, skb,
4668  							      &xdp, xdp_prog);
4669  				if (err)
4670  					goto out_redir;
4671  				break;
4672  			case XDP_TX:
4673  				generic_xdp_tx(skb, xdp_prog);
4674  				break;
4675  			}
4676  			return XDP_DROP;
4677  		}
4678  	}
4679  	return XDP_PASS;
4680  out_redir:
4681  	kfree_skb(skb);
4682  	return XDP_DROP;
4683  }
4684  EXPORT_SYMBOL_GPL(do_xdp_generic);
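
/* Illustrative sketch (assumption): callers outside this file run the
 * generic XDP program the same way __netif_receive_skb_core() does later
 * in this file, under rcu_read_lock() with preemption disabled, and treat
 * anything other than XDP_PASS as "the skb is gone":
 *
 *	preempt_disable();
 *	ret = do_xdp_generic(rcu_dereference(dev->xdp_prog), skb);
 *	preempt_enable();
 *	if (ret != XDP_PASS)
 *		return;
 *
 * By that point the skb has been redirected, transmitted or freed by the
 * XDP handling above, so the caller must not touch it again.
 */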
4685  
4686  static int netif_rx_internal(struct sk_buff *skb)
4687  {
4688  	int ret;
4689  
4690  	net_timestamp_check(netdev_tstamp_prequeue, skb);
4691  
4692  	trace_netif_rx(skb);
4693  
4694  #ifdef CONFIG_RPS
4695  	if (static_branch_unlikely(&rps_needed)) {
4696  		struct rps_dev_flow voidflow, *rflow = &voidflow;
4697  		int cpu;
4698  
4699  		preempt_disable();
4700  		rcu_read_lock();
4701  
4702  		cpu = get_rps_cpu(skb->dev, skb, &rflow);
4703  		if (cpu < 0)
4704  			cpu = smp_processor_id();
4705  
4706  		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4707  
4708  		rcu_read_unlock();
4709  		preempt_enable();
4710  	} else
4711  #endif
4712  	{
4713  		unsigned int qtail;
4714  
4715  		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4716  		put_cpu();
4717  	}
4718  	return ret;
4719  }
4720  
4721  /**
4722   *	netif_rx	-	post buffer to the network code
4723   *	@skb: buffer to post
4724   *
4725   *	This function receives a packet from a device driver and queues it for
4726   *	the upper (protocol) levels to process.  It always succeeds. The buffer
4727   *	may be dropped during processing for congestion control or by the
4728   *	protocol layers.
4729   *
4730   *	return values:
4731   *	NET_RX_SUCCESS	(no congestion)
4732   *	NET_RX_DROP     (packet was dropped)
4733   *
4734   */
4735  
4736  int netif_rx(struct sk_buff *skb)
4737  {
4738  	int ret;
4739  
4740  	trace_netif_rx_entry(skb);
4741  
4742  	ret = netif_rx_internal(skb);
4743  	trace_netif_rx_exit(ret);
4744  
4745  	return ret;
4746  }
4747  EXPORT_SYMBOL(netif_rx);
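
/* Illustrative sketch (hypothetical driver): the classical non-NAPI
 * receive path builds an skb in the interrupt handler, tags it with
 * eth_type_trans() and hands it to netif_rx():
 *
 *	struct sk_buff *skb;
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (!skb)
 *		return;
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * rx_buf and pkt_len stand in for the device's receive buffer and the
 * received length; a real driver would also bump its drop counter on the
 * allocation failure path.
 */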
4748  
4749  int netif_rx_ni(struct sk_buff *skb)
4750  {
4751  	int err;
4752  
4753  	trace_netif_rx_ni_entry(skb);
4754  
4755  	preempt_disable();
4756  	err = netif_rx_internal(skb);
4757  	if (local_softirq_pending())
4758  		do_softirq();
4759  	preempt_enable();
4760  	trace_netif_rx_ni_exit(err);
4761  
4762  	return err;
4763  }
4764  EXPORT_SYMBOL(netif_rx_ni);
4765  
4766  static __latent_entropy void net_tx_action(struct softirq_action *h)
4767  {
4768  	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4769  
4770  	if (sd->completion_queue) {
4771  		struct sk_buff *clist;
4772  
4773  		local_irq_disable();
4774  		clist = sd->completion_queue;
4775  		sd->completion_queue = NULL;
4776  		local_irq_enable();
4777  
4778  		while (clist) {
4779  			struct sk_buff *skb = clist;
4780  
4781  			clist = clist->next;
4782  
4783  			WARN_ON(refcount_read(&skb->users));
4784  			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4785  				trace_consume_skb(skb);
4786  			else
4787  				trace_kfree_skb(skb, net_tx_action);
4788  
4789  			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4790  				__kfree_skb(skb);
4791  			else
4792  				__kfree_skb_defer(skb);
4793  		}
4794  
4795  		__kfree_skb_flush();
4796  	}
4797  
4798  	if (sd->output_queue) {
4799  		struct Qdisc *head;
4800  
4801  		local_irq_disable();
4802  		head = sd->output_queue;
4803  		sd->output_queue = NULL;
4804  		sd->output_queue_tailp = &sd->output_queue;
4805  		local_irq_enable();
4806  
4807  		while (head) {
4808  			struct Qdisc *q = head;
4809  			spinlock_t *root_lock = NULL;
4810  
4811  			head = head->next_sched;
4812  
4813  			if (!(q->flags & TCQ_F_NOLOCK)) {
4814  				root_lock = qdisc_lock(q);
4815  				spin_lock(root_lock);
4816  			}
4817  			/* We need to make sure head->next_sched is read
4818  			 * before clearing __QDISC_STATE_SCHED
4819  			 */
4820  			smp_mb__before_atomic();
4821  			clear_bit(__QDISC_STATE_SCHED, &q->state);
4822  			qdisc_run(q);
4823  			if (root_lock)
4824  				spin_unlock(root_lock);
4825  		}
4826  	}
4827  
4828  	xfrm_dev_backlog(sd);
4829  }
4830  
4831  #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4832  /* This hook is defined here for ATM LANE */
4833  int (*br_fdb_test_addr_hook)(struct net_device *dev,
4834  			     unsigned char *addr) __read_mostly;
4835  EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4836  #endif
4837  
4838  static inline struct sk_buff *
4839  sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4840  		   struct net_device *orig_dev)
4841  {
4842  #ifdef CONFIG_NET_CLS_ACT
4843  	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4844  	struct tcf_result cl_res;
4845  
4846  	/* If there's at least one ingress present somewhere (so
4847  	 * we get here via enabled static key), remaining devices
4848  	 * that are not configured with an ingress qdisc will bail
4849  	 * out here.
4850  	 */
4851  	if (!miniq)
4852  		return skb;
4853  
4854  	if (*pt_prev) {
4855  		*ret = deliver_skb(skb, *pt_prev, orig_dev);
4856  		*pt_prev = NULL;
4857  	}
4858  
4859  	qdisc_skb_cb(skb)->pkt_len = skb->len;
4860  	skb->tc_at_ingress = 1;
4861  	mini_qdisc_bstats_cpu_update(miniq, skb);
4862  
4863  	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
4864  	case TC_ACT_OK:
4865  	case TC_ACT_RECLASSIFY:
4866  		skb->tc_index = TC_H_MIN(cl_res.classid);
4867  		break;
4868  	case TC_ACT_SHOT:
4869  		mini_qdisc_qstats_cpu_drop(miniq);
4870  		kfree_skb(skb);
4871  		return NULL;
4872  	case TC_ACT_STOLEN:
4873  	case TC_ACT_QUEUED:
4874  	case TC_ACT_TRAP:
4875  		consume_skb(skb);
4876  		return NULL;
4877  	case TC_ACT_REDIRECT:
4878  		/* skb_mac_header check was done by cls/act_bpf, so
4879  		 * we can safely push the L2 header back before
4880  		 * redirecting to another netdev
4881  		 */
4882  		__skb_push(skb, skb->mac_len);
4883  		skb_do_redirect(skb);
4884  		return NULL;
4885  	case TC_ACT_CONSUMED:
4886  		return NULL;
4887  	default:
4888  		break;
4889  	}
4890  #endif /* CONFIG_NET_CLS_ACT */
4891  	return skb;
4892  }
4893  
4894  /**
4895   *	netdev_is_rx_handler_busy - check if receive handler is registered
4896   *	@dev: device to check
4897   *
4898   *	Check if a receive handler is already registered for a given device.
4899   *	Return true if there is one.
4900   *
4901   *	The caller must hold the rtnl_mutex.
4902   */
4903  bool netdev_is_rx_handler_busy(struct net_device *dev)
4904  {
4905  	ASSERT_RTNL();
4906  	return dev && rtnl_dereference(dev->rx_handler);
4907  }
4908  EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4909  
4910  /**
4911   *	netdev_rx_handler_register - register receive handler
4912   *	@dev: device to register a handler for
4913   *	@rx_handler: receive handler to register
4914   *	@rx_handler_data: data pointer that is used by rx handler
4915   *
4916   *	Register a receive handler for a device. This handler will then be
4917   *	called from __netif_receive_skb. A negative errno code is returned
4918   *	on a failure.
4919   *
4920   *	The caller must hold the rtnl_mutex.
4921   *
4922   *	For a general description of rx_handler, see enum rx_handler_result.
4923   */
4924  int netdev_rx_handler_register(struct net_device *dev,
4925  			       rx_handler_func_t *rx_handler,
4926  			       void *rx_handler_data)
4927  {
4928  	if (netdev_is_rx_handler_busy(dev))
4929  		return -EBUSY;
4930  
4931  	if (dev->priv_flags & IFF_NO_RX_HANDLER)
4932  		return -EINVAL;
4933  
4934  	/* Note: rx_handler_data must be set before rx_handler */
4935  	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4936  	rcu_assign_pointer(dev->rx_handler, rx_handler);
4937  
4938  	return 0;
4939  }
4940  EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
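
/* Illustrative sketch (assumption): an upper device in the style of a
 * bridge or bonding driver claims a lower device's receive path roughly
 * like this, with RTNL held because device state is being changed:
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(lower_dev, my_rx_handler, my_port);
 *	if (err)
 *		goto unwind;
 *
 * my_rx_handler must be an rx_handler_func_t returning one of the
 * RX_HANDLER_* codes; my_port is hypothetical per-port state that the
 * handler later fetches via rcu_dereference(skb->dev->rx_handler_data).
 */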
4941  
4942  /**
4943   *	netdev_rx_handler_unregister - unregister receive handler
4944   *	@dev: device to unregister a handler from
4945   *
4946   *	Unregister a receive handler from a device.
4947   *
4948   *	The caller must hold the rtnl_mutex.
4949   */
4950  void netdev_rx_handler_unregister(struct net_device *dev)
4951  {
4952  
4953  	ASSERT_RTNL();
4954  	RCU_INIT_POINTER(dev->rx_handler, NULL);
4955  	/* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
4956  	 * section is guaranteed to see a non-NULL rx_handler_data
4957  	 * as well.
4958  	 */
4959  	synchronize_net();
4960  	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
4961  }
4962  EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4963  
4964  /*
4965   * Limit the use of PFMEMALLOC reserves to those protocols that implement
4966   * the special handling of PFMEMALLOC skbs.
4967   */
4968  static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4969  {
4970  	switch (skb->protocol) {
4971  	case htons(ETH_P_ARP):
4972  	case htons(ETH_P_IP):
4973  	case htons(ETH_P_IPV6):
4974  	case htons(ETH_P_8021Q):
4975  	case htons(ETH_P_8021AD):
4976  		return true;
4977  	default:
4978  		return false;
4979  	}
4980  }
4981  
4982  static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4983  			     int *ret, struct net_device *orig_dev)
4984  {
4985  	if (nf_hook_ingress_active(skb)) {
4986  		int ingress_retval;
4987  
4988  		if (*pt_prev) {
4989  			*ret = deliver_skb(skb, *pt_prev, orig_dev);
4990  			*pt_prev = NULL;
4991  		}
4992  
4993  		rcu_read_lock();
4994  		ingress_retval = nf_hook_ingress(skb);
4995  		rcu_read_unlock();
4996  		return ingress_retval;
4997  	}
4998  	return 0;
4999  }
5000  
5001  static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
5002  				    struct packet_type **ppt_prev)
5003  {
5004  	struct packet_type *ptype, *pt_prev;
5005  	rx_handler_func_t *rx_handler;
5006  	struct net_device *orig_dev;
5007  	bool deliver_exact = false;
5008  	int ret = NET_RX_DROP;
5009  	__be16 type;
5010  
5011  	net_timestamp_check(!netdev_tstamp_prequeue, skb);
5012  
5013  	trace_netif_receive_skb(skb);
5014  
5015  	orig_dev = skb->dev;
5016  
5017  	skb_reset_network_header(skb);
5018  	if (!skb_transport_header_was_set(skb))
5019  		skb_reset_transport_header(skb);
5020  	skb_reset_mac_len(skb);
5021  
5022  	pt_prev = NULL;
5023  
5024  another_round:
5025  	skb->skb_iif = skb->dev->ifindex;
5026  
5027  	__this_cpu_inc(softnet_data.processed);
5028  
5029  	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5030  		int ret2;
5031  
5032  		preempt_disable();
5033  		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5034  		preempt_enable();
5035  
5036  		if (ret2 != XDP_PASS)
5037  			return NET_RX_DROP;
5038  		skb_reset_mac_len(skb);
5039  	}
5040  
5041  	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
5042  	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
5043  		skb = skb_vlan_untag(skb);
5044  		if (unlikely(!skb))
5045  			goto out;
5046  	}
5047  
5048  	if (skb_skip_tc_classify(skb))
5049  		goto skip_classify;
5050  
5051  	if (pfmemalloc)
5052  		goto skip_taps;
5053  
5054  	list_for_each_entry_rcu(ptype, &ptype_all, list) {
5055  		if (pt_prev)
5056  			ret = deliver_skb(skb, pt_prev, orig_dev);
5057  		pt_prev = ptype;
5058  	}
5059  
5060  	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5061  		if (pt_prev)
5062  			ret = deliver_skb(skb, pt_prev, orig_dev);
5063  		pt_prev = ptype;
5064  	}
5065  
5066  skip_taps:
5067  #ifdef CONFIG_NET_INGRESS
5068  	if (static_branch_unlikely(&ingress_needed_key)) {
5069  		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
5070  		if (!skb)
5071  			goto out;
5072  
5073  		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5074  			goto out;
5075  	}
5076  #endif
5077  	skb_reset_tc(skb);
5078  skip_classify:
5079  	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5080  		goto drop;
5081  
5082  	if (skb_vlan_tag_present(skb)) {
5083  		if (pt_prev) {
5084  			ret = deliver_skb(skb, pt_prev, orig_dev);
5085  			pt_prev = NULL;
5086  		}
5087  		if (vlan_do_receive(&skb))
5088  			goto another_round;
5089  		else if (unlikely(!skb))
5090  			goto out;
5091  	}
5092  
5093  	rx_handler = rcu_dereference(skb->dev->rx_handler);
5094  	if (rx_handler) {
5095  		if (pt_prev) {
5096  			ret = deliver_skb(skb, pt_prev, orig_dev);
5097  			pt_prev = NULL;
5098  		}
5099  		switch (rx_handler(&skb)) {
5100  		case RX_HANDLER_CONSUMED:
5101  			ret = NET_RX_SUCCESS;
5102  			goto out;
5103  		case RX_HANDLER_ANOTHER:
5104  			goto another_round;
5105  		case RX_HANDLER_EXACT:
5106  			deliver_exact = true;
5107  		case RX_HANDLER_PASS:
5108  			break;
5109  		default:
5110  			BUG();
5111  		}
5112  	}
5113  
5114  	if (unlikely(skb_vlan_tag_present(skb))) {
5115  check_vlan_id:
5116  		if (skb_vlan_tag_get_id(skb)) {
5117  			/* VLAN id is non-zero and vlan_do_receive() above couldn't
5118  			 * find a vlan device.
5119  			 */
5120  			skb->pkt_type = PACKET_OTHERHOST;
5121  		} else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
5122  			   skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
5123  			/* Outer header is 802.1P with vlan 0, inner header is
5124  			 * 802.1Q or 802.1AD and vlan_do_receive() above could
5125  			 * not find vlan dev for vlan id 0.
5126  			 */
5127  			__vlan_hwaccel_clear_tag(skb);
5128  			skb = skb_vlan_untag(skb);
5129  			if (unlikely(!skb))
5130  				goto out;
5131  			if (vlan_do_receive(&skb))
5132  				/* After stripping off 802.1P header with vlan 0
5133  				 * vlan dev is found for inner header.
5134  				 */
5135  				goto another_round;
5136  			else if (unlikely(!skb))
5137  				goto out;
5138  			else
5139  				/* We have stripped outer 802.1P vlan 0 header.
5140  				 * But could not find vlan dev.
5141  				 * check again for vlan id to set OTHERHOST.
5142  				 */
5143  				goto check_vlan_id;
5144  		}
5145  		/* Note: we might in the future use prio bits
5146  		 * and set skb->priority like in vlan_do_receive()
5147  		 * For the time being, just ignore Priority Code Point
5148  		 */
5149  		__vlan_hwaccel_clear_tag(skb);
5150  	}
5151  
5152  	type = skb->protocol;
5153  
5154  	/* deliver only exact match when indicated */
5155  	if (likely(!deliver_exact)) {
5156  		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5157  				       &ptype_base[ntohs(type) &
5158  						   PTYPE_HASH_MASK]);
5159  	}
5160  
5161  	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5162  			       &orig_dev->ptype_specific);
5163  
5164  	if (unlikely(skb->dev != orig_dev)) {
5165  		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5166  				       &skb->dev->ptype_specific);
5167  	}
5168  
5169  	if (pt_prev) {
5170  		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5171  			goto drop;
5172  		*ppt_prev = pt_prev;
5173  	} else {
5174  drop:
5175  		if (!deliver_exact)
5176  			atomic_long_inc(&skb->dev->rx_dropped);
5177  		else
5178  			atomic_long_inc(&skb->dev->rx_nohandler);
5179  		kfree_skb(skb);
5180  		/* Jamal, now you will not be able to escape explaining
5181  		 * to me how you were going to use this. :-)
5182  		 */
5183  		ret = NET_RX_DROP;
5184  	}
5185  
5186  out:
5187  	return ret;
5188  }
5189  
5190  static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5191  {
5192  	struct net_device *orig_dev = skb->dev;
5193  	struct packet_type *pt_prev = NULL;
5194  	int ret;
5195  
5196  	ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5197  	if (pt_prev)
5198  		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5199  					 skb->dev, pt_prev, orig_dev);
5200  	return ret;
5201  }
5202  
5203  /**
5204   *	netif_receive_skb_core - special purpose version of netif_receive_skb
5205   *	@skb: buffer to process
5206   *
5207   *	More direct receive version of netif_receive_skb().  It should
5208   *	only be used by callers that have a need to skip RPS and Generic XDP.
5209   *	Caller must also take care of handling if (page_is_)pfmemalloc.
5210   *
5211   *	This function may only be called from softirq context and interrupts
5212   *	should be enabled.
5213   *
5214   *	Return values (usually ignored):
5215   *	NET_RX_SUCCESS: no congestion
5216   *	NET_RX_DROP: packet was dropped
5217   */
5218  int netif_receive_skb_core(struct sk_buff *skb)
5219  {
5220  	int ret;
5221  
5222  	rcu_read_lock();
5223  	ret = __netif_receive_skb_one_core(skb, false);
5224  	rcu_read_unlock();
5225  
5226  	return ret;
5227  }
5228  EXPORT_SYMBOL(netif_receive_skb_core);
5229  
5230  static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5231  						  struct packet_type *pt_prev,
5232  						  struct net_device *orig_dev)
5233  {
5234  	struct sk_buff *skb, *next;
5235  
5236  	if (!pt_prev)
5237  		return;
5238  	if (list_empty(head))
5239  		return;
5240  	if (pt_prev->list_func != NULL)
5241  		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5242  				   ip_list_rcv, head, pt_prev, orig_dev);
5243  	else
5244  		list_for_each_entry_safe(skb, next, head, list) {
5245  			skb_list_del_init(skb);
5246  			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5247  		}
5248  }
5249  
5250  static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5251  {
5252  	/* Fast-path assumptions:
5253  	 * - There is no RX handler.
5254  	 * - Only one packet_type matches.
5255  	 * If either of these fails, we will end up doing some per-packet
5256  	 * processing in-line, then handling the 'last ptype' for the whole
5257  	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5258  	 * because the 'last ptype' must be constant across the sublist, and all
5259  	 * other ptypes are handled per-packet.
5260  	 */
5261  	/* Current (common) ptype of sublist */
5262  	struct packet_type *pt_curr = NULL;
5263  	/* Current (common) orig_dev of sublist */
5264  	struct net_device *od_curr = NULL;
5265  	struct list_head sublist;
5266  	struct sk_buff *skb, *next;
5267  
5268  	INIT_LIST_HEAD(&sublist);
5269  	list_for_each_entry_safe(skb, next, head, list) {
5270  		struct net_device *orig_dev = skb->dev;
5271  		struct packet_type *pt_prev = NULL;
5272  
5273  		skb_list_del_init(skb);
5274  		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5275  		if (!pt_prev)
5276  			continue;
5277  		if (pt_curr != pt_prev || od_curr != orig_dev) {
5278  			/* dispatch old sublist */
5279  			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5280  			/* start new sublist */
5281  			INIT_LIST_HEAD(&sublist);
5282  			pt_curr = pt_prev;
5283  			od_curr = orig_dev;
5284  		}
5285  		list_add_tail(&skb->list, &sublist);
5286  	}
5287  
5288  	/* dispatch final sublist */
5289  	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5290  }
5291  
5292  static int __netif_receive_skb(struct sk_buff *skb)
5293  {
5294  	int ret;
5295  
5296  	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5297  		unsigned int noreclaim_flag;
5298  
5299  		/*
5300  		 * PFMEMALLOC skbs are special, they should
5301  		 * - be delivered to SOCK_MEMALLOC sockets only
5302  		 * - stay away from userspace
5303  		 * - have bounded memory usage
5304  		 *
5305  		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5306  		 * context down to all allocation sites.
5307  		 */
5308  		noreclaim_flag = memalloc_noreclaim_save();
5309  		ret = __netif_receive_skb_one_core(skb, true);
5310  		memalloc_noreclaim_restore(noreclaim_flag);
5311  	} else
5312  		ret = __netif_receive_skb_one_core(skb, false);
5313  
5314  	return ret;
5315  }
5316  
5317  static void __netif_receive_skb_list(struct list_head *head)
5318  {
5319  	unsigned long noreclaim_flag = 0;
5320  	struct sk_buff *skb, *next;
5321  	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5322  
5323  	list_for_each_entry_safe(skb, next, head, list) {
5324  		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5325  			struct list_head sublist;
5326  
5327  			/* Handle the previous sublist */
5328  			list_cut_before(&sublist, head, &skb->list);
5329  			if (!list_empty(&sublist))
5330  				__netif_receive_skb_list_core(&sublist, pfmemalloc);
5331  			pfmemalloc = !pfmemalloc;
5332  			/* See comments in __netif_receive_skb */
5333  			if (pfmemalloc)
5334  				noreclaim_flag = memalloc_noreclaim_save();
5335  			else
5336  				memalloc_noreclaim_restore(noreclaim_flag);
5337  		}
5338  	}
5339  	/* Handle the remaining sublist */
5340  	if (!list_empty(head))
5341  		__netif_receive_skb_list_core(head, pfmemalloc);
5342  	/* Restore pflags */
5343  	if (pfmemalloc)
5344  		memalloc_noreclaim_restore(noreclaim_flag);
5345  }
5346  
5347  static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5348  {
5349  	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5350  	struct bpf_prog *new = xdp->prog;
5351  	int ret = 0;
5352  
5353  	switch (xdp->command) {
5354  	case XDP_SETUP_PROG:
5355  		rcu_assign_pointer(dev->xdp_prog, new);
5356  		if (old)
5357  			bpf_prog_put(old);
5358  
5359  		if (old && !new) {
5360  			static_branch_dec(&generic_xdp_needed_key);
5361  		} else if (new && !old) {
5362  			static_branch_inc(&generic_xdp_needed_key);
5363  			dev_disable_lro(dev);
5364  			dev_disable_gro_hw(dev);
5365  		}
5366  		break;
5367  
5368  	case XDP_QUERY_PROG:
5369  		xdp->prog_id = old ? old->aux->id : 0;
5370  		break;
5371  
5372  	default:
5373  		ret = -EINVAL;
5374  		break;
5375  	}
5376  
5377  	return ret;
5378  }
5379  
5380  static int netif_receive_skb_internal(struct sk_buff *skb)
5381  {
5382  	int ret;
5383  
5384  	net_timestamp_check(netdev_tstamp_prequeue, skb);
5385  
5386  	if (skb_defer_rx_timestamp(skb))
5387  		return NET_RX_SUCCESS;
5388  
5389  	rcu_read_lock();
5390  #ifdef CONFIG_RPS
5391  	if (static_branch_unlikely(&rps_needed)) {
5392  		struct rps_dev_flow voidflow, *rflow = &voidflow;
5393  		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5394  
5395  		if (cpu >= 0) {
5396  			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5397  			rcu_read_unlock();
5398  			return ret;
5399  		}
5400  	}
5401  #endif
5402  	ret = __netif_receive_skb(skb);
5403  	rcu_read_unlock();
5404  	return ret;
5405  }
5406  
5407  static void netif_receive_skb_list_internal(struct list_head *head)
5408  {
5409  	struct sk_buff *skb, *next;
5410  	struct list_head sublist;
5411  
5412  	INIT_LIST_HEAD(&sublist);
5413  	list_for_each_entry_safe(skb, next, head, list) {
5414  		net_timestamp_check(netdev_tstamp_prequeue, skb);
5415  		skb_list_del_init(skb);
5416  		if (!skb_defer_rx_timestamp(skb))
5417  			list_add_tail(&skb->list, &sublist);
5418  	}
5419  	list_splice_init(&sublist, head);
5420  
5421  	rcu_read_lock();
5422  #ifdef CONFIG_RPS
5423  	if (static_branch_unlikely(&rps_needed)) {
5424  		list_for_each_entry_safe(skb, next, head, list) {
5425  			struct rps_dev_flow voidflow, *rflow = &voidflow;
5426  			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5427  
5428  			if (cpu >= 0) {
5429  				/* Will be handled, remove from list */
5430  				skb_list_del_init(skb);
5431  				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5432  			}
5433  		}
5434  	}
5435  #endif
5436  	__netif_receive_skb_list(head);
5437  	rcu_read_unlock();
5438  }
5439  
5440  /**
5441   *	netif_receive_skb - process receive buffer from network
5442   *	@skb: buffer to process
5443   *
5444   *	netif_receive_skb() is the main receive data processing function.
5445   *	It always succeeds. The buffer may be dropped during processing
5446   *	for congestion control or by the protocol layers.
5447   *
5448   *	This function may only be called from softirq context and interrupts
5449   *	should be enabled.
5450   *
5451   *	Return values (usually ignored):
5452   *	NET_RX_SUCCESS: no congestion
5453   *	NET_RX_DROP: packet was dropped
5454   */
5455  int netif_receive_skb(struct sk_buff *skb)
5456  {
5457  	int ret;
5458  
5459  	trace_netif_receive_skb_entry(skb);
5460  
5461  	ret = netif_receive_skb_internal(skb);
5462  	trace_netif_receive_skb_exit(ret);
5463  
5464  	return ret;
5465  }
5466  EXPORT_SYMBOL(netif_receive_skb);
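/* Illustrative sketch only (not part of dev.c): how a hypothetical driver's
 * receive path might hand one already-received frame to the stack with
 * netif_receive_skb().  As documented above, this must run in softirq
 * context with interrupts enabled.  example_rx_one() and its parameters are
 * assumptions made purely for illustration.
 */
static int example_rx_one(struct net_device *dev, const void *hw_buf,
			  unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb)
		return NET_RX_DROP;

	skb_put_data(skb, hw_buf, len);	/* copy the frame from the HW buffer */
	skb->protocol = eth_type_trans(skb, dev);

	/* Return value is usually ignored; NET_RX_DROP only reports congestion. */
	return netif_receive_skb(skb);
}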
5467  
5468  /**
5469   *	netif_receive_skb_list - process many receive buffers from network
5470   *	@head: list of skbs to process.
5471   *
5472   *	Since return value of netif_receive_skb() is normally ignored, and
5473   *	wouldn't be meaningful for a list, this function returns void.
5474   *
5475   *	This function may only be called from softirq context and interrupts
5476   *	should be enabled.
5477   */
5478  void netif_receive_skb_list(struct list_head *head)
5479  {
5480  	struct sk_buff *skb;
5481  
5482  	if (list_empty(head))
5483  		return;
5484  	if (trace_netif_receive_skb_list_entry_enabled()) {
5485  		list_for_each_entry(skb, head, list)
5486  			trace_netif_receive_skb_list_entry(skb);
5487  	}
5488  	netif_receive_skb_list_internal(head);
5489  	trace_netif_receive_skb_list_exit(0);
5490  }
5491  EXPORT_SYMBOL(netif_receive_skb_list);
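/* Illustrative sketch only (not part of dev.c): a hypothetical driver that
 * already holds an array of received skbs can batch them onto a local list
 * and hand them to the stack in a single netif_receive_skb_list() call.
 * example_rx_array() is an assumption made purely for illustration.
 */
static void example_rx_array(struct net_device *dev, struct sk_buff **skbs,
			     unsigned int n)
{
	LIST_HEAD(rx_list);
	unsigned int i;

	for (i = 0; i < n; i++) {
		skbs[i]->protocol = eth_type_trans(skbs[i], dev);
		list_add_tail(&skbs[i]->list, &rx_list);
	}

	/* The empty-list case is handled inside netif_receive_skb_list(). */
	netif_receive_skb_list(&rx_list);
}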
5492  
5493  DEFINE_PER_CPU(struct work_struct, flush_works);
5494  
5495  /* Network device is going away, flush any packets still pending */
5496  static void flush_backlog(struct work_struct *work)
5497  {
5498  	struct sk_buff *skb, *tmp;
5499  	struct softnet_data *sd;
5500  
5501  	local_bh_disable();
5502  	sd = this_cpu_ptr(&softnet_data);
5503  
5504  	local_irq_disable();
5505  	rps_lock(sd);
5506  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5507  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5508  			__skb_unlink(skb, &sd->input_pkt_queue);
5509  			kfree_skb(skb);
5510  			input_queue_head_incr(sd);
5511  		}
5512  	}
5513  	rps_unlock(sd);
5514  	local_irq_enable();
5515  
5516  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5517  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5518  			__skb_unlink(skb, &sd->process_queue);
5519  			kfree_skb(skb);
5520  			input_queue_head_incr(sd);
5521  		}
5522  	}
5523  	local_bh_enable();
5524  }
5525  
5526  static void flush_all_backlogs(void)
5527  {
5528  	unsigned int cpu;
5529  
5530  	get_online_cpus();
5531  
5532  	for_each_online_cpu(cpu)
5533  		queue_work_on(cpu, system_highpri_wq,
5534  			      per_cpu_ptr(&flush_works, cpu));
5535  
5536  	for_each_online_cpu(cpu)
5537  		flush_work(per_cpu_ptr(&flush_works, cpu));
5538  
5539  	put_online_cpus();
5540  }
5541  
5542  /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
5543  static void gro_normal_list(struct napi_struct *napi)
5544  {
5545  	if (!napi->rx_count)
5546  		return;
5547  	netif_receive_skb_list_internal(&napi->rx_list);
5548  	INIT_LIST_HEAD(&napi->rx_list);
5549  	napi->rx_count = 0;
5550  }
5551  
5552  /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
5553   * pass the whole batch up to the stack.
5554   */
5555  static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
5556  {
5557  	list_add_tail(&skb->list, &napi->rx_list);
5558  	if (++napi->rx_count >= gro_normal_batch)
5559  		gro_normal_list(napi);
5560  }
5561  
5562  INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
5563  INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
5564  static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
5565  {
5566  	struct packet_offload *ptype;
5567  	__be16 type = skb->protocol;
5568  	struct list_head *head = &offload_base;
5569  	int err = -ENOENT;
5570  
5571  	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5572  
5573  	if (NAPI_GRO_CB(skb)->count == 1) {
5574  		skb_shinfo(skb)->gso_size = 0;
5575  		goto out;
5576  	}
5577  
5578  	rcu_read_lock();
5579  	list_for_each_entry_rcu(ptype, head, list) {
5580  		if (ptype->type != type || !ptype->callbacks.gro_complete)
5581  			continue;
5582  
5583  		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5584  					 ipv6_gro_complete, inet_gro_complete,
5585  					 skb, 0);
5586  		break;
5587  	}
5588  	rcu_read_unlock();
5589  
5590  	if (err) {
5591  		WARN_ON(&ptype->list == head);
5592  		kfree_skb(skb);
5593  		return NET_RX_SUCCESS;
5594  	}
5595  
5596  out:
5597  	gro_normal_one(napi, skb);
5598  	return NET_RX_SUCCESS;
5599  }
5600  
5601  static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5602  				   bool flush_old)
5603  {
5604  	struct list_head *head = &napi->gro_hash[index].list;
5605  	struct sk_buff *skb, *p;
5606  
5607  	list_for_each_entry_safe_reverse(skb, p, head, list) {
5608  		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5609  			return;
5610  		skb_list_del_init(skb);
5611  		napi_gro_complete(napi, skb);
5612  		napi->gro_hash[index].count--;
5613  	}
5614  
5615  	if (!napi->gro_hash[index].count)
5616  		__clear_bit(index, &napi->gro_bitmask);
5617  }
5618  
5619  /* napi->gro_hash[].list contains packets ordered by age,
5620   * with the youngest packets at the head of the list.
5621   * Complete skbs in reverse order to reduce latencies.
5622   */
5623  void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5624  {
5625  	unsigned long bitmask = napi->gro_bitmask;
5626  	unsigned int i, base = ~0U;
5627  
5628  	while ((i = ffs(bitmask)) != 0) {
5629  		bitmask >>= i;
5630  		base += i;
5631  		__napi_gro_flush_chain(napi, base, flush_old);
5632  	}
5633  }
5634  EXPORT_SYMBOL(napi_gro_flush);
5635  
5636  static struct list_head *gro_list_prepare(struct napi_struct *napi,
5637  					  struct sk_buff *skb)
5638  {
5639  	unsigned int maclen = skb->dev->hard_header_len;
5640  	u32 hash = skb_get_hash_raw(skb);
5641  	struct list_head *head;
5642  	struct sk_buff *p;
5643  
5644  	head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5645  	list_for_each_entry(p, head, list) {
5646  		unsigned long diffs;
5647  
5648  		NAPI_GRO_CB(p)->flush = 0;
5649  
5650  		if (hash != skb_get_hash_raw(p)) {
5651  			NAPI_GRO_CB(p)->same_flow = 0;
5652  			continue;
5653  		}
5654  
5655  		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5656  		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5657  		if (skb_vlan_tag_present(p))
5658  			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
5659  		diffs |= skb_metadata_dst_cmp(p, skb);
5660  		diffs |= skb_metadata_differs(p, skb);
5661  		if (maclen == ETH_HLEN)
5662  			diffs |= compare_ether_header(skb_mac_header(p),
5663  						      skb_mac_header(skb));
5664  		else if (!diffs)
5665  			diffs = memcmp(skb_mac_header(p),
5666  				       skb_mac_header(skb),
5667  				       maclen);
5668  		NAPI_GRO_CB(p)->same_flow = !diffs;
5669  	}
5670  
5671  	return head;
5672  }
5673  
5674  static void skb_gro_reset_offset(struct sk_buff *skb)
5675  {
5676  	const struct skb_shared_info *pinfo = skb_shinfo(skb);
5677  	const skb_frag_t *frag0 = &pinfo->frags[0];
5678  
5679  	NAPI_GRO_CB(skb)->data_offset = 0;
5680  	NAPI_GRO_CB(skb)->frag0 = NULL;
5681  	NAPI_GRO_CB(skb)->frag0_len = 0;
5682  
5683  	if (!skb_headlen(skb) && pinfo->nr_frags &&
5684  	    !PageHighMem(skb_frag_page(frag0))) {
5685  		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5686  		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5687  						    skb_frag_size(frag0),
5688  						    skb->end - skb->tail);
5689  	}
5690  }
5691  
5692  static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5693  {
5694  	struct skb_shared_info *pinfo = skb_shinfo(skb);
5695  
5696  	BUG_ON(skb->end - skb->tail < grow);
5697  
5698  	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5699  
5700  	skb->data_len -= grow;
5701  	skb->tail += grow;
5702  
5703  	skb_frag_off_add(&pinfo->frags[0], grow);
5704  	skb_frag_size_sub(&pinfo->frags[0], grow);
5705  
5706  	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5707  		skb_frag_unref(skb, 0);
5708  		memmove(pinfo->frags, pinfo->frags + 1,
5709  			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
5710  	}
5711  }
5712  
5713  static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
5714  {
5715  	struct sk_buff *oldest;
5716  
5717  	oldest = list_last_entry(head, struct sk_buff, list);
5718  
5719  	/* We are called with head length >= MAX_GRO_SKBS, so this is
5720  	 * impossible.
5721  	 */
5722  	if (WARN_ON_ONCE(!oldest))
5723  		return;
5724  
5725  	/* Do not adjust napi->gro_hash[].count, caller is adding a new
5726  	 * SKB to the chain.
5727  	 */
5728  	skb_list_del_init(oldest);
5729  	napi_gro_complete(napi, oldest);
5730  }
5731  
5732  INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
5733  							   struct sk_buff *));
5734  INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
5735  							   struct sk_buff *));
5736  static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5737  {
5738  	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
5739  	struct list_head *head = &offload_base;
5740  	struct packet_offload *ptype;
5741  	__be16 type = skb->protocol;
5742  	struct list_head *gro_head;
5743  	struct sk_buff *pp = NULL;
5744  	enum gro_result ret;
5745  	int same_flow;
5746  	int grow;
5747  
5748  	if (netif_elide_gro(skb->dev))
5749  		goto normal;
5750  
5751  	gro_head = gro_list_prepare(napi, skb);
5752  
5753  	rcu_read_lock();
5754  	list_for_each_entry_rcu(ptype, head, list) {
5755  		if (ptype->type != type || !ptype->callbacks.gro_receive)
5756  			continue;
5757  
5758  		skb_set_network_header(skb, skb_gro_offset(skb));
5759  		skb_reset_mac_len(skb);
5760  		NAPI_GRO_CB(skb)->same_flow = 0;
5761  		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5762  		NAPI_GRO_CB(skb)->free = 0;
5763  		NAPI_GRO_CB(skb)->encap_mark = 0;
5764  		NAPI_GRO_CB(skb)->recursion_counter = 0;
5765  		NAPI_GRO_CB(skb)->is_fou = 0;
5766  		NAPI_GRO_CB(skb)->is_atomic = 1;
5767  		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
5768  
5769  		/* Setup for GRO checksum validation */
5770  		switch (skb->ip_summed) {
5771  		case CHECKSUM_COMPLETE:
5772  			NAPI_GRO_CB(skb)->csum = skb->csum;
5773  			NAPI_GRO_CB(skb)->csum_valid = 1;
5774  			NAPI_GRO_CB(skb)->csum_cnt = 0;
5775  			break;
5776  		case CHECKSUM_UNNECESSARY:
5777  			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5778  			NAPI_GRO_CB(skb)->csum_valid = 0;
5779  			break;
5780  		default:
5781  			NAPI_GRO_CB(skb)->csum_cnt = 0;
5782  			NAPI_GRO_CB(skb)->csum_valid = 0;
5783  		}
5784  
5785  		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
5786  					ipv6_gro_receive, inet_gro_receive,
5787  					gro_head, skb);
5788  		break;
5789  	}
5790  	rcu_read_unlock();
5791  
5792  	if (&ptype->list == head)
5793  		goto normal;
5794  
5795  	if (PTR_ERR(pp) == -EINPROGRESS) {
5796  		ret = GRO_CONSUMED;
5797  		goto ok;
5798  	}
5799  
5800  	same_flow = NAPI_GRO_CB(skb)->same_flow;
5801  	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
5802  
5803  	if (pp) {
5804  		skb_list_del_init(pp);
5805  		napi_gro_complete(napi, pp);
5806  		napi->gro_hash[hash].count--;
5807  	}
5808  
5809  	if (same_flow)
5810  		goto ok;
5811  
5812  	if (NAPI_GRO_CB(skb)->flush)
5813  		goto normal;
5814  
5815  	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
5816  		gro_flush_oldest(napi, gro_head);
5817  	} else {
5818  		napi->gro_hash[hash].count++;
5819  	}
5820  	NAPI_GRO_CB(skb)->count = 1;
5821  	NAPI_GRO_CB(skb)->age = jiffies;
5822  	NAPI_GRO_CB(skb)->last = skb;
5823  	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
5824  	list_add(&skb->list, gro_head);
5825  	ret = GRO_HELD;
5826  
5827  pull:
5828  	grow = skb_gro_offset(skb) - skb_headlen(skb);
5829  	if (grow > 0)
5830  		gro_pull_from_frag0(skb, grow);
5831  ok:
5832  	if (napi->gro_hash[hash].count) {
5833  		if (!test_bit(hash, &napi->gro_bitmask))
5834  			__set_bit(hash, &napi->gro_bitmask);
5835  	} else if (test_bit(hash, &napi->gro_bitmask)) {
5836  		__clear_bit(hash, &napi->gro_bitmask);
5837  	}
5838  
5839  	return ret;
5840  
5841  normal:
5842  	ret = GRO_NORMAL;
5843  	goto pull;
5844  }
5845  
5846  struct packet_offload *gro_find_receive_by_type(__be16 type)
5847  {
5848  	struct list_head *offload_head = &offload_base;
5849  	struct packet_offload *ptype;
5850  
5851  	list_for_each_entry_rcu(ptype, offload_head, list) {
5852  		if (ptype->type != type || !ptype->callbacks.gro_receive)
5853  			continue;
5854  		return ptype;
5855  	}
5856  	return NULL;
5857  }
5858  EXPORT_SYMBOL(gro_find_receive_by_type);
5859  
5860  struct packet_offload *gro_find_complete_by_type(__be16 type)
5861  {
5862  	struct list_head *offload_head = &offload_base;
5863  	struct packet_offload *ptype;
5864  
5865  	list_for_each_entry_rcu(ptype, offload_head, list) {
5866  		if (ptype->type != type || !ptype->callbacks.gro_complete)
5867  			continue;
5868  		return ptype;
5869  	}
5870  	return NULL;
5871  }
5872  EXPORT_SYMBOL(gro_find_complete_by_type);
5873  
5874  static void napi_skb_free_stolen_head(struct sk_buff *skb)
5875  {
5876  	skb_dst_drop(skb);
5877  	skb_ext_put(skb);
5878  	kmem_cache_free(skbuff_head_cache, skb);
5879  }
5880  
5881  static gro_result_t napi_skb_finish(struct napi_struct *napi,
5882  				    struct sk_buff *skb,
5883  				    gro_result_t ret)
5884  {
5885  	switch (ret) {
5886  	case GRO_NORMAL:
5887  		gro_normal_one(napi, skb);
5888  		break;
5889  
5890  	case GRO_DROP:
5891  		kfree_skb(skb);
5892  		break;
5893  
5894  	case GRO_MERGED_FREE:
5895  		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5896  			napi_skb_free_stolen_head(skb);
5897  		else
5898  			__kfree_skb(skb);
5899  		break;
5900  
5901  	case GRO_HELD:
5902  	case GRO_MERGED:
5903  	case GRO_CONSUMED:
5904  		break;
5905  	}
5906  
5907  	return ret;
5908  }
5909  
5910  gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5911  {
5912  	gro_result_t ret;
5913  
5914  	skb_mark_napi_id(skb, napi);
5915  	trace_napi_gro_receive_entry(skb);
5916  
5917  	skb_gro_reset_offset(skb);
5918  
5919  	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
5920  	trace_napi_gro_receive_exit(ret);
5921  
5922  	return ret;
5923  }
5924  EXPORT_SYMBOL(napi_gro_receive);
5925  
5926  static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
5927  {
5928  	if (unlikely(skb->pfmemalloc)) {
5929  		consume_skb(skb);
5930  		return;
5931  	}
5932  	__skb_pull(skb, skb_headlen(skb));
5933  	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
5934  	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
5935  	__vlan_hwaccel_clear_tag(skb);
5936  	skb->dev = napi->dev;
5937  	skb->skb_iif = 0;
5938  
5939  	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
5940  	skb->pkt_type = PACKET_HOST;
5941  
5942  	skb->encapsulation = 0;
5943  	skb_shinfo(skb)->gso_type = 0;
5944  	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5945  	skb_ext_reset(skb);
5946  
5947  	napi->skb = skb;
5948  }
5949  
5950  struct sk_buff *napi_get_frags(struct napi_struct *napi)
5951  {
5952  	struct sk_buff *skb = napi->skb;
5953  
5954  	if (!skb) {
5955  		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
5956  		if (skb) {
5957  			napi->skb = skb;
5958  			skb_mark_napi_id(skb, napi);
5959  		}
5960  	}
5961  	return skb;
5962  }
5963  EXPORT_SYMBOL(napi_get_frags);
5964  
5965  static gro_result_t napi_frags_finish(struct napi_struct *napi,
5966  				      struct sk_buff *skb,
5967  				      gro_result_t ret)
5968  {
5969  	switch (ret) {
5970  	case GRO_NORMAL:
5971  	case GRO_HELD:
5972  		__skb_push(skb, ETH_HLEN);
5973  		skb->protocol = eth_type_trans(skb, skb->dev);
5974  		if (ret == GRO_NORMAL)
5975  			gro_normal_one(napi, skb);
5976  		break;
5977  
5978  	case GRO_DROP:
5979  		napi_reuse_skb(napi, skb);
5980  		break;
5981  
5982  	case GRO_MERGED_FREE:
5983  		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5984  			napi_skb_free_stolen_head(skb);
5985  		else
5986  			napi_reuse_skb(napi, skb);
5987  		break;
5988  
5989  	case GRO_MERGED:
5990  	case GRO_CONSUMED:
5991  		break;
5992  	}
5993  
5994  	return ret;
5995  }
5996  
5997  /* The upper GRO stack assumes the network header starts at gro_offset=0.
5998   * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
5999   * we copy the ethernet header into skb->data to have a common layout.
6000   */
6001  static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
6002  {
6003  	struct sk_buff *skb = napi->skb;
6004  	const struct ethhdr *eth;
6005  	unsigned int hlen = sizeof(*eth);
6006  
6007  	napi->skb = NULL;
6008  
6009  	skb_reset_mac_header(skb);
6010  	skb_gro_reset_offset(skb);
6011  
6012  	if (unlikely(skb_gro_header_hard(skb, hlen))) {
6013  		eth = skb_gro_header_slow(skb, hlen, 0);
6014  		if (unlikely(!eth)) {
6015  			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6016  					     __func__, napi->dev->name);
6017  			napi_reuse_skb(napi, skb);
6018  			return NULL;
6019  		}
6020  	} else {
6021  		eth = (const struct ethhdr *)skb->data;
6022  		gro_pull_from_frag0(skb, hlen);
6023  		NAPI_GRO_CB(skb)->frag0 += hlen;
6024  		NAPI_GRO_CB(skb)->frag0_len -= hlen;
6025  	}
6026  	__skb_pull(skb, hlen);
6027  
6028  	/*
6029  	 * This works because the only protocols we care about don't require
6030  	 * special handling.
6031  	 * We'll fix it up properly in napi_frags_finish()
6032  	 */
6033  	skb->protocol = eth->h_proto;
6034  
6035  	return skb;
6036  }
6037  
6038  gro_result_t napi_gro_frags(struct napi_struct *napi)
6039  {
6040  	gro_result_t ret;
6041  	struct sk_buff *skb = napi_frags_skb(napi);
6042  
6043  	if (!skb)
6044  		return GRO_DROP;
6045  
6046  	trace_napi_gro_frags_entry(skb);
6047  
6048  	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6049  	trace_napi_gro_frags_exit(ret);
6050  
6051  	return ret;
6052  }
6053  EXPORT_SYMBOL(napi_gro_frags);
6054  
6055  /* Compute the checksum from gro_offset and return the folded value
6056   * after adding in any pseudo checksum.
6057   */
6058  __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
6059  {
6060  	__wsum wsum;
6061  	__sum16 sum;
6062  
6063  	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
6064  
6065  	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
6066  	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
6067  	/* See comments in __skb_checksum_complete(). */
6068  	if (likely(!sum)) {
6069  		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
6070  		    !skb->csum_complete_sw)
6071  			netdev_rx_csum_fault(skb->dev, skb);
6072  	}
6073  
6074  	NAPI_GRO_CB(skb)->csum = wsum;
6075  	NAPI_GRO_CB(skb)->csum_valid = 1;
6076  
6077  	return sum;
6078  }
6079  EXPORT_SYMBOL(__skb_gro_checksum_complete);
6080  
6081  static void net_rps_send_ipi(struct softnet_data *remsd)
6082  {
6083  #ifdef CONFIG_RPS
6084  	while (remsd) {
6085  		struct softnet_data *next = remsd->rps_ipi_next;
6086  
6087  		if (cpu_online(remsd->cpu))
6088  			smp_call_function_single_async(remsd->cpu, &remsd->csd);
6089  		remsd = next;
6090  	}
6091  #endif
6092  }
6093  
6094  /*
6095   * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
6096   * Note: called with local irq disabled, but exits with local irq enabled.
6097   */
6098  static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6099  {
6100  #ifdef CONFIG_RPS
6101  	struct softnet_data *remsd = sd->rps_ipi_list;
6102  
6103  	if (remsd) {
6104  		sd->rps_ipi_list = NULL;
6105  
6106  		local_irq_enable();
6107  
6108  		/* Send pending IPIs to kick RPS processing on remote cpus. */
6109  		net_rps_send_ipi(remsd);
6110  	} else
6111  #endif
6112  		local_irq_enable();
6113  }
6114  
6115  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6116  {
6117  #ifdef CONFIG_RPS
6118  	return sd->rps_ipi_list != NULL;
6119  #else
6120  	return false;
6121  #endif
6122  }
6123  
6124  static int process_backlog(struct napi_struct *napi, int quota)
6125  {
6126  	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6127  	bool again = true;
6128  	int work = 0;
6129  
6130  	/* Check if we have pending IPIs; it's better to send them now
6131  	 * rather than waiting for net_rx_action() to end.
6132  	 */
6133  	if (sd_has_rps_ipi_waiting(sd)) {
6134  		local_irq_disable();
6135  		net_rps_action_and_irq_enable(sd);
6136  	}
6137  
6138  	napi->weight = dev_rx_weight;
6139  	while (again) {
6140  		struct sk_buff *skb;
6141  
6142  		while ((skb = __skb_dequeue(&sd->process_queue))) {
6143  			rcu_read_lock();
6144  			__netif_receive_skb(skb);
6145  			rcu_read_unlock();
6146  			input_queue_head_incr(sd);
6147  			if (++work >= quota)
6148  				return work;
6149  
6150  		}
6151  
6152  		local_irq_disable();
6153  		rps_lock(sd);
6154  		if (skb_queue_empty(&sd->input_pkt_queue)) {
6155  			/*
6156  			 * Inline a custom version of __napi_complete().
6157  			 * Only the current cpu owns and manipulates this napi,
6158  			 * and NAPI_STATE_SCHED is the only possible flag set
6159  			 * on backlog.
6160  			 * We can use a plain write instead of clear_bit(),
6161  			 * and we don't need an smp_mb() memory barrier.
6162  			 */
6163  			napi->state = 0;
6164  			again = false;
6165  		} else {
6166  			skb_queue_splice_tail_init(&sd->input_pkt_queue,
6167  						   &sd->process_queue);
6168  		}
6169  		rps_unlock(sd);
6170  		local_irq_enable();
6171  	}
6172  
6173  	return work;
6174  }
6175  
6176  /**
6177   * __napi_schedule - schedule for receive
6178   * @n: entry to schedule
6179   *
6180   * The entry's receive function will be scheduled to run.
6181   * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6182   */
6183  void __napi_schedule(struct napi_struct *n)
6184  {
6185  	unsigned long flags;
6186  
6187  	local_irq_save(flags);
6188  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
6189  	local_irq_restore(flags);
6190  }
6191  EXPORT_SYMBOL(__napi_schedule);
6192  
6193  /**
6194   *	napi_schedule_prep - check if napi can be scheduled
6195   *	@n: napi context
6196   *
6197   * Test if the NAPI routine is already running, and if not mark it as
6198   * running.  This is used as a condition variable to ensure that only
6199   * one NAPI poll instance runs.  We also make sure there is no pending
6200   * NAPI disable.  (See the illustrative driver-side sketch below.)
6201   */
6202  bool napi_schedule_prep(struct napi_struct *n)
6203  {
6204  	unsigned long val, new;
6205  
6206  	do {
6207  		val = READ_ONCE(n->state);
6208  		if (unlikely(val & NAPIF_STATE_DISABLE))
6209  			return false;
6210  		new = val | NAPIF_STATE_SCHED;
6211  
6212  		/* Sets STATE_MISSED bit if STATE_SCHED was already set.
6213  		 * This was suggested by Alexander Duyck, as the compiler
6214  		 * emits better code than :
6215  		 * if (val & NAPIF_STATE_SCHED)
6216  		 *     new |= NAPIF_STATE_MISSED;
6217  		 */
6218  		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6219  						   NAPIF_STATE_MISSED;
6220  	} while (cmpxchg(&n->state, val, new) != val);
6221  
6222  	return !(val & NAPIF_STATE_SCHED);
6223  }
6224  EXPORT_SYMBOL(napi_schedule_prep);
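/* Illustrative sketch only (not part of dev.c): the usual pattern in a
 * hypothetical driver's RX interrupt path.  napi_schedule_prep() claims the
 * NAPI instance; only on success does the driver mask its RX interrupt and
 * schedule the poll.  struct example_priv and example_handle_rx_irq() are
 * assumptions made purely for illustration.
 */
struct example_priv {
	struct napi_struct napi;
};

static void example_handle_rx_irq(struct example_priv *priv)
{
	if (napi_schedule_prep(&priv->napi)) {
		/* ...mask the device's RX interrupt here (driver specific)... */

		/* Hard irqs are already masked in this context, so the
		 * _irqoff variant (declared in netdevice.h, defined just
		 * below) can be used directly.
		 */
		__napi_schedule_irqoff(&priv->napi);
	}
}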
6225  
6226  /**
6227   * __napi_schedule_irqoff - schedule for receive
6228   * @n: entry to schedule
6229   *
6230   * Variant of __napi_schedule() assuming hard irqs are masked
6231   */
6232  void __napi_schedule_irqoff(struct napi_struct *n)
6233  {
6234  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
6235  }
6236  EXPORT_SYMBOL(__napi_schedule_irqoff);
6237  
6238  bool napi_complete_done(struct napi_struct *n, int work_done)
6239  {
6240  	unsigned long flags, val, new;
6241  
6242  	/*
6243  	 * 1) Don't let napi dequeue from the cpu poll list
6244  	 *    just in case it's running on a different cpu.
6245  	 * 2) If we are busy polling, do nothing here, we have
6246  	 *    the guarantee we will be called later.
6247  	 */
6248  	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6249  				 NAPIF_STATE_IN_BUSY_POLL)))
6250  		return false;
6251  
6252  	if (n->gro_bitmask) {
6253  		unsigned long timeout = 0;
6254  
6255  		if (work_done)
6256  			timeout = n->dev->gro_flush_timeout;
6257  
6258  		/* When the NAPI instance uses a timeout and keeps postponing
6259  		 * it, we need to bound somehow the time packets are kept in
6260  		 * the GRO layer
6261  		 */
6262  		napi_gro_flush(n, !!timeout);
6263  		if (timeout)
6264  			hrtimer_start(&n->timer, ns_to_ktime(timeout),
6265  				      HRTIMER_MODE_REL_PINNED);
6266  	}
6267  
6268  	gro_normal_list(n);
6269  
6270  	if (unlikely(!list_empty(&n->poll_list))) {
6271  		/* If n->poll_list is not empty, we need to mask irqs */
6272  		local_irq_save(flags);
6273  		list_del_init(&n->poll_list);
6274  		local_irq_restore(flags);
6275  	}
6276  
6277  	do {
6278  		val = READ_ONCE(n->state);
6279  
6280  		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6281  
6282  		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
6283  
6284  		/* If STATE_MISSED was set, leave STATE_SCHED set,
6285  		 * because we will call napi->poll() one more time.
6286  		 * This C code was suggested by Alexander Duyck to help gcc.
6287  		 */
6288  		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6289  						    NAPIF_STATE_SCHED;
6290  	} while (cmpxchg(&n->state, val, new) != val);
6291  
6292  	if (unlikely(val & NAPIF_STATE_MISSED)) {
6293  		__napi_schedule(n);
6294  		return false;
6295  	}
6296  
6297  	return true;
6298  }
6299  EXPORT_SYMBOL(napi_complete_done);
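/* Illustrative sketch only (not part of dev.c): the canonical shape of a
 * hypothetical driver's NAPI poll callback around napi_complete_done().
 * example_clean_rx() and example_unmask_rx_irq() stand in for driver
 * specific work and are assumptions made purely for illustration.
 */
static int example_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* a real driver would process up to @budget packets */
}

static void example_unmask_rx_irq(struct napi_struct *napi)
{
	/* a real driver would re-enable its RX interrupt here */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rx(napi, budget);

	/* If the whole budget was consumed, the core still owns this NAPI
	 * instance and will poll again: do not touch the NAPI state.
	 */
	if (work_done == budget)
		return budget;

	/* Re-enable device interrupts only when napi_complete_done() really
	 * completed, i.e. it did not reschedule us via NAPI_STATE_MISSED.
	 */
	if (napi_complete_done(napi, work_done))
		example_unmask_rx_irq(napi);

	return work_done;
}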
6300  
6301  /* must be called under rcu_read_lock(), as we don't take a reference */
6302  static struct napi_struct *napi_by_id(unsigned int napi_id)
6303  {
6304  	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6305  	struct napi_struct *napi;
6306  
6307  	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6308  		if (napi->napi_id == napi_id)
6309  			return napi;
6310  
6311  	return NULL;
6312  }
6313  
6314  #if defined(CONFIG_NET_RX_BUSY_POLL)
6315  
6316  #define BUSY_POLL_BUDGET 8
6317  
6318  static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
6319  {
6320  	int rc;
6321  
6322  	/* Busy polling means there is a high chance the device driver's hard irq
6323  	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6324  	 * set in napi_schedule_prep().
6325  	 * Since we are about to call napi->poll() once more, we can safely
6326  	 * clear NAPI_STATE_MISSED.
6327  	 *
6328  	 * Note: x86 could use a single "lock and ..." instruction
6329  	 * to perform these two clear_bit() calls.
6330  	 */
6331  	clear_bit(NAPI_STATE_MISSED, &napi->state);
6332  	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6333  
6334  	local_bh_disable();
6335  
6336  	/* All we really want here is to re-enable device interrupts.
6337  	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6338  	 */
6339  	rc = napi->poll(napi, BUSY_POLL_BUDGET);
6340  	/* We can't gro_normal_list() here, because napi->poll() might have
6341  	 * rearmed the napi (napi_complete_done()) in which case it could
6342  	 * already be running on another CPU.
6343  	 */
6344  	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
6345  	netpoll_poll_unlock(have_poll_lock);
6346  	if (rc == BUSY_POLL_BUDGET) {
6347  		/* As the whole budget was spent, we still own the napi so can
6348  		 * safely handle the rx_list.
6349  		 */
6350  		gro_normal_list(napi);
6351  		__napi_schedule(napi);
6352  	}
6353  	local_bh_enable();
6354  }
6355  
6356  void napi_busy_loop(unsigned int napi_id,
6357  		    bool (*loop_end)(void *, unsigned long),
6358  		    void *loop_end_arg)
6359  {
6360  	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6361  	int (*napi_poll)(struct napi_struct *napi, int budget);
6362  	void *have_poll_lock = NULL;
6363  	struct napi_struct *napi;
6364  
6365  restart:
6366  	napi_poll = NULL;
6367  
6368  	rcu_read_lock();
6369  
6370  	napi = napi_by_id(napi_id);
6371  	if (!napi)
6372  		goto out;
6373  
6374  	preempt_disable();
6375  	for (;;) {
6376  		int work = 0;
6377  
6378  		local_bh_disable();
6379  		if (!napi_poll) {
6380  			unsigned long val = READ_ONCE(napi->state);
6381  
6382  			/* If multiple threads are competing for this napi,
6383  			 * we avoid dirtying napi->state as much as we can.
6384  			 */
6385  			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6386  				   NAPIF_STATE_IN_BUSY_POLL))
6387  				goto count;
6388  			if (cmpxchg(&napi->state, val,
6389  				    val | NAPIF_STATE_IN_BUSY_POLL |
6390  					  NAPIF_STATE_SCHED) != val)
6391  				goto count;
6392  			have_poll_lock = netpoll_poll_lock(napi);
6393  			napi_poll = napi->poll;
6394  		}
6395  		work = napi_poll(napi, BUSY_POLL_BUDGET);
6396  		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
6397  		gro_normal_list(napi);
6398  count:
6399  		if (work > 0)
6400  			__NET_ADD_STATS(dev_net(napi->dev),
6401  					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6402  		local_bh_enable();
6403  
6404  		if (!loop_end || loop_end(loop_end_arg, start_time))
6405  			break;
6406  
6407  		if (unlikely(need_resched())) {
6408  			if (napi_poll)
6409  				busy_poll_stop(napi, have_poll_lock);
6410  			preempt_enable();
6411  			rcu_read_unlock();
6412  			cond_resched();
6413  			if (loop_end(loop_end_arg, start_time))
6414  				return;
6415  			goto restart;
6416  		}
6417  		cpu_relax();
6418  	}
6419  	if (napi_poll)
6420  		busy_poll_stop(napi, have_poll_lock);
6421  	preempt_enable();
6422  out:
6423  	rcu_read_unlock();
6424  }
6425  EXPORT_SYMBOL(napi_busy_loop);
6426  
6427  #endif /* CONFIG_NET_RX_BUSY_POLL */
6428  
6429  static void napi_hash_add(struct napi_struct *napi)
6430  {
6431  	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
6432  	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
6433  		return;
6434  
6435  	spin_lock(&napi_hash_lock);
6436  
6437  	/* 0..NR_CPUS range is reserved for sender_cpu use */
6438  	do {
6439  		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6440  			napi_gen_id = MIN_NAPI_ID;
6441  	} while (napi_by_id(napi_gen_id));
6442  	napi->napi_id = napi_gen_id;
6443  
6444  	hlist_add_head_rcu(&napi->napi_hash_node,
6445  			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6446  
6447  	spin_unlock(&napi_hash_lock);
6448  }
6449  
6450  /* Warning : caller is responsible to make sure rcu grace period
6451   * is respected before freeing memory containing @napi
6452   */
6453  bool napi_hash_del(struct napi_struct *napi)
6454  {
6455  	bool rcu_sync_needed = false;
6456  
6457  	spin_lock(&napi_hash_lock);
6458  
6459  	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
6460  		rcu_sync_needed = true;
6461  		hlist_del_rcu(&napi->napi_hash_node);
6462  	}
6463  	spin_unlock(&napi_hash_lock);
6464  	return rcu_sync_needed;
6465  }
6466  EXPORT_SYMBOL_GPL(napi_hash_del);
6467  
6468  static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6469  {
6470  	struct napi_struct *napi;
6471  
6472  	napi = container_of(timer, struct napi_struct, timer);
6473  
6474  	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
6475  	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
6476  	 */
6477  	if (napi->gro_bitmask && !napi_disable_pending(napi) &&
6478  	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6479  		__napi_schedule_irqoff(napi);
6480  
6481  	return HRTIMER_NORESTART;
6482  }
6483  
6484  static void init_gro_hash(struct napi_struct *napi)
6485  {
6486  	int i;
6487  
6488  	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6489  		INIT_LIST_HEAD(&napi->gro_hash[i].list);
6490  		napi->gro_hash[i].count = 0;
6491  	}
6492  	napi->gro_bitmask = 0;
6493  }
6494  
6495  void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6496  		    int (*poll)(struct napi_struct *, int), int weight)
6497  {
6498  	INIT_LIST_HEAD(&napi->poll_list);
6499  	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6500  	napi->timer.function = napi_watchdog;
6501  	init_gro_hash(napi);
6502  	napi->skb = NULL;
6503  	INIT_LIST_HEAD(&napi->rx_list);
6504  	napi->rx_count = 0;
6505  	napi->poll = poll;
6506  	if (weight > NAPI_POLL_WEIGHT)
6507  		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6508  				weight);
6509  	napi->weight = weight;
6510  	list_add(&napi->dev_list, &dev->napi_list);
6511  	napi->dev = dev;
6512  #ifdef CONFIG_NETPOLL
6513  	napi->poll_owner = -1;
6514  #endif
6515  	set_bit(NAPI_STATE_SCHED, &napi->state);
6516  	napi_hash_add(napi);
6517  }
6518  EXPORT_SYMBOL(netif_napi_add);
6519  
6520  void napi_disable(struct napi_struct *n)
6521  {
6522  	might_sleep();
6523  	set_bit(NAPI_STATE_DISABLE, &n->state);
6524  
6525  	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6526  		msleep(1);
6527  	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6528  		msleep(1);
6529  
6530  	hrtimer_cancel(&n->timer);
6531  
6532  	clear_bit(NAPI_STATE_DISABLE, &n->state);
6533  }
6534  EXPORT_SYMBOL(napi_disable);
6535  
6536  static void flush_gro_hash(struct napi_struct *napi)
6537  {
6538  	int i;
6539  
6540  	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6541  		struct sk_buff *skb, *n;
6542  
6543  		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6544  			kfree_skb(skb);
6545  		napi->gro_hash[i].count = 0;
6546  	}
6547  }
6548  
6549  /* Must be called in process context */
6550  void netif_napi_del(struct napi_struct *napi)
6551  {
6552  	might_sleep();
6553  	if (napi_hash_del(napi))
6554  		synchronize_net();
6555  	list_del_init(&napi->dev_list);
6556  	napi_free_frags(napi);
6557  
6558  	flush_gro_hash(napi);
6559  	napi->gro_bitmask = 0;
6560  }
6561  EXPORT_SYMBOL(netif_napi_del);
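/* Illustrative sketch only (not part of dev.c): the typical lifetime of a
 * NAPI instance in a hypothetical driver, reusing struct example_priv and
 * example_poll() from the sketches above.  netif_napi_add() is done at
 * probe time, napi_enable()/napi_disable() bracket the up/down path, and
 * netif_napi_del() runs from process context as required above.
 */
static void example_napi_setup(struct net_device *dev, struct example_priv *priv)
{
	netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* usually done from ndo_open */
}

static void example_napi_teardown(struct example_priv *priv)
{
	napi_disable(&priv->napi);	/* may sleep; stops polling */
	netif_napi_del(&priv->napi);	/* process context only */
}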
6562  
6563  static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6564  {
6565  	void *have;
6566  	int work, weight;
6567  
6568  	list_del_init(&n->poll_list);
6569  
6570  	have = netpoll_poll_lock(n);
6571  
6572  	weight = n->weight;
6573  
6574  	/* This NAPI_STATE_SCHED test is for avoiding a race
6575  	 * with netpoll's poll_napi().  Only the entity which
6576  	 * obtains the lock and sees NAPI_STATE_SCHED set will
6577  	 * actually make the ->poll() call.  Therefore we avoid
6578  	 * accidentally calling ->poll() when NAPI is not scheduled.
6579  	 */
6580  	work = 0;
6581  	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6582  		work = n->poll(n, weight);
6583  		trace_napi_poll(n, work, weight);
6584  	}
6585  
6586  	WARN_ON_ONCE(work > weight);
6587  
6588  	if (likely(work < weight))
6589  		goto out_unlock;
6590  
6591  	/* Drivers must not modify the NAPI state if they
6592  	 * consume the entire weight.  In such cases this code
6593  	 * still "owns" the NAPI instance and therefore can
6594  	 * move the instance around on the list at-will.
6595  	 */
6596  	if (unlikely(napi_disable_pending(n))) {
6597  		napi_complete(n);
6598  		goto out_unlock;
6599  	}
6600  
6601  	if (n->gro_bitmask) {
6602  		/* flush too old packets
6603  		/* Flush too old packets.
6604  		 * If HZ < 1000, flush all packets.
6605  		napi_gro_flush(n, HZ >= 1000);
6606  	}
6607  
6608  	gro_normal_list(n);
6609  
6610  	/* Some drivers may have called napi_schedule
6611  	 * prior to exhausting their budget.
6612  	 */
6613  	if (unlikely(!list_empty(&n->poll_list))) {
6614  		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6615  			     n->dev ? n->dev->name : "backlog");
6616  		goto out_unlock;
6617  	}
6618  
6619  	list_add_tail(&n->poll_list, repoll);
6620  
6621  out_unlock:
6622  	netpoll_poll_unlock(have);
6623  
6624  	return work;
6625  }
6626  
6627  static __latent_entropy void net_rx_action(struct softirq_action *h)
6628  {
6629  	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6630  	unsigned long time_limit = jiffies +
6631  		usecs_to_jiffies(netdev_budget_usecs);
6632  	int budget = netdev_budget;
6633  	LIST_HEAD(list);
6634  	LIST_HEAD(repoll);
6635  
6636  	local_irq_disable();
6637  	list_splice_init(&sd->poll_list, &list);
6638  	local_irq_enable();
6639  
6640  	for (;;) {
6641  		struct napi_struct *n;
6642  
6643  		if (list_empty(&list)) {
6644  			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6645  				goto out;
6646  			break;
6647  		}
6648  
6649  		n = list_first_entry(&list, struct napi_struct, poll_list);
6650  		budget -= napi_poll(n, &repoll);
6651  
6652  		/* If softirq window is exhausted then punt.
6653  		 * Allow this to run for 2 jiffies since which will allow
6654  		 * Allow this to run for 2 jiffies, which allows
6655  		 */
6656  		if (unlikely(budget <= 0 ||
6657  			     time_after_eq(jiffies, time_limit))) {
6658  			sd->time_squeeze++;
6659  			break;
6660  		}
6661  	}
6662  
6663  	local_irq_disable();
6664  
6665  	list_splice_tail_init(&sd->poll_list, &list);
6666  	list_splice_tail(&repoll, &list);
6667  	list_splice(&list, &sd->poll_list);
6668  	if (!list_empty(&sd->poll_list))
6669  		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
6670  
6671  	net_rps_action_and_irq_enable(sd);
6672  out:
6673  	__kfree_skb_flush();
6674  }
6675  
6676  struct netdev_adjacent {
6677  	struct net_device *dev;
6678  
6679  	/* upper master flag, there can only be one master device per list */
6680  	bool master;
6681  
6682  	/* lookup ignore flag */
6683  	bool ignore;
6684  
6685  	/* counter for the number of times this device was added to us */
6686  	u16 ref_nr;
6687  
6688  	/* private field for the users */
6689  	void *private;
6690  
6691  	struct list_head list;
6692  	struct rcu_head rcu;
6693  };
6694  
6695  static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6696  						 struct list_head *adj_list)
6697  {
6698  	struct netdev_adjacent *adj;
6699  
6700  	list_for_each_entry(adj, adj_list, list) {
6701  		if (adj->dev == adj_dev)
6702  			return adj;
6703  	}
6704  	return NULL;
6705  }
6706  
6707  static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
6708  {
6709  	struct net_device *dev = data;
6710  
6711  	return upper_dev == dev;
6712  }
6713  
6714  /**
6715   * netdev_has_upper_dev - Check if device is linked to an upper device
6716   * @dev: device
6717   * @upper_dev: upper device to check
6718   *
6719   * Find out if a device is linked to the specified upper device and return
6720   * true in case it is. Note that this checks only the immediate upper device,
6721   * not the complete stack of devices. The caller must hold the RTNL lock.
6722   */
6723  bool netdev_has_upper_dev(struct net_device *dev,
6724  			  struct net_device *upper_dev)
6725  {
6726  	ASSERT_RTNL();
6727  
6728  	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6729  					     upper_dev);
6730  }
6731  EXPORT_SYMBOL(netdev_has_upper_dev);
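/* Illustrative sketch only (not part of dev.c): checking under RTNL whether
 * @dev is directly enslaved to @master (e.g. a bond or bridge port check).
 * example_is_direct_slave() is an assumption made purely for illustration.
 */
static bool example_is_direct_slave(struct net_device *dev,
				    struct net_device *master)
{
	ASSERT_RTNL();

	/* Only the immediate upper device is considered; use
	 * netdev_has_upper_dev_all_rcu() to search the whole chain.
	 */
	return netdev_has_upper_dev(dev, master);
}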
6732  
6733  /**
6734   * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6735   * @dev: device
6736   * @upper_dev: upper device to check
6737   *
6738   * Find out if a device is linked to the specified upper device and return
6739   * true in case it is. Note that this checks the entire upper device chain.
6740   * The caller must hold the RCU read lock.
6741   */
6742  
6743  bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6744  				  struct net_device *upper_dev)
6745  {
6746  	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6747  					       upper_dev);
6748  }
6749  EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6750  
6751  /**
6752   * netdev_has_any_upper_dev - Check if device is linked to some device
6753   * @dev: device
6754   *
6755   * Find out if a device is linked to an upper device and return true in case
6756   * it is. The caller must hold the RTNL lock.
6757   */
6758  bool netdev_has_any_upper_dev(struct net_device *dev)
6759  {
6760  	ASSERT_RTNL();
6761  
6762  	return !list_empty(&dev->adj_list.upper);
6763  }
6764  EXPORT_SYMBOL(netdev_has_any_upper_dev);
6765  
6766  /**
6767   * netdev_master_upper_dev_get - Get master upper device
6768   * @dev: device
6769   *
6770   * Find a master upper device and return a pointer to it, or NULL if
6771   * there is none. The caller must hold the RTNL lock.
6772   */
6773  struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6774  {
6775  	struct netdev_adjacent *upper;
6776  
6777  	ASSERT_RTNL();
6778  
6779  	if (list_empty(&dev->adj_list.upper))
6780  		return NULL;
6781  
6782  	upper = list_first_entry(&dev->adj_list.upper,
6783  				 struct netdev_adjacent, list);
6784  	if (likely(upper->master))
6785  		return upper->dev;
6786  	return NULL;
6787  }
6788  EXPORT_SYMBOL(netdev_master_upper_dev_get);
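/* Illustrative sketch only (not part of dev.c): copying the name of a
 * device's master (bond, bridge, team, ...) into a caller-supplied buffer
 * under RTNL.  example_master_name() is an assumption made purely for
 * illustration.
 */
static void example_master_name(struct net_device *dev, char *buf, size_t len)
{
	struct net_device *master;

	ASSERT_RTNL();

	master = netdev_master_upper_dev_get(dev);
	strscpy(buf, master ? master->name : "", len);
}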
6789  
6790  static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6791  {
6792  	struct netdev_adjacent *upper;
6793  
6794  	ASSERT_RTNL();
6795  
6796  	if (list_empty(&dev->adj_list.upper))
6797  		return NULL;
6798  
6799  	upper = list_first_entry(&dev->adj_list.upper,
6800  				 struct netdev_adjacent, list);
6801  	if (likely(upper->master) && !upper->ignore)
6802  		return upper->dev;
6803  	return NULL;
6804  }
6805  
6806  /**
6807   * netdev_has_any_lower_dev - Check if device is linked to some device
6808   * @dev: device
6809   *
6810   * Find out if a device is linked to a lower device and return true in case
6811   * it is. The caller must hold the RTNL lock.
6812   */
6813  static bool netdev_has_any_lower_dev(struct net_device *dev)
6814  {
6815  	ASSERT_RTNL();
6816  
6817  	return !list_empty(&dev->adj_list.lower);
6818  }
6819  
6820  void *netdev_adjacent_get_private(struct list_head *adj_list)
6821  {
6822  	struct netdev_adjacent *adj;
6823  
6824  	adj = list_entry(adj_list, struct netdev_adjacent, list);
6825  
6826  	return adj->private;
6827  }
6828  EXPORT_SYMBOL(netdev_adjacent_get_private);
6829  
6830  /**
6831   * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6832   * @dev: device
6833   * @iter: list_head ** of the current position
6834   *
6835   * Gets the next device from the dev's upper list, starting from iter
6836   * position. The caller must hold RCU read lock.
6837   */
6838  struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6839  						 struct list_head **iter)
6840  {
6841  	struct netdev_adjacent *upper;
6842  
6843  	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6844  
6845  	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6846  
6847  	if (&upper->list == &dev->adj_list.upper)
6848  		return NULL;
6849  
6850  	*iter = &upper->list;
6851  
6852  	return upper->dev;
6853  }
6854  EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
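/* Illustrative sketch only (not part of dev.c): counting the immediate
 * upper devices of @dev with the iterator above.  The iterator starts at
 * &dev->adj_list.upper because netdev_upper_get_next_dev_rcu() advances to
 * (*iter)->next itself.  example_count_uppers() is an assumption made
 * purely for illustration.
 */
static unsigned int example_count_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;
	unsigned int n = 0;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		n++;
	rcu_read_unlock();

	return n;
}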
6855  
6856  static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6857  						  struct list_head **iter,
6858  						  bool *ignore)
6859  {
6860  	struct netdev_adjacent *upper;
6861  
6862  	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6863  
6864  	if (&upper->list == &dev->adj_list.upper)
6865  		return NULL;
6866  
6867  	*iter = &upper->list;
6868  	*ignore = upper->ignore;
6869  
6870  	return upper->dev;
6871  }
6872  
6873  static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6874  						    struct list_head **iter)
6875  {
6876  	struct netdev_adjacent *upper;
6877  
6878  	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6879  
6880  	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6881  
6882  	if (&upper->list == &dev->adj_list.upper)
6883  		return NULL;
6884  
6885  	*iter = &upper->list;
6886  
6887  	return upper->dev;
6888  }
6889  
6890  static int __netdev_walk_all_upper_dev(struct net_device *dev,
6891  				       int (*fn)(struct net_device *dev,
6892  						 void *data),
6893  				       void *data)
6894  {
6895  	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6896  	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6897  	int ret, cur = 0;
6898  	bool ignore;
6899  
6900  	now = dev;
6901  	iter = &dev->adj_list.upper;
6902  
6903  	while (1) {
6904  		if (now != dev) {
6905  			ret = fn(now, data);
6906  			if (ret)
6907  				return ret;
6908  		}
6909  
6910  		next = NULL;
6911  		while (1) {
6912  			udev = __netdev_next_upper_dev(now, &iter, &ignore);
6913  			if (!udev)
6914  				break;
6915  			if (ignore)
6916  				continue;
6917  
6918  			next = udev;
6919  			niter = &udev->adj_list.upper;
6920  			dev_stack[cur] = now;
6921  			iter_stack[cur++] = iter;
6922  			break;
6923  		}
6924  
6925  		if (!next) {
6926  			if (!cur)
6927  				return 0;
6928  			next = dev_stack[--cur];
6929  			niter = iter_stack[cur];
6930  		}
6931  
6932  		now = next;
6933  		iter = niter;
6934  	}
6935  
6936  	return 0;
6937  }
6938  
6939  int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6940  				  int (*fn)(struct net_device *dev,
6941  					    void *data),
6942  				  void *data)
6943  {
6944  	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6945  	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6946  	int ret, cur = 0;
6947  
6948  	now = dev;
6949  	iter = &dev->adj_list.upper;
6950  
6951  	while (1) {
6952  		if (now != dev) {
6953  			ret = fn(now, data);
6954  			if (ret)
6955  				return ret;
6956  		}
6957  
6958  		next = NULL;
6959  		while (1) {
6960  			udev = netdev_next_upper_dev_rcu(now, &iter);
6961  			if (!udev)
6962  				break;
6963  
6964  			next = udev;
6965  			niter = &udev->adj_list.upper;
6966  			dev_stack[cur] = now;
6967  			iter_stack[cur++] = iter;
6968  			break;
6969  		}
6970  
6971  		if (!next) {
6972  			if (!cur)
6973  				return 0;
6974  			next = dev_stack[--cur];
6975  			niter = iter_stack[cur];
6976  		}
6977  
6978  		now = next;
6979  		iter = niter;
6980  	}
6981  
6982  	return 0;
6983  }
6984  EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
6985  
6986  static bool __netdev_has_upper_dev(struct net_device *dev,
6987  				   struct net_device *upper_dev)
6988  {
6989  	ASSERT_RTNL();
6990  
6991  	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
6992  					   upper_dev);
6993  }
6994  
6995  /**
6996   * netdev_lower_get_next_private - Get the next ->private from the
6997   *				   lower neighbour list
6998   * @dev: device
6999   * @iter: list_head ** of the current position
7000   *
7001   * Gets the next netdev_adjacent->private from the dev's lower neighbour
7002   * list, starting from iter position. The caller must hold either hold the
7003   * RTNL lock or its own locking that guarantees that the neighbour lower
7004   * list will remain unchanged.
7005   */
7006  void *netdev_lower_get_next_private(struct net_device *dev,
7007  				    struct list_head **iter)
7008  {
7009  	struct netdev_adjacent *lower;
7010  
7011  	lower = list_entry(*iter, struct netdev_adjacent, list);
7012  
7013  	if (&lower->list == &dev->adj_list.lower)
7014  		return NULL;
7015  
7016  	*iter = lower->list.next;
7017  
7018  	return lower->private;
7019  }
7020  EXPORT_SYMBOL(netdev_lower_get_next_private);
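/* Illustrative sketch only (not part of dev.c): a hypothetical aggregation
 * driver walking the ->private pointers it registered for each of its lower
 * devices.  Note that, unlike the upper-dev iterator above, this iterator
 * starts at adj_list.lower.next because netdev_lower_get_next_private()
 * dereferences *iter directly.  struct example_port and
 * example_for_each_port() are assumptions made purely for illustration.
 */
struct example_port {
	struct net_device *dev;
};

static void example_for_each_port(struct net_device *master)
{
	struct list_head *iter = master->adj_list.lower.next;
	struct example_port *port;

	ASSERT_RTNL();

	while ((port = netdev_lower_get_next_private(master, &iter)) != NULL)
		netdev_dbg(master, "lower port %s\n", port->dev->name);
}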
7021  
7022  /**
7023   * netdev_lower_get_next_private_rcu - Get the next ->private from the
7024   *				       lower neighbour list, RCU
7025   *				       variant
7026   * @dev: device
7027   * @iter: list_head ** of the current position
7028   *
7029   * Gets the next netdev_adjacent->private from the dev's lower neighbour
7030   * list, starting from iter position. The caller must hold RCU read lock.
7031   */
7032  void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7033  					struct list_head **iter)
7034  {
7035  	struct netdev_adjacent *lower;
7036  
7037  	WARN_ON_ONCE(!rcu_read_lock_held());
7038  
7039  	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7040  
7041  	if (&lower->list == &dev->adj_list.lower)
7042  		return NULL;
7043  
7044  	*iter = &lower->list;
7045  
7046  	return lower->private;
7047  }
7048  EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7049  
7050  /**
7051   * netdev_lower_get_next - Get the next device from the lower neighbour
7052   *                         list
7053   * @dev: device
7054   * @iter: list_head ** of the current position
7055   *
7056   * Gets the next device from the dev's lower neighbour
7057   * list, starting from iter position. The caller must hold the RTNL lock or
7058   * its own locking that guarantees that the neighbour lower
7059   * list will remain unchanged.
7060   */
7061  void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7062  {
7063  	struct netdev_adjacent *lower;
7064  
7065  	lower = list_entry(*iter, struct netdev_adjacent, list);
7066  
7067  	if (&lower->list == &dev->adj_list.lower)
7068  		return NULL;
7069  
7070  	*iter = lower->list.next;
7071  
7072  	return lower->dev;
7073  }
7074  EXPORT_SYMBOL(netdev_lower_get_next);
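/* Illustrative sketch only (not part of dev.c): propagating an MTU change
 * from a hypothetical master device to each of its immediate lower devices.
 * As with netdev_lower_get_next_private() above, the iterator starts at
 * adj_list.lower.next.  example_sync_lower_mtu() is an assumption made
 * purely for illustration.
 */
static void example_sync_lower_mtu(struct net_device *master, int new_mtu)
{
	struct list_head *iter = master->adj_list.lower.next;
	struct net_device *lower;

	ASSERT_RTNL();

	while ((lower = netdev_lower_get_next(master, &iter)) != NULL)
		dev_set_mtu(lower, new_mtu);
}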
7075  
7076  static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7077  						struct list_head **iter)
7078  {
7079  	struct netdev_adjacent *lower;
7080  
7081  	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7082  
7083  	if (&lower->list == &dev->adj_list.lower)
7084  		return NULL;
7085  
7086  	*iter = &lower->list;
7087  
7088  	return lower->dev;
7089  }
7090  
7091  static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7092  						  struct list_head **iter,
7093  						  bool *ignore)
7094  {
7095  	struct netdev_adjacent *lower;
7096  
7097  	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7098  
7099  	if (&lower->list == &dev->adj_list.lower)
7100  		return NULL;
7101  
7102  	*iter = &lower->list;
7103  	*ignore = lower->ignore;
7104  
7105  	return lower->dev;
7106  }
7107  
7108  int netdev_walk_all_lower_dev(struct net_device *dev,
7109  			      int (*fn)(struct net_device *dev,
7110  					void *data),
7111  			      void *data)
7112  {
7113  	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7114  	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7115  	int ret, cur = 0;
7116  
7117  	now = dev;
7118  	iter = &dev->adj_list.lower;
7119  
7120  	while (1) {
7121  		if (now != dev) {
7122  			ret = fn(now, data);
7123  			if (ret)
7124  				return ret;
7125  		}
7126  
7127  		next = NULL;
7128  		while (1) {
7129  			ldev = netdev_next_lower_dev(now, &iter);
7130  			if (!ldev)
7131  				break;
7132  
7133  			next = ldev;
7134  			niter = &ldev->adj_list.lower;
7135  			dev_stack[cur] = now;
7136  			iter_stack[cur++] = iter;
7137  			break;
7138  		}
7139  
7140  		if (!next) {
7141  			if (!cur)
7142  				return 0;
7143  			next = dev_stack[--cur];
7144  			niter = iter_stack[cur];
7145  		}
7146  
7147  		now = next;
7148  		iter = niter;
7149  	}
7150  
7151  	return 0;
7152  }
7153  EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7154  
7155  static int __netdev_walk_all_lower_dev(struct net_device *dev,
7156  				       int (*fn)(struct net_device *dev,
7157  						 void *data),
7158  				       void *data)
7159  {
7160  	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7161  	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7162  	int ret, cur = 0;
7163  	bool ignore;
7164  
7165  	now = dev;
7166  	iter = &dev->adj_list.lower;
7167  
7168  	while (1) {
7169  		if (now != dev) {
7170  			ret = fn(now, data);
7171  			if (ret)
7172  				return ret;
7173  		}
7174  
7175  		next = NULL;
7176  		while (1) {
7177  			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7178  			if (!ldev)
7179  				break;
7180  			if (ignore)
7181  				continue;
7182  
7183  			next = ldev;
7184  			niter = &ldev->adj_list.lower;
7185  			dev_stack[cur] = now;
7186  			iter_stack[cur++] = iter;
7187  			break;
7188  		}
7189  
7190  		if (!next) {
7191  			if (!cur)
7192  				return 0;
7193  			next = dev_stack[--cur];
7194  			niter = iter_stack[cur];
7195  		}
7196  
7197  		now = next;
7198  		iter = niter;
7199  	}
7200  
7201  	return 0;
7202  }
7203  
7204  static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7205  						    struct list_head **iter)
7206  {
7207  	struct netdev_adjacent *lower;
7208  
7209  	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7210  	if (&lower->list == &dev->adj_list.lower)
7211  		return NULL;
7212  
7213  	*iter = &lower->list;
7214  
7215  	return lower->dev;
7216  }
7217  
7218  static u8 __netdev_upper_depth(struct net_device *dev)
7219  {
7220  	struct net_device *udev;
7221  	struct list_head *iter;
7222  	u8 max_depth = 0;
7223  	bool ignore;
7224  
7225  	for (iter = &dev->adj_list.upper,
7226  	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7227  	     udev;
7228  	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7229  		if (ignore)
7230  			continue;
7231  		if (max_depth < udev->upper_level)
7232  			max_depth = udev->upper_level;
7233  	}
7234  
7235  	return max_depth;
7236  }
7237  
7238  static u8 __netdev_lower_depth(struct net_device *dev)
7239  {
7240  	struct net_device *ldev;
7241  	struct list_head *iter;
7242  	u8 max_depth = 0;
7243  	bool ignore;
7244  
7245  	for (iter = &dev->adj_list.lower,
7246  	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7247  	     ldev;
7248  	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7249  		if (ignore)
7250  			continue;
7251  		if (max_depth < ldev->lower_level)
7252  			max_depth = ldev->lower_level;
7253  	}
7254  
7255  	return max_depth;
7256  }
7257  
7258  static int __netdev_update_upper_level(struct net_device *dev, void *data)
7259  {
7260  	dev->upper_level = __netdev_upper_depth(dev) + 1;
7261  	return 0;
7262  }
7263  
7264  static int __netdev_update_lower_level(struct net_device *dev, void *data)
7265  {
7266  	dev->lower_level = __netdev_lower_depth(dev) + 1;
7267  	return 0;
7268  }
7269  
7270  int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7271  				  int (*fn)(struct net_device *dev,
7272  					    void *data),
7273  				  void *data)
7274  {
7275  	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7276  	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7277  	int ret, cur = 0;
7278  
7279  	now = dev;
7280  	iter = &dev->adj_list.lower;
7281  
7282  	while (1) {
7283  		if (now != dev) {
7284  			ret = fn(now, data);
7285  			if (ret)
7286  				return ret;
7287  		}
7288  
7289  		next = NULL;
7290  		while (1) {
7291  			ldev = netdev_next_lower_dev_rcu(now, &iter);
7292  			if (!ldev)
7293  				break;
7294  
7295  			next = ldev;
7296  			niter = &ldev->adj_list.lower;
7297  			dev_stack[cur] = now;
7298  			iter_stack[cur++] = iter;
7299  			break;
7300  		}
7301  
7302  		if (!next) {
7303  			if (!cur)
7304  				return 0;
7305  			next = dev_stack[--cur];
7306  			niter = iter_stack[cur];
7307  		}
7308  
7309  		now = next;
7310  		iter = niter;
7311  	}
7312  
7313  	return 0;
7314  }
7315  EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7316  
7317  /**
7318   * netdev_lower_get_first_private_rcu - Get the first ->private from the
7319   *				       lower neighbour list, RCU
7320   *				       variant
7321   * @dev: device
7322   *
7323   * Gets the first netdev_adjacent->private from the dev's lower neighbour
7324   * list. The caller must hold RCU read lock.
7325   */
7326  void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7327  {
7328  	struct netdev_adjacent *lower;
7329  
7330  	lower = list_first_or_null_rcu(&dev->adj_list.lower,
7331  			struct netdev_adjacent, list);
7332  	if (lower)
7333  		return lower->private;
7334  	return NULL;
7335  }
7336  EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7337  
7338  /**
7339   * netdev_master_upper_dev_get_rcu - Get master upper device
7340   * @dev: device
7341   *
7342   * Find a master upper device and return a pointer to it, or NULL if
7343   * there is none. The caller must hold the RCU read lock.
7344   */
7345  struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7346  {
7347  	struct netdev_adjacent *upper;
7348  
7349  	upper = list_first_or_null_rcu(&dev->adj_list.upper,
7350  				       struct netdev_adjacent, list);
7351  	if (upper && likely(upper->master))
7352  		return upper->dev;
7353  	return NULL;
7354  }
7355  EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
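
/*
 * Usage sketch, assuming a hypothetical reader that only needs the master's
 * name: the returned pointer is only guaranteed to stay valid inside the
 * RCU read-side critical section in which it was obtained.
 *
 *	struct net_device *master;
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(slave_dev);
 *	if (master)
 *		pr_debug("%s is enslaved to %s\n",
 *			 slave_dev->name, master->name);
 *	rcu_read_unlock();
 */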
7356  
7357  static int netdev_adjacent_sysfs_add(struct net_device *dev,
7358  			      struct net_device *adj_dev,
7359  			      struct list_head *dev_list)
7360  {
7361  	char linkname[IFNAMSIZ+7];
7362  
7363  	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7364  		"upper_%s" : "lower_%s", adj_dev->name);
7365  	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7366  				 linkname);
7367  }
7368  static void netdev_adjacent_sysfs_del(struct net_device *dev,
7369  			       char *name,
7370  			       struct list_head *dev_list)
7371  {
7372  	char linkname[IFNAMSIZ+7];
7373  
7374  	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7375  		"upper_%s" : "lower_%s", name);
7376  	sysfs_remove_link(&(dev->dev.kobj), linkname);
7377  }
7378  
7379  static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7380  						 struct net_device *adj_dev,
7381  						 struct list_head *dev_list)
7382  {
7383  	return (dev_list == &dev->adj_list.upper ||
7384  		dev_list == &dev->adj_list.lower) &&
7385  		net_eq(dev_net(dev), dev_net(adj_dev));
7386  }
7387  
7388  static int __netdev_adjacent_dev_insert(struct net_device *dev,
7389  					struct net_device *adj_dev,
7390  					struct list_head *dev_list,
7391  					void *private, bool master)
7392  {
7393  	struct netdev_adjacent *adj;
7394  	int ret;
7395  
7396  	adj = __netdev_find_adj(adj_dev, dev_list);
7397  
7398  	if (adj) {
7399  		adj->ref_nr += 1;
7400  		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7401  			 dev->name, adj_dev->name, adj->ref_nr);
7402  
7403  		return 0;
7404  	}
7405  
7406  	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7407  	if (!adj)
7408  		return -ENOMEM;
7409  
7410  	adj->dev = adj_dev;
7411  	adj->master = master;
7412  	adj->ref_nr = 1;
7413  	adj->private = private;
7414  	adj->ignore = false;
7415  	dev_hold(adj_dev);
7416  
7417  	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7418  		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7419  
7420  	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7421  		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7422  		if (ret)
7423  			goto free_adj;
7424  	}
7425  
7426  	/* Ensure that master link is always the first item in list. */
7427  	if (master) {
7428  		ret = sysfs_create_link(&(dev->dev.kobj),
7429  					&(adj_dev->dev.kobj), "master");
7430  		if (ret)
7431  			goto remove_symlinks;
7432  
7433  		list_add_rcu(&adj->list, dev_list);
7434  	} else {
7435  		list_add_tail_rcu(&adj->list, dev_list);
7436  	}
7437  
7438  	return 0;
7439  
7440  remove_symlinks:
7441  	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7442  		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7443  free_adj:
7444  	kfree(adj);
7445  	dev_put(adj_dev);
7446  
7447  	return ret;
7448  }
7449  
7450  static void __netdev_adjacent_dev_remove(struct net_device *dev,
7451  					 struct net_device *adj_dev,
7452  					 u16 ref_nr,
7453  					 struct list_head *dev_list)
7454  {
7455  	struct netdev_adjacent *adj;
7456  
7457  	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7458  		 dev->name, adj_dev->name, ref_nr);
7459  
7460  	adj = __netdev_find_adj(adj_dev, dev_list);
7461  
7462  	if (!adj) {
7463  		pr_err("Adjacency does not exist for device %s from %s\n",
7464  		       dev->name, adj_dev->name);
7465  		WARN_ON(1);
7466  		return;
7467  	}
7468  
7469  	if (adj->ref_nr > ref_nr) {
7470  		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7471  			 dev->name, adj_dev->name, ref_nr,
7472  			 adj->ref_nr - ref_nr);
7473  		adj->ref_nr -= ref_nr;
7474  		return;
7475  	}
7476  
7477  	if (adj->master)
7478  		sysfs_remove_link(&(dev->dev.kobj), "master");
7479  
7480  	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7481  		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7482  
7483  	list_del_rcu(&adj->list);
7484  	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7485  		 adj_dev->name, dev->name, adj_dev->name);
7486  	dev_put(adj_dev);
7487  	kfree_rcu(adj, rcu);
7488  }
7489  
7490  static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7491  					    struct net_device *upper_dev,
7492  					    struct list_head *up_list,
7493  					    struct list_head *down_list,
7494  					    void *private, bool master)
7495  {
7496  	int ret;
7497  
7498  	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7499  					   private, master);
7500  	if (ret)
7501  		return ret;
7502  
7503  	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7504  					   private, false);
7505  	if (ret) {
7506  		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7507  		return ret;
7508  	}
7509  
7510  	return 0;
7511  }
7512  
7513  static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7514  					       struct net_device *upper_dev,
7515  					       u16 ref_nr,
7516  					       struct list_head *up_list,
7517  					       struct list_head *down_list)
7518  {
7519  	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7520  	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7521  }
7522  
7523  static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7524  						struct net_device *upper_dev,
7525  						void *private, bool master)
7526  {
7527  	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7528  						&dev->adj_list.upper,
7529  						&upper_dev->adj_list.lower,
7530  						private, master);
7531  }
7532  
7533  static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7534  						   struct net_device *upper_dev)
7535  {
7536  	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7537  					   &dev->adj_list.upper,
7538  					   &upper_dev->adj_list.lower);
7539  }
7540  
7541  static int __netdev_upper_dev_link(struct net_device *dev,
7542  				   struct net_device *upper_dev, bool master,
7543  				   void *upper_priv, void *upper_info,
7544  				   struct netlink_ext_ack *extack)
7545  {
7546  	struct netdev_notifier_changeupper_info changeupper_info = {
7547  		.info = {
7548  			.dev = dev,
7549  			.extack = extack,
7550  		},
7551  		.upper_dev = upper_dev,
7552  		.master = master,
7553  		.linking = true,
7554  		.upper_info = upper_info,
7555  	};
7556  	struct net_device *master_dev;
7557  	int ret = 0;
7558  
7559  	ASSERT_RTNL();
7560  
7561  	if (dev == upper_dev)
7562  		return -EBUSY;
7563  
7564  	/* To prevent loops, check that dev is not an upper device of upper_dev. */
7565  	if (__netdev_has_upper_dev(upper_dev, dev))
7566  		return -EBUSY;
7567  
7568  	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7569  		return -EMLINK;
7570  
7571  	if (!master) {
7572  		if (__netdev_has_upper_dev(dev, upper_dev))
7573  			return -EEXIST;
7574  	} else {
7575  		master_dev = __netdev_master_upper_dev_get(dev);
7576  		if (master_dev)
7577  			return master_dev == upper_dev ? -EEXIST : -EBUSY;
7578  	}
7579  
7580  	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7581  					    &changeupper_info.info);
7582  	ret = notifier_to_errno(ret);
7583  	if (ret)
7584  		return ret;
7585  
7586  	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7587  						   master);
7588  	if (ret)
7589  		return ret;
7590  
7591  	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7592  					    &changeupper_info.info);
7593  	ret = notifier_to_errno(ret);
7594  	if (ret)
7595  		goto rollback;
7596  
7597  	__netdev_update_upper_level(dev, NULL);
7598  	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7599  
7600  	__netdev_update_lower_level(upper_dev, NULL);
7601  	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7602  				    NULL);
7603  
7604  	return 0;
7605  
7606  rollback:
7607  	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7608  
7609  	return ret;
7610  }
7611  
7612  /**
7613   * netdev_upper_dev_link - Add a link to the upper device
7614   * @dev: device
7615   * @upper_dev: new upper device
7616   * @extack: netlink extended ack
7617   *
7618   * Adds a link to a device which is upper to this one. The caller must hold
7619   * the RTNL lock. On a failure a negative errno code is returned.
7620   * On success the reference counts are adjusted and the function
7621   * returns zero.
7622   */
7623  int netdev_upper_dev_link(struct net_device *dev,
7624  			  struct net_device *upper_dev,
7625  			  struct netlink_ext_ack *extack)
7626  {
7627  	return __netdev_upper_dev_link(dev, upper_dev, false,
7628  				       NULL, NULL, extack);
7629  }
7630  EXPORT_SYMBOL(netdev_upper_dev_link);
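
/*
 * Usage sketch, assuming a hypothetical stacking driver that links
 * lower_dev below upper_dev with the RTNL lock already held; a successful
 * link must eventually be undone with netdev_upper_dev_unlink():
 *
 *	ASSERT_RTNL();
 *	err = netdev_upper_dev_link(lower_dev, upper_dev, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(lower_dev, upper_dev);
 */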
7631  
7632  /**
7633   * netdev_master_upper_dev_link - Add a master link to the upper device
7634   * @dev: device
7635   * @upper_dev: new upper device
7636   * @upper_priv: upper device private
7637   * @upper_info: upper info to be passed down via notifier
7638   * @extack: netlink extended ack
7639   *
7640   * Adds a link to a device which is upper to this one. In this case, only
7641   * one master upper device can be linked, although other non-master devices
7642   * might be linked as well. The caller must hold the RTNL lock.
7643   * On a failure a negative errno code is returned. On success the reference
7644   * counts are adjusted and the function returns zero.
7645   */
7646  int netdev_master_upper_dev_link(struct net_device *dev,
7647  				 struct net_device *upper_dev,
7648  				 void *upper_priv, void *upper_info,
7649  				 struct netlink_ext_ack *extack)
7650  {
7651  	return __netdev_upper_dev_link(dev, upper_dev, true,
7652  				       upper_priv, upper_info, extack);
7653  }
7654  EXPORT_SYMBOL(netdev_master_upper_dev_link);
7655  
7656  /**
7657   * netdev_upper_dev_unlink - Removes a link to upper device
7658   * @dev: device
7659   * @upper_dev: upper device to unlink
7660   *
7661   * Removes a link to a device which is upper to this one. The caller must hold
7662   * the RTNL lock.
7663   */
7664  void netdev_upper_dev_unlink(struct net_device *dev,
7665  			     struct net_device *upper_dev)
7666  {
7667  	struct netdev_notifier_changeupper_info changeupper_info = {
7668  		.info = {
7669  			.dev = dev,
7670  		},
7671  		.upper_dev = upper_dev,
7672  		.linking = false,
7673  	};
7674  
7675  	ASSERT_RTNL();
7676  
7677  	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7678  
7679  	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7680  				      &changeupper_info.info);
7681  
7682  	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7683  
7684  	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7685  				      &changeupper_info.info);
7686  
7687  	__netdev_update_upper_level(dev, NULL);
7688  	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7689  
7690  	__netdev_update_lower_level(upper_dev, NULL);
7691  	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7692  				    NULL);
7693  }
7694  EXPORT_SYMBOL(netdev_upper_dev_unlink);
7695  
7696  static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7697  				      struct net_device *lower_dev,
7698  				      bool val)
7699  {
7700  	struct netdev_adjacent *adj;
7701  
7702  	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7703  	if (adj)
7704  		adj->ignore = val;
7705  
7706  	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7707  	if (adj)
7708  		adj->ignore = val;
7709  }
7710  
7711  static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7712  					struct net_device *lower_dev)
7713  {
7714  	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7715  }
7716  
7717  static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7718  				       struct net_device *lower_dev)
7719  {
7720  	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7721  }
7722  
7723  int netdev_adjacent_change_prepare(struct net_device *old_dev,
7724  				   struct net_device *new_dev,
7725  				   struct net_device *dev,
7726  				   struct netlink_ext_ack *extack)
7727  {
7728  	int err;
7729  
7730  	if (!new_dev)
7731  		return 0;
7732  
7733  	if (old_dev && new_dev != old_dev)
7734  		netdev_adjacent_dev_disable(dev, old_dev);
7735  
7736  	err = netdev_upper_dev_link(new_dev, dev, extack);
7737  	if (err) {
7738  		if (old_dev && new_dev != old_dev)
7739  			netdev_adjacent_dev_enable(dev, old_dev);
7740  		return err;
7741  	}
7742  
7743  	return 0;
7744  }
7745  EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7746  
7747  void netdev_adjacent_change_commit(struct net_device *old_dev,
7748  				   struct net_device *new_dev,
7749  				   struct net_device *dev)
7750  {
7751  	if (!new_dev || !old_dev)
7752  		return;
7753  
7754  	if (new_dev == old_dev)
7755  		return;
7756  
7757  	netdev_adjacent_dev_enable(dev, old_dev);
7758  	netdev_upper_dev_unlink(old_dev, dev);
7759  }
7760  EXPORT_SYMBOL(netdev_adjacent_change_commit);
7761  
7762  void netdev_adjacent_change_abort(struct net_device *old_dev,
7763  				  struct net_device *new_dev,
7764  				  struct net_device *dev)
7765  {
7766  	if (!new_dev)
7767  		return;
7768  
7769  	if (old_dev && new_dev != old_dev)
7770  		netdev_adjacent_dev_enable(dev, old_dev);
7771  
7772  	netdev_upper_dev_unlink(new_dev, dev);
7773  }
7774  EXPORT_SYMBOL(netdev_adjacent_change_abort);
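
/*
 * The prepare/commit/abort helpers above are meant to be used together when
 * an upper device @dev swaps one lower device for another. A hedged sketch
 * of the expected sequence, where swap_lower() stands in for a hypothetical
 * driver-specific step:
 *
 *	err = netdev_adjacent_change_prepare(old_dev, new_dev, dev, extack);
 *	if (err)
 *		return err;
 *	err = swap_lower(dev, old_dev, new_dev);
 *	if (err) {
 *		netdev_adjacent_change_abort(old_dev, new_dev, dev);
 *		return err;
 *	}
 *	netdev_adjacent_change_commit(old_dev, new_dev, dev);
 */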
7775  
7776  /**
7777   * netdev_bonding_info_change - Dispatch event about slave change
7778   * @dev: device
7779   * @bonding_info: info to dispatch
7780   *
7781   * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7782   * The caller must hold the RTNL lock.
7783   */
7784  void netdev_bonding_info_change(struct net_device *dev,
7785  				struct netdev_bonding_info *bonding_info)
7786  {
7787  	struct netdev_notifier_bonding_info info = {
7788  		.info.dev = dev,
7789  	};
7790  
7791  	memcpy(&info.bonding_info, bonding_info,
7792  	       sizeof(struct netdev_bonding_info));
7793  	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7794  				      &info.info);
7795  }
7796  EXPORT_SYMBOL(netdev_bonding_info_change);
7797  
7798  static void netdev_adjacent_add_links(struct net_device *dev)
7799  {
7800  	struct netdev_adjacent *iter;
7801  
7802  	struct net *net = dev_net(dev);
7803  
7804  	list_for_each_entry(iter, &dev->adj_list.upper, list) {
7805  		if (!net_eq(net, dev_net(iter->dev)))
7806  			continue;
7807  		netdev_adjacent_sysfs_add(iter->dev, dev,
7808  					  &iter->dev->adj_list.lower);
7809  		netdev_adjacent_sysfs_add(dev, iter->dev,
7810  					  &dev->adj_list.upper);
7811  	}
7812  
7813  	list_for_each_entry(iter, &dev->adj_list.lower, list) {
7814  		if (!net_eq(net, dev_net(iter->dev)))
7815  			continue;
7816  		netdev_adjacent_sysfs_add(iter->dev, dev,
7817  					  &iter->dev->adj_list.upper);
7818  		netdev_adjacent_sysfs_add(dev, iter->dev,
7819  					  &dev->adj_list.lower);
7820  	}
7821  }
7822  
7823  static void netdev_adjacent_del_links(struct net_device *dev)
7824  {
7825  	struct netdev_adjacent *iter;
7826  
7827  	struct net *net = dev_net(dev);
7828  
7829  	list_for_each_entry(iter, &dev->adj_list.upper, list) {
7830  		if (!net_eq(net, dev_net(iter->dev)))
7831  			continue;
7832  		netdev_adjacent_sysfs_del(iter->dev, dev->name,
7833  					  &iter->dev->adj_list.lower);
7834  		netdev_adjacent_sysfs_del(dev, iter->dev->name,
7835  					  &dev->adj_list.upper);
7836  	}
7837  
7838  	list_for_each_entry(iter, &dev->adj_list.lower, list) {
7839  		if (!net_eq(net, dev_net(iter->dev)))
7840  			continue;
7841  		netdev_adjacent_sysfs_del(iter->dev, dev->name,
7842  					  &iter->dev->adj_list.upper);
7843  		netdev_adjacent_sysfs_del(dev, iter->dev->name,
7844  					  &dev->adj_list.lower);
7845  	}
7846  }
7847  
7848  void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
7849  {
7850  	struct netdev_adjacent *iter;
7851  
7852  	struct net *net = dev_net(dev);
7853  
7854  	list_for_each_entry(iter, &dev->adj_list.upper, list) {
7855  		if (!net_eq(net, dev_net(iter->dev)))
7856  			continue;
7857  		netdev_adjacent_sysfs_del(iter->dev, oldname,
7858  					  &iter->dev->adj_list.lower);
7859  		netdev_adjacent_sysfs_add(iter->dev, dev,
7860  					  &iter->dev->adj_list.lower);
7861  	}
7862  
7863  	list_for_each_entry(iter, &dev->adj_list.lower, list) {
7864  		if (!net_eq(net, dev_net(iter->dev)))
7865  			continue;
7866  		netdev_adjacent_sysfs_del(iter->dev, oldname,
7867  					  &iter->dev->adj_list.upper);
7868  		netdev_adjacent_sysfs_add(iter->dev, dev,
7869  					  &iter->dev->adj_list.upper);
7870  	}
7871  }
7872  
7873  void *netdev_lower_dev_get_private(struct net_device *dev,
7874  				   struct net_device *lower_dev)
7875  {
7876  	struct netdev_adjacent *lower;
7877  
7878  	if (!lower_dev)
7879  		return NULL;
7880  	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
7881  	if (!lower)
7882  		return NULL;
7883  
7884  	return lower->private;
7885  }
7886  EXPORT_SYMBOL(netdev_lower_dev_get_private);
7887  
7888  
7889  /**
7890   * netdev_lower_state_changed - Dispatch event about lower device state change
7891   * @lower_dev: device
7892   * @lower_state_info: state to dispatch
7893   *
7894   * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
7895   * The caller must hold the RTNL lock.
7896   */
7897  void netdev_lower_state_changed(struct net_device *lower_dev,
7898  				void *lower_state_info)
7899  {
7900  	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
7901  		.info.dev = lower_dev,
7902  	};
7903  
7904  	ASSERT_RTNL();
7905  	changelowerstate_info.lower_state_info = lower_state_info;
7906  	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
7907  				      &changelowerstate_info.info);
7908  }
7909  EXPORT_SYMBOL(netdev_lower_state_changed);
7910  
7911  static void dev_change_rx_flags(struct net_device *dev, int flags)
7912  {
7913  	const struct net_device_ops *ops = dev->netdev_ops;
7914  
7915  	if (ops->ndo_change_rx_flags)
7916  		ops->ndo_change_rx_flags(dev, flags);
7917  }
7918  
7919  static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
7920  {
7921  	unsigned int old_flags = dev->flags;
7922  	kuid_t uid;
7923  	kgid_t gid;
7924  
7925  	ASSERT_RTNL();
7926  
7927  	dev->flags |= IFF_PROMISC;
7928  	dev->promiscuity += inc;
7929  	if (dev->promiscuity == 0) {
7930  		/*
7931  		 * Avoid overflow.
7932  		 * If inc causes overflow, leave promisc unchanged and return an error.
7933  		 */
7934  		if (inc < 0)
7935  			dev->flags &= ~IFF_PROMISC;
7936  		else {
7937  			dev->promiscuity -= inc;
7938  			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
7939  				dev->name);
7940  			return -EOVERFLOW;
7941  		}
7942  	}
7943  	if (dev->flags != old_flags) {
7944  		pr_info("device %s %s promiscuous mode\n",
7945  			dev->name,
7946  			dev->flags & IFF_PROMISC ? "entered" : "left");
7947  		if (audit_enabled) {
7948  			current_uid_gid(&uid, &gid);
7949  			audit_log(audit_context(), GFP_ATOMIC,
7950  				  AUDIT_ANOM_PROMISCUOUS,
7951  				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
7952  				  dev->name, (dev->flags & IFF_PROMISC),
7953  				  (old_flags & IFF_PROMISC),
7954  				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
7955  				  from_kuid(&init_user_ns, uid),
7956  				  from_kgid(&init_user_ns, gid),
7957  				  audit_get_sessionid(current));
7958  		}
7959  
7960  		dev_change_rx_flags(dev, IFF_PROMISC);
7961  	}
7962  	if (notify)
7963  		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
7964  	return 0;
7965  }
7966  
7967  /**
7968   *	dev_set_promiscuity	- update promiscuity count on a device
7969   *	@dev: device
7970   *	@inc: modifier
7971   *
7972   *	Add or remove promiscuity from a device. While the count in the device
7973   *	remains above zero the interface remains promiscuous. Once it hits zero
7974   *	the device reverts back to normal filtering operation. A negative inc
7975   *	value is used to drop promiscuity on the device.
7976   *	Return 0 if successful or a negative errno code on error.
7977   */
7978  int dev_set_promiscuity(struct net_device *dev, int inc)
7979  {
7980  	unsigned int old_flags = dev->flags;
7981  	int err;
7982  
7983  	err = __dev_set_promiscuity(dev, inc, true);
7984  	if (err < 0)
7985  		return err;
7986  	if (dev->flags != old_flags)
7987  		dev_set_rx_mode(dev);
7988  	return err;
7989  }
7990  EXPORT_SYMBOL(dev_set_promiscuity);
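
/*
 * Usage sketch, assuming the RTNL lock is held: promiscuity is a counter,
 * so every dev_set_promiscuity(dev, 1) should eventually be balanced by a
 * matching dev_set_promiscuity(dev, -1):
 *
 *	err = dev_set_promiscuity(dev, 1);
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);
 */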
7991  
7992  static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
7993  {
7994  	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
7995  
7996  	ASSERT_RTNL();
7997  
7998  	dev->flags |= IFF_ALLMULTI;
7999  	dev->allmulti += inc;
8000  	if (dev->allmulti == 0) {
8001  		/*
8002  		 * Avoid overflow.
8003  		 * If inc causes overflow, leave allmulti unchanged and return an error.
8004  		 */
8005  		if (inc < 0)
8006  			dev->flags &= ~IFF_ALLMULTI;
8007  		else {
8008  			dev->allmulti -= inc;
8009  			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
8010  				dev->name);
8011  			return -EOVERFLOW;
8012  		}
8013  	}
8014  	if (dev->flags ^ old_flags) {
8015  		dev_change_rx_flags(dev, IFF_ALLMULTI);
8016  		dev_set_rx_mode(dev);
8017  		if (notify)
8018  			__dev_notify_flags(dev, old_flags,
8019  					   dev->gflags ^ old_gflags);
8020  	}
8021  	return 0;
8022  }
8023  
8024  /**
8025   *	dev_set_allmulti	- update allmulti count on a device
8026   *	@dev: device
8027   *	@inc: modifier
8028   *
8029   *	Add or remove reception of all multicast frames to a device. While the
8030   *	count in the device remains above zero the interface remains in
8031   *	all-multicast mode. Once it hits zero the device reverts back to normal
8032   *	filtering operation. A negative @inc value is used to drop the counter
8033   *	when releasing a resource needing all multicasts.
8034   *	Return 0 if successful or a negative errno code on error.
8035   */
8036  
8037  int dev_set_allmulti(struct net_device *dev, int inc)
8038  {
8039  	return __dev_set_allmulti(dev, inc, true);
8040  }
8041  EXPORT_SYMBOL(dev_set_allmulti);
8042  
8043  /*
8044   *	Upload unicast and multicast address lists to device and
8045   *	configure RX filtering. When the device doesn't support unicast
8046   *	filtering it is put in promiscuous mode while unicast addresses
8047   *	are present.
8048   */
8049  void __dev_set_rx_mode(struct net_device *dev)
8050  {
8051  	const struct net_device_ops *ops = dev->netdev_ops;
8052  
8053  	/* dev_open will call this function so the list will stay sane. */
8054  	if (!(dev->flags&IFF_UP))
8055  		return;
8056  
8057  	if (!netif_device_present(dev))
8058  		return;
8059  
8060  	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8061  		/* Unicast address changes may only happen under the rtnl,
8062  		 * therefore calling __dev_set_promiscuity here is safe.
8063  		 */
8064  		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8065  			__dev_set_promiscuity(dev, 1, false);
8066  			dev->uc_promisc = true;
8067  		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8068  			__dev_set_promiscuity(dev, -1, false);
8069  			dev->uc_promisc = false;
8070  		}
8071  	}
8072  
8073  	if (ops->ndo_set_rx_mode)
8074  		ops->ndo_set_rx_mode(dev);
8075  }
8076  
8077  void dev_set_rx_mode(struct net_device *dev)
8078  {
8079  	netif_addr_lock_bh(dev);
8080  	__dev_set_rx_mode(dev);
8081  	netif_addr_unlock_bh(dev);
8082  }
8083  
8084  /**
8085   *	dev_get_flags - get flags reported to userspace
8086   *	@dev: device
8087   *
8088   *	Get the combination of flag bits exported through APIs to userspace.
8089   */
8090  unsigned int dev_get_flags(const struct net_device *dev)
8091  {
8092  	unsigned int flags;
8093  
8094  	flags = (dev->flags & ~(IFF_PROMISC |
8095  				IFF_ALLMULTI |
8096  				IFF_RUNNING |
8097  				IFF_LOWER_UP |
8098  				IFF_DORMANT)) |
8099  		(dev->gflags & (IFF_PROMISC |
8100  				IFF_ALLMULTI));
8101  
8102  	if (netif_running(dev)) {
8103  		if (netif_oper_up(dev))
8104  			flags |= IFF_RUNNING;
8105  		if (netif_carrier_ok(dev))
8106  			flags |= IFF_LOWER_UP;
8107  		if (netif_dormant(dev))
8108  			flags |= IFF_DORMANT;
8109  	}
8110  
8111  	return flags;
8112  }
8113  EXPORT_SYMBOL(dev_get_flags);
8114  
8115  int __dev_change_flags(struct net_device *dev, unsigned int flags,
8116  		       struct netlink_ext_ack *extack)
8117  {
8118  	unsigned int old_flags = dev->flags;
8119  	int ret;
8120  
8121  	ASSERT_RTNL();
8122  
8123  	/*
8124  	 *	Set the flags on our device.
8125  	 */
8126  
8127  	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8128  			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8129  			       IFF_AUTOMEDIA)) |
8130  		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8131  				    IFF_ALLMULTI));
8132  
8133  	/*
8134  	 *	Load in the correct multicast list now the flags have changed.
8135  	 */
8136  
8137  	if ((old_flags ^ flags) & IFF_MULTICAST)
8138  		dev_change_rx_flags(dev, IFF_MULTICAST);
8139  
8140  	dev_set_rx_mode(dev);
8141  
8142  	/*
8143  	 *	Have we downed the interface. We handle IFF_UP ourselves
8144  	 *	according to user attempts to set it, rather than blindly
8145  	 *	setting it.
8146  	 */
8147  
8148  	ret = 0;
8149  	if ((old_flags ^ flags) & IFF_UP) {
8150  		if (old_flags & IFF_UP)
8151  			__dev_close(dev);
8152  		else
8153  			ret = __dev_open(dev, extack);
8154  	}
8155  
8156  	if ((flags ^ dev->gflags) & IFF_PROMISC) {
8157  		int inc = (flags & IFF_PROMISC) ? 1 : -1;
8158  		unsigned int old_flags = dev->flags;
8159  
8160  		dev->gflags ^= IFF_PROMISC;
8161  
8162  		if (__dev_set_promiscuity(dev, inc, false) >= 0)
8163  			if (dev->flags != old_flags)
8164  				dev_set_rx_mode(dev);
8165  	}
8166  
8167  	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8168  	 * is important. Some (broken) drivers set IFF_PROMISC when
8169  	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
8170  	 */
8171  	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8172  		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8173  
8174  		dev->gflags ^= IFF_ALLMULTI;
8175  		__dev_set_allmulti(dev, inc, false);
8176  	}
8177  
8178  	return ret;
8179  }
8180  
8181  void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8182  			unsigned int gchanges)
8183  {
8184  	unsigned int changes = dev->flags ^ old_flags;
8185  
8186  	if (gchanges)
8187  		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8188  
8189  	if (changes & IFF_UP) {
8190  		if (dev->flags & IFF_UP)
8191  			call_netdevice_notifiers(NETDEV_UP, dev);
8192  		else
8193  			call_netdevice_notifiers(NETDEV_DOWN, dev);
8194  	}
8195  
8196  	if (dev->flags & IFF_UP &&
8197  	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8198  		struct netdev_notifier_change_info change_info = {
8199  			.info = {
8200  				.dev = dev,
8201  			},
8202  			.flags_changed = changes,
8203  		};
8204  
8205  		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8206  	}
8207  }
8208  
8209  /**
8210   *	dev_change_flags - change device settings
8211   *	@dev: device
8212   *	@flags: device state flags
8213   *	@extack: netlink extended ack
8214   *
8215   *	Change settings on a device based on the supplied state flags.
8216   *	The flags are in the userspace exported format.
8217   */
8218  int dev_change_flags(struct net_device *dev, unsigned int flags,
8219  		     struct netlink_ext_ack *extack)
8220  {
8221  	int ret;
8222  	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8223  
8224  	ret = __dev_change_flags(dev, flags, extack);
8225  	if (ret < 0)
8226  		return ret;
8227  
8228  	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8229  	__dev_notify_flags(dev, old_flags, changes);
8230  	return ret;
8231  }
8232  EXPORT_SYMBOL(dev_change_flags);
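
/*
 * Usage sketch, assuming the RTNL lock is held and a hypothetical caller
 * that wants to administratively bring an interface up while leaving the
 * other userspace-visible flags untouched:
 *
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, NULL);
 */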
8233  
8234  int __dev_set_mtu(struct net_device *dev, int new_mtu)
8235  {
8236  	const struct net_device_ops *ops = dev->netdev_ops;
8237  
8238  	if (ops->ndo_change_mtu)
8239  		return ops->ndo_change_mtu(dev, new_mtu);
8240  
8241  	/* Pairs with all the lockless reads of dev->mtu in the stack */
8242  	WRITE_ONCE(dev->mtu, new_mtu);
8243  	return 0;
8244  }
8245  EXPORT_SYMBOL(__dev_set_mtu);
8246  
8247  int dev_validate_mtu(struct net_device *dev, int new_mtu,
8248  		     struct netlink_ext_ack *extack)
8249  {
8250  	/* MTU must be positive, and in range */
8251  	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8252  		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8253  		return -EINVAL;
8254  	}
8255  
8256  	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8257  		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8258  		return -EINVAL;
8259  	}
8260  	return 0;
8261  }
8262  
8263  /**
8264   *	dev_set_mtu_ext - Change maximum transfer unit
8265   *	@dev: device
8266   *	@new_mtu: new transfer unit
8267   *	@extack: netlink extended ack
8268   *
8269   *	Change the maximum transfer size of the network device.
8270   */
8271  int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8272  		    struct netlink_ext_ack *extack)
8273  {
8274  	int err, orig_mtu;
8275  
8276  	if (new_mtu == dev->mtu)
8277  		return 0;
8278  
8279  	err = dev_validate_mtu(dev, new_mtu, extack);
8280  	if (err)
8281  		return err;
8282  
8283  	if (!netif_device_present(dev))
8284  		return -ENODEV;
8285  
8286  	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8287  	err = notifier_to_errno(err);
8288  	if (err)
8289  		return err;
8290  
8291  	orig_mtu = dev->mtu;
8292  	err = __dev_set_mtu(dev, new_mtu);
8293  
8294  	if (!err) {
8295  		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8296  						   orig_mtu);
8297  		err = notifier_to_errno(err);
8298  		if (err) {
8299  			/* setting mtu back and notifying everyone again,
8300  			 * so that they have a chance to revert changes.
8301  			 */
8302  			__dev_set_mtu(dev, orig_mtu);
8303  			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8304  						     new_mtu);
8305  		}
8306  	}
8307  	return err;
8308  }
8309  
8310  int dev_set_mtu(struct net_device *dev, int new_mtu)
8311  {
8312  	struct netlink_ext_ack extack;
8313  	int err;
8314  
8315  	memset(&extack, 0, sizeof(extack));
8316  	err = dev_set_mtu_ext(dev, new_mtu, &extack);
8317  	if (err && extack._msg)
8318  		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8319  	return err;
8320  }
8321  EXPORT_SYMBOL(dev_set_mtu);
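
/*
 * Usage sketch with a hypothetical MTU of 1500, assuming the RTNL lock is
 * held; the value is validated against dev->min_mtu/dev->max_mtu and
 * NETDEV_CHANGEMTU listeners get a chance to veto or revert the change:
 *
 *	err = dev_set_mtu(dev, 1500);
 *	if (err)
 *		netdev_err(dev, "failed to set MTU: %d\n", err);
 */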
8322  
8323  /**
8324   *	dev_change_tx_queue_len - Change TX queue length of a netdevice
8325   *	@dev: device
8326   *	@new_len: new tx queue length
8327   */
8328  int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8329  {
8330  	unsigned int orig_len = dev->tx_queue_len;
8331  	int res;
8332  
8333  	if (new_len != (unsigned int)new_len)
8334  		return -ERANGE;
8335  
8336  	if (new_len != orig_len) {
8337  		dev->tx_queue_len = new_len;
8338  		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8339  		res = notifier_to_errno(res);
8340  		if (res)
8341  			goto err_rollback;
8342  		res = dev_qdisc_change_tx_queue_len(dev);
8343  		if (res)
8344  			goto err_rollback;
8345  	}
8346  
8347  	return 0;
8348  
8349  err_rollback:
8350  	netdev_err(dev, "refused to change device tx_queue_len\n");
8351  	dev->tx_queue_len = orig_len;
8352  	return res;
8353  }
8354  
8355  /**
8356   *	dev_set_group - Change group this device belongs to
8357   *	@dev: device
8358   *	@new_group: group this device should belong to
8359   */
8360  void dev_set_group(struct net_device *dev, int new_group)
8361  {
8362  	dev->group = new_group;
8363  }
8364  EXPORT_SYMBOL(dev_set_group);
8365  
8366  /**
8367   *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8368   *	@dev: device
8369   *	@addr: new address
8370   *	@extack: netlink extended ack
8371   */
8372  int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8373  			      struct netlink_ext_ack *extack)
8374  {
8375  	struct netdev_notifier_pre_changeaddr_info info = {
8376  		.info.dev = dev,
8377  		.info.extack = extack,
8378  		.dev_addr = addr,
8379  	};
8380  	int rc;
8381  
8382  	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8383  	return notifier_to_errno(rc);
8384  }
8385  EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8386  
8387  /**
8388   *	dev_set_mac_address - Change Media Access Control Address
8389   *	@dev: device
8390   *	@sa: new address
8391   *	@extack: netlink extended ack
8392   *
8393   *	Change the hardware (MAC) address of the device
8394   */
8395  int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8396  			struct netlink_ext_ack *extack)
8397  {
8398  	const struct net_device_ops *ops = dev->netdev_ops;
8399  	int err;
8400  
8401  	if (!ops->ndo_set_mac_address)
8402  		return -EOPNOTSUPP;
8403  	if (sa->sa_family != dev->type)
8404  		return -EINVAL;
8405  	if (!netif_device_present(dev))
8406  		return -ENODEV;
8407  	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8408  	if (err)
8409  		return err;
8410  	err = ops->ndo_set_mac_address(dev, sa);
8411  	if (err)
8412  		return err;
8413  	dev->addr_assign_type = NET_ADDR_SET;
8414  	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8415  	add_device_randomness(dev->dev_addr, dev->addr_len);
8416  	return 0;
8417  }
8418  EXPORT_SYMBOL(dev_set_mac_address);
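
/*
 * Usage sketch with a hypothetical new_addr buffer of dev->addr_len bytes,
 * assuming the RTNL lock is held; sa_family must match dev->type:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa, NULL);
 */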
8419  
8420  /**
8421   *	dev_change_carrier - Change device carrier
8422   *	@dev: device
8423   *	@new_carrier: new value
8424   *
8425   *	Change device carrier
8426   */
8427  int dev_change_carrier(struct net_device *dev, bool new_carrier)
8428  {
8429  	const struct net_device_ops *ops = dev->netdev_ops;
8430  
8431  	if (!ops->ndo_change_carrier)
8432  		return -EOPNOTSUPP;
8433  	if (!netif_device_present(dev))
8434  		return -ENODEV;
8435  	return ops->ndo_change_carrier(dev, new_carrier);
8436  }
8437  EXPORT_SYMBOL(dev_change_carrier);
8438  
8439  /**
8440   *	dev_get_phys_port_id - Get device physical port ID
8441   *	@dev: device
8442   *	@ppid: port ID
8443   *
8444   *	Get device physical port ID
8445   */
8446  int dev_get_phys_port_id(struct net_device *dev,
8447  			 struct netdev_phys_item_id *ppid)
8448  {
8449  	const struct net_device_ops *ops = dev->netdev_ops;
8450  
8451  	if (!ops->ndo_get_phys_port_id)
8452  		return -EOPNOTSUPP;
8453  	return ops->ndo_get_phys_port_id(dev, ppid);
8454  }
8455  EXPORT_SYMBOL(dev_get_phys_port_id);
8456  
8457  /**
8458   *	dev_get_phys_port_name - Get device physical port name
8459   *	@dev: device
8460   *	@name: port name
8461   *	@len: limit of bytes to copy to name
8462   *
8463   *	Get device physical port name
8464   */
8465  int dev_get_phys_port_name(struct net_device *dev,
8466  			   char *name, size_t len)
8467  {
8468  	const struct net_device_ops *ops = dev->netdev_ops;
8469  	int err;
8470  
8471  	if (ops->ndo_get_phys_port_name) {
8472  		err = ops->ndo_get_phys_port_name(dev, name, len);
8473  		if (err != -EOPNOTSUPP)
8474  			return err;
8475  	}
8476  	return devlink_compat_phys_port_name_get(dev, name, len);
8477  }
8478  EXPORT_SYMBOL(dev_get_phys_port_name);
8479  
8480  /**
8481   *	dev_get_port_parent_id - Get the device's port parent identifier
8482   *	@dev: network device
8483   *	@ppid: pointer to a storage for the port's parent identifier
8484   *	@recurse: allow/disallow recursion to lower devices
8485   *
8486   *	Get the devices's port parent identifier
8487   */
8488  int dev_get_port_parent_id(struct net_device *dev,
8489  			   struct netdev_phys_item_id *ppid,
8490  			   bool recurse)
8491  {
8492  	const struct net_device_ops *ops = dev->netdev_ops;
8493  	struct netdev_phys_item_id first = { };
8494  	struct net_device *lower_dev;
8495  	struct list_head *iter;
8496  	int err;
8497  
8498  	if (ops->ndo_get_port_parent_id) {
8499  		err = ops->ndo_get_port_parent_id(dev, ppid);
8500  		if (err != -EOPNOTSUPP)
8501  			return err;
8502  	}
8503  
8504  	err = devlink_compat_switch_id_get(dev, ppid);
8505  	if (!err || err != -EOPNOTSUPP)
8506  		return err;
8507  
8508  	if (!recurse)
8509  		return -EOPNOTSUPP;
8510  
8511  	netdev_for_each_lower_dev(dev, lower_dev, iter) {
8512  		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
8513  		if (err)
8514  			break;
8515  		if (!first.id_len)
8516  			first = *ppid;
8517  		else if (memcmp(&first, ppid, sizeof(*ppid)))
8518  			return -ENODATA;
8519  	}
8520  
8521  	return err;
8522  }
8523  EXPORT_SYMBOL(dev_get_port_parent_id);
8524  
8525  /**
8526   *	netdev_port_same_parent_id - Indicate if two network devices have
8527   *	the same port parent identifier
8528   *	@a: first network device
8529   *	@b: second network device
8530   */
8531  bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
8532  {
8533  	struct netdev_phys_item_id a_id = { };
8534  	struct netdev_phys_item_id b_id = { };
8535  
8536  	if (dev_get_port_parent_id(a, &a_id, true) ||
8537  	    dev_get_port_parent_id(b, &b_id, true))
8538  		return false;
8539  
8540  	return netdev_phys_item_id_same(&a_id, &b_id);
8541  }
8542  EXPORT_SYMBOL(netdev_port_same_parent_id);
8543  
8544  /**
8545   *	dev_change_proto_down - update protocol port state information
8546   *	@dev: device
8547   *	@proto_down: new value
8548   *
8549   *	This info can be used by switch drivers to set the phys state of the
8550   *	port.
8551   */
8552  int dev_change_proto_down(struct net_device *dev, bool proto_down)
8553  {
8554  	const struct net_device_ops *ops = dev->netdev_ops;
8555  
8556  	if (!ops->ndo_change_proto_down)
8557  		return -EOPNOTSUPP;
8558  	if (!netif_device_present(dev))
8559  		return -ENODEV;
8560  	return ops->ndo_change_proto_down(dev, proto_down);
8561  }
8562  EXPORT_SYMBOL(dev_change_proto_down);
8563  
8564  /**
8565   *	dev_change_proto_down_generic - generic implementation for
8566   * 	ndo_change_proto_down that sets carrier according to
8567   * 	proto_down.
8568   *
8569   *	@dev: device
8570   *	@proto_down: new value
8571   */
8572  int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
8573  {
8574  	if (proto_down)
8575  		netif_carrier_off(dev);
8576  	else
8577  		netif_carrier_on(dev);
8578  	dev->proto_down = proto_down;
8579  	return 0;
8580  }
8581  EXPORT_SYMBOL(dev_change_proto_down_generic);
8582  
8583  u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
8584  		    enum bpf_netdev_command cmd)
8585  {
8586  	struct netdev_bpf xdp;
8587  
8588  	if (!bpf_op)
8589  		return 0;
8590  
8591  	memset(&xdp, 0, sizeof(xdp));
8592  	xdp.command = cmd;
8593  
8594  	/* Query must always succeed. */
8595  	WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
8596  
8597  	return xdp.prog_id;
8598  }
8599  
8600  static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
8601  			   struct netlink_ext_ack *extack, u32 flags,
8602  			   struct bpf_prog *prog)
8603  {
8604  	bool non_hw = !(flags & XDP_FLAGS_HW_MODE);
8605  	struct bpf_prog *prev_prog = NULL;
8606  	struct netdev_bpf xdp;
8607  	int err;
8608  
8609  	if (non_hw) {
8610  		prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op,
8611  							   XDP_QUERY_PROG));
8612  		if (IS_ERR(prev_prog))
8613  			prev_prog = NULL;
8614  	}
8615  
8616  	memset(&xdp, 0, sizeof(xdp));
8617  	if (flags & XDP_FLAGS_HW_MODE)
8618  		xdp.command = XDP_SETUP_PROG_HW;
8619  	else
8620  		xdp.command = XDP_SETUP_PROG;
8621  	xdp.extack = extack;
8622  	xdp.flags = flags;
8623  	xdp.prog = prog;
8624  
8625  	err = bpf_op(dev, &xdp);
8626  	if (!err && non_hw)
8627  		bpf_prog_change_xdp(prev_prog, prog);
8628  
8629  	if (prev_prog)
8630  		bpf_prog_put(prev_prog);
8631  
8632  	return err;
8633  }
8634  
8635  static void dev_xdp_uninstall(struct net_device *dev)
8636  {
8637  	struct netdev_bpf xdp;
8638  	bpf_op_t ndo_bpf;
8639  
8640  	/* Remove generic XDP */
8641  	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
8642  
8643  	/* Remove from the driver */
8644  	ndo_bpf = dev->netdev_ops->ndo_bpf;
8645  	if (!ndo_bpf)
8646  		return;
8647  
8648  	memset(&xdp, 0, sizeof(xdp));
8649  	xdp.command = XDP_QUERY_PROG;
8650  	WARN_ON(ndo_bpf(dev, &xdp));
8651  	if (xdp.prog_id)
8652  		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8653  					NULL));
8654  
8655  	/* Remove HW offload */
8656  	memset(&xdp, 0, sizeof(xdp));
8657  	xdp.command = XDP_QUERY_PROG_HW;
8658  	if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
8659  		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8660  					NULL));
8661  }
8662  
8663  /**
8664   *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
8665   *	@dev: device
8666   *	@extack: netlink extended ack
8667   *	@fd: new program fd or negative value to clear
8668   *	@flags: xdp-related flags
8669   *
8670   *	Set or clear a bpf program for a device
8671   */
8672  int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
8673  		      int fd, u32 flags)
8674  {
8675  	const struct net_device_ops *ops = dev->netdev_ops;
8676  	enum bpf_netdev_command query;
8677  	struct bpf_prog *prog = NULL;
8678  	bpf_op_t bpf_op, bpf_chk;
8679  	bool offload;
8680  	int err;
8681  
8682  	ASSERT_RTNL();
8683  
8684  	offload = flags & XDP_FLAGS_HW_MODE;
8685  	query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
8686  
8687  	bpf_op = bpf_chk = ops->ndo_bpf;
8688  	if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
8689  		NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
8690  		return -EOPNOTSUPP;
8691  	}
8692  	if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
8693  		bpf_op = generic_xdp_install;
8694  	if (bpf_op == bpf_chk)
8695  		bpf_chk = generic_xdp_install;
8696  
8697  	if (fd >= 0) {
8698  		u32 prog_id;
8699  
8700  		if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
8701  			NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
8702  			return -EEXIST;
8703  		}
8704  
8705  		prog_id = __dev_xdp_query(dev, bpf_op, query);
8706  		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
8707  			NL_SET_ERR_MSG(extack, "XDP program already attached");
8708  			return -EBUSY;
8709  		}
8710  
8711  		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
8712  					     bpf_op == ops->ndo_bpf);
8713  		if (IS_ERR(prog))
8714  			return PTR_ERR(prog);
8715  
8716  		if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
8717  			NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
8718  			bpf_prog_put(prog);
8719  			return -EINVAL;
8720  		}
8721  
8722  		/* prog->aux->id may be 0 for orphaned device-bound progs */
8723  		if (prog->aux->id && prog->aux->id == prog_id) {
8724  			bpf_prog_put(prog);
8725  			return 0;
8726  		}
8727  	} else {
8728  		if (!__dev_xdp_query(dev, bpf_op, query))
8729  			return 0;
8730  	}
8731  
8732  	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
8733  	if (err < 0 && prog)
8734  		bpf_prog_put(prog);
8735  
8736  	return err;
8737  }
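
/*
 * Usage sketch, assuming the RTNL lock is held and prog_fd refers to an
 * already loaded BPF_PROG_TYPE_XDP program; a negative fd detaches whatever
 * program is currently installed for the requested mode:
 *
 *	err = dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_DRV_MODE);
 *	...
 *	err = dev_change_xdp_fd(dev, extack, -1, XDP_FLAGS_DRV_MODE);
 */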
8738  
8739  /**
8740   *	dev_new_index	-	allocate an ifindex
8741   *	@net: the applicable net namespace
8742   *
8743   *	Returns a suitable unique value for a new device interface
8744   *	number.  The caller must hold the rtnl semaphore or the
8745   *	dev_base_lock to be sure it remains unique.
8746   */
8747  static int dev_new_index(struct net *net)
8748  {
8749  	int ifindex = net->ifindex;
8750  
8751  	for (;;) {
8752  		if (++ifindex <= 0)
8753  			ifindex = 1;
8754  		if (!__dev_get_by_index(net, ifindex))
8755  			return net->ifindex = ifindex;
8756  	}
8757  }
8758  
8759  /* Delayed registration/unregisteration */
8760  static LIST_HEAD(net_todo_list);
8761  DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
8762  
8763  static void net_set_todo(struct net_device *dev)
8764  {
8765  	list_add_tail(&dev->todo_list, &net_todo_list);
8766  	dev_net(dev)->dev_unreg_count++;
8767  }
8768  
8769  static void rollback_registered_many(struct list_head *head)
8770  {
8771  	struct net_device *dev, *tmp;
8772  	LIST_HEAD(close_head);
8773  
8774  	BUG_ON(dev_boot_phase);
8775  	ASSERT_RTNL();
8776  
8777  	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
8778  		/* Some devices get unregistered here without ever having been
8779  		 * registered, as part of initialization unwind. Remove those
8780  		 * devices and proceed with the remaining.
8781  		 */
8782  		if (dev->reg_state == NETREG_UNINITIALIZED) {
8783  			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
8784  				 dev->name, dev);
8785  
8786  			WARN_ON(1);
8787  			list_del(&dev->unreg_list);
8788  			continue;
8789  		}
8790  		dev->dismantle = true;
8791  		BUG_ON(dev->reg_state != NETREG_REGISTERED);
8792  	}
8793  
8794  	/* If device is running, close it first. */
8795  	list_for_each_entry(dev, head, unreg_list)
8796  		list_add_tail(&dev->close_list, &close_head);
8797  	dev_close_many(&close_head, true);
8798  
8799  	list_for_each_entry(dev, head, unreg_list) {
8800  		/* And unlink it from device chain. */
8801  		unlist_netdevice(dev);
8802  
8803  		dev->reg_state = NETREG_UNREGISTERING;
8804  	}
8805  	flush_all_backlogs();
8806  
8807  	synchronize_net();
8808  
8809  	list_for_each_entry(dev, head, unreg_list) {
8810  		struct sk_buff *skb = NULL;
8811  
8812  		/* Shutdown queueing discipline. */
8813  		dev_shutdown(dev);
8814  
8815  		dev_xdp_uninstall(dev);
8816  
8817  		/* Notify protocols, that we are about to destroy
8818  		 * this device. They should clean all the things.
8819  		 */
8820  		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8821  
8822  		if (!dev->rtnl_link_ops ||
8823  		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8824  			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
8825  						     GFP_KERNEL, NULL, 0);
8826  
8827  		/*
8828  		 *	Flush the unicast and multicast chains
8829  		 */
8830  		dev_uc_flush(dev);
8831  		dev_mc_flush(dev);
8832  
8833  		netdev_name_node_alt_flush(dev);
8834  		netdev_name_node_free(dev->name_node);
8835  
8836  		if (dev->netdev_ops->ndo_uninit)
8837  			dev->netdev_ops->ndo_uninit(dev);
8838  
8839  		if (skb)
8840  			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
8841  
8842  		/* The notifier chain MUST have detached us from all upper and lower devices. */
8843  		WARN_ON(netdev_has_any_upper_dev(dev));
8844  		WARN_ON(netdev_has_any_lower_dev(dev));
8845  
8846  		/* Remove entries from kobject tree */
8847  		netdev_unregister_kobject(dev);
8848  #ifdef CONFIG_XPS
8849  		/* Remove XPS queueing entries */
8850  		netif_reset_xps_queues_gt(dev, 0);
8851  #endif
8852  	}
8853  
8854  	synchronize_net();
8855  
8856  	list_for_each_entry(dev, head, unreg_list)
8857  		dev_put(dev);
8858  }
8859  
8860  static void rollback_registered(struct net_device *dev)
8861  {
8862  	LIST_HEAD(single);
8863  
8864  	list_add(&dev->unreg_list, &single);
8865  	rollback_registered_many(&single);
8866  	list_del(&single);
8867  }
8868  
8869  static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
8870  	struct net_device *upper, netdev_features_t features)
8871  {
8872  	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
8873  	netdev_features_t feature;
8874  	int feature_bit;
8875  
8876  	for_each_netdev_feature(upper_disables, feature_bit) {
8877  		feature = __NETIF_F_BIT(feature_bit);
8878  		if (!(upper->wanted_features & feature)
8879  		    && (features & feature)) {
8880  			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
8881  				   &feature, upper->name);
8882  			features &= ~feature;
8883  		}
8884  	}
8885  
8886  	return features;
8887  }
8888  
8889  static void netdev_sync_lower_features(struct net_device *upper,
8890  	struct net_device *lower, netdev_features_t features)
8891  {
8892  	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
8893  	netdev_features_t feature;
8894  	int feature_bit;
8895  
8896  	for_each_netdev_feature(upper_disables, feature_bit) {
8897  		feature = __NETIF_F_BIT(feature_bit);
8898  		if (!(features & feature) && (lower->features & feature)) {
8899  			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
8900  				   &feature, lower->name);
8901  			lower->wanted_features &= ~feature;
8902  			netdev_update_features(lower);
8903  
8904  			if (unlikely(lower->features & feature))
8905  				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
8906  					    &feature, lower->name);
8907  		}
8908  	}
8909  }
8910  
8911  static netdev_features_t netdev_fix_features(struct net_device *dev,
8912  	netdev_features_t features)
8913  {
8914  	/* Fix illegal checksum combinations */
8915  	if ((features & NETIF_F_HW_CSUM) &&
8916  	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
8917  		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
8918  		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
8919  	}
8920  
8921  	/* TSO requires that SG is present as well. */
8922  	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
8923  		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
8924  		features &= ~NETIF_F_ALL_TSO;
8925  	}
8926  
8927  	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
8928  					!(features & NETIF_F_IP_CSUM)) {
8929  		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
8930  		features &= ~NETIF_F_TSO;
8931  		features &= ~NETIF_F_TSO_ECN;
8932  	}
8933  
8934  	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
8935  					 !(features & NETIF_F_IPV6_CSUM)) {
8936  		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
8937  		features &= ~NETIF_F_TSO6;
8938  	}
8939  
8940  	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
8941  	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
8942  		features &= ~NETIF_F_TSO_MANGLEID;
8943  
8944  	/* TSO ECN requires that TSO is present as well. */
8945  	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
8946  		features &= ~NETIF_F_TSO_ECN;
8947  
8948  	/* Software GSO depends on SG. */
8949  	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
8950  		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
8951  		features &= ~NETIF_F_GSO;
8952  	}
8953  
8954  	/* GSO partial features require GSO partial be set */
8955  	if ((features & dev->gso_partial_features) &&
8956  	    !(features & NETIF_F_GSO_PARTIAL)) {
8957  		netdev_dbg(dev,
8958  			   "Dropping partially supported GSO features since no GSO partial.\n");
8959  		features &= ~dev->gso_partial_features;
8960  	}
8961  
8962  	if (!(features & NETIF_F_RXCSUM)) {
8963  		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
8964  		 * successfully merged by hardware must also have the
8965  		 * checksum verified by hardware.  If the user does not
8966  		 * want to enable RXCSUM, logically, we should disable GRO_HW.
8967  		 */
8968  		if (features & NETIF_F_GRO_HW) {
8969  			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
8970  			features &= ~NETIF_F_GRO_HW;
8971  		}
8972  	}
8973  
8974  	/* LRO/HW-GRO features cannot be combined with RX-FCS */
8975  	if (features & NETIF_F_RXFCS) {
8976  		if (features & NETIF_F_LRO) {
8977  			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
8978  			features &= ~NETIF_F_LRO;
8979  		}
8980  
8981  		if (features & NETIF_F_GRO_HW) {
8982  			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
8983  			features &= ~NETIF_F_GRO_HW;
8984  		}
8985  	}
8986  
8987  	return features;
8988  }
8989  
8990  int __netdev_update_features(struct net_device *dev)
8991  {
8992  	struct net_device *upper, *lower;
8993  	netdev_features_t features;
8994  	struct list_head *iter;
8995  	int err = -1;
8996  
8997  	ASSERT_RTNL();
8998  
8999  	features = netdev_get_wanted_features(dev);
9000  
9001  	if (dev->netdev_ops->ndo_fix_features)
9002  		features = dev->netdev_ops->ndo_fix_features(dev, features);
9003  
9004  	/* driver might be less strict about feature dependencies */
9005  	features = netdev_fix_features(dev, features);
9006  
9007  	/* some features can't be enabled if they're off on an upper device */
9008  	netdev_for_each_upper_dev_rcu(dev, upper, iter)
9009  		features = netdev_sync_upper_features(dev, upper, features);
9010  
9011  	if (dev->features == features)
9012  		goto sync_lower;
9013  
9014  	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9015  		&dev->features, &features);
9016  
9017  	if (dev->netdev_ops->ndo_set_features)
9018  		err = dev->netdev_ops->ndo_set_features(dev, features);
9019  	else
9020  		err = 0;
9021  
9022  	if (unlikely(err < 0)) {
9023  		netdev_err(dev,
9024  			"set_features() failed (%d); wanted %pNF, left %pNF\n",
9025  			err, &features, &dev->features);
9026  		/* return non-0 since some features might have changed and
9027  		 * it's better to fire a spurious notification than miss it
9028  		 */
9029  		return -1;
9030  	}
9031  
9032  sync_lower:
9033  	/* some features must be disabled on lower devices when disabled
9034  	 * on an upper device (think: bonding master or bridge)
9035  	 */
9036  	netdev_for_each_lower_dev(dev, lower, iter)
9037  		netdev_sync_lower_features(dev, lower, features);
9038  
9039  	if (!err) {
9040  		netdev_features_t diff = features ^ dev->features;
9041  
9042  		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9043  			/* udp_tunnel_{get,drop}_rx_info both need
9044  			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9045  			 * device, or they won't do anything.
9046  			 * Thus we need to update dev->features
9047  			 * *before* calling udp_tunnel_get_rx_info,
9048  			 * but *after* calling udp_tunnel_drop_rx_info.
9049  			 */
9050  			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9051  				dev->features = features;
9052  				udp_tunnel_get_rx_info(dev);
9053  			} else {
9054  				udp_tunnel_drop_rx_info(dev);
9055  			}
9056  		}
9057  
9058  		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9059  			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9060  				dev->features = features;
9061  				err |= vlan_get_rx_ctag_filter_info(dev);
9062  			} else {
9063  				vlan_drop_rx_ctag_filter_info(dev);
9064  			}
9065  		}
9066  
9067  		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9068  			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9069  				dev->features = features;
9070  				err |= vlan_get_rx_stag_filter_info(dev);
9071  			} else {
9072  				vlan_drop_rx_stag_filter_info(dev);
9073  			}
9074  		}
9075  
9076  		dev->features = features;
9077  	}
9078  
9079  	return err < 0 ? 0 : 1;
9080  }
9081  
9082  /**
9083   *	netdev_update_features - recalculate device features
9084   *	@dev: the device to check
9085   *
9086   *	Recalculate dev->features set and send notifications if it
9087   *	has changed. Should be called after driver or hardware dependent
9088   *	conditions that influence the features might have changed.
9089   */
9090  void netdev_update_features(struct net_device *dev)
9091  {
9092  	if (__netdev_update_features(dev))
9093  		netdev_features_change(dev);
9094  }
9095  EXPORT_SYMBOL(netdev_update_features);
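
/*
 * Usage sketch: a hypothetical driver whose ndo_fix_features() constraints
 * have just changed (say, after a firmware reconfiguration) re-runs the
 * feature fixups under RTNL; notifications are only sent if dev->features
 * actually changed:
 *
 *	rtnl_lock();
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */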
9096  
9097  /**
9098   *	netdev_change_features - recalculate device features
9099   *	@dev: the device to check
9100   *
9101   *	Recalculate dev->features set and send notifications even
9102   *	if they have not changed. Should be called instead of
9103   *	netdev_update_features() if dev->vlan_features might also have
9104   *	changed, so that the changes are propagated to stacked
9105   *	VLAN devices.
9106   */
9107  void netdev_change_features(struct net_device *dev)
9108  {
9109  	__netdev_update_features(dev);
9110  	netdev_features_change(dev);
9111  }
9112  EXPORT_SYMBOL(netdev_change_features);
9113  
9114  /**
9115   *	netif_stacked_transfer_operstate -	transfer operstate
9116   *	@rootdev: the root or lower level device to transfer state from
9117   *	@dev: the device to transfer operstate to
9118   *
9119   *	Transfer operational state from root to device. This is normally
9120   *	called when a stacking relationship exists between the root
9121   *	device and the device (a leaf device).
9122   */
9123  void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9124  					struct net_device *dev)
9125  {
9126  	if (rootdev->operstate == IF_OPER_DORMANT)
9127  		netif_dormant_on(dev);
9128  	else
9129  		netif_dormant_off(dev);
9130  
9131  	if (netif_carrier_ok(rootdev))
9132  		netif_carrier_on(dev);
9133  	else
9134  		netif_carrier_off(dev);
9135  }
9136  EXPORT_SYMBOL(netif_stacked_transfer_operstate);
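
/*
 * A minimal usage sketch, assuming a hypothetical stacked driver "bar":
 * an upper device mirrors the operstate of its lower device from a
 * netdevice notifier, in the spirit of what 8021q and bonding do.
 * bar_get_upper() is a hypothetical lookup helper.
 */
#include <linux/netdevice.h>
#include <linux/notifier.h>

static struct net_device *bar_get_upper(struct net_device *lower); /* hypothetical */

static int bar_netdev_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper = bar_get_upper(lower);

	if (!upper)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		/* Copy carrier and dormant state from the lower device. */
		netif_stacked_transfer_operstate(lower, upper);
		break;
	}
	return NOTIFY_DONE;
}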
9137  
9138  static int netif_alloc_rx_queues(struct net_device *dev)
9139  {
9140  	unsigned int i, count = dev->num_rx_queues;
9141  	struct netdev_rx_queue *rx;
9142  	size_t sz = count * sizeof(*rx);
9143  	int err = 0;
9144  
9145  	BUG_ON(count < 1);
9146  
9147  	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9148  	if (!rx)
9149  		return -ENOMEM;
9150  
9151  	dev->_rx = rx;
9152  
9153  	for (i = 0; i < count; i++) {
9154  		rx[i].dev = dev;
9155  
9156  		/* XDP RX-queue setup */
9157  		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
9158  		if (err < 0)
9159  			goto err_rxq_info;
9160  	}
9161  	return 0;
9162  
9163  err_rxq_info:
9164  	/* Roll back successful registrations and free other resources */
9165  	while (i--)
9166  		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9167  	kvfree(dev->_rx);
9168  	dev->_rx = NULL;
9169  	return err;
9170  }
9171  
9172  static void netif_free_rx_queues(struct net_device *dev)
9173  {
9174  	unsigned int i, count = dev->num_rx_queues;
9175  
9176  	/* netif_alloc_rx_queues() failed; its resources were already unregistered */
9177  	if (!dev->_rx)
9178  		return;
9179  
9180  	for (i = 0; i < count; i++)
9181  		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9182  
9183  	kvfree(dev->_rx);
9184  }
9185  
9186  static void netdev_init_one_queue(struct net_device *dev,
9187  				  struct netdev_queue *queue, void *_unused)
9188  {
9189  	/* Initialize queue lock */
9190  	spin_lock_init(&queue->_xmit_lock);
9191  	lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
9192  	queue->xmit_lock_owner = -1;
9193  	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
9194  	queue->dev = dev;
9195  #ifdef CONFIG_BQL
9196  	dql_init(&queue->dql, HZ);
9197  #endif
9198  }
9199  
9200  static void netif_free_tx_queues(struct net_device *dev)
9201  {
9202  	kvfree(dev->_tx);
9203  }
9204  
9205  static int netif_alloc_netdev_queues(struct net_device *dev)
9206  {
9207  	unsigned int count = dev->num_tx_queues;
9208  	struct netdev_queue *tx;
9209  	size_t sz = count * sizeof(*tx);
9210  
9211  	if (count < 1 || count > 0xffff)
9212  		return -EINVAL;
9213  
9214  	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9215  	if (!tx)
9216  		return -ENOMEM;
9217  
9218  	dev->_tx = tx;
9219  
9220  	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
9221  	spin_lock_init(&dev->tx_global_lock);
9222  
9223  	return 0;
9224  }
9225  
9226  void netif_tx_stop_all_queues(struct net_device *dev)
9227  {
9228  	unsigned int i;
9229  
9230  	for (i = 0; i < dev->num_tx_queues; i++) {
9231  		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
9232  
9233  		netif_tx_stop_queue(txq);
9234  	}
9235  }
9236  EXPORT_SYMBOL(netif_tx_stop_all_queues);
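
/*
 * A minimal usage sketch, assuming a hypothetical driver "baz" that must
 * quiesce transmit around a device reset; baz_hw_reset() is an assumption,
 * and the stop/wake pairing uses only helpers from netdevice.h.
 */
#include <linux/netdevice.h>

static void baz_hw_reset(struct net_device *dev); /* hypothetical */

static void baz_reset(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);	/* no new skbs reach ndo_start_xmit */
	baz_hw_reset(dev);
	netif_tx_wake_all_queues(dev);	/* resume transmission */
}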
9237  
9238  static void netdev_register_lockdep_key(struct net_device *dev)
9239  {
9240  	lockdep_register_key(&dev->qdisc_tx_busylock_key);
9241  	lockdep_register_key(&dev->qdisc_running_key);
9242  	lockdep_register_key(&dev->qdisc_xmit_lock_key);
9243  	lockdep_register_key(&dev->addr_list_lock_key);
9244  }
9245  
9246  static void netdev_unregister_lockdep_key(struct net_device *dev)
9247  {
9248  	lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
9249  	lockdep_unregister_key(&dev->qdisc_running_key);
9250  	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
9251  	lockdep_unregister_key(&dev->addr_list_lock_key);
9252  }
9253  
9254  void netdev_update_lockdep_key(struct net_device *dev)
9255  {
9256  	lockdep_unregister_key(&dev->addr_list_lock_key);
9257  	lockdep_register_key(&dev->addr_list_lock_key);
9258  
9259  	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
9260  }
9261  EXPORT_SYMBOL(netdev_update_lockdep_key);
9262  
9263  /**
9264   *	register_netdevice	- register a network device
9265   *	@dev: device to register
9266   *
9267   *	Take a completed network device structure and add it to the kernel
9268   *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9269   *	chain. 0 is returned on success. A negative errno code is returned
9270   *	on a failure to set up the device, or if the name is a duplicate.
9271   *
9272   *	Callers must hold the rtnl semaphore. You may want
9273   *	register_netdev() instead of this.
9274   *
9275   *	BUGS:
9276   *	The locking appears insufficient to guarantee two parallel registers
9277   *	will not get the same name.
9278   */
9279  
9280  int register_netdevice(struct net_device *dev)
9281  {
9282  	int ret;
9283  	struct net *net = dev_net(dev);
9284  
9285  	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
9286  		     NETDEV_FEATURE_COUNT);
9287  	BUG_ON(dev_boot_phase);
9288  	ASSERT_RTNL();
9289  
9290  	might_sleep();
9291  
9292  	/* When net_devices are persistent, this will be fatal. */
9293  	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
9294  	BUG_ON(!net);
9295  
9296  	spin_lock_init(&dev->addr_list_lock);
9297  	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
9298  
9299  	ret = dev_get_valid_name(net, dev, dev->name);
9300  	if (ret < 0)
9301  		goto out;
9302  
9303  	ret = -ENOMEM;
9304  	dev->name_node = netdev_name_node_head_alloc(dev);
9305  	if (!dev->name_node)
9306  		goto out;
9307  
9308  	/* Init, if this function is available */
9309  	if (dev->netdev_ops->ndo_init) {
9310  		ret = dev->netdev_ops->ndo_init(dev);
9311  		if (ret) {
9312  			if (ret > 0)
9313  				ret = -EIO;
9314  			goto err_free_name;
9315  		}
9316  	}
9317  
9318  	if (((dev->hw_features | dev->features) &
9319  	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
9320  	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
9321  	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
9322  		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
9323  		ret = -EINVAL;
9324  		goto err_uninit;
9325  	}
9326  
9327  	ret = -EBUSY;
9328  	if (!dev->ifindex)
9329  		dev->ifindex = dev_new_index(net);
9330  	else if (__dev_get_by_index(net, dev->ifindex))
9331  		goto err_uninit;
9332  
9333  	/* Transfer changeable features to wanted_features and enable
9334  	 * software offloads (GSO and GRO).
9335  	 */
9336  	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
9337  	dev->features |= NETIF_F_SOFT_FEATURES;
9338  
9339  	if (dev->netdev_ops->ndo_udp_tunnel_add) {
9340  		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9341  		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9342  	}
9343  
9344  	dev->wanted_features = dev->features & dev->hw_features;
9345  
9346  	if (!(dev->flags & IFF_LOOPBACK))
9347  		dev->hw_features |= NETIF_F_NOCACHE_COPY;
9348  
9349  	/* If IPv4 TCP segmentation offload is supported we should also
9350  	 * allow the device to enable segmenting the frame with the option
9351  	 * of ignoring a static IP ID value.  This doesn't enable the
9352  	 * feature itself but allows the user to enable it later.
9353  	 */
9354  	if (dev->hw_features & NETIF_F_TSO)
9355  		dev->hw_features |= NETIF_F_TSO_MANGLEID;
9356  	if (dev->vlan_features & NETIF_F_TSO)
9357  		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
9358  	if (dev->mpls_features & NETIF_F_TSO)
9359  		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
9360  	if (dev->hw_enc_features & NETIF_F_TSO)
9361  		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9362  
9363  	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
9364  	 */
9365  	dev->vlan_features |= NETIF_F_HIGHDMA;
9366  
9367  	/* Make NETIF_F_SG inheritable to tunnel devices.
9368  	 */
9369  	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
9370  
9371  	/* Make NETIF_F_SG inheritable to MPLS.
9372  	 */
9373  	dev->mpls_features |= NETIF_F_SG;
9374  
9375  	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
9376  	ret = notifier_to_errno(ret);
9377  	if (ret)
9378  		goto err_uninit;
9379  
9380  	ret = netdev_register_kobject(dev);
9381  	if (ret) {
9382  		dev->reg_state = NETREG_UNREGISTERED;
9383  		goto err_uninit;
9384  	}
9385  	dev->reg_state = NETREG_REGISTERED;
9386  
9387  	__netdev_update_features(dev);
9388  
9389  	/*
9390  	 *	Default initial state at registration is that the
9391  	 *	device is present.
9392  	 */
9393  
9394  	set_bit(__LINK_STATE_PRESENT, &dev->state);
9395  
9396  	linkwatch_init_dev(dev);
9397  
9398  	dev_init_scheduler(dev);
9399  	dev_hold(dev);
9400  	list_netdevice(dev);
9401  	add_device_randomness(dev->dev_addr, dev->addr_len);
9402  
9403  	/* If the device has a permanent device address, the driver should
9404  	 * set dev_addr, and addr_assign_type should be set to
9405  	 * NET_ADDR_PERM (the default value).
9406  	 */
9407  	if (dev->addr_assign_type == NET_ADDR_PERM)
9408  		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9409  
9410  	/* Notify protocols that a new device appeared. */
9411  	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
9412  	ret = notifier_to_errno(ret);
9413  	if (ret) {
9414  		rollback_registered(dev);
9415  		rcu_barrier();
9416  
9417  		dev->reg_state = NETREG_UNREGISTERED;
9418  	}
9419  	/*
9420  	 *	Prevent userspace races by waiting until the network
9421  	 *	device is fully set up before sending notifications.
9422  	 */
9423  	if (!dev->rtnl_link_ops ||
9424  	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
9425  		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
9426  
9427  out:
9428  	return ret;
9429  
9430  err_uninit:
9431  	if (dev->netdev_ops->ndo_uninit)
9432  		dev->netdev_ops->ndo_uninit(dev);
9433  	if (dev->priv_destructor)
9434  		dev->priv_destructor(dev);
9435  err_free_name:
9436  	netdev_name_node_free(dev->name_node);
9437  	goto out;
9438  }
9439  EXPORT_SYMBOL(register_netdevice);
9440  
9441  /**
9442   *	init_dummy_netdev	- init a dummy network device for NAPI
9443   *	@dev: device to init
9444   *
9445   *	This takes a network device structure and initializes the minimum
9446   *	number of fields so it can be used to schedule NAPI polls without
9447   *	registering a full blown interface. This is to be used by drivers
9448   *	that need to tie several hardware interfaces to a single NAPI
9449   *	poll scheduler due to HW limitations.
9450   */
9451  int init_dummy_netdev(struct net_device *dev)
9452  {
9453  	/* Clear everything. Note we don't initialize spinlocks
9454  	 * as they aren't supposed to be taken by any of the
9455  	 * NAPI code, and this dummy netdev is only ever supposed
9456  	 * to be used for NAPI polls.
9457  	 */
9458  	memset(dev, 0, sizeof(struct net_device));
9459  
9460  	/* make sure we BUG if trying to hit standard
9461  	 * register/unregister code path
9462  	 */
9463  	dev->reg_state = NETREG_DUMMY;
9464  
9465  	/* NAPI wants this */
9466  	INIT_LIST_HEAD(&dev->napi_list);
9467  
9468  	/* a dummy interface is started by default */
9469  	set_bit(__LINK_STATE_PRESENT, &dev->state);
9470  	set_bit(__LINK_STATE_START, &dev->state);
9471  
9472  	/* napi_busy_loop stats accounting wants this */
9473  	dev_net_set(dev, &init_net);
9474  
9475  	/* Note: We don't allocate pcpu_refcnt for dummy devices,
9476  	 * because users of this 'device' don't need to change
9477  	 * its refcount.
9478  	 */
9479  
9480  	return 0;
9481  }
9482  EXPORT_SYMBOL_GPL(init_dummy_netdev);
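
/*
 * A minimal usage sketch, assuming a hypothetical driver "qux" with no real
 * net_device of its own that still wants NAPI, as described above; struct
 * qux_hw and qux_poll() are assumptions.
 */
#include <linux/netdevice.h>

static int qux_poll(struct napi_struct *napi, int budget); /* hypothetical */

struct qux_hw {
	struct net_device napi_dev;	/* dummy carrier for NAPI only */
	struct napi_struct napi;
};

static void qux_hw_init_napi(struct qux_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, qux_poll, NAPI_POLL_WEIGHT);
	napi_enable(&hw->napi);
}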
9483  
9484  
9485  /**
9486   *	register_netdev	- register a network device
9487   *	@dev: device to register
9488   *
9489   *	Take a completed network device structure and add it to the kernel
9490   *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9491   *	chain. 0 is returned on success. A negative errno code is returned
9492   *	on a failure to set up the device, or if the name is a duplicate.
9493   *
9494   *	This is a wrapper around register_netdevice that takes the rtnl semaphore
9495   *	and expands the device name if you passed a format string to
9496   *	alloc_netdev.
9497   */
9498  int register_netdev(struct net_device *dev)
9499  {
9500  	int err;
9501  
9502  	if (rtnl_lock_killable())
9503  		return -EINTR;
9504  	err = register_netdevice(dev);
9505  	rtnl_unlock();
9506  	return err;
9507  }
9508  EXPORT_SYMBOL(register_netdev);
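
/*
 * A minimal usage sketch of the common probe-time pattern this wrapper
 * exists for, assuming a hypothetical driver "sample": sample_priv and
 * sample_netdev_ops are placeholders a real driver would fill in;
 * alloc_etherdev() picks the "eth%d" template that register_netdev()
 * then expands.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct sample_priv {
	void __iomem *regs;				/* hypothetical */
};

static const struct net_device_ops sample_netdev_ops;	/* hypothetical */

static int sample_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct sample_priv));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, parent);
	dev->netdev_ops = &sample_netdev_ops;

	err = register_netdev(dev);	/* takes rtnl, may return -EINTR */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}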
9509  
9510  int netdev_refcnt_read(const struct net_device *dev)
9511  {
9512  	int i, refcnt = 0;
9513  
9514  	for_each_possible_cpu(i)
9515  		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
9516  	return refcnt;
9517  }
9518  EXPORT_SYMBOL(netdev_refcnt_read);
9519  
9520  /**
9521   * netdev_wait_allrefs - wait until all references are gone.
9522   * @dev: target net_device
9523   *
9524   * This is called when unregistering network devices.
9525   *
9526   * Any protocol or device that holds a reference should register
9527   * for netdevice notification, and cleanup and put back the
9528   * reference if they receive an UNREGISTER event.
9529   * We can get stuck here if buggy protocols don't correctly
9530   * call dev_put.
9531   */
9532  static void netdev_wait_allrefs(struct net_device *dev)
9533  {
9534  	unsigned long rebroadcast_time, warning_time;
9535  	int refcnt;
9536  
9537  	linkwatch_forget_dev(dev);
9538  
9539  	rebroadcast_time = warning_time = jiffies;
9540  	refcnt = netdev_refcnt_read(dev);
9541  
9542  	while (refcnt != 0) {
9543  		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
9544  			rtnl_lock();
9545  
9546  			/* Rebroadcast unregister notification */
9547  			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9548  
9549  			__rtnl_unlock();
9550  			rcu_barrier();
9551  			rtnl_lock();
9552  
9553  			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
9554  				     &dev->state)) {
9555  				/* We must not have linkwatch events
9556  				 * pending on unregister. If this
9557  				 * happens, we simply run the queue
9558  				 * unscheduled, resulting in a noop
9559  				 * for this device.
9560  				 */
9561  				linkwatch_run_queue();
9562  			}
9563  
9564  			__rtnl_unlock();
9565  
9566  			rebroadcast_time = jiffies;
9567  		}
9568  
9569  		msleep(250);
9570  
9571  		refcnt = netdev_refcnt_read(dev);
9572  
9573  		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
9574  			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
9575  				 dev->name, refcnt);
9576  			warning_time = jiffies;
9577  		}
9578  	}
9579  }
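
/*
 * A minimal sketch of the cooperating side of the wait above, assuming a
 * hypothetical subsystem "myproto" that keeps a long-lived dev_hold()
 * reference and drops it from its netdevice notifier on NETDEV_UNREGISTER
 * so the loop above can terminate; struct myproto_binding and
 * myproto_find() are assumptions.
 */
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/slab.h>

struct myproto_binding {
	struct net_device *dev;		/* holds a dev_hold() reference */
};

static struct myproto_binding *myproto_find(struct net_device *dev); /* hypothetical */

static int myproto_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct myproto_binding *b;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	b = myproto_find(dev);
	if (b) {
		dev_put(b->dev);	/* lets netdev_wait_allrefs() finish */
		kfree(b);
	}
	return NOTIFY_DONE;
}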
9580  
9581  /* The sequence is:
9582   *
9583   *	rtnl_lock();
9584   *	...
9585   *	register_netdevice(x1);
9586   *	register_netdevice(x2);
9587   *	...
9588   *	unregister_netdevice(y1);
9589   *	unregister_netdevice(y2);
9590   *      ...
9591   *	rtnl_unlock();
9592   *	free_netdev(y1);
9593   *	free_netdev(y2);
9594   *
9595   * We are invoked by rtnl_unlock().
9596   * This allows us to deal with problems:
9597   * 1) We can delete sysfs objects which invoke hotplug
9598   *    without deadlocking with linkwatch via keventd.
9599   * 2) Since we run with the RTNL semaphore not held, we can sleep
9600   *    safely in order to wait for the netdev refcnt to drop to zero.
9601   *
9602   * We must not return until all unregister events added during
9603   * the interval the lock was held have been completed.
9604   */
9605  void netdev_run_todo(void)
9606  {
9607  	struct list_head list;
9608  
9609  	/* Snapshot list, allow later requests */
9610  	list_replace_init(&net_todo_list, &list);
9611  
9612  	__rtnl_unlock();
9613  
9614  
9615  	/* Wait for rcu callbacks to finish before next phase */
9616  	if (!list_empty(&list))
9617  		rcu_barrier();
9618  
9619  	while (!list_empty(&list)) {
9620  		struct net_device *dev
9621  			= list_first_entry(&list, struct net_device, todo_list);
9622  		list_del(&dev->todo_list);
9623  
9624  		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
9625  			pr_err("network todo '%s' but state %d\n",
9626  			       dev->name, dev->reg_state);
9627  			dump_stack();
9628  			continue;
9629  		}
9630  
9631  		dev->reg_state = NETREG_UNREGISTERED;
9632  
9633  		netdev_wait_allrefs(dev);
9634  
9635  		/* paranoia */
9636  		BUG_ON(netdev_refcnt_read(dev));
9637  		BUG_ON(!list_empty(&dev->ptype_all));
9638  		BUG_ON(!list_empty(&dev->ptype_specific));
9639  		WARN_ON(rcu_access_pointer(dev->ip_ptr));
9640  		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
9641  #if IS_ENABLED(CONFIG_DECNET)
9642  		WARN_ON(dev->dn_ptr);
9643  #endif
9644  		if (dev->priv_destructor)
9645  			dev->priv_destructor(dev);
9646  		if (dev->needs_free_netdev)
9647  			free_netdev(dev);
9648  
9649  		/* Report a network device has been unregistered */
9650  		rtnl_lock();
9651  		dev_net(dev)->dev_unreg_count--;
9652  		__rtnl_unlock();
9653  		wake_up(&netdev_unregistering_wq);
9654  
9655  		/* Free network device */
9656  		kobject_put(&dev->dev.kobj);
9657  	}
9658  }
9659  
9660  /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
9661   * all the same fields in the same order as net_device_stats, with only
9662   * the type differing, but rtnl_link_stats64 may have additional fields
9663   * at the end for newer counters.
9664   */
9665  void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
9666  			     const struct net_device_stats *netdev_stats)
9667  {
9668  #if BITS_PER_LONG == 64
9669  	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9670  	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9671  	/* zero out counters that only exist in rtnl_link_stats64 */
9672  	memset((char *)stats64 + sizeof(*netdev_stats), 0,
9673  	       sizeof(*stats64) - sizeof(*netdev_stats));
9674  #else
9675  	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
9676  	const unsigned long *src = (const unsigned long *)netdev_stats;
9677  	u64 *dst = (u64 *)stats64;
9678  
9679  	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
9680  	for (i = 0; i < n; i++)
9681  		dst[i] = src[i];
9682  	/* zero out counters that only exist in rtnl_link_stats64 */
9683  	memset((char *)stats64 + n * sizeof(u64), 0,
9684  	       sizeof(*stats64) - n * sizeof(u64));
9685  #endif
9686  }
9687  EXPORT_SYMBOL(netdev_stats_to_stats64);
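
/*
 * A minimal usage sketch, assuming a hypothetical driver "mydrv": its
 * .ndo_get_stats64 widens the generic dev->stats block with the helper
 * above and then folds in a private 64-bit counter; struct mydrv_priv and
 * rx_csum_errors are assumptions.
 */
#include <linux/netdevice.h>

struct mydrv_priv {
	u64 rx_csum_errors;
};

static void mydrv_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	netdev_stats_to_stats64(stats, &dev->stats);
	stats->rx_errors += priv->rx_csum_errors;
}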
9688  
9689  /**
9690   *	dev_get_stats	- get network device statistics
9691   *	@dev: device to get statistics from
9692   *	@storage: place to store stats
9693   *
9694   *	Get network statistics from device. Return @storage.
9695   *	The device driver may provide its own method by setting
9696   *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
9697   *	otherwise the internal statistics structure is used.
9698   */
9699  struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
9700  					struct rtnl_link_stats64 *storage)
9701  {
9702  	const struct net_device_ops *ops = dev->netdev_ops;
9703  
9704  	if (ops->ndo_get_stats64) {
9705  		memset(storage, 0, sizeof(*storage));
9706  		ops->ndo_get_stats64(dev, storage);
9707  	} else if (ops->ndo_get_stats) {
9708  		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
9709  	} else {
9710  		netdev_stats_to_stats64(storage, &dev->stats);
9711  	}
9712  	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
9713  	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
9714  	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
9715  	return storage;
9716  }
9717  EXPORT_SYMBOL(dev_get_stats);
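
/*
 * A minimal consumer-side sketch: counters are read into a caller-supplied
 * buffer, as the procfs and rtnetlink code does.  The caller is assumed to
 * keep @dev alive (e.g. by holding RTNL or a reference).
 */
#include <linux/netdevice.h>

static u64 sample_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;

	dev_get_stats(dev, &temp);	/* fills and returns @temp */
	return temp.rx_packets;
}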
9718  
9719  struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
9720  {
9721  	struct netdev_queue *queue = dev_ingress_queue(dev);
9722  
9723  #ifdef CONFIG_NET_CLS_ACT
9724  	if (queue)
9725  		return queue;
9726  	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
9727  	if (!queue)
9728  		return NULL;
9729  	netdev_init_one_queue(dev, queue, NULL);
9730  	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
9731  	queue->qdisc_sleeping = &noop_qdisc;
9732  	rcu_assign_pointer(dev->ingress_queue, queue);
9733  #endif
9734  	return queue;
9735  }
9736  
9737  static const struct ethtool_ops default_ethtool_ops;
9738  
9739  void netdev_set_default_ethtool_ops(struct net_device *dev,
9740  				    const struct ethtool_ops *ops)
9741  {
9742  	if (dev->ethtool_ops == &default_ethtool_ops)
9743  		dev->ethtool_ops = ops;
9744  }
9745  EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
9746  
9747  void netdev_freemem(struct net_device *dev)
9748  {
9749  	char *addr = (char *)dev - dev->padded;
9750  
9751  	kvfree(addr);
9752  }
9753  
9754  /**
9755   * alloc_netdev_mqs - allocate network device
9756   * @sizeof_priv: size of private data to allocate space for
9757   * @name: device name format string
9758   * @name_assign_type: origin of device name
9759   * @setup: callback to initialize device
9760   * @txqs: the number of TX subqueues to allocate
9761   * @rxqs: the number of RX subqueues to allocate
9762   *
9763   * Allocates a struct net_device with private data area for driver use
9764   * and performs basic initialization.  Also allocates subqueue structs
9765   * for each queue on the device.
9766   */
9767  struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
9768  		unsigned char name_assign_type,
9769  		void (*setup)(struct net_device *),
9770  		unsigned int txqs, unsigned int rxqs)
9771  {
9772  	struct net_device *dev;
9773  	unsigned int alloc_size;
9774  	struct net_device *p;
9775  
9776  	BUG_ON(strlen(name) >= sizeof(dev->name));
9777  
9778  	if (txqs < 1) {
9779  		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
9780  		return NULL;
9781  	}
9782  
9783  	if (rxqs < 1) {
9784  		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
9785  		return NULL;
9786  	}
9787  
9788  	alloc_size = sizeof(struct net_device);
9789  	if (sizeof_priv) {
9790  		/* ensure 32-byte alignment of private area */
9791  		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
9792  		alloc_size += sizeof_priv;
9793  	}
9794  	/* ensure 32-byte alignment of whole construct */
9795  	alloc_size += NETDEV_ALIGN - 1;
9796  
9797  	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9798  	if (!p)
9799  		return NULL;
9800  
9801  	dev = PTR_ALIGN(p, NETDEV_ALIGN);
9802  	dev->padded = (char *)dev - (char *)p;
9803  
9804  	dev->pcpu_refcnt = alloc_percpu(int);
9805  	if (!dev->pcpu_refcnt)
9806  		goto free_dev;
9807  
9808  	if (dev_addr_init(dev))
9809  		goto free_pcpu;
9810  
9811  	dev_mc_init(dev);
9812  	dev_uc_init(dev);
9813  
9814  	dev_net_set(dev, &init_net);
9815  
9816  	netdev_register_lockdep_key(dev);
9817  
9818  	dev->gso_max_size = GSO_MAX_SIZE;
9819  	dev->gso_max_segs = GSO_MAX_SEGS;
9820  	dev->upper_level = 1;
9821  	dev->lower_level = 1;
9822  
9823  	INIT_LIST_HEAD(&dev->napi_list);
9824  	INIT_LIST_HEAD(&dev->unreg_list);
9825  	INIT_LIST_HEAD(&dev->close_list);
9826  	INIT_LIST_HEAD(&dev->link_watch_list);
9827  	INIT_LIST_HEAD(&dev->adj_list.upper);
9828  	INIT_LIST_HEAD(&dev->adj_list.lower);
9829  	INIT_LIST_HEAD(&dev->ptype_all);
9830  	INIT_LIST_HEAD(&dev->ptype_specific);
9831  	INIT_LIST_HEAD(&dev->net_notifier_list);
9832  #ifdef CONFIG_NET_SCHED
9833  	hash_init(dev->qdisc_hash);
9834  #endif
9835  	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
9836  	setup(dev);
9837  
9838  	if (!dev->tx_queue_len) {
9839  		dev->priv_flags |= IFF_NO_QUEUE;
9840  		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
9841  	}
9842  
9843  	dev->num_tx_queues = txqs;
9844  	dev->real_num_tx_queues = txqs;
9845  	if (netif_alloc_netdev_queues(dev))
9846  		goto free_all;
9847  
9848  	dev->num_rx_queues = rxqs;
9849  	dev->real_num_rx_queues = rxqs;
9850  	if (netif_alloc_rx_queues(dev))
9851  		goto free_all;
9852  
9853  	strcpy(dev->name, name);
9854  	dev->name_assign_type = name_assign_type;
9855  	dev->group = INIT_NETDEV_GROUP;
9856  	if (!dev->ethtool_ops)
9857  		dev->ethtool_ops = &default_ethtool_ops;
9858  
9859  	nf_hook_ingress_init(dev);
9860  
9861  	return dev;
9862  
9863  free_all:
9864  	free_netdev(dev);
9865  	return NULL;
9866  
9867  free_pcpu:
9868  	free_percpu(dev->pcpu_refcnt);
9869  free_dev:
9870  	netdev_freemem(dev);
9871  	return NULL;
9872  }
9873  EXPORT_SYMBOL(alloc_netdev_mqs);
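
/*
 * A minimal usage sketch, assuming a hypothetical 4-TX/4-RX-queue device
 * "mydev"; mydev_setup() and the "mydev%d" template are assumptions, and
 * NET_NAME_UNKNOWN indicates the name did not come from userspace.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void mydev_setup(struct net_device *dev)
{
	ether_setup(dev);	/* Ethernet defaults: MTU, header ops, ... */
}

static struct net_device *mydev_alloc(void)
{
	return alloc_netdev_mqs(0, "mydev%d", NET_NAME_UNKNOWN,
				mydev_setup, 4, 4);
}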
9874  
9875  /**
9876   * free_netdev - free network device
9877   * @dev: device
9878   *
9879   * This function does the last stage of destroying an allocated device
9880   * interface. The reference to the device object is released. If this
9881   * is the last reference then it will be freed. Must be called in process
9882   * context.
9883   */
9884  void free_netdev(struct net_device *dev)
9885  {
9886  	struct napi_struct *p, *n;
9887  
9888  	might_sleep();
9889  	netif_free_tx_queues(dev);
9890  	netif_free_rx_queues(dev);
9891  
9892  	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
9893  
9894  	/* Flush device addresses */
9895  	dev_addr_flush(dev);
9896  
9897  	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
9898  		netif_napi_del(p);
9899  
9900  	free_percpu(dev->pcpu_refcnt);
9901  	dev->pcpu_refcnt = NULL;
9902  	free_percpu(dev->xdp_bulkq);
9903  	dev->xdp_bulkq = NULL;
9904  
9905  	netdev_unregister_lockdep_key(dev);
9906  
9907  	/*  Compatibility with error handling in drivers */
9908  	if (dev->reg_state == NETREG_UNINITIALIZED) {
9909  		netdev_freemem(dev);
9910  		return;
9911  	}
9912  
9913  	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
9914  	dev->reg_state = NETREG_RELEASED;
9915  
9916  	/* will free via device release */
9917  	put_device(&dev->dev);
9918  }
9919  EXPORT_SYMBOL(free_netdev);
9920  
9921  /**
9922   *	synchronize_net -  Synchronize with packet receive processing
9923   *
9924   *	Wait for packets currently being received to be done.
9925   *	Does not block later packets from starting.
9926   */
9927  void synchronize_net(void)
9928  {
9929  	might_sleep();
9930  	if (rtnl_is_locked())
9931  		synchronize_rcu_expedited();
9932  	else
9933  		synchronize_rcu();
9934  }
9935  EXPORT_SYMBOL(synchronize_net);
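
/*
 * A minimal sketch of the unpublish-then-wait pattern this helper is meant
 * for, assuming a hypothetical hook object on an RCU-protected list; struct
 * myhook and its list are assumptions.
 */
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct myhook {
	struct list_head list;		/* on an RCU-protected list */
};

static void myhook_remove(struct myhook *h)
{
	list_del_rcu(&h->list);		/* new packets can no longer find it */
	synchronize_net();		/* wait out receive paths already running */
	kfree(h);			/* now safe to free */
}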
9936  
9937  /**
9938   *	unregister_netdevice_queue - remove device from the kernel
9939   *	@dev: device
9940   *	@head: list
9941   *
9942   *	This function shuts down a device interface and removes it
9943   *	from the kernel tables.
9944   *	If @head is not NULL, the device is queued to be unregistered later.
9945   *
9946   *	Callers must hold the rtnl semaphore.  You may want
9947   *	unregister_netdev() instead of this.
9948   */
9949  
9950  void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
9951  {
9952  	ASSERT_RTNL();
9953  
9954  	if (head) {
9955  		list_move_tail(&dev->unreg_list, head);
9956  	} else {
9957  		rollback_registered(dev);
9958  		/* Finish processing unregister after unlock */
9959  		net_set_todo(dev);
9960  	}
9961  }
9962  EXPORT_SYMBOL(unregister_netdevice_queue);
9963  
9964  /**
9965   *	unregister_netdevice_many - unregister many devices
9966   *	@head: list of devices
9967   *
9968   *  Note: As most callers use a stack-allocated list_head,
9969   *  we force a list_del() to make sure the stack won't be corrupted later.
9970   */
9971  void unregister_netdevice_many(struct list_head *head)
9972  {
9973  	struct net_device *dev;
9974  
9975  	if (!list_empty(head)) {
9976  		rollback_registered_many(head);
9977  		list_for_each_entry(dev, head, unreg_list)
9978  			net_set_todo(dev);
9979  		list_del(head);
9980  	}
9981  }
9982  EXPORT_SYMBOL(unregister_netdevice_many);
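
/*
 * A minimal usage sketch: several unregistrations batched under one RTNL
 * hold with a stack-allocated list, the case the list_del() note above is
 * about.  The "kill everything in a group" policy is an assumption;
 * rtnetlink's group delete works along these lines.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void sample_kill_group(struct net *net, int group)
{
	struct net_device *dev;
	LIST_HEAD(kill_list);		/* on the stack, hence the list_del() */

	rtnl_lock();
	for_each_netdev(net, dev) {
		if (dev->group == group)
			unregister_netdevice_queue(dev, &kill_list);
	}
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}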
9983  
9984  /**
9985   *	unregister_netdev - remove device from the kernel
9986   *	@dev: device
9987   *
9988   *	This function shuts down a device interface and removes it
9989   *	from the kernel tables.
9990   *
9991   *	This is just a wrapper for unregister_netdevice that takes
9992   *	the rtnl semaphore.  In general you want to use this and not
9993   *	unregister_netdevice.
9994   */
9995  void unregister_netdev(struct net_device *dev)
9996  {
9997  	rtnl_lock();
9998  	unregister_netdevice(dev);
9999  	rtnl_unlock();
10000  }
10001  EXPORT_SYMBOL(unregister_netdev);
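
/*
 * A minimal sketch of the usual teardown order in a driver remove path,
 * mirroring the probe sketch after register_netdev() above.
 */
#include <linux/netdevice.h>

static void sample_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes and releases the rtnl semaphore */
	free_netdev(dev);		/* drops the final reference */
}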
10002  
10003  /**
10004   *	dev_change_net_namespace - move device to a different network namespace
10005   *	@dev: device
10006   *	@net: network namespace
10007   *	@pat: If not NULL name pattern to try if the current device name
10008   *	      is already taken in the destination network namespace.
10009   *
10010   *	This function shuts down a device interface and moves it
10011   *	to a new network namespace. On success 0 is returned, on
10012   *	a failure a netagive errno code is returned.
10013   *	a failure a negative errno code is returned.
10014   *	Callers must hold the rtnl semaphore.
10015   */
10016  
10017  int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
10018  {
10019  	int err, new_nsid, new_ifindex;
10020  
10021  	ASSERT_RTNL();
10022  
10023  	/* Don't allow namespace local devices to be moved. */
10024  	err = -EINVAL;
10025  	if (dev->features & NETIF_F_NETNS_LOCAL)
10026  		goto out;
10027  
10028  	/* Ensure the device has been registered */
10029  	if (dev->reg_state != NETREG_REGISTERED)
10030  		goto out;
10031  
10032  	/* Get out if there is nothing to do */
10033  	err = 0;
10034  	if (net_eq(dev_net(dev), net))
10035  		goto out;
10036  
10037  	/* Pick the destination device name, and ensure
10038  	 * we can use it in the destination network namespace.
10039  	 */
10040  	err = -EEXIST;
10041  	if (__dev_get_by_name(net, dev->name)) {
10042  		/* We get here if we can't use the current device name */
10043  		if (!pat)
10044  			goto out;
10045  		err = dev_get_valid_name(net, dev, pat);
10046  		if (err < 0)
10047  			goto out;
10048  	}
10049  
10050  	/*
10051  	 * And now a mini version of register_netdevice and unregister_netdevice.
10052  	 */
10053  
10054  	/* If device is running close it first. */
10055  	dev_close(dev);
10056  
10057  	/* And unlink it from device chain */
10058  	unlist_netdevice(dev);
10059  
10060  	synchronize_net();
10061  
10062  	/* Shutdown queueing discipline. */
10063  	dev_shutdown(dev);
10064  
10065  	/* Notify protocols that we are about to destroy
10066  	 * this device. They should clean all the things.
10067  	 *
10068  	 * Note that dev->reg_state stays at NETREG_REGISTERED.
10069  	 * This is wanted because this way 8021q and macvlan know
10070  	 * the device is just moving and can keep their slaves up.
10071  	 */
10072  	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10073  	rcu_barrier();
10074  
10075  	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
10076  	/* If there is an ifindex conflict assign a new one */
10077  	if (__dev_get_by_index(net, dev->ifindex))
10078  		new_ifindex = dev_new_index(net);
10079  	else
10080  		new_ifindex = dev->ifindex;
10081  
10082  	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
10083  			    new_ifindex);
10084  
10085  	/*
10086  	 *	Flush the unicast and multicast chains
10087  	 */
10088  	dev_uc_flush(dev);
10089  	dev_mc_flush(dev);
10090  
10091  	/* Send a netdev-removed uevent to the old namespace */
10092  	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
10093  	netdev_adjacent_del_links(dev);
10094  
10095  	/* Move per-net netdevice notifiers that are following the netdevice */
10096  	move_netdevice_notifiers_dev_net(dev, net);
10097  
10098  	/* Actually switch the network namespace */
10099  	dev_net_set(dev, net);
10100  	dev->ifindex = new_ifindex;
10101  
10102  	/* Send a netdev-add uevent to the new namespace */
10103  	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
10104  	netdev_adjacent_add_links(dev);
10105  
10106  	/* Fixup kobjects */
10107  	err = device_rename(&dev->dev, dev->name);
10108  	WARN_ON(err);
10109  
10110  	/* Add the device back in the hashes */
10111  	list_netdevice(dev);
10112  
10113  	/* Notify protocols that a new device appeared. */
10114  	call_netdevice_notifiers(NETDEV_REGISTER, dev);
10115  
10116  	/*
10117  	 *	Prevent userspace races by waiting until the network
10118  	 *	device is fully set up before sending notifications.
10119  	 */
10120  	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10121  
10122  	synchronize_net();
10123  	err = 0;
10124  out:
10125  	return err;
10126  }
10127  EXPORT_SYMBOL_GPL(dev_change_net_namespace);
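
/*
 * A minimal usage sketch: the device is moved into a namespace the caller
 * has already looked up and holds (e.g. via get_net_ns_by_fd()); the
 * "dev%d" fallback follows the @pat kerneldoc above.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int sample_move_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();
	return err;
}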
10128  
10129  static int dev_cpu_dead(unsigned int oldcpu)
10130  {
10131  	struct sk_buff **list_skb;
10132  	struct sk_buff *skb;
10133  	unsigned int cpu;
10134  	struct softnet_data *sd, *oldsd, *remsd = NULL;
10135  
10136  	local_irq_disable();
10137  	cpu = smp_processor_id();
10138  	sd = &per_cpu(softnet_data, cpu);
10139  	oldsd = &per_cpu(softnet_data, oldcpu);
10140  
10141  	/* Find end of our completion_queue. */
10142  	list_skb = &sd->completion_queue;
10143  	while (*list_skb)
10144  		list_skb = &(*list_skb)->next;
10145  	/* Append completion queue from offline CPU. */
10146  	*list_skb = oldsd->completion_queue;
10147  	oldsd->completion_queue = NULL;
10148  
10149  	/* Append output queue from offline CPU. */
10150  	if (oldsd->output_queue) {
10151  		*sd->output_queue_tailp = oldsd->output_queue;
10152  		sd->output_queue_tailp = oldsd->output_queue_tailp;
10153  		oldsd->output_queue = NULL;
10154  		oldsd->output_queue_tailp = &oldsd->output_queue;
10155  	}
10156  	/* Append NAPI poll list from offline CPU, with one exception:
10157  	 * process_backlog() must be called by the CPU owning the percpu backlog.
10158  	 * We properly handle process_queue & input_pkt_queue later.
10159  	 */
10160  	while (!list_empty(&oldsd->poll_list)) {
10161  		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
10162  							    struct napi_struct,
10163  							    poll_list);
10164  
10165  		list_del_init(&napi->poll_list);
10166  		if (napi->poll == process_backlog)
10167  			napi->state = 0;
10168  		else
10169  			____napi_schedule(sd, napi);
10170  	}
10171  
10172  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
10173  	local_irq_enable();
10174  
10175  #ifdef CONFIG_RPS
10176  	remsd = oldsd->rps_ipi_list;
10177  	oldsd->rps_ipi_list = NULL;
10178  #endif
10179  	/* send out pending IPI's on offline CPU */
10180  	net_rps_send_ipi(remsd);
10181  
10182  	/* Process offline CPU's input_pkt_queue */
10183  	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
10184  		netif_rx_ni(skb);
10185  		input_queue_head_incr(oldsd);
10186  	}
10187  	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
10188  		netif_rx_ni(skb);
10189  		input_queue_head_incr(oldsd);
10190  	}
10191  
10192  	return 0;
10193  }
10194  
10195  /**
10196   *	netdev_increment_features - increment feature set by one
10197   *	@all: current feature set
10198   *	@one: new feature set
10199   *	@mask: mask feature set
10200   *
10201   *	Computes a new feature set after adding a device with feature set
10202   *	@one to the master device with current feature set @all.  Will not
10203   *	enable anything that is off in @mask. Returns the new feature set.
10204   */
10205  netdev_features_t netdev_increment_features(netdev_features_t all,
10206  	netdev_features_t one, netdev_features_t mask)
10207  {
10208  	if (mask & NETIF_F_HW_CSUM)
10209  		mask |= NETIF_F_CSUM_MASK;
10210  	mask |= NETIF_F_VLAN_CHALLENGED;
10211  
10212  	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
10213  	all &= one | ~NETIF_F_ALL_FOR_ALL;
10214  
10215  	/* If one device supports hw checksumming, set for all. */
10216  	if (all & NETIF_F_HW_CSUM)
10217  		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
10218  
10219  	return all;
10220  }
10221  EXPORT_SYMBOL(netdev_increment_features);
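
/*
 * A minimal sketch, assuming a hypothetical aggregating master "agg" that
 * folds the feature sets of its lower devices together in the spirit of
 * bonding's feature recomputation; AGG_FEATURE_MASK is an assumption and
 * the caller is assumed to hold RTNL.  The result would then be applied by
 * the caller, e.g. via netdev_change_features().
 */
#include <linux/netdevice.h>

#define AGG_FEATURE_MASK	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_ALL_TSO)

static netdev_features_t agg_compute_features(struct net_device *master)
{
	/* All-for-all bits survive only if every lower device has them. */
	netdev_features_t features = AGG_FEATURE_MASK & NETIF_F_ALL_FOR_ALL;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, lower, iter)
		features = netdev_increment_features(features,
						     lower->features,
						     AGG_FEATURE_MASK);
	return features;
}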
10222  
10223  static struct hlist_head * __net_init netdev_create_hash(void)
10224  {
10225  	int i;
10226  	struct hlist_head *hash;
10227  
10228  	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
10229  	if (hash != NULL)
10230  		for (i = 0; i < NETDEV_HASHENTRIES; i++)
10231  			INIT_HLIST_HEAD(&hash[i]);
10232  
10233  	return hash;
10234  }
10235  
10236  /* Initialize per network namespace state */
10237  static int __net_init netdev_init(struct net *net)
10238  {
10239  	BUILD_BUG_ON(GRO_HASH_BUCKETS >
10240  		     8 * sizeof_field(struct napi_struct, gro_bitmask));
10241  
10242  	if (net != &init_net)
10243  		INIT_LIST_HEAD(&net->dev_base_head);
10244  
10245  	net->dev_name_head = netdev_create_hash();
10246  	if (net->dev_name_head == NULL)
10247  		goto err_name;
10248  
10249  	net->dev_index_head = netdev_create_hash();
10250  	if (net->dev_index_head == NULL)
10251  		goto err_idx;
10252  
10253  	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
10254  
10255  	return 0;
10256  
10257  err_idx:
10258  	kfree(net->dev_name_head);
10259  err_name:
10260  	return -ENOMEM;
10261  }
10262  
10263  /**
10264   *	netdev_drivername - network driver for the device
10265   *	@dev: network device
10266   *
10267   *	Determine network driver for device.
10268   */
10269  const char *netdev_drivername(const struct net_device *dev)
10270  {
10271  	const struct device_driver *driver;
10272  	const struct device *parent;
10273  	const char *empty = "";
10274  
10275  	parent = dev->dev.parent;
10276  	if (!parent)
10277  		return empty;
10278  
10279  	driver = parent->driver;
10280  	if (driver && driver->name)
10281  		return driver->name;
10282  	return empty;
10283  }
10284  
10285  static void __netdev_printk(const char *level, const struct net_device *dev,
10286  			    struct va_format *vaf)
10287  {
10288  	if (dev && dev->dev.parent) {
10289  		dev_printk_emit(level[1] - '0',
10290  				dev->dev.parent,
10291  				"%s %s %s%s: %pV",
10292  				dev_driver_string(dev->dev.parent),
10293  				dev_name(dev->dev.parent),
10294  				netdev_name(dev), netdev_reg_state(dev),
10295  				vaf);
10296  	} else if (dev) {
10297  		printk("%s%s%s: %pV",
10298  		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
10299  	} else {
10300  		printk("%s(NULL net_device): %pV", level, vaf);
10301  	}
10302  }
10303  
10304  void netdev_printk(const char *level, const struct net_device *dev,
10305  		   const char *format, ...)
10306  {
10307  	struct va_format vaf;
10308  	va_list args;
10309  
10310  	va_start(args, format);
10311  
10312  	vaf.fmt = format;
10313  	vaf.va = &args;
10314  
10315  	__netdev_printk(level, dev, &vaf);
10316  
10317  	va_end(args);
10318  }
10319  EXPORT_SYMBOL(netdev_printk);
10320  
10321  #define define_netdev_printk_level(func, level)			\
10322  void func(const struct net_device *dev, const char *fmt, ...)	\
10323  {								\
10324  	struct va_format vaf;					\
10325  	va_list args;						\
10326  								\
10327  	va_start(args, fmt);					\
10328  								\
10329  	vaf.fmt = fmt;						\
10330  	vaf.va = &args;						\
10331  								\
10332  	__netdev_printk(level, dev, &vaf);			\
10333  								\
10334  	va_end(args);						\
10335  }								\
10336  EXPORT_SYMBOL(func);
10337  
10338  define_netdev_printk_level(netdev_emerg, KERN_EMERG);
10339  define_netdev_printk_level(netdev_alert, KERN_ALERT);
10340  define_netdev_printk_level(netdev_crit, KERN_CRIT);
10341  define_netdev_printk_level(netdev_err, KERN_ERR);
10342  define_netdev_printk_level(netdev_warn, KERN_WARNING);
10343  define_netdev_printk_level(netdev_notice, KERN_NOTICE);
10344  define_netdev_printk_level(netdev_info, KERN_INFO);
10345  
10346  static void __net_exit netdev_exit(struct net *net)
10347  {
10348  	kfree(net->dev_name_head);
10349  	kfree(net->dev_index_head);
10350  	if (net != &init_net)
10351  		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
10352  }
10353  
10354  static struct pernet_operations __net_initdata netdev_net_ops = {
10355  	.init = netdev_init,
10356  	.exit = netdev_exit,
10357  };
10358  
10359  static void __net_exit default_device_exit(struct net *net)
10360  {
10361  	struct net_device *dev, *aux;
10362  	/*
10363  	 * Push all migratable network devices back to the
10364  	 * initial network namespace
10365  	 */
10366  	rtnl_lock();
10367  	for_each_netdev_safe(net, dev, aux) {
10368  		int err;
10369  		char fb_name[IFNAMSIZ];
10370  
10371  		/* Ignore unmovable devices (e.g. loopback) */
10372  		if (dev->features & NETIF_F_NETNS_LOCAL)
10373  			continue;
10374  
10375  		/* Leave virtual devices for the generic cleanup */
10376  		if (dev->rtnl_link_ops)
10377  			continue;
10378  
10379  		/* Push remaining network devices to init_net */
10380  		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
10381  		if (__dev_get_by_name(&init_net, fb_name))
10382  			snprintf(fb_name, IFNAMSIZ, "dev%%d");
10383  		err = dev_change_net_namespace(dev, &init_net, fb_name);
10384  		if (err) {
10385  			pr_emerg("%s: failed to move %s to init_net: %d\n",
10386  				 __func__, dev->name, err);
10387  			BUG();
10388  		}
10389  	}
10390  	rtnl_unlock();
10391  }
10392  
10393  static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
10394  {
10395  	/* Return with the rtnl_lock held when there are no network
10396  	 * devices unregistering in any network namespace in net_list.
10397  	 */
10398  	struct net *net;
10399  	bool unregistering;
10400  	DEFINE_WAIT_FUNC(wait, woken_wake_function);
10401  
10402  	add_wait_queue(&netdev_unregistering_wq, &wait);
10403  	for (;;) {
10404  		unregistering = false;
10405  		rtnl_lock();
10406  		list_for_each_entry(net, net_list, exit_list) {
10407  			if (net->dev_unreg_count > 0) {
10408  				unregistering = true;
10409  				break;
10410  			}
10411  		}
10412  		if (!unregistering)
10413  			break;
10414  		__rtnl_unlock();
10415  
10416  		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
10417  	}
10418  	remove_wait_queue(&netdev_unregistering_wq, &wait);
10419  }
10420  
10421  static void __net_exit default_device_exit_batch(struct list_head *net_list)
10422  {
10423  	/* At exit all network devices must be removed from a network
10424  	 * namespace.  Do this in the reverse order of registration.
10425  	 * Do this across as many network namespaces as possible to
10426  	 * improve batching efficiency.
10427  	 */
10428  	struct net_device *dev;
10429  	struct net *net;
10430  	LIST_HEAD(dev_kill_list);
10431  
10432  	/* To prevent network device cleanup code from dereferencing
10433  	 * loopback devices or network devices that have been freed,
10434  	 * wait here for all pending unregistrations to complete
10435  	 * before unregistering the loopback device and allowing the
10436  	 * network namespace to be freed.
10437  	 *
10438  	 * The netdev todo list containing all network devices
10439  	 * unregistrations that happen in default_device_exit_batch
10440  	 * will run in the rtnl_unlock() at the end of
10441  	 * default_device_exit_batch.
10442  	 */
10443  	rtnl_lock_unregistering(net_list);
10444  	list_for_each_entry(net, net_list, exit_list) {
10445  		for_each_netdev_reverse(net, dev) {
10446  			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
10447  				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
10448  			else
10449  				unregister_netdevice_queue(dev, &dev_kill_list);
10450  		}
10451  	}
10452  	unregister_netdevice_many(&dev_kill_list);
10453  	rtnl_unlock();
10454  }
10455  
10456  static struct pernet_operations __net_initdata default_device_ops = {
10457  	.exit = default_device_exit,
10458  	.exit_batch = default_device_exit_batch,
10459  };
10460  
10461  /*
10462   *	Initialize the DEV module. At boot time this walks the device list and
10463   *	unhooks any devices that fail to initialise (normally hardware not
10464   *	present) and leaves us with a valid list of present and active devices.
10465   *
10466   */
10467  
10468  /*
10469   *       This is called single threaded during boot, so no need
10470   *       to take the rtnl semaphore.
10471   */
10472  static int __init net_dev_init(void)
10473  {
10474  	int i, rc = -ENOMEM;
10475  
10476  	BUG_ON(!dev_boot_phase);
10477  
10478  	if (dev_proc_init())
10479  		goto out;
10480  
10481  	if (netdev_kobject_init())
10482  		goto out;
10483  
10484  	INIT_LIST_HEAD(&ptype_all);
10485  	for (i = 0; i < PTYPE_HASH_SIZE; i++)
10486  		INIT_LIST_HEAD(&ptype_base[i]);
10487  
10488  	INIT_LIST_HEAD(&offload_base);
10489  
10490  	if (register_pernet_subsys(&netdev_net_ops))
10491  		goto out;
10492  
10493  	/*
10494  	 *	Initialise the packet receive queues.
10495  	 */
10496  
10497  	for_each_possible_cpu(i) {
10498  		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
10499  		struct softnet_data *sd = &per_cpu(softnet_data, i);
10500  
10501  		INIT_WORK(flush, flush_backlog);
10502  
10503  		skb_queue_head_init(&sd->input_pkt_queue);
10504  		skb_queue_head_init(&sd->process_queue);
10505  #ifdef CONFIG_XFRM_OFFLOAD
10506  		skb_queue_head_init(&sd->xfrm_backlog);
10507  #endif
10508  		INIT_LIST_HEAD(&sd->poll_list);
10509  		sd->output_queue_tailp = &sd->output_queue;
10510  #ifdef CONFIG_RPS
10511  		sd->csd.func = rps_trigger_softirq;
10512  		sd->csd.info = sd;
10513  		sd->cpu = i;
10514  #endif
10515  
10516  		init_gro_hash(&sd->backlog);
10517  		sd->backlog.poll = process_backlog;
10518  		sd->backlog.weight = weight_p;
10519  	}
10520  
10521  	dev_boot_phase = 0;
10522  
10523  	/* The loopback device is special: if any other network device
10524  	 * is present in a network namespace, the loopback device must
10525  	 * be present too. Since we now dynamically allocate and free the
10526  	 * loopback device, ensure this invariant is maintained by
10527  	 * keeping the loopback device as the first device on the
10528  	 * list of network devices, ensuring the loopback device
10529  	 * is the first device that appears and the last network device
10530  	 * that disappears.
10531  	 */
10532  	if (register_pernet_device(&loopback_net_ops))
10533  		goto out;
10534  
10535  	if (register_pernet_device(&default_device_ops))
10536  		goto out;
10537  
10538  	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
10539  	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
10540  
10541  	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
10542  				       NULL, dev_cpu_dead);
10543  	WARN_ON(rc < 0);
10544  	rc = 0;
10545  out:
10546  	return rc;
10547  }
10548  
10549  subsys_initcall(net_dev_init);
10550