// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

static void qdisc_maybe_clear_missed(struct Qdisc *q,
				     const struct netdev_queue *txq)
{
	clear_bit(__QDISC_STATE_MISSED, &q->state);

	/* Make sure the below netif_xmit_frozen_or_stopped()
	 * check happens after clearing STATE_MISSED.
	 */
	smp_mb__after_atomic();

	/* Check netif_xmit_frozen_or_stopped() again to make
	 * sure STATE_MISSED stays set if the STATE_MISSED set by
	 * netif_tx_wake_queue()'s rescheduling of net_tx_action()
	 * was cleared by the above clear_bit().
	 */
	if (!netif_xmit_frozen_or_stopped(txq))
		set_bit(__QDISC_STATE_MISSED, &q->state);
	else
		set_bit(__QDISC_STATE_DRAINING, &q->state);
}
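
/* Illustrative timeline (a sketch derived from the comments above, not
 * from the original source) of the race the re-check closes, assuming
 * one CPU runs this function while another wakes the tx queue:
 *
 *	CPU1 (netif_tx_wake_queue)	CPU0 (here)
 *	--------------------------	----------------------------------
 *	wakes queue, sets STATE_MISSED
 *					clear_bit(STATE_MISSED)
 *					smp_mb__after_atomic()
 *					!netif_xmit_frozen_or_stopped(txq)
 *					  -> set_bit(STATE_MISSED) again
 *
 * Without the re-set, CPU1's request to reschedule the qdisc could be
 * lost by CPU0's clear_bit().
 */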

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * the qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
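
/* A minimal sketch (not from the original source) of the pattern the
 * rules above imply for code that touches a qdisc's scheduling data
 * outside of enqueue/dequeue, assuming BHs are disabled as usual:
 *
 *	spinlock_t *root_lock = qdisc_lock(q);
 *
 *	spin_lock_bh(root_lock);
 *	... modify data participating in scheduling ...
 *	spin_unlock_bh(root_lock);
 *
 * Tree updates and walks additionally require the rtnl mutex (callers
 * typically run under ASSERT_RTNL()).
 */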

#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason for requeuing without the tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = SKB_XOFF_MAGIC;
			qdisc_maybe_clear_missed(q, txq);
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		/* it's still part of the queue */
		if (qdisc_is_percpu_stats(q)) {
			qdisc_qstats_cpu_requeues_inc(q);
			qdisc_qstats_cpu_backlog_inc(q, skb);
			qdisc_qstats_cpu_qlen_inc(q);
		} else {
			q->qstats.requeues++;
			qdisc_qstats_backlog_inc(q, skb);
			q->q.qlen++;
		}

		skb = next;
	}

	if (lock) {
		spin_unlock(lock);
		set_bit(__QDISC_STATE_MISSED, &q->state);
	} else {
		__netif_schedule(q);
	}
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb_mark_not_on_list(skb);
}
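
/* A worked example (illustrative numbers, not from the original source):
 * with Byte Queue Limits reporting 64 KB of available space on the txq,
 * qdisc_avail_bulklimit() returns 65536. After a first 1500-byte skb has
 * been dequeued, bytelimit starts at 64036, so roughly 42 more 1500-byte
 * packets (or e.g. one 64 KB GSO skb, which counts as a single packet
 * here) can be chained and handed to the driver in one sch_direct_xmit()
 * call, amortizing the per-transmit locking cost.
 */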

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be NULL if another cpu pulled gso_skb off in
		 * between the empty check and taking the lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skbs in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason for requeuing without the tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
			qdisc_maybe_clear_missed(q, txq);
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq)) {
		qdisc_maybe_clear_missed(q, txq);
		return skb;
	}

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;
		goto bulk;
	}
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning the qdisc running bit guarantees that only one CPU
 * can execute this function.
 *
 * Returns to the caller:
 *				false - hardware queue frozen, back off
 *				true  - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
		else
			qdisc_maybe_clear_missed(q, txq);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The qdisc running state guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to the device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 *  if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				false - queue is empty or throttled.
 *				true  - queue is not empty.
 *
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = dev_tx_weight;
	int packets;

	while (qdisc_restart(q, &packets)) {
		quota -= packets;
		if (quota <= 0) {
			if (q->flags & TCQ_F_NOLOCK)
				set_bit(__QDISC_STATE_MISSED, &q->state);
			else
				__netif_schedule(q);

			break;
		}
	}
}
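
/* Quota note (illustrative, not from the original source): dev_tx_weight
 * defaults to 64 and is derived from the net.core.dev_weight sysctl
 * (scaled by dev_weight_tx_bias). With the default, one __qdisc_run()
 * invocation transmits at most ~64 packets (each skb in a bulk-dequeued
 * chain counts, a GSO skb counts as one) before rescheduling itself, so
 * a single busy qdisc cannot monopolize the CPU in softirq context.
 */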

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	else if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);
	res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void netif_freeze_queues(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->tx_global_lock);
	netif_freeze_queues(dev);
}
EXPORT_SYMBOL(netif_tx_lock);

static void netif_unfreeze_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
}

void netif_tx_unlock(struct net_device *dev)
{
	netif_unfreeze_queues(dev);
	spin_unlock(&dev->tx_global_lock);
}
EXPORT_SYMBOL(netif_tx_unlock);

static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	spin_lock(&dev->tx_global_lock);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = READ_ONCE(txq->trans_start);
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					atomic_long_inc(&txq->trans_timeout);
					break;
				}
			}

			if (unlikely(some_queue_timedout)) {
				trace_net_dev_xmit_timeout(dev, i);
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev), i);
				netif_freeze_queues(dev);
				dev->netdev_ops->ndo_tx_timeout(dev, i);
				netif_unfreeze_queues(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	spin_unlock(&dev->tx_global_lock);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_carrier_event - report carrier state event
 *	@dev: network device
 *
 * Device has detected a carrier event but the carrier state wasn't changed.
 * Use in drivers when querying carrier state asynchronously, to avoid missing
 * events (link flaps) if link recovers before it's queried.
 */
void netif_carrier_event(struct net_device *dev)
{
	if (dev->reg_state == NETREG_UNINITIALIZED)
		return;
	atomic_inc(&dev->carrier_up_count);
	atomic_inc(&dev->carrier_down_count);
	linkwatch_fire_event(dev);
}
EXPORT_SYMBOL_GPL(netif_carrier_event);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here.
	 */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
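
/* Mapping sketch (derived from the table above; TC_PRIO_* values are
 * from <linux/pkt_sched.h>), with band 0 serviced first and band 2 last:
 *
 *	skb->priority & TC_PRIO_MAX	band
 *	TC_PRIO_CONTROL (7)		0
 *	TC_PRIO_INTERACTIVE (6)		0
 *	TC_PRIO_BESTEFFORT (0)		1
 *	TC_PRIO_INTERACTIVE_BULK (4)	1
 *	TC_PRIO_BULK (2)		2
 *
 * e.g. an skb with skb->priority = TC_PRIO_INTERACTIVE lands in band 0
 * and is dequeued before any best-effort traffic.
 */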

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err)) {
		if (qdisc_is_percpu_stats(qdisc))
			return qdisc_drop_cpu(skb, qdisc, to_free);
		else
			return qdisc_drop(skb, qdisc, to_free);
	}

	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	bool need_retry = true;
	int band;

retry:
	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_update_stats_at_dequeue(qdisc, skb);
	} else if (need_retry &&
		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
		/* Delay clearing the STATE_MISSED here to reduce
		 * the overhead of the second spin_trylock() in
		 * qdisc_run_begin() and of the __netif_schedule()
		 * call in qdisc_run_end().
		 */
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);

		/* Make sure dequeuing happens after clearing
		 * STATE_MISSED.
		 */
		smp_mb__after_atomic();

		need_retry = false;

		goto retry;
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* A NULL ring is possible if the destroy path is taken
		 * after a failed skb_array_init() in pfifo_fast_init().
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	if (qdisc_is_percpu_stats(qdisc)) {
		for_each_possible_cpu(i) {
			struct gnet_stats_queue *q;

			q = per_cpu_ptr(qdisc->cpu_qstats, i);
			q->backlog = 0;
			q->qlen = 0;
		}
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can bypass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* A NULL ring is possible if the destroy path is taken
		 * after a failed skb_array_init() in pfifo_fast_init().
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy the ring but no need to kfree_skb because a
		 * call to pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
					 GFP_KERNEL);
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;
	unsigned int size = sizeof(*sch) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));

	if (!sch)
		goto errout;
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	gnet_stats_basic_sync_init(&sch->bstats);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope as busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->seqlock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	kfree(sch);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
		trace_qdisc_create(ops, dev_queue->dev, parentid);
		return sch;
	}

	qdisc_put(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	trace_qdisc_reset(qdisc);

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree(qdisc);
}

static void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

static void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);

	qdisc_reset(qdisc);

	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	trace_qdisc_destroy(qdisc);

	call_rcu(&qdisc->rcu, qdisc_free_cb);
}

void qdisc_put(struct Qdisc *qdisc)
{
	if (!qdisc)
		return;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with rtnl mutex unlocked.
 * Intended to be used as an optimization: this function only takes
 * the rtnl lock if the qdisc reference counter reached zero.
 */

void qdisc_put_unlocked(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
	rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;
	else if (dev->type == ARPHRD_CAN)
		ops = &pfifo_fast_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc)
		return;

	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}

	/* Detect default qdisc setup/init failure and fall back to "noqueue" */
	if (dev->qdisc == &noop_qdisc) {
		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
		dev->priv_flags |= IFF_NO_QUEUE;
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
		dev->priv_flags ^= IFF_NO_QUEUE;
	}

#ifdef CONFIG_NET_SCHED
	if (dev->qdisc != &noop_qdisc)
		qdisc_hash_add(dev->qdisc, false);
#endif
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		WRITE_ONCE(dev_queue->trans_start, 0);
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create a default one for devices which need queueing,
	 * and noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void qdisc_deactivate(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;

	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		qdisc_deactivate(qdisc);
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
	}
}

static void dev_reset_queue(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *_unused)
{
	struct Qdisc *qdisc;
	bool nolock;

	qdisc = dev_queue->qdisc_sleeping;
	if (!qdisc)
		return;

	nolock = qdisc->flags & TCQ_F_NOLOCK;

	if (nolock)
		spin_lock_bh(&qdisc->seqlock);
	spin_lock_bh(qdisc_lock(qdisc));

	qdisc_reset(qdisc);

	spin_unlock_bh(qdisc_lock(qdisc));
	if (nolock) {
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
		spin_unlock_bh(&qdisc->seqlock);
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
	 * outstanding qdisc enqueuing calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	synchronize_net();

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);

		if (dev_ingress_queue(dev))
			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
	}

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev)) {
			/* wait_event() would avoid this sleep-loop but would
			 * require expensive checks in the fast paths of packet
			 * processing which isn't worth it.
			 */
			schedule_timeout_uninterruptible(1);
		}
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx)
{
	struct Qdisc *qdisc = dev->qdisc;

	if (qdisc->ops->change_real_num_tx)
		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
}

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{
#ifdef CONFIG_NET_SCHED
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int i;

	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
		/* Only update the default qdiscs we created,
		 * qdiscs with handles are always hashed.
		 */
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_del(qdisc);
	}
	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_add(qdisc, false);
	}
#endif
}
EXPORT_SYMBOL(mq_change_real_num_tx);

int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_put(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_put(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

/**
 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
 * @rate:   Rate to compute reciprocal division values of
 * @mult:   Multiplier for reciprocal division
 * @shift:  Shift for reciprocal division
 *
 * The multiplier and shift for reciprocal division by rate are stored
 * in mult and shift.
 *
 * The deal here is to replace a divide by a reciprocal one
 * in fast path (a reciprocal divide is a multiply and a shift)
 *
 * The normal formula would be:
 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
 *
 * We compute mult/shift to use instead:
 *  time_in_ns = (len * mult) >> shift;
 *
 * We try to get the highest possible mult value for accuracy,
 * but have to make sure no overflows will ever happen.
 *
 * reciprocal_value() is not used here as it doesn't handle 64-bit values.
 */
static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
{
	u64 factor = NSEC_PER_SEC;

	*mult = 1;
	*shift = 0;

	if (rate <= 0)
		return;

	for (;;) {
		*mult = div64_u64(factor, rate);
		if (*mult & (1U << 31) || factor & (1ULL << 63))
			break;
		factor <<= 1;
		(*shift)++;
	}
}
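
/* A worked example (illustrative, not part of the original source):
 * for rate = 125,000,000 bytes/sec (1 Gbit/s), the loop above doubles
 * factor from NSEC_PER_SEC until mult = factor / rate reaches bit 31:
 *
 *	factor = 10^9 << 28,  mult = 8 << 28 = 0x80000000,  shift = 28
 *
 * A 1500-byte packet then costs
 *
 *	time_in_ns = (1500 * 0x80000000ULL) >> 28 = 12000 ns,
 *
 * which matches the exact 1500 / 125e6 s = 12 usec, with no 64-bit
 * division left in the fast path.
 */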

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
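
/* Usage sketch (assumption: psched_l2t_ns() from <net/sch_generic.h> is
 * the typical consumer of the precomputed values, as in rate-limiting
 * qdiscs such as tbf or htb):
 *
 *	struct psched_ratecfg rate;
 *
 *	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
 *	...
 *	// in the fast path, cost of one skb in nanoseconds:
 *	u64 ns = psched_l2t_ns(&rate, qdisc_pkt_len(skb));
 */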

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
{
	r->rate_pkts_ps = pktrate64;
	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ppscfg_precompute);

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	/* Protected with chain0->filter_chain_lock.
	 * Can't access chain directly because tp_head can be NULL.
	 */
	struct mini_Qdisc *miniq_old =
		rcu_dereference_protected(*miniqp->p_miniq, 1);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
	} else {
		miniq = miniq_old != &miniqp->miniq1 ?
			&miniqp->miniq1 : &miniqp->miniq2;

		/* We need to make sure that readers won't see the miniq
		 * we are about to modify. So ensure that at least one RCU
		 * grace period has elapsed since the miniq was made
		 * inactive.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			cond_synchronize_rcu(miniq->rcu_state);
		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
			synchronize_rcu_expedited();

		miniq->filter_list = tp_head;
		rcu_assign_pointer(*miniqp->p_miniq, miniq);
	}

	if (miniq_old)
		/* This is the counterpart of the rcu sync above. We need
		 * to block potential new users of miniq_old until all
		 * readers are done with it.
		 */
		miniq_old->rcu_state = start_poll_synchronize_rcu();
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);
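
/* Double-buffering sketch (derived from the code above): miniq1 and
 * miniq2 alternate as the active mini_Qdisc, so an update never touches
 * the buffer RCU readers may still hold:
 *
 *	swap #1: miniq1 published, miniq2 idle
 *	swap #2: write tp_head into miniq2, publish it, then record
 *		 miniq1's rcu_state via start_poll_synchronize_rcu()
 *	swap #3: reuse miniq1 only once poll_state_synchronize_rcu()
 *		 (or cond_synchronize_rcu() on PREEMPT_RT) confirms a
 *		 grace period has elapsed since it went inactive
 */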

void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block)
{
	miniqp->miniq1.block = block;
	miniqp->miniq2.block = block;
}
EXPORT_SYMBOL(mini_qdisc_pair_block_init);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);
1602