xref: /linux/net/sched/sch_generic.c (revision c4101e55974cc7d835fbd2d8e01553a3f61e9e75)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/sch_generic.c	Generic packet scheduler routines.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
7  *              - Ingress support
8  */
9 
10 #include <linux/bitops.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/netdevice.h>
18 #include <linux/skbuff.h>
19 #include <linux/rtnetlink.h>
20 #include <linux/init.h>
21 #include <linux/rcupdate.h>
22 #include <linux/list.h>
23 #include <linux/slab.h>
24 #include <linux/if_vlan.h>
25 #include <linux/skb_array.h>
26 #include <linux/if_macvlan.h>
27 #include <net/sch_generic.h>
28 #include <net/pkt_sched.h>
29 #include <net/dst.h>
30 #include <trace/events/qdisc.h>
31 #include <trace/events/net.h>
32 #include <net/xfrm.h>
33 
34 /* Qdisc to use by default */
35 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
36 EXPORT_SYMBOL(default_qdisc_ops);
37 
38 static void qdisc_maybe_clear_missed(struct Qdisc *q,
39 				     const struct netdev_queue *txq)
40 {
41 	clear_bit(__QDISC_STATE_MISSED, &q->state);
42 
43 	/* Make sure the netif_xmit_frozen_or_stopped() check below
44 	 * happens after clearing STATE_MISSED.
45 	 */
46 	smp_mb__after_atomic();
47 
48 	/* Check netif_xmit_frozen_or_stopped() again to make sure
49 	 * STATE_MISSED is set again in case a STATE_MISSED set by
50 	 * netif_tx_wake_queue()'s rescheduling of net_tx_action()
51 	 * was cleared by the above clear_bit().
52 	 */
53 	if (!netif_xmit_frozen_or_stopped(txq))
54 		set_bit(__QDISC_STATE_MISSED, &q->state);
55 	else
56 		set_bit(__QDISC_STATE_DRAINING, &q->state);
57 }
58 
59 /* Main transmission queue. */
60 
61 /* Modifications to data participating in scheduling must be protected with
62  * qdisc_lock(qdisc) spinlock.
63  *
64  * The idea is the following:
65  * - enqueue, dequeue are serialized via qdisc root lock
66  * - ingress filtering is also serialized via qdisc root lock
67  * - updates to tree and tree walking are only done under the rtnl mutex.
68  */
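
/* Illustrative sketch (editorial addition, not in the original source): the
 * helpers below that touch gso_skb or skb_bad_txq of a lockless
 * (TCQ_F_NOLOCK) qdisc all follow the same conditional-locking idiom,
 * roughly:
 *
 *	spinlock_t *lock = NULL;
 *
 *	if (q->flags & TCQ_F_NOLOCK) {
 *		lock = qdisc_lock(q);
 *		spin_lock(lock);
 *	}
 *	// ... manipulate q->gso_skb / q->skb_bad_txq ...
 *	if (lock)
 *		spin_unlock(lock);
 */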
69 
70 #define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
71 
72 static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
73 {
74 	const struct netdev_queue *txq = q->dev_queue;
75 	spinlock_t *lock = NULL;
76 	struct sk_buff *skb;
77 
78 	if (q->flags & TCQ_F_NOLOCK) {
79 		lock = qdisc_lock(q);
80 		spin_lock(lock);
81 	}
82 
83 	skb = skb_peek(&q->skb_bad_txq);
84 	if (skb) {
85 		/* check the reason for requeuing without taking the tx lock first */
86 		txq = skb_get_tx_queue(txq->dev, skb);
87 		if (!netif_xmit_frozen_or_stopped(txq)) {
88 			skb = __skb_dequeue(&q->skb_bad_txq);
89 			if (qdisc_is_percpu_stats(q)) {
90 				qdisc_qstats_cpu_backlog_dec(q, skb);
91 				qdisc_qstats_cpu_qlen_dec(q);
92 			} else {
93 				qdisc_qstats_backlog_dec(q, skb);
94 				q->q.qlen--;
95 			}
96 		} else {
97 			skb = SKB_XOFF_MAGIC;
98 			qdisc_maybe_clear_missed(q, txq);
99 		}
100 	}
101 
102 	if (lock)
103 		spin_unlock(lock);
104 
105 	return skb;
106 }
107 
108 static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
109 {
110 	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
111 
112 	if (unlikely(skb))
113 		skb = __skb_dequeue_bad_txq(q);
114 
115 	return skb;
116 }
117 
118 static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
119 					     struct sk_buff *skb)
120 {
121 	spinlock_t *lock = NULL;
122 
123 	if (q->flags & TCQ_F_NOLOCK) {
124 		lock = qdisc_lock(q);
125 		spin_lock(lock);
126 	}
127 
128 	__skb_queue_tail(&q->skb_bad_txq, skb);
129 
130 	if (qdisc_is_percpu_stats(q)) {
131 		qdisc_qstats_cpu_backlog_inc(q, skb);
132 		qdisc_qstats_cpu_qlen_inc(q);
133 	} else {
134 		qdisc_qstats_backlog_inc(q, skb);
135 		q->q.qlen++;
136 	}
137 
138 	if (lock)
139 		spin_unlock(lock);
140 }
141 
142 static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
143 {
144 	spinlock_t *lock = NULL;
145 
146 	if (q->flags & TCQ_F_NOLOCK) {
147 		lock = qdisc_lock(q);
148 		spin_lock(lock);
149 	}
150 
151 	while (skb) {
152 		struct sk_buff *next = skb->next;
153 
154 		__skb_queue_tail(&q->gso_skb, skb);
155 
156 		/* it's still part of the queue */
157 		if (qdisc_is_percpu_stats(q)) {
158 			qdisc_qstats_cpu_requeues_inc(q);
159 			qdisc_qstats_cpu_backlog_inc(q, skb);
160 			qdisc_qstats_cpu_qlen_inc(q);
161 		} else {
162 			q->qstats.requeues++;
163 			qdisc_qstats_backlog_inc(q, skb);
164 			q->q.qlen++;
165 		}
166 
167 		skb = next;
168 	}
169 
170 	if (lock) {
171 		spin_unlock(lock);
172 		set_bit(__QDISC_STATE_MISSED, &q->state);
173 	} else {
174 		__netif_schedule(q);
175 	}
176 }
177 
178 static void try_bulk_dequeue_skb(struct Qdisc *q,
179 				 struct sk_buff *skb,
180 				 const struct netdev_queue *txq,
181 				 int *packets)
182 {
183 	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
184 
185 	while (bytelimit > 0) {
186 		struct sk_buff *nskb = q->dequeue(q);
187 
188 		if (!nskb)
189 			break;
190 
191 		bytelimit -= nskb->len; /* covers GSO len */
192 		skb->next = nskb;
193 		skb = nskb;
194 		(*packets)++; /* GSO counts as one pkt */
195 	}
196 	skb_mark_not_on_list(skb);
197 }
198 
199 /* This variant of try_bulk_dequeue_skb() makes sure
200  * all skbs in the chain are for the same txq
201  */
202 static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
203 				      struct sk_buff *skb,
204 				      int *packets)
205 {
206 	int mapping = skb_get_queue_mapping(skb);
207 	struct sk_buff *nskb;
208 	int cnt = 0;
209 
210 	do {
211 		nskb = q->dequeue(q);
212 		if (!nskb)
213 			break;
214 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
215 			qdisc_enqueue_skb_bad_txq(q, nskb);
216 			break;
217 		}
218 		skb->next = nskb;
219 		skb = nskb;
220 	} while (++cnt < 8);
221 	(*packets) += cnt;
222 	skb_mark_not_on_list(skb);
223 }
224 
225 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
226  * A requeued skb (via q->gso_skb) can also be a SKB list.
227  */
228 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
229 				   int *packets)
230 {
231 	const struct netdev_queue *txq = q->dev_queue;
232 	struct sk_buff *skb = NULL;
233 
234 	*packets = 1;
235 	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
236 		spinlock_t *lock = NULL;
237 
238 		if (q->flags & TCQ_F_NOLOCK) {
239 			lock = qdisc_lock(q);
240 			spin_lock(lock);
241 		}
242 
243 		skb = skb_peek(&q->gso_skb);
244 
245 		/* skb may be NULL if another CPU pulled gso_skb off between the
246 		 * empty check and taking the lock.
247 		 */
248 		if (!skb) {
249 			if (lock)
250 				spin_unlock(lock);
251 			goto validate;
252 		}
253 
254 		/* skbs in gso_skb were already validated */
255 		*validate = false;
256 		if (xfrm_offload(skb))
257 			*validate = true;
258 		/* check the reason for requeuing without taking the tx lock first */
259 		txq = skb_get_tx_queue(txq->dev, skb);
260 		if (!netif_xmit_frozen_or_stopped(txq)) {
261 			skb = __skb_dequeue(&q->gso_skb);
262 			if (qdisc_is_percpu_stats(q)) {
263 				qdisc_qstats_cpu_backlog_dec(q, skb);
264 				qdisc_qstats_cpu_qlen_dec(q);
265 			} else {
266 				qdisc_qstats_backlog_dec(q, skb);
267 				q->q.qlen--;
268 			}
269 		} else {
270 			skb = NULL;
271 			qdisc_maybe_clear_missed(q, txq);
272 		}
273 		if (lock)
274 			spin_unlock(lock);
275 		goto trace;
276 	}
277 validate:
278 	*validate = true;
279 
280 	if ((q->flags & TCQ_F_ONETXQUEUE) &&
281 	    netif_xmit_frozen_or_stopped(txq)) {
282 		qdisc_maybe_clear_missed(q, txq);
283 		return skb;
284 	}
285 
286 	skb = qdisc_dequeue_skb_bad_txq(q);
287 	if (unlikely(skb)) {
288 		if (skb == SKB_XOFF_MAGIC)
289 			return NULL;
290 		goto bulk;
291 	}
292 	skb = q->dequeue(q);
293 	if (skb) {
294 bulk:
295 		if (qdisc_may_bulk(q))
296 			try_bulk_dequeue_skb(q, skb, txq, packets);
297 		else
298 			try_bulk_dequeue_skb_slow(q, skb, packets);
299 	}
300 trace:
301 	trace_qdisc_dequeue(q, txq, *packets, skb);
302 	return skb;
303 }
304 
305 /*
306  * Transmit possibly several skbs, and handle the return status as
307  * required. Owning qdisc running bit guarantees that only one CPU
308  * can execute this function.
309  *
310  * Returns to the caller:
311  *				false  - hardware queue frozen; back off
312  *				true   - feel free to send more pkts
313  */
314 bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
315 		     struct net_device *dev, struct netdev_queue *txq,
316 		     spinlock_t *root_lock, bool validate)
317 {
318 	int ret = NETDEV_TX_BUSY;
319 	bool again = false;
320 
321 	/* And release qdisc */
322 	if (root_lock)
323 		spin_unlock(root_lock);
324 
325 	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
326 	if (validate)
327 		skb = validate_xmit_skb_list(skb, dev, &again);
328 
329 #ifdef CONFIG_XFRM_OFFLOAD
330 	if (unlikely(again)) {
331 		if (root_lock)
332 			spin_lock(root_lock);
333 
334 		dev_requeue_skb(skb, q);
335 		return false;
336 	}
337 #endif
338 
339 	if (likely(skb)) {
340 		HARD_TX_LOCK(dev, txq, smp_processor_id());
341 		if (!netif_xmit_frozen_or_stopped(txq))
342 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
343 		else
344 			qdisc_maybe_clear_missed(q, txq);
345 
346 		HARD_TX_UNLOCK(dev, txq);
347 	} else {
348 		if (root_lock)
349 			spin_lock(root_lock);
350 		return true;
351 	}
352 
353 	if (root_lock)
354 		spin_lock(root_lock);
355 
356 	if (!dev_xmit_complete(ret)) {
357 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
358 		if (unlikely(ret != NETDEV_TX_BUSY))
359 			net_warn_ratelimited("BUG %s code %d qlen %d\n",
360 					     dev->name, ret, q->q.qlen);
361 
362 		dev_requeue_skb(skb, q);
363 		return false;
364 	}
365 
366 	return true;
367 }
368 
369 /*
370  * NOTE: Called under qdisc_lock(q) with locally disabled BH.
371  *
372  * The qdisc running bit guarantees only one CPU can process
373  * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
374  * this queue.
375  *
376  *  netif_tx_lock serializes accesses to the device driver.
377  *
378  *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
379  *  if one is grabbed, the other must be free.
380  *
381  * Note that this procedure can be called by a watchdog timer.
382  *
383  * Returns to the caller:
384  *				false - queue is empty or throttled.
385  *				true  - queue is not empty.
386  *
387  */
388 static inline bool qdisc_restart(struct Qdisc *q, int *packets)
389 {
390 	spinlock_t *root_lock = NULL;
391 	struct netdev_queue *txq;
392 	struct net_device *dev;
393 	struct sk_buff *skb;
394 	bool validate;
395 
396 	/* Dequeue packet */
397 	skb = dequeue_skb(q, &validate, packets);
398 	if (unlikely(!skb))
399 		return false;
400 
401 	if (!(q->flags & TCQ_F_NOLOCK))
402 		root_lock = qdisc_lock(q);
403 
404 	dev = qdisc_dev(q);
405 	txq = skb_get_tx_queue(dev, skb);
406 
407 	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
408 }
409 
410 void __qdisc_run(struct Qdisc *q)
411 {
412 	int quota = READ_ONCE(dev_tx_weight);
413 	int packets;
414 
415 	while (qdisc_restart(q, &packets)) {
416 		quota -= packets;
417 		if (quota <= 0) {
418 			if (q->flags & TCQ_F_NOLOCK)
419 				set_bit(__QDISC_STATE_MISSED, &q->state);
420 			else
421 				__netif_schedule(q);
422 
423 			break;
424 		}
425 	}
426 }
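
/* Illustrative sketch (editorial addition, not in the original source):
 * __qdisc_run() is normally entered through qdisc_run() from
 * <net/pkt_sched.h>, which brackets it with the running-state handshake,
 * roughly:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */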
427 
428 unsigned long dev_trans_start(struct net_device *dev)
429 {
430 	unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
431 	unsigned long val;
432 	unsigned int i;
433 
434 	for (i = 1; i < dev->num_tx_queues; i++) {
435 		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
436 		if (val && time_after(val, res))
437 			res = val;
438 	}
439 
440 	return res;
441 }
442 EXPORT_SYMBOL(dev_trans_start);
443 
444 static void netif_freeze_queues(struct net_device *dev)
445 {
446 	unsigned int i;
447 	int cpu;
448 
449 	cpu = smp_processor_id();
450 	for (i = 0; i < dev->num_tx_queues; i++) {
451 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
452 
453 		/* We are the only thread of execution doing a
454 		 * freeze, but we have to grab the _xmit_lock in
455 		 * order to synchronize with threads which are in
456 		 * the ->hard_start_xmit() handler and already
457 		 * checked the frozen bit.
458 		 */
459 		__netif_tx_lock(txq, cpu);
460 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
461 		__netif_tx_unlock(txq);
462 	}
463 }
464 
465 void netif_tx_lock(struct net_device *dev)
466 {
467 	spin_lock(&dev->tx_global_lock);
468 	netif_freeze_queues(dev);
469 }
470 EXPORT_SYMBOL(netif_tx_lock);
471 
472 static void netif_unfreeze_queues(struct net_device *dev)
473 {
474 	unsigned int i;
475 
476 	for (i = 0; i < dev->num_tx_queues; i++) {
477 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
478 
479 		/* No need to grab the _xmit_lock here.  If the
480 		 * queue is not stopped for another reason, we
481 		 * force a schedule.
482 		 */
483 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
484 		netif_schedule_queue(txq);
485 	}
486 }
487 
488 void netif_tx_unlock(struct net_device *dev)
489 {
490 	netif_unfreeze_queues(dev);
491 	spin_unlock(&dev->tx_global_lock);
492 }
493 EXPORT_SYMBOL(netif_tx_unlock);
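
/* Illustrative sketch (editorial addition; foo_* is a hypothetical helper):
 * drivers typically freeze all TX queues around global reconfiguration with
 * the BH variants from <linux/netdevice.h>:
 *
 *	netif_tx_lock_bh(dev);
 *	foo_reprogram_tx_rings(priv);
 *	netif_tx_unlock_bh(dev);
 */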
494 
495 static void dev_watchdog(struct timer_list *t)
496 {
497 	struct net_device *dev = from_timer(dev, t, watchdog_timer);
498 	bool release = true;
499 
500 	spin_lock(&dev->tx_global_lock);
501 	if (!qdisc_tx_is_noop(dev)) {
502 		if (netif_device_present(dev) &&
503 		    netif_running(dev) &&
504 		    netif_carrier_ok(dev)) {
505 			unsigned int timedout_ms = 0;
506 			unsigned int i;
507 			unsigned long trans_start;
508 
509 			for (i = 0; i < dev->num_tx_queues; i++) {
510 				struct netdev_queue *txq;
511 
512 				txq = netdev_get_tx_queue(dev, i);
513 				trans_start = READ_ONCE(txq->trans_start);
514 				if (netif_xmit_stopped(txq) &&
515 				    time_after(jiffies, (trans_start +
516 							 dev->watchdog_timeo))) {
517 					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
518 					atomic_long_inc(&txq->trans_timeout);
519 					break;
520 				}
521 			}
522 
523 			if (unlikely(timedout_ms)) {
524 				trace_net_dev_xmit_timeout(dev, i);
525 				netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
526 					    raw_smp_processor_id(),
527 					    i, timedout_ms);
528 				netif_freeze_queues(dev);
529 				dev->netdev_ops->ndo_tx_timeout(dev, i);
530 				netif_unfreeze_queues(dev);
531 			}
532 			if (!mod_timer(&dev->watchdog_timer,
533 				       round_jiffies(jiffies +
534 						     dev->watchdog_timeo)))
535 				release = false;
536 		}
537 	}
538 	spin_unlock(&dev->tx_global_lock);
539 
540 	if (release)
541 		netdev_put(dev, &dev->watchdog_dev_tracker);
542 }
543 
544 void __netdev_watchdog_up(struct net_device *dev)
545 {
546 	if (dev->netdev_ops->ndo_tx_timeout) {
547 		if (dev->watchdog_timeo <= 0)
548 			dev->watchdog_timeo = 5*HZ;
549 		if (!mod_timer(&dev->watchdog_timer,
550 			       round_jiffies(jiffies + dev->watchdog_timeo)))
551 			netdev_hold(dev, &dev->watchdog_dev_tracker,
552 				    GFP_ATOMIC);
553 	}
554 }
555 EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
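
/* Illustrative sketch (editorial addition; foo_* names are hypothetical): a
 * driver opts into this TX watchdog by providing an ndo_tx_timeout handler
 * and, optionally, its own timeout before registering the device:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_tx_timeout	= foo_tx_timeout,
 *		...
 *	};
 *
 *	dev->watchdog_timeo = 2 * HZ;	// otherwise defaults to 5*HZ above
 */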
556 
557 static void dev_watchdog_up(struct net_device *dev)
558 {
559 	__netdev_watchdog_up(dev);
560 }
561 
562 static void dev_watchdog_down(struct net_device *dev)
563 {
564 	netif_tx_lock_bh(dev);
565 	if (del_timer(&dev->watchdog_timer))
566 		netdev_put(dev, &dev->watchdog_dev_tracker);
567 	netif_tx_unlock_bh(dev);
568 }
569 
570 /**
571  *	netif_carrier_on - set carrier
572  *	@dev: network device
573  *
574  * Device has detected acquisition of carrier.
575  */
576 void netif_carrier_on(struct net_device *dev)
577 {
578 	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
579 		if (dev->reg_state == NETREG_UNINITIALIZED)
580 			return;
581 		atomic_inc(&dev->carrier_up_count);
582 		linkwatch_fire_event(dev);
583 		if (netif_running(dev))
584 			__netdev_watchdog_up(dev);
585 	}
586 }
587 EXPORT_SYMBOL(netif_carrier_on);
588 
589 /**
590  *	netif_carrier_off - clear carrier
591  *	@dev: network device
592  *
593  * Device has detected loss of carrier.
594  */
595 void netif_carrier_off(struct net_device *dev)
596 {
597 	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
598 		if (dev->reg_state == NETREG_UNINITIALIZED)
599 			return;
600 		atomic_inc(&dev->carrier_down_count);
601 		linkwatch_fire_event(dev);
602 	}
603 }
604 EXPORT_SYMBOL(netif_carrier_off);
605 
606 /**
607  *	netif_carrier_event - report carrier state event
608  *	@dev: network device
609  *
610  * Device has detected a carrier event but the carrier state wasn't changed.
611  * Use in drivers when querying carrier state asynchronously, to avoid missing
612  * events (link flaps) if link recovers before it's queried.
613  */
614 void netif_carrier_event(struct net_device *dev)
615 {
616 	if (dev->reg_state == NETREG_UNINITIALIZED)
617 		return;
618 	atomic_inc(&dev->carrier_up_count);
619 	atomic_inc(&dev->carrier_down_count);
620 	linkwatch_fire_event(dev);
621 }
622 EXPORT_SYMBOL_GPL(netif_carrier_event);
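
/* Illustrative sketch (editorial addition; foo_* names are hypothetical):
 * drivers report link state from their link-change handling using the
 * helpers above:
 *
 *	static void foo_handle_link_change(struct net_device *dev, bool up)
 *	{
 *		if (up)
 *			netif_carrier_on(dev);
 *		else
 *			netif_carrier_off(dev);
 *	}
 */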
623 
624 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
625    under all circumstances. It is difficult to invent anything faster or
626    cheaper.
627  */
628 
629 static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
630 			struct sk_buff **to_free)
631 {
632 	__qdisc_drop(skb, to_free);
633 	return NET_XMIT_CN;
634 }
635 
636 static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
637 {
638 	return NULL;
639 }
640 
641 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
642 	.id		=	"noop",
643 	.priv_size	=	0,
644 	.enqueue	=	noop_enqueue,
645 	.dequeue	=	noop_dequeue,
646 	.peek		=	noop_dequeue,
647 	.owner		=	THIS_MODULE,
648 };
649 
650 static struct netdev_queue noop_netdev_queue = {
651 	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
652 	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
653 };
654 
655 struct Qdisc noop_qdisc = {
656 	.enqueue	=	noop_enqueue,
657 	.dequeue	=	noop_dequeue,
658 	.flags		=	TCQ_F_BUILTIN,
659 	.ops		=	&noop_qdisc_ops,
660 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
661 	.dev_queue	=	&noop_netdev_queue,
662 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
663 	.gso_skb = {
664 		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
665 		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
666 		.qlen = 0,
667 		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
668 	},
669 	.skb_bad_txq = {
670 		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
671 		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
672 		.qlen = 0,
673 		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
674 	},
675 };
676 EXPORT_SYMBOL(noop_qdisc);
677 
678 static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
679 			struct netlink_ext_ack *extack)
680 {
681 	/* register_qdisc() assigns a default of noop_enqueue if unset,
682 	 * but __dev_queue_xmit() treats noqueue only as such
683 	 * if this is NULL - so clear it here. */
684 	qdisc->enqueue = NULL;
685 	return 0;
686 }
687 
688 struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
689 	.id		=	"noqueue",
690 	.priv_size	=	0,
691 	.init		=	noqueue_init,
692 	.enqueue	=	noop_enqueue,
693 	.dequeue	=	noop_dequeue,
694 	.peek		=	noop_dequeue,
695 	.owner		=	THIS_MODULE,
696 };
697 
698 const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
699 	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
700 };
701 EXPORT_SYMBOL(sch_default_prio2band);
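
/* Example (editorial addition): the table maps skb->priority & TC_PRIO_MAX
 * to a pfifo_fast band, where band 0 is dequeued first. For instance
 * TC_PRIO_BESTEFFORT (0) lands in band 1, TC_PRIO_BULK (2) in band 2 and
 * TC_PRIO_INTERACTIVE (6) in band 0:
 *
 *	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
 */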
702 
703 /* 3-band FIFO queue: old style, but should be a bit faster than
704    generic prio+fifo combination.
705  */
706 
707 #define PFIFO_FAST_BANDS 3
708 
709 /*
710  * Private data for a pfifo_fast scheduler containing:
711  *	- rings for priority bands
712  */
713 struct pfifo_fast_priv {
714 	struct skb_array q[PFIFO_FAST_BANDS];
715 };
716 
717 static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
718 					  int band)
719 {
720 	return &priv->q[band];
721 }
722 
723 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
724 			      struct sk_buff **to_free)
725 {
726 	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
727 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
728 	struct skb_array *q = band2list(priv, band);
729 	unsigned int pkt_len = qdisc_pkt_len(skb);
730 	int err;
731 
732 	err = skb_array_produce(q, skb);
733 
734 	if (unlikely(err)) {
735 		if (qdisc_is_percpu_stats(qdisc))
736 			return qdisc_drop_cpu(skb, qdisc, to_free);
737 		else
738 			return qdisc_drop(skb, qdisc, to_free);
739 	}
740 
741 	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
742 	return NET_XMIT_SUCCESS;
743 }
744 
745 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
746 {
747 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
748 	struct sk_buff *skb = NULL;
749 	bool need_retry = true;
750 	int band;
751 
752 retry:
753 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
754 		struct skb_array *q = band2list(priv, band);
755 
756 		if (__skb_array_empty(q))
757 			continue;
758 
759 		skb = __skb_array_consume(q);
760 	}
761 	if (likely(skb)) {
762 		qdisc_update_stats_at_dequeue(qdisc, skb);
763 	} else if (need_retry &&
764 		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
765 		/* Delay clearing STATE_MISSED until here to reduce
766 		 * the overhead of the second spin_trylock() in
767 		 * qdisc_run_begin() and of the __netif_schedule()
768 		 * call in qdisc_run_end().
769 		 */
770 		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
771 		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
772 
773 		/* Make sure dequeuing happens after clearing
774 		 * STATE_MISSED.
775 		 */
776 		smp_mb__after_atomic();
777 
778 		need_retry = false;
779 
780 		goto retry;
781 	}
782 
783 	return skb;
784 }
785 
786 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
787 {
788 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
789 	struct sk_buff *skb = NULL;
790 	int band;
791 
792 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
793 		struct skb_array *q = band2list(priv, band);
794 
795 		skb = __skb_array_peek(q);
796 	}
797 
798 	return skb;
799 }
800 
801 static void pfifo_fast_reset(struct Qdisc *qdisc)
802 {
803 	int i, band;
804 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
805 
806 	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
807 		struct skb_array *q = band2list(priv, band);
808 		struct sk_buff *skb;
809 
810 		/* A NULL ring is possible if the destroy path is taken because
811 		 * skb_array_init() failed in pfifo_fast_init().
812 		 */
813 		if (!q->ring.queue)
814 			continue;
815 
816 		while ((skb = __skb_array_consume(q)) != NULL)
817 			kfree_skb(skb);
818 	}
819 
820 	if (qdisc_is_percpu_stats(qdisc)) {
821 		for_each_possible_cpu(i) {
822 			struct gnet_stats_queue *q;
823 
824 			q = per_cpu_ptr(qdisc->cpu_qstats, i);
825 			q->backlog = 0;
826 			q->qlen = 0;
827 		}
828 	}
829 }
830 
831 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
832 {
833 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
834 
835 	memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1);
836 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
837 		goto nla_put_failure;
838 	return skb->len;
839 
840 nla_put_failure:
841 	return -1;
842 }
843 
844 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
845 			   struct netlink_ext_ack *extack)
846 {
847 	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
848 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
849 	int prio;
850 
851 	/* guard against zero length rings */
852 	if (!qlen)
853 		return -EINVAL;
854 
855 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
856 		struct skb_array *q = band2list(priv, prio);
857 		int err;
858 
859 		err = skb_array_init(q, qlen, GFP_KERNEL);
860 		if (err)
861 			return -ENOMEM;
862 	}
863 
864 	/* Can by-pass the queue discipline */
865 	qdisc->flags |= TCQ_F_CAN_BYPASS;
866 	return 0;
867 }
868 
869 static void pfifo_fast_destroy(struct Qdisc *sch)
870 {
871 	struct pfifo_fast_priv *priv = qdisc_priv(sch);
872 	int prio;
873 
874 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
875 		struct skb_array *q = band2list(priv, prio);
876 
877 		/* A NULL ring is possible if the destroy path is taken because
878 		 * skb_array_init() failed in pfifo_fast_init().
879 		 */
880 		if (!q->ring.queue)
881 			continue;
882 		/* Destroy ring but no need to kfree_skb because a call to
883 		 * pfifo_fast_reset() has already done that work.
884 		 */
885 		ptr_ring_cleanup(&q->ring, NULL);
886 	}
887 }
888 
889 static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
890 					  unsigned int new_len)
891 {
892 	struct pfifo_fast_priv *priv = qdisc_priv(sch);
893 	struct skb_array *bands[PFIFO_FAST_BANDS];
894 	int prio;
895 
896 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
897 		struct skb_array *q = band2list(priv, prio);
898 
899 		bands[prio] = q;
900 	}
901 
902 	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
903 					 GFP_KERNEL);
904 }
905 
906 struct Qdisc_ops pfifo_fast_ops __read_mostly = {
907 	.id		=	"pfifo_fast",
908 	.priv_size	=	sizeof(struct pfifo_fast_priv),
909 	.enqueue	=	pfifo_fast_enqueue,
910 	.dequeue	=	pfifo_fast_dequeue,
911 	.peek		=	pfifo_fast_peek,
912 	.init		=	pfifo_fast_init,
913 	.destroy	=	pfifo_fast_destroy,
914 	.reset		=	pfifo_fast_reset,
915 	.dump		=	pfifo_fast_dump,
916 	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
917 	.owner		=	THIS_MODULE,
918 	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
919 };
920 EXPORT_SYMBOL(pfifo_fast_ops);
921 
922 static struct lock_class_key qdisc_tx_busylock;
923 
924 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
925 			  const struct Qdisc_ops *ops,
926 			  struct netlink_ext_ack *extack)
927 {
928 	struct Qdisc *sch;
929 	unsigned int size = sizeof(*sch) + ops->priv_size;
930 	int err = -ENOBUFS;
931 	struct net_device *dev;
932 
933 	if (!dev_queue) {
934 		NL_SET_ERR_MSG(extack, "No device queue given");
935 		err = -EINVAL;
936 		goto errout;
937 	}
938 
939 	dev = dev_queue->dev;
940 	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
941 
942 	if (!sch)
943 		goto errout;
944 	__skb_queue_head_init(&sch->gso_skb);
945 	__skb_queue_head_init(&sch->skb_bad_txq);
946 	gnet_stats_basic_sync_init(&sch->bstats);
947 	spin_lock_init(&sch->q.lock);
948 
949 	if (ops->static_flags & TCQ_F_CPUSTATS) {
950 		sch->cpu_bstats =
951 			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
952 		if (!sch->cpu_bstats)
953 			goto errout1;
954 
955 		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
956 		if (!sch->cpu_qstats) {
957 			free_percpu(sch->cpu_bstats);
958 			goto errout1;
959 		}
960 	}
961 
962 	spin_lock_init(&sch->busylock);
963 	lockdep_set_class(&sch->busylock,
964 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
965 
966 	/* seqlock has the same scope as busylock, for NOLOCK qdiscs */
967 	spin_lock_init(&sch->seqlock);
968 	lockdep_set_class(&sch->seqlock,
969 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
970 
971 	sch->ops = ops;
972 	sch->flags = ops->static_flags;
973 	sch->enqueue = ops->enqueue;
974 	sch->dequeue = ops->dequeue;
975 	sch->dev_queue = dev_queue;
976 	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
977 	refcount_set(&sch->refcnt, 1);
978 
979 	return sch;
980 errout1:
981 	kfree(sch);
982 errout:
983 	return ERR_PTR(err);
984 }
985 
986 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
987 				const struct Qdisc_ops *ops,
988 				unsigned int parentid,
989 				struct netlink_ext_ack *extack)
990 {
991 	struct Qdisc *sch;
992 
993 	if (!try_module_get(ops->owner)) {
994 		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
995 		return NULL;
996 	}
997 
998 	sch = qdisc_alloc(dev_queue, ops, extack);
999 	if (IS_ERR(sch)) {
1000 		module_put(ops->owner);
1001 		return NULL;
1002 	}
1003 	sch->parent = parentid;
1004 
1005 	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
1006 		trace_qdisc_create(ops, dev_queue->dev, parentid);
1007 		return sch;
1008 	}
1009 
1010 	qdisc_put(sch);
1011 	return NULL;
1012 }
1013 EXPORT_SYMBOL(qdisc_create_dflt);
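
/* Illustrative sketch (editorial addition, not in the original source):
 * attach_one_default_qdisc() below uses this helper to build a per-queue
 * default qdisc; a NULL return covers allocation and ->init() failures alike:
 *
 *	struct Qdisc *qdisc;
 *
 *	qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops, TC_H_ROOT, NULL);
 *	if (!qdisc)
 *		return;
 */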
1014 
1015 /* Under qdisc_lock(qdisc) and BH! */
1016 
1017 void qdisc_reset(struct Qdisc *qdisc)
1018 {
1019 	const struct Qdisc_ops *ops = qdisc->ops;
1020 
1021 	trace_qdisc_reset(qdisc);
1022 
1023 	if (ops->reset)
1024 		ops->reset(qdisc);
1025 
1026 	__skb_queue_purge(&qdisc->gso_skb);
1027 	__skb_queue_purge(&qdisc->skb_bad_txq);
1028 
1029 	qdisc->q.qlen = 0;
1030 	qdisc->qstats.backlog = 0;
1031 }
1032 EXPORT_SYMBOL(qdisc_reset);
1033 
1034 void qdisc_free(struct Qdisc *qdisc)
1035 {
1036 	if (qdisc_is_percpu_stats(qdisc)) {
1037 		free_percpu(qdisc->cpu_bstats);
1038 		free_percpu(qdisc->cpu_qstats);
1039 	}
1040 
1041 	kfree(qdisc);
1042 }
1043 
1044 static void qdisc_free_cb(struct rcu_head *head)
1045 {
1046 	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1047 
1048 	qdisc_free(q);
1049 }
1050 
1051 static void __qdisc_destroy(struct Qdisc *qdisc)
1052 {
1053 	const struct Qdisc_ops  *ops = qdisc->ops;
1054 	struct net_device *dev = qdisc_dev(qdisc);
1055 
1056 #ifdef CONFIG_NET_SCHED
1057 	qdisc_hash_del(qdisc);
1058 
1059 	qdisc_put_stab(rtnl_dereference(qdisc->stab));
1060 #endif
1061 	gen_kill_estimator(&qdisc->rate_est);
1062 
1063 	qdisc_reset(qdisc);
1064 
1065 
1066 	if (ops->destroy)
1067 		ops->destroy(qdisc);
1068 
1069 	module_put(ops->owner);
1070 	netdev_put(dev, &qdisc->dev_tracker);
1071 
1072 	trace_qdisc_destroy(qdisc);
1073 
1074 	call_rcu(&qdisc->rcu, qdisc_free_cb);
1075 }
1076 
1077 void qdisc_destroy(struct Qdisc *qdisc)
1078 {
1079 	if (qdisc->flags & TCQ_F_BUILTIN)
1080 		return;
1081 
1082 	__qdisc_destroy(qdisc);
1083 }
1084 
1085 void qdisc_put(struct Qdisc *qdisc)
1086 {
1087 	if (!qdisc)
1088 		return;
1089 
1090 	if (qdisc->flags & TCQ_F_BUILTIN ||
1091 	    !refcount_dec_and_test(&qdisc->refcnt))
1092 		return;
1093 
1094 	__qdisc_destroy(qdisc);
1095 }
1096 EXPORT_SYMBOL(qdisc_put);
1097 
1098 /* Version of qdisc_put() that is called with the rtnl mutex unlocked.
1099  * Intended as an optimization, this function only takes the rtnl lock if
1100  * the qdisc reference counter reaches zero.
1101  */
1102 
1103 void qdisc_put_unlocked(struct Qdisc *qdisc)
1104 {
1105 	if (qdisc->flags & TCQ_F_BUILTIN ||
1106 	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1107 		return;
1108 
1109 	__qdisc_destroy(qdisc);
1110 	rtnl_unlock();
1111 }
1112 EXPORT_SYMBOL(qdisc_put_unlocked);
1113 
1114 /* Attach toplevel qdisc to device queue. */
1115 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1116 			      struct Qdisc *qdisc)
1117 {
1118 	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1119 	spinlock_t *root_lock;
1120 
1121 	root_lock = qdisc_lock(oqdisc);
1122 	spin_lock_bh(root_lock);
1123 
1124 	/* ... and graft new one */
1125 	if (qdisc == NULL)
1126 		qdisc = &noop_qdisc;
1127 	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1128 	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1129 
1130 	spin_unlock_bh(root_lock);
1131 
1132 	return oqdisc;
1133 }
1134 EXPORT_SYMBOL(dev_graft_qdisc);
1135 
1136 static void shutdown_scheduler_queue(struct net_device *dev,
1137 				     struct netdev_queue *dev_queue,
1138 				     void *_qdisc_default)
1139 {
1140 	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1141 	struct Qdisc *qdisc_default = _qdisc_default;
1142 
1143 	if (qdisc) {
1144 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1145 		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
1146 
1147 		qdisc_put(qdisc);
1148 	}
1149 }
1150 
1151 static void attach_one_default_qdisc(struct net_device *dev,
1152 				     struct netdev_queue *dev_queue,
1153 				     void *_unused)
1154 {
1155 	struct Qdisc *qdisc;
1156 	const struct Qdisc_ops *ops = default_qdisc_ops;
1157 
1158 	if (dev->priv_flags & IFF_NO_QUEUE)
1159 		ops = &noqueue_qdisc_ops;
1160 	else if (dev->type == ARPHRD_CAN)
1161 		ops = &pfifo_fast_ops;
1162 
1163 	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1164 	if (!qdisc)
1165 		return;
1166 
1167 	if (!netif_is_multiqueue(dev))
1168 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1169 	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1170 }
1171 
1172 static void attach_default_qdiscs(struct net_device *dev)
1173 {
1174 	struct netdev_queue *txq;
1175 	struct Qdisc *qdisc;
1176 
1177 	txq = netdev_get_tx_queue(dev, 0);
1178 
1179 	if (!netif_is_multiqueue(dev) ||
1180 	    dev->priv_flags & IFF_NO_QUEUE) {
1181 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1182 		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1183 		rcu_assign_pointer(dev->qdisc, qdisc);
1184 		qdisc_refcount_inc(qdisc);
1185 	} else {
1186 		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1187 		if (qdisc) {
1188 			rcu_assign_pointer(dev->qdisc, qdisc);
1189 			qdisc->ops->attach(qdisc);
1190 		}
1191 	}
1192 	qdisc = rtnl_dereference(dev->qdisc);
1193 
1194 	/* Detect when default qdisc setup/init fails and fall back to "noqueue" */
1195 	if (qdisc == &noop_qdisc) {
1196 		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
1197 			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
1198 		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1199 		dev->priv_flags |= IFF_NO_QUEUE;
1200 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1201 		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1202 		rcu_assign_pointer(dev->qdisc, qdisc);
1203 		qdisc_refcount_inc(qdisc);
1204 		dev->priv_flags ^= IFF_NO_QUEUE;
1205 	}
1206 
1207 #ifdef CONFIG_NET_SCHED
1208 	if (qdisc != &noop_qdisc)
1209 		qdisc_hash_add(qdisc, false);
1210 #endif
1211 }
1212 
1213 static void transition_one_qdisc(struct net_device *dev,
1214 				 struct netdev_queue *dev_queue,
1215 				 void *_need_watchdog)
1216 {
1217 	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1218 	int *need_watchdog_p = _need_watchdog;
1219 
1220 	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1221 		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1222 
1223 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1224 	if (need_watchdog_p) {
1225 		WRITE_ONCE(dev_queue->trans_start, 0);
1226 		*need_watchdog_p = 1;
1227 	}
1228 }
1229 
1230 void dev_activate(struct net_device *dev)
1231 {
1232 	int need_watchdog;
1233 
1234 	/* No queueing discipline is attached to the device;
1235 	 * create a default one for devices which need queueing,
1236 	 * and noqueue_qdisc for virtual interfaces.
1237 	 */
1238 
1239 	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
1240 		attach_default_qdiscs(dev);
1241 
1242 	if (!netif_carrier_ok(dev))
1243 		/* Delay activation until next carrier-on event */
1244 		return;
1245 
1246 	need_watchdog = 0;
1247 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1248 	if (dev_ingress_queue(dev))
1249 		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1250 
1251 	if (need_watchdog) {
1252 		netif_trans_update(dev);
1253 		dev_watchdog_up(dev);
1254 	}
1255 }
1256 EXPORT_SYMBOL(dev_activate);
1257 
1258 static void qdisc_deactivate(struct Qdisc *qdisc)
1259 {
1260 	if (qdisc->flags & TCQ_F_BUILTIN)
1261 		return;
1262 
1263 	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1264 }
1265 
1266 static void dev_deactivate_queue(struct net_device *dev,
1267 				 struct netdev_queue *dev_queue,
1268 				 void *_qdisc_default)
1269 {
1270 	struct Qdisc *qdisc_default = _qdisc_default;
1271 	struct Qdisc *qdisc;
1272 
1273 	qdisc = rtnl_dereference(dev_queue->qdisc);
1274 	if (qdisc) {
1275 		qdisc_deactivate(qdisc);
1276 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1277 	}
1278 }
1279 
1280 static void dev_reset_queue(struct net_device *dev,
1281 			    struct netdev_queue *dev_queue,
1282 			    void *_unused)
1283 {
1284 	struct Qdisc *qdisc;
1285 	bool nolock;
1286 
1287 	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1288 	if (!qdisc)
1289 		return;
1290 
1291 	nolock = qdisc->flags & TCQ_F_NOLOCK;
1292 
1293 	if (nolock)
1294 		spin_lock_bh(&qdisc->seqlock);
1295 	spin_lock_bh(qdisc_lock(qdisc));
1296 
1297 	qdisc_reset(qdisc);
1298 
1299 	spin_unlock_bh(qdisc_lock(qdisc));
1300 	if (nolock) {
1301 		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
1302 		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
1303 		spin_unlock_bh(&qdisc->seqlock);
1304 	}
1305 }
1306 
1307 static bool some_qdisc_is_busy(struct net_device *dev)
1308 {
1309 	unsigned int i;
1310 
1311 	for (i = 0; i < dev->num_tx_queues; i++) {
1312 		struct netdev_queue *dev_queue;
1313 		spinlock_t *root_lock;
1314 		struct Qdisc *q;
1315 		int val;
1316 
1317 		dev_queue = netdev_get_tx_queue(dev, i);
1318 		q = rtnl_dereference(dev_queue->qdisc_sleeping);
1319 
1320 		root_lock = qdisc_lock(q);
1321 		spin_lock_bh(root_lock);
1322 
1323 		val = (qdisc_is_running(q) ||
1324 		       test_bit(__QDISC_STATE_SCHED, &q->state));
1325 
1326 		spin_unlock_bh(root_lock);
1327 
1328 		if (val)
1329 			return true;
1330 	}
1331 	return false;
1332 }
1333 
1334 /**
1335  * 	dev_deactivate_many - deactivate transmissions on several devices
1336  * 	@head: list of devices to deactivate
1337  *
1338  *	This function returns only when all outstanding transmissions
1339  *	have completed, unless all devices are in dismantle phase.
1340  */
1341 void dev_deactivate_many(struct list_head *head)
1342 {
1343 	struct net_device *dev;
1344 
1345 	list_for_each_entry(dev, head, close_list) {
1346 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1347 					 &noop_qdisc);
1348 		if (dev_ingress_queue(dev))
1349 			dev_deactivate_queue(dev, dev_ingress_queue(dev),
1350 					     &noop_qdisc);
1351 
1352 		dev_watchdog_down(dev);
1353 	}
1354 
1355 	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
1356 	 * outstanding qdisc enqueuing calls.
1357 	 * This is avoided if all devices are in the dismantle phase:
1358 	 * the caller will call synchronize_net() for us.
1359 	 */
1360 	synchronize_net();
1361 
1362 	list_for_each_entry(dev, head, close_list) {
1363 		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1364 
1365 		if (dev_ingress_queue(dev))
1366 			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1367 	}
1368 
1369 	/* Wait for outstanding qdisc_run calls. */
1370 	list_for_each_entry(dev, head, close_list) {
1371 		while (some_qdisc_is_busy(dev)) {
1372 			/* wait_event() would avoid this sleep-loop but would
1373 			 * require expensive checks in the fast paths of packet
1374 			 * processing which isn't worth it.
1375 			 */
1376 			schedule_timeout_uninterruptible(1);
1377 		}
1378 	}
1379 }
1380 
1381 void dev_deactivate(struct net_device *dev)
1382 {
1383 	LIST_HEAD(single);
1384 
1385 	list_add(&dev->close_list, &single);
1386 	dev_deactivate_many(&single);
1387 	list_del(&single);
1388 }
1389 EXPORT_SYMBOL(dev_deactivate);
1390 
1391 static int qdisc_change_tx_queue_len(struct net_device *dev,
1392 				     struct netdev_queue *dev_queue)
1393 {
1394 	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1395 	const struct Qdisc_ops *ops = qdisc->ops;
1396 
1397 	if (ops->change_tx_queue_len)
1398 		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1399 	return 0;
1400 }
1401 
1402 void dev_qdisc_change_real_num_tx(struct net_device *dev,
1403 				  unsigned int new_real_tx)
1404 {
1405 	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
1406 
1407 	if (qdisc->ops->change_real_num_tx)
1408 		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
1409 }
1410 
1411 void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
1412 {
1413 #ifdef CONFIG_NET_SCHED
1414 	struct net_device *dev = qdisc_dev(sch);
1415 	struct Qdisc *qdisc;
1416 	unsigned int i;
1417 
1418 	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
1419 		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1420 		/* Only update the default qdiscs we created;
1421 		 * qdiscs with handles are always hashed.
1422 		 */
1423 		if (qdisc != &noop_qdisc && !qdisc->handle)
1424 			qdisc_hash_del(qdisc);
1425 	}
1426 	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
1427 		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1428 		if (qdisc != &noop_qdisc && !qdisc->handle)
1429 			qdisc_hash_add(qdisc, false);
1430 	}
1431 #endif
1432 }
1433 EXPORT_SYMBOL(mq_change_real_num_tx);
1434 
1435 int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1436 {
1437 	bool up = dev->flags & IFF_UP;
1438 	unsigned int i;
1439 	int ret = 0;
1440 
1441 	if (up)
1442 		dev_deactivate(dev);
1443 
1444 	for (i = 0; i < dev->num_tx_queues; i++) {
1445 		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1446 
1447 		/* TODO: revert changes on a partial failure */
1448 		if (ret)
1449 			break;
1450 	}
1451 
1452 	if (up)
1453 		dev_activate(dev);
1454 	return ret;
1455 }
1456 
1457 static void dev_init_scheduler_queue(struct net_device *dev,
1458 				     struct netdev_queue *dev_queue,
1459 				     void *_qdisc)
1460 {
1461 	struct Qdisc *qdisc = _qdisc;
1462 
1463 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
1464 	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1465 }
1466 
1467 void dev_init_scheduler(struct net_device *dev)
1468 {
1469 	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1470 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1471 	if (dev_ingress_queue(dev))
1472 		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1473 
1474 	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1475 }
1476 
1477 void dev_shutdown(struct net_device *dev)
1478 {
1479 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1480 	if (dev_ingress_queue(dev))
1481 		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1482 	qdisc_put(rtnl_dereference(dev->qdisc));
1483 	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1484 
1485 	WARN_ON(timer_pending(&dev->watchdog_timer));
1486 }
1487 
1488 /**
1489  * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
1490  * @rate:   Rate to compute reciprocal division values of
1491  * @mult:   Multiplier for reciprocal division
1492  * @shift:  Shift for reciprocal division
1493  *
1494  * The multiplier and shift for reciprocal division by rate are stored
1495  * in mult and shift.
1496  *
1497  * The idea here is to replace a divide with a reciprocal one
1498  * in the fast path (a reciprocal divide is a multiply and a shift).
1499  *
1500  * The normal formula would be:
1501  *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
1502  *
1503  * We compute mult/shift to use instead:
1504  *  time_in_ns = (len * mult) >> shift;
1505  *
1506  * We try to get the highest possible mult value for accuracy,
1507  * but have to make sure no overflows will ever happen.
1508  *
1509  * reciprocal_value() is not used here because it doesn't handle 64-bit values.
1510  */
1511 static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
1512 {
1513 	u64 factor = NSEC_PER_SEC;
1514 
1515 	*mult = 1;
1516 	*shift = 0;
1517 
1518 	if (rate <= 0)
1519 		return;
1520 
1521 	for (;;) {
1522 		*mult = div64_u64(factor, rate);
1523 		if (*mult & (1U << 31) || factor & (1ULL << 63))
1524 			break;
1525 		factor <<= 1;
1526 		(*shift)++;
1527 	}
1528 }
1529 
1530 void psched_ratecfg_precompute(struct psched_ratecfg *r,
1531 			       const struct tc_ratespec *conf,
1532 			       u64 rate64)
1533 {
1534 	memset(r, 0, sizeof(*r));
1535 	r->overhead = conf->overhead;
1536 	r->mpu = conf->mpu;
1537 	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1538 	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1539 	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
1540 }
1541 EXPORT_SYMBOL(psched_ratecfg_precompute);
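
/* Illustrative sketch (editorial addition, not in the original source): a
 * shaper precomputes mult/shift once and then charges each packet with a
 * multiply and a shift in the fast path, e.g. via psched_l2t_ns() from
 * <net/sch_generic.h>:
 *
 *	struct psched_ratecfg rate;
 *
 *	psched_ratecfg_precompute(&rate, &ratespec, rate64);
 *	// later, per packet:
 *	u64 len_to_time_ns = psched_l2t_ns(&rate, qdisc_pkt_len(skb));
 */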
1542 
1543 void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
1544 {
1545 	r->rate_pkts_ps = pktrate64;
1546 	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
1547 }
1548 EXPORT_SYMBOL(psched_ppscfg_precompute);
1549 
1550 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1551 			  struct tcf_proto *tp_head)
1552 {
1553 	/* Protected with chain0->filter_chain_lock.
1554 	 * Can't access chain directly because tp_head can be NULL.
1555 	 */
1556 	struct mini_Qdisc *miniq_old =
1557 		rcu_dereference_protected(*miniqp->p_miniq, 1);
1558 	struct mini_Qdisc *miniq;
1559 
1560 	if (!tp_head) {
1561 		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1562 	} else {
1563 		miniq = miniq_old != &miniqp->miniq1 ?
1564 			&miniqp->miniq1 : &miniqp->miniq2;
1565 
1566 		/* We need to make sure that readers won't see the miniq
1567 		 * we are about to modify. So ensure that at least one RCU
1568 		 * grace period has elapsed since the miniq was made
1569 		 * inactive.
1570 		 */
1571 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
1572 			cond_synchronize_rcu(miniq->rcu_state);
1573 		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
1574 			synchronize_rcu_expedited();
1575 
1576 		miniq->filter_list = tp_head;
1577 		rcu_assign_pointer(*miniqp->p_miniq, miniq);
1578 	}
1579 
1580 	if (miniq_old)
1581 		/* This is the counterpart of the rcu sync above. We need to
1582 		 * block any potential new user of miniq_old until all readers
1583 		 * have stopped seeing it.
1584 		 */
1585 		miniq_old->rcu_state = start_poll_synchronize_rcu();
1586 }
1587 EXPORT_SYMBOL(mini_qdisc_pair_swap);
1588 
1589 void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1590 				struct tcf_block *block)
1591 {
1592 	miniqp->miniq1.block = block;
1593 	miniqp->miniq2.block = block;
1594 }
1595 EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1596 
1597 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1598 			  struct mini_Qdisc __rcu **p_miniq)
1599 {
1600 	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1601 	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1602 	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1603 	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1604 	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
1605 	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
1606 	miniqp->p_miniq = p_miniq;
1607 }
1608 EXPORT_SYMBOL(mini_qdisc_pair_init);
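
/* Illustrative sketch (editorial addition; q->miniqp, p_miniq and block are
 * placeholders): an ingress/clsact style user keeps a mini_Qdisc_pair in its
 * private data, initializes it once, and swaps the active miniq whenever its
 * filter chain head changes:
 *
 *	mini_qdisc_pair_init(&q->miniqp, sch, p_miniq);
 *	mini_qdisc_pair_block_init(&q->miniqp, block);
 *	...
 *	mini_qdisc_pair_swap(&q->miniqp, tp_head);
 */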
1609