Lines matching +full:pre +full:- +full:multiply
(Free-text search hits: each entry gives the source line number, the matched line, and the enclosing function. The functions all appear to come from the core qdisc code in net/sched/sch_generic.c.)

1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * - Ingress support
43 clear_bit(__QDISC_STATE_MISSED, &q->state); in qdisc_maybe_clear_missed()
56 set_bit(__QDISC_STATE_MISSED, &q->state); in qdisc_maybe_clear_missed()
58 set_bit(__QDISC_STATE_DRAINING, &q->state); in qdisc_maybe_clear_missed()
67 * - enqueue, dequeue are serialized via qdisc root lock
68 * - ingress filtering is also serialized via qdisc root lock
69 * - updates to tree and tree walking are only done under the rtnl mutex.
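The three rules above (source lines 67-69) describe the classic locking model: everything that moves packets goes through the per-qdisc root lock, and only tree changes need the rtnl mutex. A minimal sketch of what that serialization looks like from a transmit caller's point of view; this is illustrative only (the real fast path is __dev_xmit_skb()), but qdisc_lock(), qdisc_run() and kfree_skb_list() are the real helpers.

    #include <net/sch_generic.h>    /* struct Qdisc, qdisc_lock() */
    #include <net/pkt_sched.h>      /* qdisc_run() */

    /* Illustrative only: enqueue and the follow-up dequeue run back to
     * back under the qdisc root lock, as the rules above require for
     * lock-protected (non-TCQ_F_NOLOCK) qdiscs.
     */
    static int example_xmit_serialized(struct sk_buff *skb, struct Qdisc *q)
    {
            spinlock_t *root_lock = qdisc_lock(q);  /* the "qdisc root lock" */
            struct sk_buff *to_free = NULL;
            int rc;

            spin_lock(root_lock);
            rc = q->enqueue(skb, q, &to_free);      /* serialized enqueue */
            qdisc_run(q);                           /* dequeue + transmit, same lock */
            spin_unlock(root_lock);

            if (unlikely(to_free))
                    kfree_skb_list(to_free);        /* skbs the qdisc refused */
            return rc;
    }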
76 const struct netdev_queue *txq = q->dev_queue; in __skb_dequeue_bad_txq()
80 if (q->flags & TCQ_F_NOLOCK) { in __skb_dequeue_bad_txq()
85 skb = skb_peek(&q->skb_bad_txq); in __skb_dequeue_bad_txq()
88 txq = skb_get_tx_queue(txq->dev, skb); in __skb_dequeue_bad_txq()
90 skb = __skb_dequeue(&q->skb_bad_txq); in __skb_dequeue_bad_txq()
96 q->q.qlen--; in __skb_dequeue_bad_txq()
112 struct sk_buff *skb = skb_peek(&q->skb_bad_txq); in qdisc_dequeue_skb_bad_txq()
125 if (q->flags & TCQ_F_NOLOCK) { in qdisc_enqueue_skb_bad_txq()
130 __skb_queue_tail(&q->skb_bad_txq, skb); in qdisc_enqueue_skb_bad_txq()
137 q->q.qlen++; in qdisc_enqueue_skb_bad_txq()
148 if (q->flags & TCQ_F_NOLOCK) { in dev_requeue_skb()
154 struct sk_buff *next = skb->next; in dev_requeue_skb()
156 __skb_queue_tail(&q->gso_skb, skb); in dev_requeue_skb()
164 q->qstats.requeues++; in dev_requeue_skb()
166 q->q.qlen++; in dev_requeue_skb()
174 set_bit(__QDISC_STATE_MISSED, &q->state); in dev_requeue_skb()
185 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb()
188 struct sk_buff *nskb = q->dequeue(q); in try_bulk_dequeue_skb()
193 bytelimit -= nskb->len; /* covers GSO len */ in try_bulk_dequeue_skb()
194 skb->next = nskb; in try_bulk_dequeue_skb()
213 nskb = q->dequeue(q); in try_bulk_dequeue_skb_slow()
220 skb->next = nskb; in try_bulk_dequeue_skb_slow()
227 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
228 * A requeued skb (via q->gso_skb) can also be a SKB list.
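Because both dequeue_skb() and the gso_skb requeue list can hand back a chain linked through skb->next, a consumer has to detach each skb before using it. A minimal sketch of that walk; the function name is made up for illustration, while skb_mark_not_on_list() and consume_skb() are real helpers.

    #include <linux/skbuff.h>

    /* Illustrative: walk a possibly-chained skb list, unlinking each
     * skb before it is handed on or freed.
     */
    static void example_consume_skb_list(struct sk_buff *skb)
    {
            while (skb) {
                    struct sk_buff *next = skb->next;

                    skb_mark_not_on_list(skb);      /* clears skb->next */
                    /* ... pass the single skb to the driver, or: */
                    consume_skb(skb);               /* placeholder action */
                    skb = next;
            }
    }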
233 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb()
237 if (unlikely(!skb_queue_empty(&q->gso_skb))) { in dequeue_skb()
240 if (q->flags & TCQ_F_NOLOCK) { in dequeue_skb()
245 skb = skb_peek(&q->gso_skb); in dequeue_skb()
261 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
263 skb = __skb_dequeue(&q->gso_skb); in dequeue_skb()
269 q->q.qlen--; in dequeue_skb()
282 if ((q->flags & TCQ_F_ONETXQUEUE) && in dequeue_skb()
294 skb = q->dequeue(q); in dequeue_skb()
313 * false - hardware queue frozen; back off
314 * true - feel free to send more pkts
359 /* Driver returned NETDEV_TX_BUSY - requeue skb */ in sch_direct_xmit()
362 dev->name, ret, q->q.qlen); in sch_direct_xmit()
386 * 0 - queue is empty or throttled.
387 * >0 - queue is not empty.
403 if (!(q->flags & TCQ_F_NOLOCK)) in qdisc_restart()
418 quota -= packets; in __qdisc_run()
420 if (q->flags & TCQ_F_NOLOCK) in __qdisc_run()
421 set_bit(__QDISC_STATE_MISSED, &q->state); in __qdisc_run()
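The fragments at source lines 418-421 above are pieces of the quota loop in __qdisc_run(): qdisc_restart() keeps returning non-zero while the queue still has packets, and once the quota is spent the remaining work is deferred (the MISSED bit for lockless qdiscs, __netif_schedule() otherwise). A hedged reconstruction of that loop shape, with the quota source simplified to a parameter:

    /* Reconstruction of the loop the fragments above come from;
     * qdisc_restart() is static to sch_generic.c, so this is a sketch,
     * not standalone code.
     */
    static void example_qdisc_run_loop(struct Qdisc *q, int quota)
    {
            int packets;

            while (qdisc_restart(q, &packets)) {    /* non-zero: queue not empty */
                    quota -= packets;
                    if (quota <= 0) {
                            if (q->flags & TCQ_F_NOLOCK)
                                    set_bit(__QDISC_STATE_MISSED, &q->state);
                            else
                                    __netif_schedule(q);    /* resume from net_tx_action() */
                            break;
                    }
            }
    }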
432 unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start); in dev_trans_start()
436 for (i = 1; i < dev->num_tx_queues; i++) { in dev_trans_start()
437 val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start); in dev_trans_start()
452 for (i = 0; i < dev->num_tx_queues; i++) { in netif_freeze_queues()
458 * the ->hard_start_xmit() handler and already in netif_freeze_queues()
462 set_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_freeze_queues()
469 spin_lock(&dev->tx_global_lock); in netif_tx_lock()
478 for (i = 0; i < dev->num_tx_queues; i++) { in netif_unfreeze_queues()
485 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_unfreeze_queues()
493 spin_unlock(&dev->tx_global_lock); in netif_tx_unlock()
502 spin_lock(&dev->tx_global_lock); in dev_watchdog()
512 for (i = 0; i < dev->num_tx_queues; i++) { in dev_watchdog()
523 trans_start = READ_ONCE(txq->trans_start); in dev_watchdog()
525 if (time_after(jiffies, trans_start + dev->watchdog_timeo)) { in dev_watchdog()
526 timedout_ms = jiffies_to_msecs(jiffies - trans_start); in dev_watchdog()
527 atomic_long_inc(&txq->trans_timeout); in dev_watchdog()
540 dev->netdev_ops->ndo_tx_timeout(dev, i); in dev_watchdog()
543 if (!mod_timer(&dev->watchdog_timer, in dev_watchdog()
545 dev->watchdog_timeo))) in dev_watchdog()
549 spin_unlock(&dev->tx_global_lock); in dev_watchdog()
552 netdev_put(dev, &dev->watchdog_dev_tracker); in dev_watchdog()
557 if (!dev->netdev_ops->ndo_tx_timeout) in netdev_watchdog_up()
559 if (dev->watchdog_timeo <= 0) in netdev_watchdog_up()
560 dev->watchdog_timeo = 5*HZ; in netdev_watchdog_up()
561 if (!mod_timer(&dev->watchdog_timer, in netdev_watchdog_up()
562 round_jiffies(jiffies + dev->watchdog_timeo))) in netdev_watchdog_up()
563 netdev_hold(dev, &dev->watchdog_dev_tracker, in netdev_watchdog_up()
571 if (timer_delete(&dev->watchdog_timer)) in netdev_watchdog_down()
572 netdev_put(dev, &dev->watchdog_dev_tracker); in netdev_watchdog_down()
577 * netif_carrier_on - set carrier
584 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { in netif_carrier_on()
585 if (dev->reg_state == NETREG_UNINITIALIZED) in netif_carrier_on()
587 atomic_inc(&dev->carrier_up_count); in netif_carrier_on()
596 * netif_carrier_off - clear carrier
603 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { in netif_carrier_off()
604 if (dev->reg_state == NETREG_UNINITIALIZED) in netif_carrier_off()
606 atomic_inc(&dev->carrier_down_count); in netif_carrier_off()
613 * netif_carrier_event - report carrier state event
622 if (dev->reg_state == NETREG_UNINITIALIZED) in netif_carrier_event()
624 atomic_inc(&dev->carrier_up_count); in netif_carrier_event()
625 atomic_inc(&dev->carrier_down_count); in netif_carrier_event()
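netif_carrier_on(), netif_carrier_off() and netif_carrier_event() are the driver-facing entry points behind the counters touched above. A typical, purely illustrative driver pattern (the callback name is hypothetical):

    /* Illustrative: report PHY link changes through the carrier helpers. */
    static void example_link_change(struct net_device *dev, bool link_up)
    {
            if (link_up)
                    netif_carrier_on(dev);  /* bumps carrier_up_count, fires a linkwatch event */
            else
                    netif_carrier_off(dev); /* bumps carrier_down_count, fires a linkwatch event */
    }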
638 dev_core_stats_tx_dropped_inc(skb->dev); in noop_enqueue()
682 .owner = -1,
691 * if this is NULL - so clear it here. */ in noqueue_init()
692 qdisc->enqueue = NULL; in noqueue_init()
711 /* 3-band FIFO queue: old style, but should be a bit faster than
719 * - rings for priority bands
728 return &priv->q[band]; in band2list()
734 int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX]; in pfifo_fast_enqueue()
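The two fragments above show pfifo_fast's banding: sch_default_prio2band maps skb->priority to one of three bands, and band2list() returns that band's backing ring. A minimal sketch of the band-scan order on dequeue, using plain sk_buff_head queues instead of the kernel's per-band skb_array rings; the struct and helper names below are illustrative.

    #include <linux/skbuff.h>

    #define EXAMPLE_BANDS   3

    /* Illustrative stand-in for pfifo_fast_priv: one FIFO per band.
     * Each head must first be set up with __skb_queue_head_init().
     */
    struct example_pfifo {
            struct sk_buff_head q[EXAMPLE_BANDS];
    };

    /* Dequeue order: band 0 (highest priority) first, then 1, then 2. */
    static struct sk_buff *example_pfifo_dequeue(struct example_pfifo *priv)
    {
            int band;

            for (band = 0; band < EXAMPLE_BANDS; band++) {
                    struct sk_buff *skb = __skb_dequeue(&priv->q[band]);

                    if (skb)
                            return skb;
            }
            return NULL;
    }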
774 READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) { in pfifo_fast_dequeue()
780 clear_bit(__QDISC_STATE_MISSED, &qdisc->state); in pfifo_fast_dequeue()
781 clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); in pfifo_fast_dequeue()
823 if (!q->ring.queue) in pfifo_fast_reset()
834 q = per_cpu_ptr(qdisc->cpu_qstats, i); in pfifo_fast_reset()
835 q->backlog = 0; in pfifo_fast_reset()
836 q->qlen = 0; in pfifo_fast_reset()
848 return skb->len; in pfifo_fast_dump()
851 return -1; in pfifo_fast_dump()
857 unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len; in pfifo_fast_init()
863 return -EINVAL; in pfifo_fast_init()
871 return -ENOMEM; in pfifo_fast_init()
874 /* Can by-pass the queue discipline */ in pfifo_fast_init()
875 qdisc->flags |= TCQ_F_CAN_BYPASS; in pfifo_fast_init()
890 if (!q->ring.queue) in pfifo_fast_destroy()
895 ptr_ring_cleanup(&q->ring, NULL); in pfifo_fast_destroy()
939 unsigned int size = sizeof(*sch) + ops->priv_size; in qdisc_alloc()
940 int err = -ENOBUFS; in qdisc_alloc()
945 err = -EINVAL; in qdisc_alloc()
949 dev = dev_queue->dev; in qdisc_alloc()
954 __skb_queue_head_init(&sch->gso_skb); in qdisc_alloc()
955 __skb_queue_head_init(&sch->skb_bad_txq); in qdisc_alloc()
956 gnet_stats_basic_sync_init(&sch->bstats); in qdisc_alloc()
957 lockdep_register_key(&sch->root_lock_key); in qdisc_alloc()
958 spin_lock_init(&sch->q.lock); in qdisc_alloc()
959 lockdep_set_class(&sch->q.lock, &sch->root_lock_key); in qdisc_alloc()
961 if (ops->static_flags & TCQ_F_CPUSTATS) { in qdisc_alloc()
962 sch->cpu_bstats = in qdisc_alloc()
964 if (!sch->cpu_bstats) in qdisc_alloc()
967 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); in qdisc_alloc()
968 if (!sch->cpu_qstats) { in qdisc_alloc()
969 free_percpu(sch->cpu_bstats); in qdisc_alloc()
974 spin_lock_init(&sch->busylock); in qdisc_alloc()
975 lockdep_set_class(&sch->busylock, in qdisc_alloc()
976 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); in qdisc_alloc()
979 spin_lock_init(&sch->seqlock); in qdisc_alloc()
980 lockdep_set_class(&sch->seqlock, in qdisc_alloc()
981 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); in qdisc_alloc()
983 sch->ops = ops; in qdisc_alloc()
984 sch->flags = ops->static_flags; in qdisc_alloc()
985 sch->enqueue = ops->enqueue; in qdisc_alloc()
986 sch->dequeue = ops->dequeue; in qdisc_alloc()
987 sch->dev_queue = dev_queue; in qdisc_alloc()
988 sch->owner = -1; in qdisc_alloc()
989 netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL); in qdisc_alloc()
990 refcount_set(&sch->refcnt, 1); in qdisc_alloc()
994 lockdep_unregister_key(&sch->root_lock_key); in qdisc_alloc()
1007 if (!bpf_try_module_get(ops, ops->owner)) { in qdisc_create_dflt()
1014 bpf_module_put(ops, ops->owner); in qdisc_create_dflt()
1017 sch->parent = parentid; in qdisc_create_dflt()
1019 if (!ops->init || ops->init(sch, NULL, extack) == 0) { in qdisc_create_dflt()
1020 trace_qdisc_create(ops, dev_queue->dev, parentid); in qdisc_create_dflt()
1033 const struct Qdisc_ops *ops = qdisc->ops; in qdisc_reset()
1037 if (ops->reset) in qdisc_reset()
1038 ops->reset(qdisc); in qdisc_reset()
1040 __skb_queue_purge(&qdisc->gso_skb); in qdisc_reset()
1041 __skb_queue_purge(&qdisc->skb_bad_txq); in qdisc_reset()
1043 qdisc->q.qlen = 0; in qdisc_reset()
1044 qdisc->qstats.backlog = 0; in qdisc_reset()
1051 free_percpu(qdisc->cpu_bstats); in qdisc_free()
1052 free_percpu(qdisc->cpu_qstats); in qdisc_free()
1067 const struct Qdisc_ops *ops = qdisc->ops; in __qdisc_destroy()
1073 qdisc_put_stab(rtnl_dereference(qdisc->stab)); in __qdisc_destroy()
1075 gen_kill_estimator(&qdisc->rate_est); in __qdisc_destroy()
1080 if (ops->destroy) in __qdisc_destroy()
1081 ops->destroy(qdisc); in __qdisc_destroy()
1083 lockdep_unregister_key(&qdisc->root_lock_key); in __qdisc_destroy()
1084 bpf_module_put(ops, ops->owner); in __qdisc_destroy()
1085 netdev_put(dev, &qdisc->dev_tracker); in __qdisc_destroy()
1089 call_rcu(&qdisc->rcu, qdisc_free_cb); in __qdisc_destroy()
1094 if (qdisc->flags & TCQ_F_BUILTIN) in qdisc_destroy()
1105 if (qdisc->flags & TCQ_F_BUILTIN || in qdisc_put()
1106 !refcount_dec_and_test(&qdisc->refcnt)) in qdisc_put()
1120 if (qdisc->flags & TCQ_F_BUILTIN || in qdisc_put_unlocked()
1121 !refcount_dec_and_rtnl_lock(&qdisc->refcnt)) in qdisc_put_unlocked()
1133 struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping); in dev_graft_qdisc()
1142 rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); in dev_graft_qdisc()
1143 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); in dev_graft_qdisc()
1155 struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); in shutdown_scheduler_queue()
1159 rcu_assign_pointer(dev_queue->qdisc, qdisc_default); in shutdown_scheduler_queue()
1160 rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default); in shutdown_scheduler_queue()
1173 if (dev->priv_flags & IFF_NO_QUEUE) in attach_one_default_qdisc()
1175 else if (dev->type == ARPHRD_CAN) in attach_one_default_qdisc()
1183 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in attach_one_default_qdisc()
1184 rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); in attach_one_default_qdisc()
1195 dev->priv_flags & IFF_NO_QUEUE) { in attach_default_qdiscs()
1197 qdisc = rtnl_dereference(txq->qdisc_sleeping); in attach_default_qdiscs()
1198 rcu_assign_pointer(dev->qdisc, qdisc); in attach_default_qdiscs()
1203 rcu_assign_pointer(dev->qdisc, qdisc); in attach_default_qdiscs()
1204 qdisc->ops->attach(qdisc); in attach_default_qdiscs()
1207 qdisc = rtnl_dereference(dev->qdisc); in attach_default_qdiscs()
1212 default_qdisc_ops->id, noqueue_qdisc_ops.id); in attach_default_qdiscs()
1214 dev->priv_flags |= IFF_NO_QUEUE; in attach_default_qdiscs()
1216 qdisc = rtnl_dereference(txq->qdisc_sleeping); in attach_default_qdiscs()
1217 rcu_assign_pointer(dev->qdisc, qdisc); in attach_default_qdiscs()
1219 dev->priv_flags ^= IFF_NO_QUEUE; in attach_default_qdiscs()
1232 struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); in transition_one_qdisc()
1235 if (!(new_qdisc->flags & TCQ_F_BUILTIN)) in transition_one_qdisc()
1236 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); in transition_one_qdisc()
1238 rcu_assign_pointer(dev_queue->qdisc, new_qdisc); in transition_one_qdisc()
1240 WRITE_ONCE(dev_queue->trans_start, 0); in transition_one_qdisc()
1254 if (rtnl_dereference(dev->qdisc) == &noop_qdisc) in dev_activate()
1258 /* Delay activation until next carrier-on event */ in dev_activate()
1275 if (qdisc->flags & TCQ_F_BUILTIN) in qdisc_deactivate()
1278 set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); in qdisc_deactivate()
1288 qdisc = rtnl_dereference(dev_queue->qdisc); in dev_deactivate_queue()
1290 if (qdisc->enqueue) in dev_deactivate_queue()
1293 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); in dev_deactivate_queue()
1304 qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); in dev_reset_queue()
1308 nolock = qdisc->flags & TCQ_F_NOLOCK; in dev_reset_queue()
1311 spin_lock_bh(&qdisc->seqlock); in dev_reset_queue()
1318 clear_bit(__QDISC_STATE_MISSED, &qdisc->state); in dev_reset_queue()
1319 clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); in dev_reset_queue()
1320 spin_unlock_bh(&qdisc->seqlock); in dev_reset_queue()
1328 for (i = 0; i < dev->num_tx_queues; i++) { in some_qdisc_is_busy()
1335 q = rtnl_dereference(dev_queue->qdisc_sleeping); in some_qdisc_is_busy()
1341 test_bit(__QDISC_STATE_SCHED, &q->state)); in some_qdisc_is_busy()
1352 * dev_deactivate_many - deactivate transmissions on several devices
1387 /* wait_event() would avoid this sleep-loop but would in dev_deactivate_many()
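The truncated comment at line 1387 argues that a wait_event() based scheme would add checks to the packet fast path, so deactivation simply polls some_qdisc_is_busy() (seen in the fragments above) with a short sleep. A sketch of that polling shape; msleep(1) here is a stand-in for whatever short sleep the real loop uses.

    #include <linux/delay.h>        /* msleep() */

    /* Sketch: poll until no CPU is still running one of the device's
     * qdiscs; some_qdisc_is_busy() is static to sch_generic.c.
     */
    static void example_wait_for_qdiscs(struct net_device *dev)
    {
            while (some_qdisc_is_busy(dev))
                    msleep(1);      /* stand-in for the real short sleep */
    }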
1400 list_add(&dev->close_list, &single); in dev_deactivate()
1409 struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); in qdisc_change_tx_queue_len()
1410 const struct Qdisc_ops *ops = qdisc->ops; in qdisc_change_tx_queue_len()
1412 if (ops->change_tx_queue_len) in qdisc_change_tx_queue_len()
1413 return ops->change_tx_queue_len(qdisc, dev->tx_queue_len); in qdisc_change_tx_queue_len()
1420 struct Qdisc *qdisc = rtnl_dereference(dev->qdisc); in dev_qdisc_change_real_num_tx()
1422 if (qdisc->ops->change_real_num_tx) in dev_qdisc_change_real_num_tx()
1423 qdisc->ops->change_real_num_tx(qdisc, new_real_tx); in dev_qdisc_change_real_num_tx()
1433 for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { in mq_change_real_num_tx()
1434 qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); in mq_change_real_num_tx()
1438 if (qdisc != &noop_qdisc && !qdisc->handle) in mq_change_real_num_tx()
1441 for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { in mq_change_real_num_tx()
1442 qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); in mq_change_real_num_tx()
1443 if (qdisc != &noop_qdisc && !qdisc->handle) in mq_change_real_num_tx()
1452 bool up = dev->flags & IFF_UP; in dev_qdisc_change_tx_queue_len()
1459 for (i = 0; i < dev->num_tx_queues; i++) { in dev_qdisc_change_tx_queue_len()
1460 ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]); in dev_qdisc_change_tx_queue_len()
1478 rcu_assign_pointer(dev_queue->qdisc, qdisc); in dev_init_scheduler_queue()
1479 rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); in dev_init_scheduler_queue()
1484 rcu_assign_pointer(dev->qdisc, &noop_qdisc); in dev_init_scheduler()
1489 timer_setup(&dev->watchdog_timer, dev_watchdog, 0); in dev_init_scheduler()
1497 qdisc_put(rtnl_dereference(dev->qdisc)); in dev_shutdown()
1498 rcu_assign_pointer(dev->qdisc, &noop_qdisc); in dev_shutdown()
1500 WARN_ON(timer_pending(&dev->watchdog_timer)); in dev_shutdown()
1504 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
1513 * in fast path (a reciprocal divide is a multiply and a shift)
1524 * reciprocal_value() is not used here because it doesn't handle 64-bit values.
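The doc comment above (source lines 1504-1524) describes the trick: instead of computing time_in_ns = NSEC_PER_SEC * len / rate_bps with a 64-bit divide per packet, psched_ratecfg_precompute() derives a (mult, shift) pair once so the fast path only multiplies and shifts: time_in_ns ≈ (len * mult) >> shift. A hedged sketch of such a pre-computation and its use; the loop below is one straightforward way to pick the largest safe mult and is not claimed to be the exact in-tree algorithm.

    #include <linux/math64.h>       /* div64_u64() */
    #include <linux/time64.h>       /* NSEC_PER_SEC */
    #include <linux/limits.h>       /* ULLONG_MAX */

    /* Sketch: choose mult/shift so that (len * mult) >> shift
     * approximates len * NSEC_PER_SEC / rate_bps, keeping mult as
     * large as possible while it still fits in 32 bits.
     */
    static void example_precompute(u64 rate_bps, u32 *mult, u8 *shift)
    {
            u64 factor = NSEC_PER_SEC;

            *mult = 1;
            *shift = 0;
            if (!rate_bps)
                    return;

            for (;;) {
                    u64 m = div64_u64(factor, rate_bps);

                    *mult = (u32)m;         /* consistent with the current *shift */
                    if ((m & (1ULL << 31)) || factor > (ULLONG_MAX >> 1))
                            break;          /* mult is as large as it can safely get */
                    factor <<= 1;
                    (*shift)++;
            }
    }

    /* Fast path: nanoseconds needed to send 'len' bytes, no divide.
     * len * mult stays well inside 64 bits for packet-sized lengths.
     */
    static u64 example_len_to_ns(u32 len, u32 mult, u8 shift)
    {
            return ((u64)len * mult) >> shift;
    }

With the real r->mult / r->shift filled in by psched_ratecfg_precompute(), helpers such as psched_l2t_ns() perform exactly this multiply-and-shift in the transmit path.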
1550 r->overhead = conf->overhead; in psched_ratecfg_precompute()
1551 r->mpu = conf->mpu; in psched_ratecfg_precompute()
1552 r->rate_bytes_ps = max_t(u64, conf->rate, rate64); in psched_ratecfg_precompute()
1553 r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); in psched_ratecfg_precompute()
1554 psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift); in psched_ratecfg_precompute()
1560 r->rate_pkts_ps = pktrate64; in psched_ppscfg_precompute()
1561 psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift); in psched_ppscfg_precompute()
1568 /* Protected with chain0->filter_chain_lock. in mini_qdisc_pair_swap()
1572 rcu_dereference_protected(*miniqp->p_miniq, 1); in mini_qdisc_pair_swap()
1576 RCU_INIT_POINTER(*miniqp->p_miniq, NULL); in mini_qdisc_pair_swap()
1578 miniq = miniq_old != &miniqp->miniq1 ? in mini_qdisc_pair_swap()
1579 &miniqp->miniq1 : &miniqp->miniq2; in mini_qdisc_pair_swap()
1587 cond_synchronize_rcu(miniq->rcu_state); in mini_qdisc_pair_swap()
1588 else if (!poll_state_synchronize_rcu(miniq->rcu_state)) in mini_qdisc_pair_swap()
1591 miniq->filter_list = tp_head; in mini_qdisc_pair_swap()
1592 rcu_assign_pointer(*miniqp->p_miniq, miniq); in mini_qdisc_pair_swap()
1600 miniq_old->rcu_state = start_poll_synchronize_rcu(); in mini_qdisc_pair_swap()
1607 miniqp->miniq1.block = block; in mini_qdisc_pair_block_init()
1608 miniqp->miniq2.block = block; in mini_qdisc_pair_block_init()
1615 miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats; in mini_qdisc_pair_init()
1616 miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats; in mini_qdisc_pair_init()
1617 miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats; in mini_qdisc_pair_init()
1618 miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats; in mini_qdisc_pair_init()
1619 miniqp->miniq1.rcu_state = get_state_synchronize_rcu(); in mini_qdisc_pair_init()
1620 miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state; in mini_qdisc_pair_init()
1621 miniqp->p_miniq = p_miniq; in mini_qdisc_pair_init()