Lines Matching +full:additional +full:- +full:devs
All hits below appear to come from the Linux kernel header include/net/sch_generic.h; each entry shows the header line number, the matched text, and (for code lines) the enclosing function.
1 /* SPDX-License-Identifier: GPL-2.0 */
65 /* similar to sk_buff_head, but skb->prev pointer is undefined. */
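For context (reconstructed from a recent mainline tree, not part of the search hits): the structure this comment annotates is a minimal singly linked skb queue, which is exactly why skb->prev must be treated as undefined here:

struct qdisc_skb_head {
        struct sk_buff  *head;
        struct sk_buff  *tail;
        __u32           qlen;
        spinlock_t      lock;
};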
84 * q->dev_queue : It can test
138 if (qdisc->flags & TCQ_F_BUILTIN) in qdisc_refcount_inc()
140 refcount_inc(&qdisc->refcnt); in qdisc_refcount_inc()
145 if (qdisc->flags & TCQ_F_BUILTIN) in qdisc_refcount_dec_if_one()
147 return refcount_dec_if_one(&qdisc->refcnt); in qdisc_refcount_dec_if_one()
156 if (qdisc->flags & TCQ_F_BUILTIN) in qdisc_refcount_inc_nz()
158 if (refcount_inc_not_zero(&qdisc->refcnt)) in qdisc_refcount_inc_nz()
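Assembled from the three hits above, the refcount helpers read roughly as follows in recent mainline; built-in qdiscs (TCQ_F_BUILTIN, i.e. noop/noqueue) are never reference-counted:

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return;
        refcount_inc(&qdisc->refcnt);
}

/* true when the count was exactly 1 and has been dropped to 0,
 * i.e. the caller now owns destruction of the qdisc */
static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return true;
        return refcount_dec_if_one(&qdisc->refcnt);
}

/* take a reference only if the count is not already 0 */
static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return qdisc;
        if (refcount_inc_not_zero(&qdisc->refcnt))
                return qdisc;
        return NULL;
}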
164 * root_lock section, or provide their own memory barriers -- ordering
169 if (qdisc->flags & TCQ_F_NOLOCK) in qdisc_is_running()
170 return spin_is_locked(&qdisc->seqlock); in qdisc_is_running()
171 return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2); in qdisc_is_running()
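The two hits above form the whole body of qdisc_is_running(): lockless (TCQ_F_NOLOCK) qdiscs track the running state via seqlock ownership, all others via a state2 bit (sketch from mainline; details may vary by version):

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK)
                return spin_is_locked(&qdisc->seqlock);
        return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}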
176 return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY); in nolock_qdisc_is_empty()
181 return q->flags & TCQ_F_CPUSTATS; in qdisc_is_percpu_stats()
188 return !READ_ONCE(qdisc->q.qlen); in qdisc_is_empty()
196 if (qdisc->flags & TCQ_F_NOLOCK) { in qdisc_run_begin()
197 if (spin_trylock(&qdisc->seqlock)) in qdisc_run_begin()
205 if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state)) in qdisc_run_begin()
212 return spin_trylock(&qdisc->seqlock); in qdisc_run_begin()
214 return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2); in qdisc_run_begin()
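Reconstructed from the hits, qdisc_run_begin() reads approximately as below in recent mainline. The MISSED bit is the handshake with qdisc_run_end(): if the seqlock cannot be taken, the caller records a missed run and retries the lock once, so that either it gets the lock or the current owner is guaranteed to observe MISSED:

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK) {
                if (spin_trylock(&qdisc->seqlock))
                        return true;

                /* no need to insist if MISSED is already set; the
                 * test_and_set_bit() also orders us against earlier
                 * enqueues and the trylock below */
                if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
                        return false;

                /* retry so that we either grab the lock or the owner
                 * sees MISSED when testing it in qdisc_run_end() */
                return spin_trylock(&qdisc->seqlock);
        }
        return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}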
219 if (qdisc->flags & TCQ_F_NOLOCK) { in qdisc_run_end()
220 spin_unlock(&qdisc->seqlock); in qdisc_run_end()
222 /* spin_unlock() only has store-release semantic. The unlock in qdisc_run_end()
223 * and test_bit() ordering is a store-load ordering, so a full in qdisc_run_end()
229 &qdisc->state))) in qdisc_run_end()
232 __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2); in qdisc_run_end()
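The matching qdisc_run_end() (same caveat, a reconstruction): the smp_mb() is needed because spin_unlock() is only a store-release, while unlock followed by test_bit() is a store-load sequence requiring a full barrier:

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK) {
                spin_unlock(&qdisc->seqlock);
                smp_mb();       /* full barrier, see comment above */
                if (unlikely(test_bit(__QDISC_STATE_MISSED,
                                      &qdisc->state)))
                        __netif_schedule(qdisc);
        } else {
                __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
        }
}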
238 return qdisc->flags & TCQ_F_ONETXQUEUE; in qdisc_may_bulk()
398 /* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
399 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
463 /* Lock protects tcf_block and lifetime-management data of chains
481 unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
482 unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
496 return lockdep_is_held(&chain->filter_chain_lock); in lockdep_tcf_chain_is_locked()
501 return lockdep_is_held(&tp->lock); in lockdep_tcf_proto_is_locked()
514 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb)); in qdisc_cb_private_validate()
515 BUILD_BUG_ON(sizeof(qcb->data) < sz); in qdisc_cb_private_validate()
520 return q->q.qlen; in qdisc_qlen()
525 __u32 qlen = q->qstats.qlen; in qdisc_qlen_sum()
530 qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen; in qdisc_qlen_sum()
532 qlen += q->q.qlen; in qdisc_qlen_sum()
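The hits at 525-532 belong to qdisc_qlen_sum(), which sums per-CPU queue lengths when the qdisc keeps percpu stats and falls back to q.qlen otherwise (mainline sketch):

static inline __u32 qdisc_qlen_sum(const struct Qdisc *q)
{
        __u32 qlen = q->qstats.qlen;
        int i;

        if (qdisc_is_percpu_stats(q)) {
                for_each_possible_cpu(i)
                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
        } else {
                qlen += q->q.qlen;
        }
        return qlen;
}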
540 return (struct qdisc_skb_cb *)skb->cb; in qdisc_skb_cb()
545 return &qdisc->q.lock; in qdisc_lock()
550 struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc); in qdisc_root()
557 return rcu_dereference_bh(qdisc->dev_queue->qdisc); in qdisc_root_bh()
562 return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping); in qdisc_root_sleeping()
575 return qdisc->dev_queue->dev; in qdisc_dev()
580 if (q->flags & TCQ_F_MQROOT) in sch_tree_lock()
588 if (q->flags & TCQ_F_MQROOT) in sch_tree_unlock()
604 return ntx < dev->real_num_tx_queues ? in get_default_qdisc_ops()
637 h = qdisc_class_hash(id, hash->hashmask); in qdisc_class_find()
638 hlist_for_each_entry(cl, &hash->hash[h], hnode) { in qdisc_class_find()
639 if (cl->classid == id) in qdisc_class_find()
647 return cl->filter_cnt > 0; in qdisc_class_in_use()
654 if (check_add_overflow(cl->filter_cnt, 1, &res)) in qdisc_class_get()
657 cl->filter_cnt = res; in qdisc_class_get()
664 if (check_sub_overflow(cl->filter_cnt, 1, &res)) in qdisc_class_put()
667 cl->filter_cnt = res; in qdisc_class_put()
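filter_cnt is bumped and dropped with overflow-checked arithmetic; a reconstruction from the hits follows (the WARN() message strings here are paraphrased, not verbatim):

static inline void qdisc_class_get(struct Qdisc_class_common *cl)
{
        unsigned int res;

        if (check_add_overflow(cl->filter_cnt, 1, &res))
                WARN(1, "Qdisc class overflow");        /* message paraphrased */

        cl->filter_cnt = res;
}

static inline void qdisc_class_put(struct Qdisc_class_common *cl)
{
        unsigned int res;

        if (check_sub_overflow(cl->filter_cnt, 1, &res))
                WARN(1, "Qdisc class underflow");       /* message paraphrased */

        cl->filter_cnt = res;
}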
672 u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY; in tc_classid_to_hwtc()
674 return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL; in tc_classid_to_hwtc()
712 q->flags &= ~TCQ_F_OFFLOADED; in qdisc_offload_dump_helper()
741 return skb->tc_at_ingress; in skb_at_tc_ingress()
750 if (skb->tc_skip_classify) { in skb_skip_tc_classify()
751 skb->tc_skip_classify = 0; in skb_skip_tc_classify()
763 for (; i < dev->num_tx_queues; i++) { in qdisc_reset_all_tx_gt()
764 qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc); in qdisc_reset_all_tx_gt()
779 for (i = 0; i < dev->num_tx_queues; i++) { in qdisc_all_tx_empty()
781 const struct Qdisc *q = rcu_dereference(txq->qdisc); in qdisc_all_tx_empty()
797 for (i = 0; i < dev->num_tx_queues; i++) { in qdisc_tx_changing()
800 if (rcu_access_pointer(txq->qdisc) != in qdisc_tx_changing()
801 rcu_access_pointer(txq->qdisc_sleeping)) in qdisc_tx_changing()
812 for (i = 0; i < dev->num_tx_queues; i++) { in qdisc_tx_is_noop()
814 if (rcu_access_pointer(txq->qdisc) != &noop_qdisc) in qdisc_tx_is_noop()
822 return qdisc_skb_cb(skb)->pkt_len; in qdisc_pkt_len()
825 /* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
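In mainline this comment introduces enum net_xmit_qdisc_t; the values sit above NET_XMIT_MASK so they can never be confused with real NET_XMIT_* return codes:

enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};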
841 struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab); in qdisc_calculate_pkt_len()
851 return sch->enqueue(skb, sch, to_free); in qdisc_enqueue()
857 u64_stats_update_begin(&bstats->syncp); in _bstats_update()
858 u64_stats_add(&bstats->bytes, bytes); in _bstats_update()
859 u64_stats_add(&bstats->packets, packets); in _bstats_update()
860 u64_stats_update_end(&bstats->syncp); in _bstats_update()
868 skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1); in bstats_update()
874 bstats_update(this_cpu_ptr(sch->cpu_bstats), skb); in qdisc_bstats_cpu_update()
880 bstats_update(&sch->bstats, skb); in qdisc_bstats_update()
886 sch->qstats.backlog -= qdisc_pkt_len(skb); in qdisc_qstats_backlog_dec()
892 this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb)); in qdisc_qstats_cpu_backlog_dec()
898 sch->qstats.backlog += qdisc_pkt_len(skb); in qdisc_qstats_backlog_inc()
904 this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb)); in qdisc_qstats_cpu_backlog_inc()
909 this_cpu_inc(sch->cpu_qstats->qlen); in qdisc_qstats_cpu_qlen_inc()
914 this_cpu_dec(sch->cpu_qstats->qlen); in qdisc_qstats_cpu_qlen_dec()
919 this_cpu_inc(sch->cpu_qstats->requeues); in qdisc_qstats_cpu_requeues_inc()
924 sch->qstats.drops += count; in __qdisc_qstats_drop()
929 qstats->drops++; in qstats_drop_inc()
934 qstats->overlimits++; in qstats_overlimit_inc()
939 qstats_drop_inc(&sch->qstats); in qdisc_qstats_drop()
944 this_cpu_inc(sch->cpu_qstats->drops); in qdisc_qstats_cpu_drop()
949 sch->qstats.overlimits++; in qdisc_qstats_overlimit()
956 return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen); in qdisc_qstats_copy()
964 gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats); in qdisc_qstats_qlen_backlog()
989 struct sk_buff *last = qh->tail; in __qdisc_enqueue_tail()
992 skb->next = NULL; in __qdisc_enqueue_tail()
993 last->next = skb; in __qdisc_enqueue_tail()
994 qh->tail = skb; in __qdisc_enqueue_tail()
996 qh->tail = skb; in __qdisc_enqueue_tail()
997 qh->head = skb; in __qdisc_enqueue_tail()
999 qh->qlen++; in __qdisc_enqueue_tail()
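Hits 989-999 are the complete tail-enqueue on the prev-less qdisc_skb_head list (mainline sketch):

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
                                        struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
}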
1004 __qdisc_enqueue_tail(skb, &sch->q); in qdisc_enqueue_tail()
1012 skb->next = qh->head; in __qdisc_enqueue_head()
1014 if (!qh->head) in __qdisc_enqueue_head()
1015 qh->tail = skb; in __qdisc_enqueue_head()
1016 qh->head = skb; in __qdisc_enqueue_head()
1017 qh->qlen++; in __qdisc_enqueue_head()
1022 struct sk_buff *skb = qh->head; in __qdisc_dequeue_head()
1025 qh->head = skb->next; in __qdisc_dequeue_head()
1026 qh->qlen--; in __qdisc_dequeue_head()
1027 if (qh->head == NULL) in __qdisc_dequeue_head()
1028 qh->tail = NULL; in __qdisc_dequeue_head()
1029 skb->next = NULL; in __qdisc_dequeue_head()
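And the matching head-dequeue, again assembled from the hits:

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
        struct sk_buff *skb = qh->head;

        if (likely(skb != NULL)) {
                qh->head = skb->next;
                qh->qlen--;
                if (qh->head == NULL)
                        qh->tail = NULL;
                skb->next = NULL;
        }
        return skb;
}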
1037 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); in qdisc_dequeue_head()
1060 struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb; in tc_skb_cb()
1069 return tc_skb_cb(skb)->drop_reason; in tcf_get_drop_reason()
1075 tc_skb_cb(skb)->drop_reason = reason; in tcf_set_drop_reason()
1083 skb->next = *to_free; in __qdisc_drop()
1090 if (skb->prev) in __qdisc_drop_all()
1091 skb->prev->next = *to_free; in __qdisc_drop_all()
1093 skb->next = *to_free; in __qdisc_drop_all()
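Both drop helpers chain the skb onto the *to_free list, which the caller releases in one go with kfree_skb_list(); __qdisc_drop_all() additionally splices in a whole prev-linked segment chain (mainline sketch):

static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
        skb->next = *to_free;
        *to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
                                    struct sk_buff **to_free)
{
        if (skb->prev)
                skb->prev->next = *to_free;
        else
                skb->next = *to_free;
        *to_free = skb;
}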
1116 const struct qdisc_skb_head *qh = &sch->q; in qdisc_peek_head()
1118 return qh->head; in qdisc_peek_head()
1121 /* generic pseudo peek method for non-work-conserving qdisc */
1124 struct sk_buff *skb = skb_peek(&sch->gso_skb); in qdisc_peek_dequeued()
1126 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ in qdisc_peek_dequeued()
1128 skb = sch->dequeue(sch); in qdisc_peek_dequeued()
1131 __skb_queue_head(&sch->gso_skb, skb); in qdisc_peek_dequeued()
1134 sch->q.qlen++; in qdisc_peek_dequeued()
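The pseudo-peek works by really dequeuing once and parking the skb on sch->gso_skb, while keeping it accounted as still queued (reconstruction from the hits):

static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!skb) {
                skb = sch->dequeue(sch);
                if (skb) {
                        __skb_queue_head(&sch->gso_skb, skb);
                        /* it's still part of the queue */
                        qdisc_qstats_backlog_inc(sch, skb);
                        sch->q.qlen++;
                }
        }
        return skb;
}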
1151 sch->q.qlen--; in qdisc_update_stats_at_dequeue()
1160 this_cpu_add(sch->cpu_qstats->backlog, pkt_len); in qdisc_update_stats_at_enqueue()
1162 sch->qstats.backlog += pkt_len; in qdisc_update_stats_at_enqueue()
1163 sch->q.qlen++; in qdisc_update_stats_at_enqueue()
1167 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
1170 struct sk_buff *skb = skb_peek(&sch->gso_skb); in qdisc_dequeue_peeked()
1173 skb = __skb_dequeue(&sch->gso_skb); in qdisc_dequeue_peeked()
1179 sch->q.qlen--; in qdisc_dequeue_peeked()
1182 skb = sch->dequeue(sch); in qdisc_dequeue_peeked()
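Its counterpart first drains the parked skb, adjusting either percpu or plain stats, and only then falls through to the real dequeue (same caveat, a reconstruction):

static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        if (skb) {
                skb = __skb_dequeue(&sch->gso_skb);
                if (qdisc_is_percpu_stats(sch)) {
                        qdisc_qstats_cpu_backlog_dec(sch, skb);
                        qdisc_qstats_cpu_qlen_dec(sch);
                } else {
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                }
        } else {
                skb = sch->dequeue(sch);
        }
        return skb;
}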
1195 if (qh->qlen) { in __qdisc_reset_queue()
1196 rtnl_kfree_skbs(qh->head, qh->tail); in __qdisc_reset_queue()
1198 qh->head = NULL; in __qdisc_reset_queue()
1199 qh->tail = NULL; in __qdisc_reset_queue()
1200 qh->qlen = 0; in __qdisc_reset_queue()
1206 __qdisc_reset_queue(&sch->q); in qdisc_reset_queue()
1269 len += r->overhead; in psched_l2t_ns()
1271 if (len < r->mpu) in psched_l2t_ns()
1272 len = r->mpu; in psched_l2t_ns()
1274 if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) in psched_l2t_ns()
1275 return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; in psched_l2t_ns()
1277 return ((u64)len * r->mult) >> r->shift; in psched_l2t_ns()
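The hits cover all of psched_l2t_ns(), which converts a packet length to transmit time in nanoseconds. ATM is special-cased because payload travels in 53-byte cells carrying 48 data bytes each, so len is rounded up to whole cells before the mult/shift rate conversion (mainline sketch):

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (len < r->mpu)
                len = r->mpu;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}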
1293 res->rate = min_t(u64, r->rate_bytes_ps, ~0U); in psched_ratecfg_getrate()
1295 res->overhead = r->overhead; in psched_ratecfg_getrate()
1296 res->mpu = r->mpu; in psched_ratecfg_getrate()
1297 res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); in psched_ratecfg_getrate()
1309 return ((u64)pkt_num * r->mult) >> r->shift; in psched_pkt2t_ns()
1328 bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb); in mini_qdisc_bstats_cpu_update()
1333 this_cpu_inc(miniq->cpu_qstats->drops); in mini_qdisc_qstats_cpu_drop()
1356 while (test_bit(__QDISC_STATE_SCHED, &q->state)) in qdisc_synchronize()
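The loop spins until the qdisc is no longer scheduled for transmission; in mainline the body is just an msleep(1) (sketch, least certain of the details here):

static inline void qdisc_synchronize(const struct Qdisc *q)
{
        while (test_bit(__QDISC_STATE_SCHED, &q->state))
                msleep(1);
}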