net/sched/sch_ingress.c: diff between 50501936288d6a29d7ef78f25d00e33240fad45f (old) and e420bed025071a623d2720a92bc2245c84757ecb (new)
```diff
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -1,23 +1,24 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /* net/sched/sch_ingress.c - Ingress and clsact qdisc
  *
  * Authors:     Jamal Hadi Salim 1999
  */
 
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
+#include <net/tcx.h>
 
 struct ingress_sched_data {
 	struct tcf_block *block;
 	struct tcf_block_ext_info block_info;
 	struct mini_Qdisc_pair miniqp;
 };
 
 static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
```

[... 49 unchanged lines hidden ...]

```diff
@@ -73,46 +74,65 @@
 	return q->block_info.block_index;
 }
 
 static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
 			struct netlink_ext_ack *extack)
 {
 	struct ingress_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
+	struct bpf_mprog_entry *entry;
+	bool created;
 	int err;
 
 	if (sch->parent != TC_H_INGRESS)
 		return -EOPNOTSUPP;
 
 	net_inc_ingress_queue();
 
-	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
+	entry = tcx_entry_fetch_or_create(dev, true, &created);
+	if (!entry)
+		return -ENOMEM;
+	tcx_miniq_set_active(entry, true);
+	mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
+	if (created)
+		tcx_entry_update(dev, entry, true);
 
 	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 	q->block_info.chain_head_change = clsact_chain_head_change;
 	q->block_info.chain_head_change_priv = &q->miniqp;
 
 	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
 	if (err)
 		return err;
 
 	mini_qdisc_pair_block_init(&q->miniqp, q->block);
 
 	return 0;
 }
 
 static void ingress_destroy(struct Qdisc *sch)
 {
 	struct ingress_sched_data *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress);
 
 	if (sch->parent != TC_H_INGRESS)
 		return;
 
 	tcf_block_put_ext(q->block, sch, &q->block_info);
+
+	if (entry) {
+		tcx_miniq_set_active(entry, false);
+		if (!tcx_entry_is_active(entry)) {
+			tcx_entry_update(dev, NULL, false);
+			tcx_entry_free(entry);
+		}
+	}
+
 	net_dec_ingress_queue();
 }
 
 static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct nlattr *nest;
 
 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
```

[... 99 unchanged lines hidden ...]
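Both ingress hunks above swap the old per-device `dev->miniq_ingress` backing for a mini_Qdisc embedded in a shared tcx entry, reached through `tcx_entry(entry)->miniq` and flagged in use via `tcx_miniq_set_active()`. A rough sketch of the container this implies follows; the authoritative definitions live in `include/net/tcx.h` at the new commit, so field order, exact types, and the helper bodies below are assumptions rather than the kernel source:

```c
/* Sketch only: assumed shape of the tcx container behind the
 * tcx_entry(entry)->miniq accesses in the hunks above.
 */
struct tcx_entry {
	struct mini_Qdisc __rcu *miniq;	/* read by the ingress/egress fast path */
	struct bpf_mprog_bundle bundle;	/* tcx BPF programs for this hook (assumed type) */
	bool miniq_active;		/* an ingress/clsact qdisc holds the miniq */
};

/* Assumed mapping from the bpf_mprog_entry handed to the qdisc back to its
 * enclosing tcx_entry, so the qdisc can reach the embedded miniq pointer.
 */
static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry)
{
	struct bpf_mprog_bundle *bundle = entry->parent;	/* assumed back-pointer */

	return container_of(bundle, struct tcx_entry, bundle);
}

/* Assumed bookkeeping used by the init/destroy paths above: record whether a
 * qdisc currently uses the miniq half of the shared entry.
 */
static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
					const bool active)
{
	ASSERT_RTNL();
	tcx_entry(entry)->miniq_active = active;
}
```

Under this reading, the qdisc no longer owns the miniq storage outright; it borrows one half of a per-device, per-direction object whose other half carries the tcx BPF programs.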
```diff
@@ -218,38 +238,52 @@
 	return q->egress_block_info.block_index;
 }
 
 static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
 		       struct netlink_ext_ack *extack)
 {
 	struct clsact_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
+	struct bpf_mprog_entry *entry;
+	bool created;
 	int err;
 
 	if (sch->parent != TC_H_CLSACT)
 		return -EOPNOTSUPP;
 
 	net_inc_ingress_queue();
 	net_inc_egress_queue();
 
-	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
+	entry = tcx_entry_fetch_or_create(dev, true, &created);
+	if (!entry)
+		return -ENOMEM;
+	tcx_miniq_set_active(entry, true);
+	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
+	if (created)
+		tcx_entry_update(dev, entry, true);
 
 	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
 	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
 
 	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
 				extack);
 	if (err)
 		return err;
 
 	mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);
 
-	mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);
+	entry = tcx_entry_fetch_or_create(dev, false, &created);
+	if (!entry)
+		return -ENOMEM;
+	tcx_miniq_set_active(entry, true);
+	mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
+	if (created)
+		tcx_entry_update(dev, entry, false);
 
 	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
 	q->egress_block_info.chain_head_change = clsact_chain_head_change;
 	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
 
 	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
 }
 
```
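Each init path follows the same four steps: fetch or create the shared entry for its direction, mark the miniq half active, point the mini_Qdisc pair at the embedded miniq, and publish the entry on the device only when it was freshly created. The sketch below shows one plausible shape for `tcx_entry_fetch_or_create()` and `tcx_entry_update()`; only `dev->tcx_ingress`/`dev->tcx_egress` and the boolean direction argument are taken from the diff itself, and the allocator called `tcx_entry_create()` is a hypothetical name.

```c
/* Sketch only: assumed create-on-demand lookup of the per-device tcx entry.
 * The bool selects the hook: true for ingress (dev->tcx_ingress), false for
 * egress (dev->tcx_egress). *created tells the caller it still has to
 * publish a new entry via tcx_entry_update().
 */
static inline struct bpf_mprog_entry *
tcx_entry_fetch_or_create(struct net_device *dev, bool ingress, bool *created)
{
	struct bpf_mprog_entry *entry;

	entry = ingress ? rtnl_dereference(dev->tcx_ingress) :
			  rtnl_dereference(dev->tcx_egress);
	*created = false;
	if (!entry) {
		entry = tcx_entry_create();	/* hypothetical allocator name */
		if (!entry)
			return NULL;
		*created = true;
	}
	return entry;
}

/* Sketch only: assumed publish/unpublish of the entry on the device, again
 * with the bool picking ingress vs egress.
 */
static inline void tcx_entry_update(struct net_device *dev,
				    struct bpf_mprog_entry *entry, bool ingress)
{
	ASSERT_RTNL();
	if (ingress)
		rcu_assign_pointer(dev->tcx_ingress, entry);
	else
		rcu_assign_pointer(dev->tcx_egress, entry);
}
```

Read this way, clsact_init() simply runs the pattern twice, with `true` for the ingress hook and `false` for egress. It also suggests that ingress_destroy() above, which unpublishes with `tcx_entry_update(dev, NULL, false)`, deserves a second glance next to clsact_destroy() below, whose ingress teardown passes `true`.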
```diff
@@ -256,18 +290,37 @@
 static void clsact_destroy(struct Qdisc *sch)
 {
 	struct clsact_sched_data *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress);
+	struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress);
 
 	if (sch->parent != TC_H_CLSACT)
 		return;
 
-	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
 	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
+	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
 
+	if (ingress_entry) {
+		tcx_miniq_set_active(ingress_entry, false);
+		if (!tcx_entry_is_active(ingress_entry)) {
+			tcx_entry_update(dev, NULL, true);
+			tcx_entry_free(ingress_entry);
+		}
+	}
+
+	if (egress_entry) {
+		tcx_miniq_set_active(egress_entry, false);
+		if (!tcx_entry_is_active(egress_entry)) {
+			tcx_entry_update(dev, NULL, false);
+			tcx_entry_free(egress_entry);
+		}
+	}
+
 	net_dec_ingress_queue();
 	net_dec_egress_queue();
 }
 
 static const struct Qdisc_class_ops clsact_class_ops = {
 	.flags			= QDISC_CLASS_OPS_DOIT_UNLOCKED,
 	.leaf			= ingress_leaf,
 	.find			= clsact_find,
```

[... 46 unchanged lines hidden ...]
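The destroy paths only unpublish and free the shared entry once it has no remaining user on either side. Assuming `tcx_entry_is_active()` combines the mprog program count with the miniq flag (and assuming `bpf_mprog_total()` is the mprog helper that reports how many programs are attached), the condition would look roughly like this:

```c
/* Sketch only: assumed liveness test used by the destroy paths above. The
 * entry stays published as long as either tcx BPF programs sit in the mprog
 * bundle or a qdisc still holds the miniq side.
 */
static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
{
	ASSERT_RTNL();
	return bpf_mprog_total(entry) || tcx_entry(entry)->miniq_active;
}
```

The practical effect, under these assumptions, is that deleting the ingress or clsact qdisc no longer wipes out tcx BPF programs attached to the same hook, and detaching those programs leaves the qdisc's miniq alone; whichever user drops out last frees the entry.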