xref: /linux/net/sched/sch_ingress.c (revision 51df8e0cbaefd432f7029dde94e6c7e4e5b19465)
// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors:     Jamal Hadi Salim 1999
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tcx.h>

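/* Per-qdisc private data: the shared filter block, its binder metadata,
 * and the mini-Qdisc pair that publishes filter-chain updates to the
 * lockless ingress fast path.
 */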
struct ingress_sched_data {
	struct tcf_block *block;
	struct tcf_block_ext_info block_info;
	struct mini_Qdisc_pair miniqp;
};

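/* ingress and clsact are classless; there is no inner qdisc to return. */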
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

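/* Map every classid to a nonzero pseudo class handle so filters can
 * always be bound; returning 0 would signal "class not found".
 */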
static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_find(sch, classid);
}

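/* With no real classes there is nothing to unbind and nothing to walk;
 * these are no-op stubs required by the class-ops interface.
 */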
static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

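/* All filters attach to the single shared block, regardless of the
 * pseudo class handle.
 */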
static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
					   struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block;
}

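/* Chain-head-change callback: publish the new head of the filter chain
 * to the mini-Qdisc pair so RCU readers on the fast path pick it up
 * without taking a lock.
 */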
static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
	struct mini_Qdisc_pair *miniqp = priv;

	mini_qdisc_pair_swap(miniqp, tp_head);
}

static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	q->block_info.block_index = block_index;
}

static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block_info.block_index;
}

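/* Set up the ingress hook: refuse attachment anywhere but TC_H_INGRESS,
 * bump the static key that enables the ingress path, take a miniq
 * reference on the per-device tcx entry (creating it if needed), and
 * acquire the shared filter block. If block setup fails, the qdisc core
 * calls ->destroy, which unwinds the earlier steps.
 */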
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry;
	bool created;
	int err;

	if (sch->parent != TC_H_INGRESS)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();

	entry = tcx_entry_fetch_or_create(dev, true, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_inc(entry);
	mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, true);

	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp, q->block);

	return 0;
}

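/* Tear down in reverse order: release the filter block, drop the miniq
 * reference on the tcx entry (freeing the entry once no BPF programs
 * keep it active), and disable the ingress static key. The parent check
 * skips cleanup when ->init rejected the attach point before taking any
 * references.
 */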
static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress);

	if (sch->parent != TC_H_INGRESS)
		return;

	tcf_block_put_ext(q->block, sch, &q->block_info);

	if (entry) {
		tcx_miniq_dec(entry);
		if (!tcx_entry_is_active(entry)) {
			tcx_entry_update(dev, NULL, true);
			tcx_entry_free(entry);
		}
	}

	net_dec_ingress_queue();
}

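/* Neither qdisc takes parameters; dump just an empty TCA_OPTIONS nest. */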
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static const struct Qdisc_class_ops ingress_class_ops = {
	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		=	ingress_leaf,
	.find		=	ingress_find,
	.walk		=	ingress_walk,
	.tcf_block	=	ingress_tcf_block,
	.bind_tcf	=	ingress_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};

static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops			=	&ingress_class_ops,
	.id			=	"ingress",
	.priv_size		=	sizeof(struct ingress_sched_data),
	.static_flags		=	TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init			=	ingress_init,
	.destroy		=	ingress_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	ingress_ingress_block_set,
	.ingress_block_get	=	ingress_ingress_block_get,
	.owner			=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("ingress");

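/* clsact provides both an ingress and an egress classification hook on
 * a single qdisc. Illustrative iproute2 usage (device and object names
 * are examples only):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf direct-action obj prog.o
 *   tc filter add dev eth0 egress matchall action mirred egress mirror dev eth1
 */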
struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
	struct tcf_block_ext_info ingress_block_info;
	struct tcf_block_ext_info egress_block_info;
	struct mini_Qdisc_pair miniqp_ingress;
	struct mini_Qdisc_pair miniqp_egress;
};

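/* Only the two clsact pseudo classes (minor TC_H_MIN_INGRESS and
 * TC_H_MIN_EGRESS) exist; any other minor number is "not found".
 */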
static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
{
	switch (TC_H_MIN(classid)) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return TC_H_MIN(classid);
	default:
		return 0;
	}
}

static unsigned long clsact_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return clsact_find(sch, classid);
}

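/* Pick the per-direction filter block from the pseudo class handle. */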
static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	switch (cl) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
		return q->ingress_block;
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return q->egress_block;
	default:
		return NULL;
	}
}

static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->ingress_block_info.block_index = block_index;
}

static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->egress_block_info.block_index = block_index;
}

static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->ingress_block_info.block_index;
}

static u32 clsact_egress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->egress_block_info.block_index;
}

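/* Like ingress_init, but wire up two hooks: one mini-Qdisc/filter-block
 * pair for ingress and one for egress. A failure after the ingress side
 * is set up is unwound by clsact_destroy via the qdisc core.
 */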
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry;
	bool created;
	int err;

	if (sch->parent != TC_H_CLSACT)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();
	net_inc_egress_queue();

	entry = tcx_entry_fetch_or_create(dev, true, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_inc(entry);
	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, true);

	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
				extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);

	entry = tcx_entry_fetch_or_create(dev, false, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_inc(entry);
	mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, false);

	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
	q->egress_block_info.chain_head_change = clsact_chain_head_change;
	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}

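/* Same teardown as ingress_destroy, performed for both directions. */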
static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress);
	struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress);

	if (sch->parent != TC_H_CLSACT)
		return;

	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);

	if (ingress_entry) {
		tcx_miniq_dec(ingress_entry);
		if (!tcx_entry_is_active(ingress_entry)) {
			tcx_entry_update(dev, NULL, true);
			tcx_entry_free(ingress_entry);
		}
	}

	if (egress_entry) {
		tcx_miniq_dec(egress_entry);
		if (!tcx_entry_is_active(egress_entry)) {
			tcx_entry_update(dev, NULL, false);
			tcx_entry_free(egress_entry);
		}
	}

	net_dec_ingress_queue();
	net_dec_egress_queue();
}

static const struct Qdisc_class_ops clsact_class_ops = {
	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		=	ingress_leaf,
	.find		=	clsact_find,
	.walk		=	ingress_walk,
	.tcf_block	=	clsact_tcf_block,
	.bind_tcf	=	clsact_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};

static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops			=	&clsact_class_ops,
	.id			=	"clsact",
	.priv_size		=	sizeof(struct clsact_sched_data),
	.static_flags		=	TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init			=	clsact_init,
	.destroy		=	clsact_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	clsact_ingress_block_set,
	.egress_block_set	=	clsact_egress_block_set,
	.ingress_block_get	=	clsact_ingress_block_get,
	.egress_block_get	=	clsact_egress_block_get,
	.owner			=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("clsact");

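/* Register both qdiscs; if clsact registration fails, roll back the
 * ingress registration so the module loads all-or-nothing.
 */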
static int __init ingress_module_init(void)
{
	int ret;

	ret = register_qdisc(&ingress_qdisc_ops);
	if (!ret) {
		ret = register_qdisc(&clsact_qdisc_ops);
		if (ret)
			unregister_qdisc(&ingress_qdisc_ops);
	}

	return ret;
}

static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}

module_init(ingress_module_init);
module_exit(ingress_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");