// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors:     Jamal Hadi Salim 1999
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

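/*
 * Per-qdisc private data for "ingress": the (possibly shared) filter block,
 * the information used to bind/unbind that block, and the mini_Qdisc pair
 * that publishes the active filter chain to the ingress fast path.
 */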
struct ingress_sched_data {
	struct tcf_block *block;
	struct tcf_block_ext_info block_info;
	struct mini_Qdisc_pair miniqp;
};

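/*
 * ingress has no real class hierarchy; these hooks exist only so that
 * filters can be attached. ->leaf(), ->walk() and ->unbind_tcf() are
 * no-ops, while ->find() and ->bind_tcf() return a non-zero pseudo-class
 * handle derived from the requested classid.
 */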
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_find(sch, classid);
}

static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
					   struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block;
}

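/*
 * Invoked by the tcf_block code whenever the head of the filter chain
 * changes; swaps the new tcf_proto into the active mini_Qdisc so the
 * data path picks up the change without taking the qdisc lock.
 */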
static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
	struct mini_Qdisc_pair *miniqp = priv;

	mini_qdisc_pair_swap(miniqp, tp_head);
}

static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	q->block_info.block_index = block_index;
}

static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block_info.block_index;
}

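/*
 * Grafting is only allowed at the ingress parent. Take a reference on the
 * ingress static key, point the mini_Qdisc pair at dev->miniq_ingress and
 * bind the (optionally shared) tcf_block for this qdisc.
 */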
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	if (sch->parent != TC_H_INGRESS)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();

	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);

	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp, q->block);

	return 0;
}

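/*
 * If ->init() bailed out before taking any references (wrong parent),
 * there is nothing to undo; otherwise release the block and drop the
 * ingress static-key reference.
 */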
static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	if (sch->parent != TC_H_INGRESS)
		return;

	tcf_block_put_ext(q->block, sch, &q->block_info);
	net_dec_ingress_queue();
}

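/* The qdisc has no parameters of its own; dump an empty TCA_OPTIONS nest. */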
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static const struct Qdisc_class_ops ingress_class_ops = {
	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		=	ingress_leaf,
	.find		=	ingress_find,
	.walk		=	ingress_walk,
	.tcf_block	=	ingress_tcf_block,
	.bind_tcf	=	ingress_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};

static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops			=	&ingress_class_ops,
	.id			=	"ingress",
	.priv_size		=	sizeof(struct ingress_sched_data),
	.static_flags		=	TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init			=	ingress_init,
	.destroy		=	ingress_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	ingress_ingress_block_set,
	.ingress_block_get	=	ingress_ingress_block_get,
	.owner			=	THIS_MODULE,
};

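/*
 * clsact keeps separate filter blocks and mini_Qdisc pairs for its two
 * pseudo-classes: one bound to the ingress hook, one to the egress hook.
 */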
struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
	struct tcf_block_ext_info ingress_block_info;
	struct tcf_block_ext_info egress_block_info;
	struct mini_Qdisc_pair miniqp_ingress;
	struct mini_Qdisc_pair miniqp_egress;
};

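/*
 * clsact exposes exactly two pseudo-classes, TC_H_MIN_INGRESS (the ingress
 * hook) and TC_H_MIN_EGRESS (the egress hook); any other classid does not
 * exist.
 */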
static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
{
	switch (TC_H_MIN(classid)) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return TC_H_MIN(classid);
	default:
		return 0;
	}
}

static unsigned long clsact_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return clsact_find(sch, classid);
}

static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	switch (cl) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
		return q->ingress_block;
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return q->egress_block;
	default:
		return NULL;
	}
}

static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->ingress_block_info.block_index = block_index;
}

static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->egress_block_info.block_index = block_index;
}

static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->ingress_block_info.block_index;
}

static u32 clsact_egress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->egress_block_info.block_index;
}

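/*
 * Like ingress_init(), but for both directions: take references on the
 * ingress and egress static keys and bind one tcf_block per hook. An
 * error from the egress tcf_block_get_ext() leaves teardown of the
 * already-bound ingress side to clsact_destroy().
 */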
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	if (sch->parent != TC_H_CLSACT)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();
	net_inc_egress_queue();

	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);

	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
				extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);

	mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);

	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
	q->egress_block_info.chain_head_change = clsact_chain_head_change;
	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}

static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	if (sch->parent != TC_H_CLSACT)
		return;

	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);

	net_dec_ingress_queue();
	net_dec_egress_queue();
}

static const struct Qdisc_class_ops clsact_class_ops = {
	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		=	ingress_leaf,
	.find		=	clsact_find,
	.walk		=	ingress_walk,
	.tcf_block	=	clsact_tcf_block,
	.bind_tcf	=	clsact_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};

static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops			=	&clsact_class_ops,
	.id			=	"clsact",
	.priv_size		=	sizeof(struct clsact_sched_data),
	.static_flags		=	TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init			=	clsact_init,
	.destroy		=	clsact_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	clsact_ingress_block_set,
	.egress_block_set	=	clsact_egress_block_set,
	.ingress_block_get	=	clsact_ingress_block_get,
	.egress_block_get	=	clsact_egress_block_get,
	.owner			=	THIS_MODULE,
};

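/*
 * Register both qdiscs; if "clsact" fails to register, roll back the
 * "ingress" registration so the module loads all-or-nothing.
 */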
static int __init ingress_module_init(void)
{
	int ret;

	ret = register_qdisc(&ingress_qdisc_ops);
	if (!ret) {
		ret = register_qdisc(&clsact_qdisc_ops);
		if (ret)
			unregister_qdisc(&ingress_qdisc_ops);
	}

	return ret;
}

static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}

module_init(ingress_module_init);
module_exit(ingress_module_exit);

MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");