xref: /linux/net/sched/act_ctinfo.c (revision d00453b6e3a3d2340b88c5292c3c5b5f9c4ece75)
// SPDX-License-Identifier: GPL-2.0+
/* net/sched/act_ctinfo.c  netfilter ctinfo connmark actions
 *
 * Copyright (c) 2019 Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
 */

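/* Overview: ctinfo copies information stored in a connection's conntrack
 * mark back into packets belonging to that connection.  In DSCP mode the
 * masked and shifted connmark is written into the IPv4/IPv6 dsfield
 * (leaving the ECN bits alone); in cpmark mode the masked connmark is
 * copied into skb->mark for later classification.
 *
 * Illustrative userspace setup in the style of tc-ctinfo(8) (a sketch with
 * example values, not taken from this file): restore a DSCP kept in the
 * top six bits of the connmark, gated on a "DSCP valid" statemask bit:
 *
 *	tc filter add dev eth0 parent ffff: protocol all prio 10 \
 *		u32 match u32 0 0 flowid 1:1 \
 *		action ctinfo dscp 0xfc000000 0x01000000
 */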
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <uapi/linux/tc_act/tc_ctinfo.h>
#include <net/tc_act/tc_ctinfo.h>
#include <net/tc_wrapper.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct tc_action_ops act_ctinfo_ops;

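/* Write the DSCP recovered from the conntrack mark into the packet's IPv4
 * or IPv6 header.  The connmark is masked, shifted down to bit 0, then
 * shifted left two bits so the 6-bit value lands in the dsfield above the
 * ECN bits.  A header that cannot be made writable counts as an error.
 */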
static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
				struct tcf_ctinfo_params *cp,
				struct sk_buff *skb, int wlen, int proto)
{
	u8 dscp, newdscp;

	newdscp = (((READ_ONCE(ct->mark) & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
		     ~INET_ECN_MASK;

	switch (proto) {
	case NFPROTO_IPV4:
		dscp = ipv4_get_dsfield(ip_hdr(skb)) & ~INET_ECN_MASK;
		if (dscp != newdscp) {
			if (likely(!skb_try_make_writable(skb, wlen))) {
				ipv4_change_dsfield(ip_hdr(skb),
						    INET_ECN_MASK,
						    newdscp);
				atomic64_inc(&ca->stats_dscp_set);
			} else {
				atomic64_inc(&ca->stats_dscp_error);
			}
		}
		break;
	case NFPROTO_IPV6:
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK;
		if (dscp != newdscp) {
			if (likely(!skb_try_make_writable(skb, wlen))) {
				ipv6_change_dsfield(ipv6_hdr(skb),
						    INET_ECN_MASK,
						    newdscp);
				atomic64_inc(&ca->stats_dscp_set);
			} else {
				atomic64_inc(&ca->stats_dscp_error);
			}
		}
		break;
	default:
		break;
	}
}

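/* Copy the conntrack mark, filtered through cpmarkmask, into skb->mark so
 * that qdiscs and filters later in the stack can match on it.
 */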
static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
				  struct tcf_ctinfo_params *cp,
				  struct sk_buff *skb)
{
	atomic64_inc(&ca->stats_cpmark_set);
	skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
}

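/* Per-packet action handler, run in softirq context under RCU.  If no
 * conntrack entry is attached to the skb yet (typical on ingress, before
 * conntrack has seen the packet), the entry is looked up directly from
 * the packet tuple; the extra reference taken by that lookup is dropped
 * once the configured modes have been applied.
 */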
TC_INDIRECT_SCOPE int tcf_ctinfo_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	const struct nf_conntrack_tuple_hash *thash = NULL;
	struct tcf_ctinfo *ca = to_ctinfo(a);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	enum ip_conntrack_info ctinfo;
	struct tcf_ctinfo_params *cp;
	struct nf_conn *ct;
	int proto, wlen;

	cp = rcu_dereference_bh(ca->params);

	tcf_lastuse_update(&ca->tcf_tm);
	tcf_action_update_bstats(&ca->common, skb);

	wlen = skb_network_offset(skb);
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		wlen += sizeof(struct iphdr);
		if (!pskb_may_pull(skb, wlen))
			goto out;

		proto = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		wlen += sizeof(struct ipv6hdr);
		if (!pskb_may_pull(skb, wlen))
			goto out;

		proto = NFPROTO_IPV6;
		break;
	default:
		goto out;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) { /* look harder, usually ingress */
		if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				       proto, cp->net, &tuple))
			goto out;
		zone.id = cp->zone;
		zone.dir = NF_CT_DEFAULT_ZONE_DIR;

		thash = nf_conntrack_find_get(cp->net, &zone, &tuple);
		if (!thash)
			goto out;

		ct = nf_ct_tuplehash_to_ctrack(thash);
	}

	if (cp->mode & CTINFO_MODE_DSCP)
		if (!cp->dscpstatemask || (READ_ONCE(ct->mark) & cp->dscpstatemask))
			tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);

	if (cp->mode & CTINFO_MODE_CPMARK)
		tcf_ctinfo_cpmark_set(ct, ca, cp, skb);

	if (thash)
		nf_ct_put(ct);
out:
	return cp->action;
}

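/* Netlink attribute policy: TCA_CTINFO_ACT must be exactly
 * sizeof(struct tc_ctinfo); the zone and the three masks are plain
 * fixed-width integers.
 */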
static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
	[TCA_CTINFO_ACT]		  =
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_ctinfo)),
	[TCA_CTINFO_ZONE]		  = { .type = NLA_U16 },
	[TCA_CTINFO_PARMS_DSCP_MASK]	  = { .type = NLA_U32 },
	[TCA_CTINFO_PARMS_DSCP_STATEMASK] = { .type = NLA_U32 },
	[TCA_CTINFO_PARMS_CPMARK_MASK]	  = { .type = NLA_U32 },
};

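/* Create or update a ctinfo action instance.  Attribute validation is done
 * up front, before any allocation; the new parameter block is then swapped
 * in under the action lock via RCU so that tcf_ctinfo_act() always sees a
 * consistent set of masks.
 */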
static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	u32 dscpmask = 0, dscpstatemask, index;
	struct nlattr *tb[TCA_CTINFO_MAX + 1];
	struct tcf_ctinfo_params *cp_new;
	struct tcf_chain *goto_ch = NULL;
	struct tc_ctinfo *actparm;
	struct tcf_ctinfo *ci;
	u8 dscpmaskshift;
	int ret = 0, err;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "ctinfo requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CTINFO_MAX, nla, ctinfo_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CTINFO_ACT]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing required TCA_CTINFO_ACT attribute");
		return -EINVAL;
	}
	actparm = nla_data(tb[TCA_CTINFO_ACT]);

	/* Do some basic validation here before dynamically allocating
	 * things that we would otherwise have to clean up.
	 */
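	/* Worked example of the contiguity check below (assumed values, for
	 * illustration): a mask of 0xfc000000 has __ffs() == 26 and
	 * 0xfc000000 >> 26 == 0x3f, so it is accepted; a non-contiguous
	 * mask such as 0xf3000000 yields 0xf3000000 >> 24 == 0xf3 and is
	 * rejected.
	 */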
	if (tb[TCA_CTINFO_PARMS_DSCP_MASK]) {
		dscpmask = nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_MASK]);
		/* need contiguous 6 bit mask */
		dscpmaskshift = dscpmask ? __ffs(dscpmask) : 0;
		if ((~0 & (dscpmask >> dscpmaskshift)) != 0x3f) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_CTINFO_PARMS_DSCP_MASK],
					    "dscp mask must be 6 contiguous bits");
			return -EINVAL;
		}
		dscpstatemask =
			nla_get_u32_default(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
					    0);
		/* mask & statemask must not overlap */
		if (dscpmask & dscpstatemask) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
					    "dscp statemask must not overlap dscp mask");
			return -EINVAL;
		}
	}

	/* done the validation: now to the actual action allocation */
	index = actparm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ctinfo_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return ACT_P_BOUND;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(actparm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	ci = to_ctinfo(*a);

	cp_new = kzalloc(sizeof(*cp_new), GFP_KERNEL);
	if (unlikely(!cp_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	cp_new->net = net;
	cp_new->zone = nla_get_u16_default(tb[TCA_CTINFO_ZONE], 0);
	if (dscpmask) {
		cp_new->dscpmask = dscpmask;
		cp_new->dscpmaskshift = dscpmaskshift;
		cp_new->dscpstatemask = dscpstatemask;
		cp_new->mode |= CTINFO_MODE_DSCP;
	}

	if (tb[TCA_CTINFO_PARMS_CPMARK_MASK]) {
		cp_new->cpmarkmask =
				nla_get_u32(tb[TCA_CTINFO_PARMS_CPMARK_MASK]);
		cp_new->mode |= CTINFO_MODE_CPMARK;
	}

	cp_new->action = actparm->action;

	spin_lock_bh(&ci->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, actparm->action, goto_ch);
	cp_new = rcu_replace_pointer(ci->params, cp_new,
				     lockdep_is_held(&ci->tcf_lock));
	spin_unlock_bh(&ci->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (cp_new)
		kfree_rcu(cp_new, rcu);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

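/* Dump the action configuration and the three packet counters back to
 * userspace; params are read under rcu_read_lock() to stay coherent with
 * concurrent replaces from tcf_ctinfo_init().
 */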
static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	const struct tcf_ctinfo *ci = to_ctinfo(a);
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_ctinfo_params *cp;
	struct tc_ctinfo opt = {
		.index   = ci->tcf_index,
		.refcnt  = refcount_read(&ci->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	rcu_read_lock();
	cp = rcu_dereference(ci->params);

	tcf_tm_dump(&t, &ci->tcf_tm);
	if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD))
		goto nla_put_failure;

	opt.action = cp->action;
	if (nla_put(skb, TCA_CTINFO_ACT, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone))
		goto nla_put_failure;

	if (cp->mode & CTINFO_MODE_DSCP) {
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_MASK,
				cp->dscpmask))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_STATEMASK,
				cp->dscpstatemask))
			goto nla_put_failure;
	}

	if (cp->mode & CTINFO_MODE_CPMARK) {
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_CPMARK_MASK,
				cp->cpmarkmask))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
			      atomic64_read(&ci->stats_dscp_set),
			      TCA_CTINFO_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
			      atomic64_read(&ci->stats_dscp_error),
			      TCA_CTINFO_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
			      atomic64_read(&ci->stats_cpmark_set),
			      TCA_CTINFO_PAD))
		goto nla_put_failure;

	rcu_read_unlock();
	return skb->len;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_trim(skb, b);
	return -1;
}

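/* Release the parameter block when the action is destroyed; kfree_rcu()
 * defers the free past any readers still inside tcf_ctinfo_act().
 */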
static void tcf_ctinfo_cleanup(struct tc_action *a)
{
	struct tcf_ctinfo *ci = to_ctinfo(a);
	struct tcf_ctinfo_params *cp;

	cp = rcu_dereference_protected(ci->params, 1);
	if (cp)
		kfree_rcu(cp, rcu);
}

static struct tc_action_ops act_ctinfo_ops = {
	.kind	= "ctinfo",
	.id	= TCA_ID_CTINFO,
	.owner	= THIS_MODULE,
	.act	= tcf_ctinfo_act,
	.dump	= tcf_ctinfo_dump,
	.init	= tcf_ctinfo_init,
	.cleanup = tcf_ctinfo_cleanup,
	.size	= sizeof(struct tcf_ctinfo),
};
MODULE_ALIAS_NET_ACT("ctinfo");

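/* Per-netns setup and batched teardown of the action table shared by all
 * ctinfo instances in that namespace.
 */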
static __net_init int ctinfo_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id);

	return tc_action_net_init(net, tn, &act_ctinfo_ops);
}

static void __net_exit ctinfo_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ctinfo_ops.net_id);
}

static struct pernet_operations ctinfo_net_ops = {
	.init		= ctinfo_init_net,
	.exit_batch	= ctinfo_exit_net,
	.id		= &act_ctinfo_ops.net_id,
	.size		= sizeof(struct tc_action_net),
};

static int __init ctinfo_init_module(void)
{
	return tcf_register_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

static void __exit ctinfo_cleanup_module(void)
{
	tcf_unregister_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

module_init(ctinfo_init_module);
module_exit(ctinfo_cleanup_module);
MODULE_AUTHOR("Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>");
MODULE_DESCRIPTION("Connection tracking mark actions");
MODULE_LICENSE("GPL");