// SPDX-License-Identifier: GPL-2.0+
/* net/sched/act_ctinfo.c  netfilter ctinfo connmark actions
 *
 * Copyright (c) 2019 Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ctinfo.h>
#include <net/tc_act/tc_ctinfo.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct tc_action_ops act_ctinfo_ops;
static unsigned int ctinfo_net_id;

static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
				struct tcf_ctinfo_params *cp,
				struct sk_buff *skb, int wlen, int proto)
{
	u8 dscp, newdscp;

	newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
		     ~INET_ECN_MASK;

	switch (proto) {
	case NFPROTO_IPV4:
		dscp = ipv4_get_dsfield(ip_hdr(skb)) & ~INET_ECN_MASK;
		if (dscp != newdscp) {
			if (likely(!skb_try_make_writable(skb, wlen))) {
				ipv4_change_dsfield(ip_hdr(skb),
						    INET_ECN_MASK,
						    newdscp);
				ca->stats_dscp_set++;
			} else {
				ca->stats_dscp_error++;
			}
		}
		break;
	case NFPROTO_IPV6:
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK;
		if (dscp != newdscp) {
			if (likely(!skb_try_make_writable(skb, wlen))) {
				ipv6_change_dsfield(ipv6_hdr(skb),
						    INET_ECN_MASK,
						    newdscp);
				ca->stats_dscp_set++;
			} else {
				ca->stats_dscp_error++;
			}
		}
		break;
	default:
		break;
	}
}

static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
				  struct tcf_ctinfo_params *cp,
				  struct sk_buff *skb)
{
	ca->stats_cpmark_set++;
	skb->mark = ct->mark & cp->cpmarkmask;
}

static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	const struct nf_conntrack_tuple_hash *thash = NULL;
	struct tcf_ctinfo *ca = to_ctinfo(a);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	enum ip_conntrack_info ctinfo;
	struct tcf_ctinfo_params *cp;
	struct nf_conn *ct;
	int proto, wlen;
	int action;

	cp = rcu_dereference_bh(ca->params);

	tcf_lastuse_update(&ca->tcf_tm);
	bstats_update(&ca->tcf_bstats, skb);
	action = READ_ONCE(ca->tcf_action);

	wlen = skb_network_offset(skb);
	if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
		wlen += sizeof(struct iphdr);
		if (!pskb_may_pull(skb, wlen))
			goto out;

		proto = NFPROTO_IPV4;
	} else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
		wlen += sizeof(struct ipv6hdr);
		if (!pskb_may_pull(skb, wlen))
			goto out;

		proto = NFPROTO_IPV6;
	} else {
		goto out;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) { /* look harder, usually ingress */
		if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				       proto, cp->net, &tuple))
			goto out;
		zone.id = cp->zone;
		zone.dir = NF_CT_DEFAULT_ZONE_DIR;

		thash = nf_conntrack_find_get(cp->net, &zone, &tuple);
		if (!thash)
			goto out;

		ct = nf_ct_tuplehash_to_ctrack(thash);
	}

	if (cp->mode & CTINFO_MODE_DSCP)
		if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask))
			tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);

	if (cp->mode & CTINFO_MODE_CPMARK)
		tcf_ctinfo_cpmark_set(ct, ca, cp, skb);

	if (thash)
		nf_ct_put(ct);
out:
	return action;
}

static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
	[TCA_CTINFO_ACT]		  = { .len = sizeof(struct tc_ctinfo) },
	[TCA_CTINFO_ZONE]		  = { .type = NLA_U16 },
	[TCA_CTINFO_PARMS_DSCP_MASK]	  = { .type = NLA_U32 },
	[TCA_CTINFO_PARMS_DSCP_STATEMASK] = { .type = NLA_U32 },
	[TCA_CTINFO_PARMS_CPMARK_MASK]	  = { .type = NLA_U32 },
};

static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
	struct nlattr *tb[TCA_CTINFO_MAX + 1];
	struct tcf_ctinfo_params *cp_new;
	struct tcf_chain *goto_ch = NULL;
	u32 dscpmask = 0, dscpstatemask;
	struct tc_ctinfo *actparm;
	struct tcf_ctinfo *ci;
	u8 dscpmaskshift;
	int ret = 0, err;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "ctinfo requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CTINFO_MAX, nla, ctinfo_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CTINFO_ACT]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing required TCA_CTINFO_ACT attribute");
		return -EINVAL;
	}
	actparm = nla_data(tb[TCA_CTINFO_ACT]);

	/* do some basic validation here before dynamically allocating things
	 * that we would otherwise have to clean up.
	 */
	if (tb[TCA_CTINFO_PARMS_DSCP_MASK]) {
		dscpmask = nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_MASK]);
		/* need contiguous 6 bit mask */
		dscpmaskshift = dscpmask ? __ffs(dscpmask) : 0;
		if ((~0 & (dscpmask >> dscpmaskshift)) != 0x3f) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_CTINFO_PARMS_DSCP_MASK],
					    "dscp mask must be 6 contiguous bits");
			return -EINVAL;
		}
		dscpstatemask = tb[TCA_CTINFO_PARMS_DSCP_STATEMASK] ?
			nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK]) : 0;
		/* mask & statemask must not overlap */
		if (dscpmask & dscpstatemask) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
					    "dscp statemask must not overlap dscp mask");
			return -EINVAL;
		}
	}

	/* done the validation: now to the actual action allocation */
	err = tcf_idr_check_alloc(tn, &actparm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, actparm->index, est, a,
				     &act_ctinfo_ops, bind, false);
		if (ret) {
			tcf_idr_cleanup(tn, actparm->index);
			return ret;
		}
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(actparm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	ci = to_ctinfo(*a);

	cp_new = kzalloc(sizeof(*cp_new), GFP_KERNEL);
	if (unlikely(!cp_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	cp_new->net = net;
	cp_new->zone = tb[TCA_CTINFO_ZONE] ?
			nla_get_u16(tb[TCA_CTINFO_ZONE]) : 0;
	if (dscpmask) {
		cp_new->dscpmask = dscpmask;
		cp_new->dscpmaskshift = dscpmaskshift;
		cp_new->dscpstatemask = dscpstatemask;
		cp_new->mode |= CTINFO_MODE_DSCP;
	}

	if (tb[TCA_CTINFO_PARMS_CPMARK_MASK]) {
		cp_new->cpmarkmask =
			nla_get_u32(tb[TCA_CTINFO_PARMS_CPMARK_MASK]);
		cp_new->mode |= CTINFO_MODE_CPMARK;
	}

	spin_lock_bh(&ci->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, actparm->action, goto_ch);
	rcu_swap_protected(ci->params, cp_new,
			   lockdep_is_held(&ci->tcf_lock));
	spin_unlock_bh(&ci->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (cp_new)
		kfree_rcu(cp_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	struct tcf_ctinfo *ci = to_ctinfo(a);
	struct tc_ctinfo opt = {
		.index   = ci->tcf_index,
		.refcnt  = refcount_read(&ci->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
	};
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ctinfo_params *cp;
	struct tcf_t t;

	spin_lock_bh(&ci->tcf_lock);
	cp = rcu_dereference_protected(ci->params,
				       lockdep_is_held(&ci->tcf_lock));

	tcf_tm_dump(&t, &ci->tcf_tm);
	if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD))
		goto nla_put_failure;

	opt.action = ci->tcf_action;
	if (nla_put(skb, TCA_CTINFO_ACT, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone))
		goto nla_put_failure;

	if (cp->mode & CTINFO_MODE_DSCP) {
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_MASK,
				cp->dscpmask))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_STATEMASK,
				cp->dscpstatemask))
			goto nla_put_failure;
	}

	if (cp->mode & CTINFO_MODE_CPMARK) {
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_CPMARK_MASK,
				cp->cpmarkmask))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
			      ci->stats_dscp_set, TCA_CTINFO_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
			      ci->stats_dscp_error, TCA_CTINFO_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
			      ci->stats_cpmark_set, TCA_CTINFO_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ci->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ci->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ctinfo_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ctinfo_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ctinfo_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ctinfo_ops = {
	.kind	= "ctinfo",
	.id	= TCA_ID_CTINFO,
	.owner	= THIS_MODULE,
	.act	= tcf_ctinfo_act,
	.dump	= tcf_ctinfo_dump,
	.init	= tcf_ctinfo_init,
	.walk	= tcf_ctinfo_walker,
	.lookup	= tcf_ctinfo_search,
	.size	= sizeof(struct tcf_ctinfo),
};

static __net_init int ctinfo_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ctinfo_net_id);

	return tc_action_net_init(tn, &act_ctinfo_ops);
}

static void __net_exit ctinfo_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ctinfo_net_id);
}

static struct pernet_operations ctinfo_net_ops = {
	.init		= ctinfo_init_net,
	.exit_batch	= ctinfo_exit_net,
	.id		= &ctinfo_net_id,
	.size		= sizeof(struct tc_action_net),
};

static int __init ctinfo_init_module(void)
{
	return tcf_register_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

static void __exit ctinfo_cleanup_module(void)
{
	tcf_unregister_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

module_init(ctinfo_init_module);
module_exit(ctinfo_cleanup_module);
MODULE_AUTHOR("Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>");
MODULE_DESCRIPTION("Connection tracking mark actions");
MODULE_LICENSE("GPL");