// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/if_tunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/gre.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_wrapper.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct zones_ht_key {
	struct net *net;
	u16 zone;
};

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	struct zones_ht_key key;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, key),
	.key_len = offsetofend(struct zones_ht_key, zone),
	.automatic_shrinking = true,
};

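/* Reserve and return the next free slot in the flow_action entries
 * array; callers are expected to have sized the rule accordingly.
 */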
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check whether the inverted reverse
 * tuple (target) differs from the current direction's tuple - meaning NAT
 * for ports and/or IP is needed - and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

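/* Fill a CT metadata action entry so drivers can restore conntrack
 * state (mark, labels, direction and a cookie that aliases the skb's
 * nf_ct reference) when a packet hits the offloaded flow.
 */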
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      enum ip_conntrack_info ctinfo,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

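/* Translate the connection's NAT state into mangle actions: the target
 * tuple is the inversion of the reverse-direction tuple, so any field
 * differing from the current direction's tuple needs a rewrite.
 */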
static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

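/* Flowtable ->action() callback: build the flow rule actions (NAT
 * mangles plus CT metadata) for one direction of an offloaded flow.
 * On error, any entries added here are cleared again.
 */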
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
		if (ctinfo == IP_CT_ESTABLISHED)
			set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		ctinfo = IP_CT_ESTABLISHED_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

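/* Flowtable ->gc() callback: consider a flow stale when the connection
 * has seen a reply and is offloaded in hardware, but the offloaded
 * entry was never updated to the established state and no update is
 * pending.
 */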
static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
{
	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
	       test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
	       !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
	       !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_get(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_get_ref(ct_ft);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_put(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_put(ct_ft);
}

static struct nf_flowtable_type flowtable_ct = {
	.gc = tcf_ct_flow_is_outdated,
	.action = tcf_ct_flow_table_fill_actions,
	.get = tcf_ct_nf_get,
	.put = tcf_ct_nf_put,
	.owner = THIS_MODULE,
};

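/* Look up (or create) the flow table shared by all ct actions in the
 * same (netns, zone) pair. Lookup runs under RCU; creation and
 * insertion are serialized by zones_mutex.
 */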
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
	struct zones_ht_key key = { .net = net, .zone = params->zone };
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	rcu_read_lock();
	ct_ft = rhashtable_lookup(&zones_ht, &key, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref)) {
		rcu_read_unlock();
		goto out_unlock;
	}
	rcu_read_unlock();

	ct_ft = kzalloc_obj(*ct_ft);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->key = key;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;
	write_pnet(&ct_ft->nf_ft.net, net);

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
{
	refcount_inc(&ct_ft->ref);
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	WARN_ON(!list_empty(&block->cb_list));
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
{
	if (refcount_dec_and_test(&ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

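/* Point one direction of the offloaded tuple at the TC ingress device
 * recorded in the conntrack act_ct extension.
 */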
static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
{
	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}

static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
{
	struct nf_conn_act_ct_ext *act_ct_ext;

	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}
}

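/* Offload one connection into the zone's flow table. IPS_OFFLOAD_BIT
 * makes this idempotent: only the first caller builds and adds the
 * entry; any failure clears the bit again.
 */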
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp, bool bidirectional)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}
	if (bidirectional)
		__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

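/* Decide whether a connection is ready for offload: established and
 * assured TCP, confirmed UDP (unidirectional until assured), or
 * assured keyless GREv0 without NAT. Connections using a helper or
 * TCP sequence adjustment are never offloaded.
 */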
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false, bidirectional = true;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;

		tcp = true;
		break;
	case IPPROTO_UDP:
		if (!nf_ct_is_confirmed(ct))
			return;
		if (!test_bit(IPS_ASSURED_BIT, &ct->status))
			bidirectional = false;
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE: {
		struct nf_conntrack_tuple *tuple;

		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->status & IPS_NAT_MASK)
			return;

		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		/* No support for GRE v1 */
		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
			return;
		break;
	}
#endif
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
}

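/* Parse an IPv4 packet into a flow table lookup tuple. Fragments,
 * packets with IP options, expiring TTLs and unsupported protocols
 * return false and must take the slow path instead.
 */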
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	size_t hdrsize;
	u8 ipproto;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (ipproto) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	iph = ip_hdr(skb);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	size_t hdrsize;
	u8 nexthdr;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);
	thoff = sizeof(*ip6h);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (ip6h->hop_limit <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (nexthdr) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	ip6h = ipv6_hdr(skb);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;

	return true;
}

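/* Fast path: look the packet up in the zone's flow table and, on a
 * hit, attach the conntrack reference to the skb without calling
 * nf_conntrack_in(). Returns false whenever software conntrack
 * processing is still required.
 */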
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	bool force_refresh = false;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (dir == FLOW_OFFLOAD_DIR_REPLY &&
	    !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
		/* Only offload reply direction after connection became
		 * assured.
		 */
		if (test_bit(IPS_ASSURED_BIT, &ct->status))
			set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
		else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
			/* If flow_table flow has already been updated to the
			 * established state, then don't refresh.
			 */
			return false;
		force_refresh = true;
	}

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
	else
		ctinfo = IP_CT_ESTABLISHED_REPLY;

	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	tcf_ct_flow_ct_ext_ifidx_update(flow);
	flow_offload_refresh(nf_ft, flow, force_refresh);
	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
		/* Process this flow in SW to allow promoting to ASSURED */
		return false;
	}

	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   struct tcf_ct_params *p)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		goto drop_ct;
	if (nf_ct_zone(ct)->id != p->zone)
		goto drop_ct;
	if (p->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != p->helper)
			goto drop_ct;
	}

	/* Force conntrack entry direction. */
	if ((p->ct_action & TCA_CT_ACT_FORCE) &&
	    CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		goto drop_ct;
	}

	return true;

drop_ct:
	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	return false;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

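/* Defragment IPv4/IPv6 packets before they reach conntrack. When a
 * packet was reassembled, *defrag is set and the maximum received
 * fragment size is saved in the tc skb cb so the transmit path can
 * refragment later.
 */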
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u8 proto;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
	if (err)
		return err;

	*defrag = true;
	tc_skb_cb(skb)->mru = mru;

	return 0;
}

static void tcf_ct_params_free(struct tcf_ct_params *params)
{
	if (params->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (params->ct_action & TCA_CT_ACT_NAT)
			nf_nat_helper_put(params->helper);
#endif
		nf_conntrack_helper_put(params->helper);
	}
	if (params->ct_ft)
		tcf_ct_flow_table_put(params->ct_ft);
	if (params->tmpl) {
		if (params->put_labels)
			nf_connlabels_put(nf_ct_net(params->tmpl));

		nf_ct_put(params->tmpl);
	}

	kfree(params);
}

static void tcf_ct_params_free_rcu(struct rcu_head *head)
{
	struct tcf_ct_params *params;

	params = container_of(head, struct tcf_ct_params, rcu);
	tcf_ct_params_free(params);
}

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

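/* Apply the action's NAT configuration via nf_ct_nat() and mirror the
 * performed manipulations into the qdisc cb so that later classifier
 * matches can see post-NAT state.
 */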
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err, action = 0;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;
	if (ct_action & TCA_CT_ACT_NAT_SRC)
		action |= BIT(NF_NAT_MANIP_SRC);
	if (ct_action & TCA_CT_ACT_NAT_DST)
		action |= BIT(NF_NAT_MANIP_DST);

	err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);
	if (err != NF_ACCEPT)
		return err & NF_VERDICT_MASK;

	if (action & BIT(NF_NAT_MANIP_SRC))
		qdisc_skb_cb(skb)->post_ct_snat = 1;
	if (action & BIT(NF_NAT_MANIP_DST))
		qdisc_skb_cb(skb)->post_ct_dnat = 1;

	return err;
#else
	return NF_ACCEPT;
#endif
}

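/* Main packet path of act_ct: either clear conntrack state, or run the
 * packet through defrag and the flow table fast path (falling back to
 * nf_conntrack_in()), then optionally NAT, commit, and consider the
 * connection for offload.
 */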
TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
				 struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	bool cached, commit, clear;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool add_helper = false;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = p->action;
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		qdisc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/IPv6 header into the linear area.
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err)
		goto out_frag;

	err = nf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto nf_error;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto nf_error;

	if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
		err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
		if (err)
			goto drop;
		add_helper = true;
		if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) {
			if (!nfct_seqadj_ext_add(ct))
				goto drop;
		}
	}

	if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
		err = nf_ct_helper(skb, ct, ctinfo, family);
		if (err != NF_ACCEPT)
			goto nf_error;
	}

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(skb, ct, ctinfo);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		err = nf_conntrack_confirm(skb);
		if (err != NF_ACCEPT)
			goto nf_error;

		/* The ct may be dropped if a clash has been resolved,
		 * so it's necessary to retrieve it from skb again to
		 * prevent UAF.
		 */
		ct = nf_ct_get(skb, &ctinfo);
		if (!ct)
			skip_add = true;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	qdisc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

out_frag:
	if (err != -EINPROGRESS)
		tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_CONSUMED;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;

nf_error:
	/* Some verdicts store extra data in upper bits, such
	 * as errno or queue number.
	 */
	switch (err & NF_VERDICT_MASK) {
	case NF_DROP:
		goto drop;
	case NF_STOLEN:
		tcf_action_inc_drop_qstats(&c->common);
		return TC_ACT_CONSUMED;
	default:
		DEBUG_NET_WARN_ON_ONCE(1);
		goto drop;
	}
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
	[TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN },
	[TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 },
	[TCA_CT_HELPER_PROTO] = { .type = NLA_U8 },
};

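/* Parse the NAT-related netlink attributes into an nf_nat_range2.
 * Address and port ranges that only specify a minimum default the
 * maximum to the same value.
 */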
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip =
			nla_get_in_addr_default(max_attr, range->min_addr.ip);
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct nf_conntrack_zone zone;
	int err, family, proto, len;
	bool put_labels = false;
	struct nf_conn *tmpl;
	char *name;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (nf_connlabels_get(net, n_bits - 1)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		} else {
			put_labels = true;
		}

		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	p->tmpl = tmpl;
	if (tb[TCA_CT_HELPER_NAME]) {
		name = nla_data(tb[TCA_CT_HELPER_NAME]);
		len = nla_len(tb[TCA_CT_HELPER_NAME]);
		if (len > 16 || name[len - 1] != '\0') {
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name.");
			err = -EINVAL;
			goto err;
		}
		family = nla_get_u8_default(tb[TCA_CT_HELPER_FAMILY], AF_INET);
		proto = nla_get_u8_default(tb[TCA_CT_HELPER_PROTO],
					   IPPROTO_TCP);
		err = nf_ct_add_helper(tmpl, name, family, proto,
				       p->ct_action & TCA_CT_ACT_NAT, &p->helper);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add helper");
			goto err;
		}
	}

	p->put_labels = put_labels;

	if (p->ct_action & TCA_CT_ACT_COMMIT)
		__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	return 0;
err:
	if (put_labels)
		nf_connlabels_put(net);

	nf_ct_put(p->tmpl);
	p->tmpl = NULL;
	return err;
}

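/* Netlink ->init() callback: validate the attributes, create or update
 * the action instance, build its parameter block, bind it to the
 * per-zone flow table and publish the new parameters via RCU.
 */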
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	if (bind && !(flags & TCA_ACT_FLAGS_AT_INGRESS_OR_CLSACT)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Attaching ct to a non ingress/clsact qdisc is unsupported");
		return -EOPNOTSUPP;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return ACT_P_BOUND;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc_obj(*params);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(net, params);
	if (err)
		goto cleanup;

	params->action = parm->action;
	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		tcf_ct_params_free(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       const void *val, int val_type,
			       const void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, const struct tcf_ct_params *p)
{
	const struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static int tcf_ct_dump_helper(struct sk_buff *skb,
			      const struct nf_conntrack_helper *helper)
{
	if (!helper)
		return 0;

	if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) ||
	    nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) ||
	    nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum))
		return -1;

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_ct *c = to_ct(a);
	const struct tcf_ct_params *p;
	struct tc_ct opt = {
		.index = c->tcf_index,
		.refcnt = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	rcu_read_lock();
	p = rcu_dereference(c->params);
	opt.action = p->action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

	if (tcf_ct_dump_helper(skb, p->helper))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	rcu_read_unlock();

	return skb->len;
nla_put_failure:
	rcu_read_unlock();
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind,
				    struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (tcf_ct_helper(act))
			return -EOPNOTSUPP;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind = "ct",
	.id = TCA_ID_CT,
	.owner = THIS_MODULE,
	.act = tcf_ct_act,
	.dump = tcf_ct_dump,
	.init = tcf_ct_init,
	.cleanup = tcf_ct_cleanup,
	.stats_update = tcf_stats_update,
	.offload_act_setup = tcf_ct_offload_act_setup,
	.size = sizeof(struct tcf_ct),
};
MODULE_ALIAS_NET_ACT("ct");

static __net_init int ct_init_net(struct net *net)
{
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ct_ops.net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id = &act_ct_ops.net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");