// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <uapi/linux/netfilter/nf_nat.h>

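/* Overview: the ct action sends packets through netfilter connection
 * tracking (and, optionally, NAT) from the TC datapath, and records the
 * result so that later flower matches can key on ct_state/ct_zone.
 * For illustration only (iproute2 syntax; device name, zone and chain
 * numbers are placeholders), a typical recirculation setup might look
 * like:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress proto ip flower ct_state -trk \
 *	action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress chain 1 proto ip flower \
 *	ct_state +trk+new action ct commit zone 1
 *
 * Established connections are additionally placed in a per-zone
 * nf_flowtable so that follow-up packets can take a fastpath, see
 * tcf_ct_flow_table_*() below.
 */
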
static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};
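
/* One tcf_ct_flow_table is kept per conntrack zone and shared, via the
 * refcount above, by every ct action instance configured for that zone;
 * zones_ht maps the 16-bit zone id to its table, with lookups and
 * insertions serialized by zones_mutex.
 */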

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check whether the inverted reverse
 * tuple (target) differs from the current direction's tuple - meaning NAT
 * for the ports and/or IP addresses is needed - and add the relevant
 * mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference set on the skb by nf_ct_set() */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
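
/* Worked example for the translation above: with source NAT rewriting
 * 10.0.0.1 to 192.0.2.1 on an IPv4/TCP connection (addresses purely
 * illustrative), the original-direction rule gets one IP4 mangle entry
 * for iphdr->saddr and, if the source port was remapped too, one TCP
 * mangle entry for tcphdr->source.
 */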

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action		= tcf_ct_flow_table_fill_actions,
	.owner		= THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}
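
/* The last reference may be dropped while datapath users can still be
 * walking the flow table under RCU, so teardown is deferred: the entry
 * leaves zones_ht immediately, but the actual free runs from act_ct_wq
 * only after an RCU grace period (tcf_ct_flow_table_cleanup_work() above).
 */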

static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
{
	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
	    !test_bit(IPS_ASSURED_BIT, &ct->status))
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}
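
/* Summarizing the checks above: a connection is offloaded to the flow
 * table only once it is established and assured, is TCP in the
 * ESTABLISHED conntrack state or UDP, uses no conntrack helper, and
 * needs no TCP sequence adjustment.
 */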

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
					thoff + sizeof(struct tcphdr) :
					thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
					thoff + sizeof(struct tcphdr) :
					thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}
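
/* On a flow table hit the skb is associated with the offloaded conntrack
 * entry directly, letting tcf_ct_act() skip the full nf_conntrack_in()
 * lookup; TCP FIN/RST packets instead tear the flow down and take the
 * slow path so conntrack can follow connection close.
 */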

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_ct_put(ct);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}
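
/* Example of why the trim above matters: a 40-byte TCP/IPv4 ACK arrives
 * in a minimum-size Ethernet frame, so after the MAC header is pulled
 * skb->len is 46 while tot_len is 40; the six bytes of link-layer
 * padding would otherwise be fed into checksum validation.
 */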

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	mru = tc_skb_cb(skb)->mru;

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	if (err != -EINPROGRESS)
		tc_skb_cb(skb)->mru = mru;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}
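
/* Note on the -EINPROGRESS handling above: ip_defrag() and
 * nf_ct_frag6_gather() return -EINPROGRESS when the fragment was queued
 * to await the rest of the datagram. The skb then belongs to the defrag
 * engine, and the caller must treat the packet as stolen rather than as
 * an error (see tcf_ct_act()).
 */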

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_ct_put(params->tmpl);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
	if (err == NF_ACCEPT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			tc_skb_cb(skb)->post_ct_snat = 1;
		if (maniptype == NF_NAT_MANIP_DST)
			tc_skb_cb(skb)->post_ct_dnat = 1;
	}
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out_clear;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(ct);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}
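
/* Control flow summary for tcf_ct_act(): a packet is either consumed by
 * the defragmentation engine (TC_ACT_STOLEN), dropped on a conntrack or
 * NAT failure (TC_ACT_SHOT), or passed on with skb->_nfct plus post_ct
 * and the zone recorded in the tc skb cb, where subsequent flower
 * ct_state/ct_zone matches can see them.
 */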

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter NAT isn't enabled in the kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels aren't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones aren't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	p->tmpl = tmpl;

	return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}
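
/* Parameter updates are RCU-managed: tcf_ct_init() publishes the new
 * tcf_ct_params with rcu_replace_pointer() under tcf_lock, and the old
 * set - including its flow table reference and conntrack template - is
 * released from tcf_ct_params_free() after a grace period.
 */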

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.offload_act_setup =	tcf_ct_offload_act_setup,
	.size		=	sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length\n");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");