1 /* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8 *
9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11 *
12 * Further development of this code funded by Astaro AG (http://www.astaro.com)
13 *
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
16 */
17
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32 #include <linux/siphash.h>
33
34 #include <linux/netfilter.h>
35 #include <net/netlink.h>
36 #include <net/sock.h>
37 #include <net/netfilter/nf_conntrack.h>
38 #include <net/netfilter/nf_conntrack_core.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_seqadj.h>
42 #include <net/netfilter/nf_conntrack_l4proto.h>
43 #include <net/netfilter/nf_conntrack_tuple.h>
44 #include <net/netfilter/nf_conntrack_acct.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_labels.h>
48 #include <net/netfilter/nf_conntrack_synproxy.h>
49 #if IS_ENABLED(CONFIG_NF_NAT)
50 #include <net/netfilter/nf_nat.h>
51 #include <net/netfilter/nf_nat_helper.h>
52 #endif
53
54 #include <linux/netfilter/nfnetlink.h>
55 #include <linux/netfilter/nfnetlink_conntrack.h>
56
57 #include "nf_internals.h"
58
59 MODULE_LICENSE("GPL");
60 MODULE_DESCRIPTION("List and change connection tracking table");
61
62 struct ctnetlink_list_dump_ctx {
63 struct nf_conn *last;
64 unsigned int cpu;
65 bool done;
66 };
67
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
69 const struct nf_conntrack_tuple *tuple,
70 const struct nf_conntrack_l4proto *l4proto)
71 {
72 int ret = 0;
73 struct nlattr *nest_parms;
74
75 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
76 if (!nest_parms)
77 goto nla_put_failure;
78 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
79 goto nla_put_failure;
80
81 if (likely(l4proto->tuple_to_nlattr))
82 ret = l4proto->tuple_to_nlattr(skb, tuple);
83
84 nla_nest_end(skb, nest_parms);
85
86 return ret;
87
88 nla_put_failure:
89 return -1;
90 }
91
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
93 const struct nf_conntrack_tuple *tuple)
94 {
95 if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
96 nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
97 return -EMSGSIZE;
98 return 0;
99 }
100
static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
102 const struct nf_conntrack_tuple *tuple)
103 {
104 if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
105 nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
106 return -EMSGSIZE;
107 return 0;
108 }
109
static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
111 const struct nf_conntrack_tuple *tuple)
112 {
113 int ret = 0;
114 struct nlattr *nest_parms;
115
116 nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
117 if (!nest_parms)
118 goto nla_put_failure;
119
120 switch (tuple->src.l3num) {
121 case NFPROTO_IPV4:
122 ret = ipv4_tuple_to_nlattr(skb, tuple);
123 break;
124 case NFPROTO_IPV6:
125 ret = ipv6_tuple_to_nlattr(skb, tuple);
126 break;
127 }
128
129 nla_nest_end(skb, nest_parms);
130
131 return ret;
132
133 nla_put_failure:
134 return -1;
135 }
136
static int ctnetlink_dump_tuples(struct sk_buff *skb,
138 const struct nf_conntrack_tuple *tuple)
139 {
140 const struct nf_conntrack_l4proto *l4proto;
141 int ret;
142
143 rcu_read_lock();
144 ret = ctnetlink_dump_tuples_ip(skb, tuple);
145
146 if (ret >= 0) {
147 l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
148 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
149 }
150 rcu_read_unlock();
151 return ret;
152 }
153
static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
155 const struct nf_conntrack_zone *zone, int dir)
156 {
157 if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
158 return 0;
159 if (nla_put_be16(skb, attrtype, htons(zone->id)))
160 goto nla_put_failure;
161 return 0;
162
163 nla_put_failure:
164 return -1;
165 }
166
static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
168 {
169 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
170 goto nla_put_failure;
171 return 0;
172
173 nla_put_failure:
174 return -1;
175 }
176
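/* Report the remaining lifetime: nf_ct_expires() for confirmed entries,
 * the raw relative timeout otherwise. With skip_zero the attribute is
 * omitted once the timer has already run out.
 */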
static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
178 bool skip_zero)
179 {
180 long timeout;
181
182 if (nf_ct_is_confirmed(ct))
183 timeout = nf_ct_expires(ct) / HZ;
184 else
185 timeout = ct->timeout / HZ;
186
187 if (skip_zero && timeout == 0)
188 return 0;
189
190 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
191 goto nla_put_failure;
192 return 0;
193
194 nla_put_failure:
195 return -1;
196 }
197
static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
199 bool destroy)
200 {
201 const struct nf_conntrack_l4proto *l4proto;
202 struct nlattr *nest_proto;
203 int ret;
204
205 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
206 if (!l4proto->to_nlattr)
207 return 0;
208
209 nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
210 if (!nest_proto)
211 goto nla_put_failure;
212
213 ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);
214
215 nla_nest_end(skb, nest_proto);
216
217 return ret;
218
219 nla_put_failure:
220 return -1;
221 }
222
static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
224 const struct nf_conn *ct)
225 {
226 struct nlattr *nest_helper;
227 const struct nf_conn_help *help = nfct_help(ct);
228 struct nf_conntrack_helper *helper;
229
230 if (!help)
231 return 0;
232
233 rcu_read_lock();
234 helper = rcu_dereference(help->helper);
235 if (!helper)
236 goto out;
237
238 nest_helper = nla_nest_start(skb, CTA_HELP);
239 if (!nest_helper)
240 goto nla_put_failure;
241 if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
242 goto nla_put_failure;
243
244 if (helper->to_nlattr)
245 helper->to_nlattr(skb, ct);
246
247 nla_nest_end(skb, nest_helper);
248 out:
249 rcu_read_unlock();
250 return 0;
251
252 nla_put_failure:
253 rcu_read_unlock();
254 return -1;
255 }
256
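/* Dump per-direction accounting. A CTRZERO request atomically reads and
 * resets the counters, a plain GET only reads them.
 */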
257 static int
dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
259 enum ip_conntrack_dir dir, int type)
260 {
261 enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
262 struct nf_conn_counter *counter = acct->counter;
263 struct nlattr *nest_count;
264 u64 pkts, bytes;
265
266 if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
267 pkts = atomic64_xchg(&counter[dir].packets, 0);
268 bytes = atomic64_xchg(&counter[dir].bytes, 0);
269 } else {
270 pkts = atomic64_read(&counter[dir].packets);
271 bytes = atomic64_read(&counter[dir].bytes);
272 }
273
274 nest_count = nla_nest_start(skb, attr);
275 if (!nest_count)
276 goto nla_put_failure;
277
278 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
279 CTA_COUNTERS_PAD) ||
280 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
281 CTA_COUNTERS_PAD))
282 goto nla_put_failure;
283
284 nla_nest_end(skb, nest_count);
285
286 return 0;
287
288 nla_put_failure:
289 return -1;
290 }
291
292 static int
ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
294 {
295 struct nf_conn_acct *acct = nf_conn_acct_find(ct);
296
297 if (!acct)
298 return 0;
299
300 if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
301 return -1;
302 if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
303 return -1;
304
305 return 0;
306 }
307
308 static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
310 {
311 struct nlattr *nest_count;
312 const struct nf_conn_tstamp *tstamp;
313
314 tstamp = nf_conn_tstamp_find(ct);
315 if (!tstamp)
316 return 0;
317
318 nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
319 if (!nest_count)
320 goto nla_put_failure;
321
322 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
323 CTA_TIMESTAMP_PAD) ||
324 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
325 cpu_to_be64(tstamp->stop),
326 CTA_TIMESTAMP_PAD)))
327 goto nla_put_failure;
328 nla_nest_end(skb, nest_count);
329
330 return 0;
331
332 nla_put_failure:
333 return -1;
334 }
335
336 #ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
338 bool dump)
339 {
340 u32 mark = READ_ONCE(ct->mark);
341
342 if (!mark && !dump)
343 return 0;
344
345 if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
346 goto nla_put_failure;
347 return 0;
348
349 nla_put_failure:
350 return -1;
351 }
352 #else
353 #define ctnetlink_dump_mark(a, b, c) (0)
354 #endif
355
356 #ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
358 {
359 struct nlattr *nest_secctx;
360 struct lsm_context ctx;
361 int ret;
362
363 ret = security_secid_to_secctx(ct->secmark, &ctx);
364 if (ret < 0)
365 return 0;
366
367 ret = -1;
368 nest_secctx = nla_nest_start(skb, CTA_SECCTX);
369 if (!nest_secctx)
370 goto nla_put_failure;
371
372 if (nla_put_string(skb, CTA_SECCTX_NAME, ctx.context))
373 goto nla_put_failure;
374 nla_nest_end(skb, nest_secctx);
375
376 ret = 0;
377 nla_put_failure:
378 security_release_secctx(&ctx);
379 return ret;
380 }
381 #else
382 #define ctnetlink_dump_secctx(a, b) (0)
383 #endif
384
385 #ifdef CONFIG_NF_CONNTRACK_EVENTS
386 static int
ctnetlink_dump_event_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
388 {
389 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
390 const struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct);
391
392 if (e) {
393 u64 ts = local64_read(&e->timestamp);
394
395 if (ts)
396 return nla_put_be64(skb, CTA_TIMESTAMP_EVENT,
397 cpu_to_be64(ts), CTA_TIMESTAMP_PAD);
398 }
399 #endif
400 return 0;
401 }
402
static inline int ctnetlink_label_size(const struct nf_conn *ct)
404 {
405 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
406
407 if (!labels)
408 return 0;
409 return nla_total_size(sizeof(labels->bits));
410 }
411 #endif
412
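/* Only emit CTA_LABELS if at least one label bit is set. */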
413 static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
415 {
416 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
417 unsigned int i;
418
419 if (!labels)
420 return 0;
421
422 i = 0;
423 do {
424 if (labels->bits[i] != 0)
425 return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
426 labels->bits);
427 i++;
428 } while (i < ARRAY_SIZE(labels->bits));
429
430 return 0;
431 }
432
433 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
434
static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
436 {
437 struct nlattr *nest_parms;
438
439 if (!(ct->status & IPS_EXPECTED))
440 return 0;
441
442 nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
443 if (!nest_parms)
444 goto nla_put_failure;
445 if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
446 goto nla_put_failure;
447 nla_nest_end(skb, nest_parms);
448
449 return 0;
450
451 nla_put_failure:
452 return -1;
453 }
454
455 static int
dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
457 {
458 struct nlattr *nest_parms;
459
460 nest_parms = nla_nest_start(skb, type);
461 if (!nest_parms)
462 goto nla_put_failure;
463
464 if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
465 htonl(seq->correction_pos)) ||
466 nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
467 htonl(seq->offset_before)) ||
468 nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
469 htonl(seq->offset_after)))
470 goto nla_put_failure;
471
472 nla_nest_end(skb, nest_parms);
473
474 return 0;
475
476 nla_put_failure:
477 return -1;
478 }
479
static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
481 {
482 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
483 struct nf_ct_seqadj *seq;
484
485 if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
486 return 0;
487
488 spin_lock_bh(&ct->lock);
489 seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
490 if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
491 goto err;
492
493 seq = &seqadj->seq[IP_CT_DIR_REPLY];
494 if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
495 goto err;
496
497 spin_unlock_bh(&ct->lock);
498 return 0;
499 err:
500 spin_unlock_bh(&ct->lock);
501 return -1;
502 }
503
static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
505 {
506 struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
507 struct nlattr *nest_parms;
508
509 if (!synproxy)
510 return 0;
511
512 nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
513 if (!nest_parms)
514 goto nla_put_failure;
515
516 if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
517 nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
518 nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
519 goto nla_put_failure;
520
521 nla_nest_end(skb, nest_parms);
522
523 return 0;
524
525 nla_put_failure:
526 return -1;
527 }
528
static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
530 {
531 __be32 id = (__force __be32)nf_ct_get_id(ct);
532
533 if (nla_put_be32(skb, CTA_ID, id))
534 goto nla_put_failure;
535 return 0;
536
537 nla_put_failure:
538 return -1;
539 }
540
static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
542 {
543 if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
544 goto nla_put_failure;
545 return 0;
546
547 nla_put_failure:
548 return -1;
549 }
550
551 /* all these functions access ct->ext. Caller must either hold a reference
552 * on ct or prevent its deletion by holding either the bucket spinlock or
553 * pcpu dying list lock.
554 */
static int ctnetlink_dump_extinfo(struct sk_buff *skb,
556 struct nf_conn *ct, u32 type)
557 {
558 if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
559 ctnetlink_dump_timestamp(skb, ct) < 0 ||
560 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
561 ctnetlink_dump_labels(skb, ct) < 0 ||
562 ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
563 ctnetlink_dump_ct_synproxy(skb, ct) < 0)
564 return -1;
565
566 return 0;
567 }
568
static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
570 {
571 if (ctnetlink_dump_status(skb, ct) < 0 ||
572 ctnetlink_dump_mark(skb, ct, true) < 0 ||
573 ctnetlink_dump_secctx(skb, ct) < 0 ||
574 ctnetlink_dump_id(skb, ct) < 0 ||
575 ctnetlink_dump_use(skb, ct) < 0 ||
576 ctnetlink_dump_master(skb, ct) < 0)
577 return -1;
578
579 if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
580 (ctnetlink_dump_timeout(skb, ct, false) < 0 ||
581 ctnetlink_dump_protoinfo(skb, ct, false) < 0))
582 return -1;
583
584 return 0;
585 }
586
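/* Fill a complete IPCTNL_MSG_CT_NEW message for one conntrack entry:
 * both tuples, zone information and, if requested, the extension data.
 * Returns the skb length on success or -1 if the message did not fit.
 */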
587 static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
589 struct nf_conn *ct, bool extinfo, unsigned int flags)
590 {
591 const struct nf_conntrack_zone *zone;
592 struct nlmsghdr *nlh;
593 struct nlattr *nest_parms;
594 unsigned int event;
595
596 if (portid)
597 flags |= NLM_F_MULTI;
598 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
599 nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct),
600 NFNETLINK_V0, 0);
601 if (!nlh)
602 goto nlmsg_failure;
603
604 zone = nf_ct_zone(ct);
605
606 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
607 if (!nest_parms)
608 goto nla_put_failure;
609 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
610 goto nla_put_failure;
611 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
612 NF_CT_ZONE_DIR_ORIG) < 0)
613 goto nla_put_failure;
614 nla_nest_end(skb, nest_parms);
615
616 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
617 if (!nest_parms)
618 goto nla_put_failure;
619 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
620 goto nla_put_failure;
621 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
622 NF_CT_ZONE_DIR_REPL) < 0)
623 goto nla_put_failure;
624 nla_nest_end(skb, nest_parms);
625
626 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
627 NF_CT_DEFAULT_ZONE_DIR) < 0)
628 goto nla_put_failure;
629
630 if (ctnetlink_dump_info(skb, ct) < 0)
631 goto nla_put_failure;
632 if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
633 goto nla_put_failure;
634
635 nlmsg_end(skb, nlh);
636 return skb->len;
637
638 nlmsg_failure:
639 nla_put_failure:
640 nlmsg_cancel(skb, nlh);
641 return -1;
642 }
643
644 static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
645 [CTA_IP_V4_SRC] = { .type = NLA_U32 },
646 [CTA_IP_V4_DST] = { .type = NLA_U32 },
647 [CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 },
648 [CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 },
649 };
650
651 #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
653 {
654 const struct nf_conntrack_l4proto *l4proto;
655 size_t len, len4 = 0;
656
657 len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
658 len *= 3u; /* ORIG, REPLY, MASTER */
659
660 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
661 len += l4proto->nlattr_size;
662 if (l4proto->nlattr_tuple_size) {
663 len4 = l4proto->nlattr_tuple_size();
664 len4 *= 3u; /* ORIG, REPLY, MASTER */
665 }
666
667 return len + len4;
668 }
669
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
671 {
672 if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
673 return 0;
674 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
675 + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
676 + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
677 ;
678 }
679
static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
681 {
682 #ifdef CONFIG_NF_CONNTRACK_SECMARK
683 int ret;
684
685 ret = security_secid_to_secctx(ct->secmark, NULL);
686 if (ret < 0)
687 return 0;
688
689 return nla_total_size(0) /* CTA_SECCTX */
690 + nla_total_size(sizeof(char) * ret); /* CTA_SECCTX_NAME */
691 #else
692 return 0;
693 #endif
694 }
695
static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
697 {
698 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
699 if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
700 return 0;
701 return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
702 #else
703 return 0;
704 #endif
705 }
706 #endif
707
708 #ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
710 {
711 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
712 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
713 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
714 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
715 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
716 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
717 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
718 + ctnetlink_acct_size(ct)
719 + ctnetlink_timestamp_size(ct)
720 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
721 + nla_total_size(0) /* CTA_PROTOINFO */
722 + nla_total_size(0) /* CTA_HELP */
723 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
724 + ctnetlink_secctx_size(ct)
725 #if IS_ENABLED(CONFIG_NF_NAT)
726 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
727 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
728 #endif
729 #ifdef CONFIG_NF_CONNTRACK_MARK
730 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
731 #endif
732 #ifdef CONFIG_NF_CONNTRACK_ZONES
733 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
734 #endif
735 + ctnetlink_proto_size(ct)
736 + ctnetlink_label_size(ct)
737 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
738 + nla_total_size(sizeof(u64)) /* CTA_TIMESTAMP_EVENT */
739 #endif
740 ;
741 }
742
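/* ecache event callback: translate conntrack events into a netlink
 * notification and multicast it to the matching NFNLGRP_CONNTRACK_*
 * group. Returning -ENOBUFS signals the event cache that delivery
 * failed and should be retried.
 */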
743 static int
ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
745 {
746 const struct nf_conntrack_zone *zone;
747 struct net *net;
748 struct nlmsghdr *nlh;
749 struct nlattr *nest_parms;
750 struct nf_conn *ct = item->ct;
751 struct sk_buff *skb;
752 unsigned int type;
753 unsigned int flags = 0, group;
754 int err;
755
756 if (events & (1 << IPCT_DESTROY)) {
757 type = IPCTNL_MSG_CT_DELETE;
758 group = NFNLGRP_CONNTRACK_DESTROY;
759 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
760 type = IPCTNL_MSG_CT_NEW;
761 flags = NLM_F_CREATE|NLM_F_EXCL;
762 group = NFNLGRP_CONNTRACK_NEW;
763 } else if (events) {
764 type = IPCTNL_MSG_CT_NEW;
765 group = NFNLGRP_CONNTRACK_UPDATE;
766 } else
767 return 0;
768
769 net = nf_ct_net(ct);
770 if (!item->report && !nfnetlink_has_listeners(net, group))
771 return 0;
772
773 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
774 if (skb == NULL)
775 goto errout;
776
777 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
778 nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),
779 NFNETLINK_V0, 0);
780 if (!nlh)
781 goto nlmsg_failure;
782
783 zone = nf_ct_zone(ct);
784
785 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
786 if (!nest_parms)
787 goto nla_put_failure;
788 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
789 goto nla_put_failure;
790 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
791 NF_CT_ZONE_DIR_ORIG) < 0)
792 goto nla_put_failure;
793 nla_nest_end(skb, nest_parms);
794
795 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
796 if (!nest_parms)
797 goto nla_put_failure;
798 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
799 goto nla_put_failure;
800 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
801 NF_CT_ZONE_DIR_REPL) < 0)
802 goto nla_put_failure;
803 nla_nest_end(skb, nest_parms);
804
805 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
806 NF_CT_DEFAULT_ZONE_DIR) < 0)
807 goto nla_put_failure;
808
809 if (ctnetlink_dump_id(skb, ct) < 0)
810 goto nla_put_failure;
811
812 if (ctnetlink_dump_status(skb, ct) < 0)
813 goto nla_put_failure;
814
815 if (events & (1 << IPCT_DESTROY)) {
816 if (ctnetlink_dump_timeout(skb, ct, true) < 0)
817 goto nla_put_failure;
818
819 if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
820 ctnetlink_dump_timestamp(skb, ct) < 0 ||
821 ctnetlink_dump_protoinfo(skb, ct, true) < 0)
822 goto nla_put_failure;
823 } else {
824 if (ctnetlink_dump_timeout(skb, ct, false) < 0)
825 goto nla_put_failure;
826
827 if (events & (1 << IPCT_PROTOINFO) &&
828 ctnetlink_dump_protoinfo(skb, ct, false) < 0)
829 goto nla_put_failure;
830
831 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
832 && ctnetlink_dump_helpinfo(skb, ct) < 0)
833 goto nla_put_failure;
834
835 #ifdef CONFIG_NF_CONNTRACK_SECMARK
836 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
837 && ctnetlink_dump_secctx(skb, ct) < 0)
838 goto nla_put_failure;
839 #endif
840 if (events & (1 << IPCT_LABEL) &&
841 ctnetlink_dump_labels(skb, ct) < 0)
842 goto nla_put_failure;
843
844 if (events & (1 << IPCT_RELATED) &&
845 ctnetlink_dump_master(skb, ct) < 0)
846 goto nla_put_failure;
847
848 if (events & (1 << IPCT_SEQADJ) &&
849 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
850 goto nla_put_failure;
851
852 if (events & (1 << IPCT_SYNPROXY) &&
853 ctnetlink_dump_ct_synproxy(skb, ct) < 0)
854 goto nla_put_failure;
855 }
856
857 #ifdef CONFIG_NF_CONNTRACK_MARK
858 if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
859 goto nla_put_failure;
860 #endif
861
862 if (ctnetlink_dump_event_timestamp(skb, ct))
863 goto nla_put_failure;
864
865 nlmsg_end(skb, nlh);
866 err = nfnetlink_send(skb, net, item->portid, group, item->report,
867 GFP_ATOMIC);
868 if (err == -ENOBUFS || err == -EAGAIN)
869 return -ENOBUFS;
870
871 return 0;
872
873 nla_put_failure:
874 nlmsg_cancel(skb, nlh);
875 nlmsg_failure:
876 kfree_skb(skb);
877 errout:
878 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
879 return -ENOBUFS;
880
881 return 0;
882 }
883 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
884
static int ctnetlink_done(struct netlink_callback *cb)
886 {
887 if (cb->args[1])
888 nf_ct_put((struct nf_conn *)cb->args[1]);
889 kfree(cb->data);
890 return 0;
891 }
892
893 struct ctnetlink_filter_u32 {
894 u32 val;
895 u32 mask;
896 };
897
898 struct ctnetlink_filter {
899 u8 family;
900 bool zone_filter;
901
902 u_int32_t orig_flags;
903 u_int32_t reply_flags;
904
905 struct nf_conntrack_tuple orig;
906 struct nf_conntrack_tuple reply;
907 struct nf_conntrack_zone zone;
908
909 struct ctnetlink_filter_u32 mark;
910 struct ctnetlink_filter_u32 status;
911 };
912
913 static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
914 [CTA_FILTER_ORIG_FLAGS] = { .type = NLA_U32 },
915 [CTA_FILTER_REPLY_FLAGS] = { .type = NLA_U32 },
916 };
917
static int ctnetlink_parse_filter(const struct nlattr *attr,
919 struct ctnetlink_filter *filter)
920 {
921 struct nlattr *tb[CTA_FILTER_MAX + 1];
922 int ret = 0;
923
924 ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
925 NULL);
926 if (ret)
927 return ret;
928
929 if (tb[CTA_FILTER_ORIG_FLAGS]) {
930 filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
931 if (filter->orig_flags & ~CTA_FILTER_F_ALL)
932 return -EOPNOTSUPP;
933 }
934
935 if (tb[CTA_FILTER_REPLY_FLAGS]) {
936 filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
937 if (filter->reply_flags & ~CTA_FILTER_F_ALL)
938 return -EOPNOTSUPP;
939 }
940
941 return 0;
942 }
943
944 static int ctnetlink_parse_zone(const struct nlattr *attr,
945 struct nf_conntrack_zone *zone);
946 static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
947 struct nf_conntrack_tuple *tuple,
948 u32 type, u_int8_t l3num,
949 struct nf_conntrack_zone *zone,
950 u_int32_t flags);
951
static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark,
953 const struct nlattr * const cda[])
954 {
955 #ifdef CONFIG_NF_CONNTRACK_MARK
956 if (cda[CTA_MARK]) {
957 mark->val = ntohl(nla_get_be32(cda[CTA_MARK]));
958
959 if (cda[CTA_MARK_MASK])
960 mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
961 else
962 mark->mask = 0xffffffff;
963 } else if (cda[CTA_MARK_MASK]) {
964 return -EINVAL;
965 }
966 #endif
967 return 0;
968 }
969
static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32 *status,
971 const struct nlattr * const cda[])
972 {
973 if (cda[CTA_STATUS]) {
974 status->val = ntohl(nla_get_be32(cda[CTA_STATUS]));
975 if (cda[CTA_STATUS_MASK])
976 status->mask = ntohl(nla_get_be32(cda[CTA_STATUS_MASK]));
977 else
978 status->mask = status->val;
979
980 /* status->val == 0? always true, else always false. */
981 if (status->mask == 0)
982 return -EINVAL;
983 } else if (cda[CTA_STATUS_MASK]) {
984 return -EINVAL;
985 }
986
987 /* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */
988 BUILD_BUG_ON(__IPS_MAX_BIT >= 32);
989 return 0;
990 }
991
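/* Build the kernel-side dump/flush filter from the netlink attributes:
 * l3 family, zone, mark/status value+mask and the optional CTA_FILTER
 * tuples.
 */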
992 static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
994 {
995 struct ctnetlink_filter *filter;
996 int err;
997
998 #ifndef CONFIG_NF_CONNTRACK_MARK
999 if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
1000 return ERR_PTR(-EOPNOTSUPP);
1001 #endif
1002
1003 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
1004 if (filter == NULL)
1005 return ERR_PTR(-ENOMEM);
1006
1007 filter->family = family;
1008
1009 err = ctnetlink_filter_parse_mark(&filter->mark, cda);
1010 if (err)
1011 goto err_filter;
1012
1013 err = ctnetlink_filter_parse_status(&filter->status, cda);
1014 if (err)
1015 goto err_filter;
1016
1017 if (cda[CTA_ZONE]) {
1018 err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
1019 if (err < 0)
1020 goto err_filter;
1021 filter->zone_filter = true;
1022 }
1023
1024 if (!cda[CTA_FILTER])
1025 return filter;
1026
1027 err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
1028 if (err < 0)
1029 goto err_filter;
1030
1031 if (filter->orig_flags) {
1032 if (!cda[CTA_TUPLE_ORIG]) {
1033 err = -EINVAL;
1034 goto err_filter;
1035 }
1036
1037 err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
1038 CTA_TUPLE_ORIG,
1039 filter->family,
1040 &filter->zone,
1041 filter->orig_flags);
1042 if (err < 0)
1043 goto err_filter;
1044 }
1045
1046 if (filter->reply_flags) {
1047 if (!cda[CTA_TUPLE_REPLY]) {
1048 err = -EINVAL;
1049 goto err_filter;
1050 }
1051
1052 err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
1053 CTA_TUPLE_REPLY,
1054 filter->family,
1055 &filter->zone,
1056 filter->reply_flags);
1057 if (err < 0)
1058 goto err_filter;
1059 }
1060
1061 return filter;
1062
1063 err_filter:
1064 kfree(filter);
1065
1066 return ERR_PTR(err);
1067 }
1068
static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
1070 {
1071 return family || cda[CTA_MARK] || cda[CTA_FILTER] || cda[CTA_STATUS] || cda[CTA_ZONE];
1072 }
1073
static int ctnetlink_start(struct netlink_callback *cb)
1075 {
1076 const struct nlattr * const *cda = cb->data;
1077 struct ctnetlink_filter *filter = NULL;
1078 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1079 u8 family = nfmsg->nfgen_family;
1080
1081 if (ctnetlink_needs_filter(family, cda)) {
1082 filter = ctnetlink_alloc_filter(cda, family);
1083 if (IS_ERR(filter))
1084 return PTR_ERR(filter);
1085 }
1086
1087 cb->data = filter;
1088 return 0;
1089 }
1090
static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
1092 struct nf_conntrack_tuple *ct_tuple,
1093 u_int32_t flags, int family)
1094 {
1095 switch (family) {
1096 case NFPROTO_IPV4:
1097 if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
1098 filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
1099 return 0;
1100
1101 if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
1102 filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
1103 return 0;
1104 break;
1105 case NFPROTO_IPV6:
1106 if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
1107 !ipv6_addr_cmp(&filter_tuple->src.u3.in6,
1108 &ct_tuple->src.u3.in6))
1109 return 0;
1110
1111 if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
1112 !ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
1113 &ct_tuple->dst.u3.in6))
1114 return 0;
1115 break;
1116 }
1117
1118 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
1119 filter_tuple->dst.protonum != ct_tuple->dst.protonum)
1120 return 0;
1121
1122 switch (ct_tuple->dst.protonum) {
1123 case IPPROTO_TCP:
1124 case IPPROTO_UDP:
1125 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
1126 filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
1127 return 0;
1128
1129 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
1130 filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
1131 return 0;
1132 break;
1133 case IPPROTO_ICMP:
1134 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
1135 filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
1136 return 0;
1137 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
1138 filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
1139 return 0;
1140 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
1141 filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
1142 return 0;
1143 break;
1144 case IPPROTO_ICMPV6:
1145 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
1146 filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
1147 return 0;
1148 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
1149 filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
1150 return 0;
1151 if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
1152 filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
1153 return 0;
1154 break;
1155 }
1156
1157 return 1;
1158 }
1159
static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
1161 {
1162 struct ctnetlink_filter *filter = data;
1163 struct nf_conntrack_tuple *tuple;
1164 u32 status;
1165
1166 if (filter == NULL)
1167 goto out;
1168
1169 /* Match entries of a given L3 protocol number.
1170 * If it is not specified, ie. l3proto == 0,
1171 * then match everything.
1172 */
1173 if (filter->family && nf_ct_l3num(ct) != filter->family)
1174 goto ignore_entry;
1175
1176 if (filter->zone_filter &&
1177 !nf_ct_zone_equal_any(ct, &filter->zone))
1178 goto ignore_entry;
1179
1180 if (filter->orig_flags) {
1181 tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
1182 if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
1183 filter->orig_flags,
1184 filter->family))
1185 goto ignore_entry;
1186 }
1187
1188 if (filter->reply_flags) {
1189 tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
1190 if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
1191 filter->reply_flags,
1192 filter->family))
1193 goto ignore_entry;
1194 }
1195
1196 #ifdef CONFIG_NF_CONNTRACK_MARK
1197 if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
1198 goto ignore_entry;
1199 #endif
1200 status = (u32)READ_ONCE(ct->status);
1201 if ((status & filter->status.mask) != filter->status.val)
1202 goto ignore_entry;
1203
1204 out:
1205 return 1;
1206
1207 ignore_entry:
1208 return 0;
1209 }
1210
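/* Walk the conntrack hash table bucket by bucket. cb->args[0] holds the
 * current bucket, cb->args[1] a referenced entry to resume from after a
 * partially filled skb. Expired entries found while walking are collected
 * and only killed after the bucket lock has been dropped.
 */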
1211 static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1213 {
1214 unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
1215 struct net *net = sock_net(skb->sk);
1216 struct nf_conn *ct, *last;
1217 struct nf_conntrack_tuple_hash *h;
1218 struct hlist_nulls_node *n;
1219 struct nf_conn *nf_ct_evict[8];
1220 int res, i;
1221 spinlock_t *lockp;
1222
1223 last = (struct nf_conn *)cb->args[1];
1224 i = 0;
1225
1226 local_bh_disable();
1227 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
1228 restart:
1229 while (i) {
1230 i--;
1231 if (nf_ct_should_gc(nf_ct_evict[i]))
1232 nf_ct_kill(nf_ct_evict[i]);
1233 nf_ct_put(nf_ct_evict[i]);
1234 }
1235
1236 lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
1237 nf_conntrack_lock(lockp);
1238 if (cb->args[0] >= nf_conntrack_htable_size) {
1239 spin_unlock(lockp);
1240 goto out;
1241 }
1242 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
1243 hnnode) {
1244 ct = nf_ct_tuplehash_to_ctrack(h);
1245 if (nf_ct_is_expired(ct)) {
1246 /* need to defer nf_ct_kill() until lock is released */
1247 if (i < ARRAY_SIZE(nf_ct_evict) &&
1248 refcount_inc_not_zero(&ct->ct_general.use))
1249 nf_ct_evict[i++] = ct;
1250 continue;
1251 }
1252
1253 if (!net_eq(net, nf_ct_net(ct)))
1254 continue;
1255
1256 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1257 continue;
1258
1259 if (cb->args[1]) {
1260 if (ct != last)
1261 continue;
1262 cb->args[1] = 0;
1263 }
1264 if (!ctnetlink_filter_match(ct, cb->data))
1265 continue;
1266
1267 res =
1268 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1269 cb->nlh->nlmsg_seq,
1270 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1271 ct, true, flags);
1272 if (res < 0) {
1273 nf_conntrack_get(&ct->ct_general);
1274 cb->args[1] = (unsigned long)ct;
1275 spin_unlock(lockp);
1276 goto out;
1277 }
1278 }
1279 spin_unlock(lockp);
1280 if (cb->args[1]) {
1281 cb->args[1] = 0;
1282 goto restart;
1283 }
1284 }
1285 out:
1286 local_bh_enable();
1287 if (last) {
1288 /* nf ct hash resize happened, now clear the leftover. */
1289 if ((struct nf_conn *)cb->args[1] == last)
1290 cb->args[1] = 0;
1291
1292 nf_ct_put(last);
1293 }
1294
1295 while (i) {
1296 i--;
1297 if (nf_ct_should_gc(nf_ct_evict[i]))
1298 nf_ct_kill(nf_ct_evict[i]);
1299 nf_ct_put(nf_ct_evict[i]);
1300 }
1301
1302 return skb->len;
1303 }
1304
static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
1306 struct nf_conntrack_tuple *t,
1307 u_int32_t flags)
1308 {
1309 if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1310 if (!tb[CTA_IP_V4_SRC])
1311 return -EINVAL;
1312
1313 t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
1314 }
1315
1316 if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
1317 if (!tb[CTA_IP_V4_DST])
1318 return -EINVAL;
1319
1320 t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
1321 }
1322
1323 return 0;
1324 }
1325
static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
1327 struct nf_conntrack_tuple *t,
1328 u_int32_t flags)
1329 {
1330 if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1331 if (!tb[CTA_IP_V6_SRC])
1332 return -EINVAL;
1333
1334 t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
1335 }
1336
1337 if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
1338 if (!tb[CTA_IP_V6_DST])
1339 return -EINVAL;
1340
1341 t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
1342 }
1343
1344 return 0;
1345 }
1346
static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
1348 struct nf_conntrack_tuple *tuple,
1349 u_int32_t flags)
1350 {
1351 struct nlattr *tb[CTA_IP_MAX+1];
1352 int ret = 0;
1353
1354 ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr,
1355 cta_ip_nla_policy, NULL);
1356 if (ret < 0)
1357 return ret;
1358
1359 switch (tuple->src.l3num) {
1360 case NFPROTO_IPV4:
1361 ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
1362 break;
1363 case NFPROTO_IPV6:
1364 ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
1365 break;
1366 }
1367
1368 return ret;
1369 }
1370
1371 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
1372 [CTA_PROTO_NUM] = { .type = NLA_U8 },
1373 };
1374
static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
1376 struct nf_conntrack_tuple *tuple,
1377 u_int32_t flags)
1378 {
1379 const struct nf_conntrack_l4proto *l4proto;
1380 struct nlattr *tb[CTA_PROTO_MAX+1];
1381 int ret = 0;
1382
1383 ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
1384 proto_nla_policy, NULL);
1385 if (ret < 0)
1386 return ret;
1387
1388 if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
1389 return 0;
1390
1391 if (!tb[CTA_PROTO_NUM])
1392 return -EINVAL;
1393
1394 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
1395
1396 rcu_read_lock();
1397 l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
1398
1399 if (likely(l4proto->nlattr_to_tuple)) {
1400 ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
1401 l4proto->nla_policy,
1402 NULL);
1403 if (ret == 0)
1404 ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
1405 }
1406
1407 rcu_read_unlock();
1408
1409 return ret;
1410 }
1411
1412 static int
ctnetlink_parse_zone(const struct nlattr *attr,
1414 struct nf_conntrack_zone *zone)
1415 {
1416 nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
1417 NF_CT_DEFAULT_ZONE_DIR, 0);
1418 #ifdef CONFIG_NF_CONNTRACK_ZONES
1419 if (attr)
1420 zone->id = ntohs(nla_get_be16(attr));
1421 #else
1422 if (attr)
1423 return -EOPNOTSUPP;
1424 #endif
1425 return 0;
1426 }
1427
1428 static int
ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
1430 struct nf_conntrack_zone *zone)
1431 {
1432 int ret;
1433
1434 if (zone->id != NF_CT_DEFAULT_ZONE_ID)
1435 return -EINVAL;
1436
1437 ret = ctnetlink_parse_zone(attr, zone);
1438 if (ret < 0)
1439 return ret;
1440
1441 if (type == CTA_TUPLE_REPLY)
1442 zone->dir = NF_CT_ZONE_DIR_REPL;
1443 else
1444 zone->dir = NF_CT_ZONE_DIR_ORIG;
1445
1446 return 0;
1447 }
1448
1449 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
1450 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
1451 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
1452 [CTA_TUPLE_ZONE] = { .type = NLA_U16 },
1453 };
1454
1455 #define CTA_FILTER_F_ALL_CTA_PROTO \
1456 (CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
1457 CTA_FILTER_F_CTA_PROTO_DST_PORT | \
1458 CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
1459 CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
1460 CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
1461 CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
1462 CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
1463 CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)
1464
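/* Parse a tuple attribute, but only the fields selected by the
 * CTA_FILTER flags; ctnetlink_parse_tuple() passes CTA_FILTER_FLAG(ALL)
 * to require a complete tuple.
 */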
1465 static int
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
1467 struct nf_conntrack_tuple *tuple, u32 type,
1468 u_int8_t l3num, struct nf_conntrack_zone *zone,
1469 u_int32_t flags)
1470 {
1471 struct nlattr *tb[CTA_TUPLE_MAX+1];
1472 int err;
1473
1474 memset(tuple, 0, sizeof(*tuple));
1475
1476 err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
1477 tuple_nla_policy, NULL);
1478 if (err < 0)
1479 return err;
1480
1481 if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
1482 return -EOPNOTSUPP;
1483 tuple->src.l3num = l3num;
1484
1485 if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
1486 flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
1487 if (!tb[CTA_TUPLE_IP])
1488 return -EINVAL;
1489
1490 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
1491 if (err < 0)
1492 return err;
1493 }
1494
1495 if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
1496 if (!tb[CTA_TUPLE_PROTO])
1497 return -EINVAL;
1498
1499 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
1500 if (err < 0)
1501 return err;
1502 } else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
1503 /* Can't manage proto flags without a protonum */
1504 return -EINVAL;
1505 }
1506
1507 if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
1508 if (!zone)
1509 return -EINVAL;
1510
1511 err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
1512 type, zone);
1513 if (err < 0)
1514 return err;
1515 }
1516
1517 /* orig and expect tuples get DIR_ORIGINAL */
1518 if (type == CTA_TUPLE_REPLY)
1519 tuple->dst.dir = IP_CT_DIR_REPLY;
1520 else
1521 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
1522
1523 return 0;
1524 }
1525
1526 static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
1528 struct nf_conntrack_tuple *tuple, u32 type,
1529 u_int8_t l3num, struct nf_conntrack_zone *zone)
1530 {
1531 return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
1532 CTA_FILTER_FLAG(ALL));
1533 }
1534
1535 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
1536 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
1537 .len = NF_CT_HELPER_NAME_LEN - 1 },
1538 };
1539
static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
1541 struct nlattr **helpinfo)
1542 {
1543 int err;
1544 struct nlattr *tb[CTA_HELP_MAX+1];
1545
1546 err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
1547 help_nla_policy, NULL);
1548 if (err < 0)
1549 return err;
1550
1551 if (!tb[CTA_HELP_NAME])
1552 return -EINVAL;
1553
1554 *helper_name = nla_data(tb[CTA_HELP_NAME]);
1555
1556 if (tb[CTA_HELP_INFO])
1557 *helpinfo = tb[CTA_HELP_INFO];
1558
1559 return 0;
1560 }
1561
1562 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
1563 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
1564 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
1565 [CTA_STATUS] = { .type = NLA_U32 },
1566 [CTA_PROTOINFO] = { .type = NLA_NESTED },
1567 [CTA_HELP] = { .type = NLA_NESTED },
1568 [CTA_NAT_SRC] = { .type = NLA_NESTED },
1569 [CTA_TIMEOUT] = { .type = NLA_U32 },
1570 [CTA_MARK] = { .type = NLA_U32 },
1571 [CTA_ID] = { .type = NLA_U32 },
1572 [CTA_NAT_DST] = { .type = NLA_NESTED },
1573 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
1574 [CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
1575 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
1576 [CTA_ZONE] = { .type = NLA_U16 },
1577 [CTA_MARK_MASK] = { .type = NLA_U32 },
1578 [CTA_LABELS] = { .type = NLA_BINARY,
1579 .len = NF_CT_LABELS_MAX_SIZE },
1580 [CTA_LABELS_MASK] = { .type = NLA_BINARY,
1581 .len = NF_CT_LABELS_MAX_SIZE },
1582 [CTA_FILTER] = { .type = NLA_NESTED },
1583 [CTA_STATUS_MASK] = { .type = NLA_U32 },
1584 [CTA_TIMESTAMP_EVENT] = { .type = NLA_REJECT },
1585 };
1586
static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
1588 {
1589 return ctnetlink_filter_match(ct, data);
1590 }
1591
static int ctnetlink_flush_conntrack(struct net *net,
1593 const struct nlattr * const cda[],
1594 u32 portid, int report, u8 family)
1595 {
1596 struct ctnetlink_filter *filter = NULL;
1597 struct nf_ct_iter_data iter = {
1598 .net = net,
1599 .portid = portid,
1600 .report = report,
1601 };
1602
1603 if (ctnetlink_needs_filter(family, cda)) {
1604 filter = ctnetlink_alloc_filter(cda, family);
1605 if (IS_ERR(filter))
1606 return PTR_ERR(filter);
1607
1608 iter.data = filter;
1609 }
1610
1611 nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
1612 kfree(filter);
1613
1614 return 0;
1615 }
1616
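/* Delete one entry identified by tuple (and optionally CTA_ID), or fall
 * back to a filtered flush of the table when no tuple is given.
 */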
static int ctnetlink_del_conntrack(struct sk_buff *skb,
1618 const struct nfnl_info *info,
1619 const struct nlattr * const cda[])
1620 {
1621 u8 family = info->nfmsg->nfgen_family;
1622 struct nf_conntrack_tuple_hash *h;
1623 struct nf_conntrack_tuple tuple;
1624 struct nf_conntrack_zone zone;
1625 struct nf_conn *ct;
1626 int err;
1627
1628 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1629 if (err < 0)
1630 return err;
1631
1632 if (cda[CTA_TUPLE_ORIG] && !cda[CTA_FILTER])
1633 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1634 family, &zone);
1635 else if (cda[CTA_TUPLE_REPLY] && !cda[CTA_FILTER])
1636 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1637 family, &zone);
1638 else {
1639 u8 u3 = info->nfmsg->version || cda[CTA_FILTER] ? family : AF_UNSPEC;
1640
1641 return ctnetlink_flush_conntrack(info->net, cda,
1642 NETLINK_CB(skb).portid,
1643 nlmsg_report(info->nlh), u3);
1644 }
1645
1646 if (err < 0)
1647 return err;
1648
1649 h = nf_conntrack_find_get(info->net, &zone, &tuple);
1650 if (!h)
1651 return -ENOENT;
1652
1653 ct = nf_ct_tuplehash_to_ctrack(h);
1654
1655 if (cda[CTA_ID]) {
1656 __be32 id = nla_get_be32(cda[CTA_ID]);
1657
1658 if (id != (__force __be32)nf_ct_get_id(ct)) {
1659 nf_ct_put(ct);
1660 return -ENOENT;
1661 }
1662 }
1663
1664 nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh));
1665 nf_ct_put(ct);
1666
1667 return 0;
1668 }
1669
static int ctnetlink_get_conntrack(struct sk_buff *skb,
1671 const struct nfnl_info *info,
1672 const struct nlattr * const cda[])
1673 {
1674 u_int8_t u3 = info->nfmsg->nfgen_family;
1675 struct nf_conntrack_tuple_hash *h;
1676 struct nf_conntrack_tuple tuple;
1677 struct nf_conntrack_zone zone;
1678 struct sk_buff *skb2;
1679 struct nf_conn *ct;
1680 int err;
1681
1682 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
1683 struct netlink_dump_control c = {
1684 .start = ctnetlink_start,
1685 .dump = ctnetlink_dump_table,
1686 .done = ctnetlink_done,
1687 .data = (void *)cda,
1688 };
1689
1690 return netlink_dump_start(info->sk, skb, info->nlh, &c);
1691 }
1692
1693 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1694 if (err < 0)
1695 return err;
1696
1697 if (cda[CTA_TUPLE_ORIG])
1698 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1699 u3, &zone);
1700 else if (cda[CTA_TUPLE_REPLY])
1701 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1702 u3, &zone);
1703 else
1704 return -EINVAL;
1705
1706 if (err < 0)
1707 return err;
1708
1709 h = nf_conntrack_find_get(info->net, &zone, &tuple);
1710 if (!h)
1711 return -ENOENT;
1712
1713 ct = nf_ct_tuplehash_to_ctrack(h);
1714
1715 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1716 if (!skb2) {
1717 nf_ct_put(ct);
1718 return -ENOMEM;
1719 }
1720
1721 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid,
1722 info->nlh->nlmsg_seq,
1723 NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct,
1724 true, 0);
1725 nf_ct_put(ct);
1726 if (err <= 0) {
1727 kfree_skb(skb2);
1728 return -ENOMEM;
1729 }
1730
1731 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
1732 }
1733
static int ctnetlink_done_list(struct netlink_callback *cb)
1735 {
1736 struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
1737
1738 if (ctx->last)
1739 nf_ct_put(ctx->last);
1740
1741 return 0;
1742 }
1743
1744 #ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
1746 struct netlink_callback *cb,
1747 struct nf_conn *ct,
1748 bool dying)
1749 {
1750 struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
1751 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1752 u8 l3proto = nfmsg->nfgen_family;
1753 int res;
1754
1755 if (l3proto && nf_ct_l3num(ct) != l3proto)
1756 return 0;
1757
1758 if (ctx->last) {
1759 if (ct != ctx->last)
1760 return 0;
1761
1762 ctx->last = NULL;
1763 }
1764
1765 /* We can't dump extension info for the unconfirmed
1766 * list because unconfirmed conntracks can have
1767 * ct->ext reallocated (and thus freed).
1768 *
1769 * In the dying list case ct->ext can't be free'd
1770 * until after we drop pcpu->lock.
1771 */
1772 res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1773 cb->nlh->nlmsg_seq,
1774 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1775 ct, dying, 0);
1776 if (res < 0) {
1777 if (!refcount_inc_not_zero(&ct->ct_general.use))
1778 return 0;
1779
1780 ctx->last = ct;
1781 }
1782
1783 return res;
1784 }
1785 #endif
1786
1787 static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1789 {
1790 return 0;
1791 }
1792
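/* Dump entries on the ecache dying list, i.e. conntracks that are being
 * destroyed and still have events pending delivery.
 */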
1793 static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1795 {
1796 struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
1797 struct nf_conn *last = ctx->last;
1798 #ifdef CONFIG_NF_CONNTRACK_EVENTS
1799 const struct net *net = sock_net(skb->sk);
1800 struct nf_conntrack_net_ecache *ecache_net;
1801 struct nf_conntrack_tuple_hash *h;
1802 struct hlist_nulls_node *n;
1803 #endif
1804
1805 if (ctx->done)
1806 return 0;
1807
1808 ctx->last = NULL;
1809
1810 #ifdef CONFIG_NF_CONNTRACK_EVENTS
1811 ecache_net = nf_conn_pernet_ecache(net);
1812 spin_lock_bh(&ecache_net->dying_lock);
1813
1814 hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
1815 struct nf_conn *ct;
1816 int res;
1817
1818 ct = nf_ct_tuplehash_to_ctrack(h);
1819 if (last && last != ct)
1820 continue;
1821
1822 res = ctnetlink_dump_one_entry(skb, cb, ct, true);
1823 if (res < 0) {
1824 spin_unlock_bh(&ecache_net->dying_lock);
1825 nf_ct_put(last);
1826 return skb->len;
1827 }
1828
1829 nf_ct_put(last);
1830 last = NULL;
1831 }
1832
1833 spin_unlock_bh(&ecache_net->dying_lock);
1834 #endif
1835 ctx->done = true;
1836 nf_ct_put(last);
1837
1838 return skb->len;
1839 }
1840
static int ctnetlink_get_ct_dying(struct sk_buff *skb,
1842 const struct nfnl_info *info,
1843 const struct nlattr * const cda[])
1844 {
1845 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
1846 struct netlink_dump_control c = {
1847 .dump = ctnetlink_dump_dying,
1848 .done = ctnetlink_done_list,
1849 };
1850 return netlink_dump_start(info->sk, skb, info->nlh, &c);
1851 }
1852
1853 return -EOPNOTSUPP;
1854 }
1855
static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
1857 const struct nfnl_info *info,
1858 const struct nlattr * const cda[])
1859 {
1860 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
1861 struct netlink_dump_control c = {
1862 .dump = ctnetlink_dump_unconfirmed,
1863 .done = ctnetlink_done_list,
1864 };
1865 return netlink_dump_start(info->sk, skb, info->nlh, &c);
1866 }
1867
1868 return -EOPNOTSUPP;
1869 }
1870
1871 #if IS_ENABLED(CONFIG_NF_NAT)
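/* Hand the CTA_NAT_SRC/DST attribute to the NAT core. If the NAT module
 * (or the per-family helper) is not loaded yet, temporarily drop the RCU
 * read lock and the nfnl mutex to request it and return -EAGAIN so the
 * request is replayed.
 */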
1872 static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
1874 enum nf_nat_manip_type manip,
1875 const struct nlattr *attr)
1876 __must_hold(RCU)
1877 {
1878 const struct nf_nat_hook *nat_hook;
1879 int err;
1880
1881 nat_hook = rcu_dereference(nf_nat_hook);
1882 if (!nat_hook) {
1883 #ifdef CONFIG_MODULES
1884 rcu_read_unlock();
1885 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1886 if (request_module("nf-nat") < 0) {
1887 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1888 rcu_read_lock();
1889 return -EOPNOTSUPP;
1890 }
1891 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1892 rcu_read_lock();
1893 nat_hook = rcu_dereference(nf_nat_hook);
1894 if (nat_hook)
1895 return -EAGAIN;
1896 #endif
1897 return -EOPNOTSUPP;
1898 }
1899
1900 err = nat_hook->parse_nat_setup(ct, manip, attr);
1901 if (err == -EAGAIN) {
1902 #ifdef CONFIG_MODULES
1903 rcu_read_unlock();
1904 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1905 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1906 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1907 rcu_read_lock();
1908 return -EOPNOTSUPP;
1909 }
1910 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1911 rcu_read_lock();
1912 #else
1913 err = -EOPNOTSUPP;
1914 #endif
1915 }
1916 return err;
1917 }
1918 #endif
1919
1920 static int
1921 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1922 {
1923 return nf_ct_change_status_common(ct, ntohl(nla_get_be32(cda[CTA_STATUS])));
1924 }
1925
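/* Apply the CTA_NAT_DST and CTA_NAT_SRC attributes to a freshly created
 * conntrack.  Without CONFIG_NF_NAT this only succeeds when neither
 * attribute is present.
 */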
1926 static int
1927 ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1928 {
1929 #if IS_ENABLED(CONFIG_NF_NAT)
1930 int ret;
1931
1932 if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1933 return 0;
1934
1935 ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1936 cda[CTA_NAT_DST]);
1937 if (ret < 0)
1938 return ret;
1939
1940 return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
1941 cda[CTA_NAT_SRC]);
1942 #else
1943 if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1944 return 0;
1945 return -EOPNOTSUPP;
1946 #endif
1947 }
1948
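/* Change the helper assigned to an existing conntrack.  An empty helper
 * name detaches the current helper, re-assigning the same helper only
 * refreshes its private data, and switching to a different helper is
 * rejected with -EBUSY (or -EOPNOTSUPP if the requested helper cannot
 * be found or attached).
 */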
1949 static int ctnetlink_change_helper(struct nf_conn *ct,
1950 const struct nlattr * const cda[])
1951 {
1952 struct nf_conntrack_helper *helper;
1953 struct nf_conn_help *help = nfct_help(ct);
1954 char *helpname = NULL;
1955 struct nlattr *helpinfo = NULL;
1956 int err;
1957
1958 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1959 if (err < 0)
1960 return err;
1961
1962 /* don't change helper of sibling connections */
1963 if (ct->master) {
1964 /* If we try to change the helper to the same thing twice,
1965 * treat the second attempt as a no-op instead of returning
1966 * an error.
1967 */
1968 err = -EBUSY;
1969 if (help) {
1970 rcu_read_lock();
1971 helper = rcu_dereference(help->helper);
1972 if (helper && !strcmp(helper->name, helpname))
1973 err = 0;
1974 rcu_read_unlock();
1975 }
1976
1977 return err;
1978 }
1979
1980 if (!strcmp(helpname, "")) {
1981 if (help && help->helper) {
1982 /* we had a helper before ... */
1983 nf_ct_remove_expectations(ct);
1984 RCU_INIT_POINTER(help->helper, NULL);
1985 }
1986
1987 return 0;
1988 }
1989
1990 rcu_read_lock();
1991 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1992 nf_ct_protonum(ct));
1993 if (helper == NULL) {
1994 rcu_read_unlock();
1995 return -EOPNOTSUPP;
1996 }
1997
1998 if (help) {
1999 if (rcu_access_pointer(help->helper) == helper) {
2000 /* update private helper data if allowed. */
2001 if (helper->from_nlattr)
2002 helper->from_nlattr(helpinfo, ct);
2003 err = 0;
2004 } else
2005 err = -EBUSY;
2006 } else {
2007 /* we cannot set a helper for an existing conntrack */
2008 err = -EOPNOTSUPP;
2009 }
2010
2011 rcu_read_unlock();
2012 return err;
2013 }
2014
2015 static int ctnetlink_change_timeout(struct nf_conn *ct,
2016 const struct nlattr * const cda[])
2017 {
2018 return __nf_ct_change_timeout(ct, (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ);
2019 }
2020
2021 #if defined(CONFIG_NF_CONNTRACK_MARK)
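/* Update ct->mark from CTA_MARK.  If CTA_MARK_MASK is given, the bits it
 * covers are cleared from the existing mark before the new value is
 * XORed in, so only the masked bits change for a properly masked value;
 * the store is skipped when the result equals the current mark.
 */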
2022 static void ctnetlink_change_mark(struct nf_conn *ct,
2023 const struct nlattr * const cda[])
2024 {
2025 u32 mark, newmark, mask = 0;
2026
2027 if (cda[CTA_MARK_MASK])
2028 mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
2029
2030 mark = ntohl(nla_get_be32(cda[CTA_MARK]));
2031 newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
2032 if (newmark != READ_ONCE(ct->mark))
2033 WRITE_ONCE(ct->mark, newmark);
2034 }
2035 #endif
2036
2037 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
2038 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
2039 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
2040 };
2041
2042 static int ctnetlink_change_protoinfo(struct nf_conn *ct,
2043 const struct nlattr * const cda[])
2044 {
2045 const struct nlattr *attr = cda[CTA_PROTOINFO];
2046 const struct nf_conntrack_l4proto *l4proto;
2047 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
2048 int err = 0;
2049
2050 err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
2051 protoinfo_policy, NULL);
2052 if (err < 0)
2053 return err;
2054
2055 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
2056 if (l4proto->from_nlattr)
2057 err = l4proto->from_nlattr(tb, ct);
2058
2059 return err;
2060 }
2061
2062 static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
2063 [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
2064 [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
2065 [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
2066 };
2067
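/* Parse one CTA_SEQ_ADJ_ORIG/REPLY nest into a struct nf_ct_seqadj.
 * All three attributes (correction position and the before/after
 * offsets) must be present, otherwise -EINVAL is returned.
 */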
2068 static int change_seq_adj(struct nf_ct_seqadj *seq,
2069 const struct nlattr * const attr)
2070 {
2071 int err;
2072 struct nlattr *cda[CTA_SEQADJ_MAX+1];
2073
2074 err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
2075 seqadj_policy, NULL);
2076 if (err < 0)
2077 return err;
2078
2079 if (!cda[CTA_SEQADJ_CORRECTION_POS])
2080 return -EINVAL;
2081
2082 seq->correction_pos =
2083 ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
2084
2085 if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
2086 return -EINVAL;
2087
2088 seq->offset_before =
2089 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
2090
2091 if (!cda[CTA_SEQADJ_OFFSET_AFTER])
2092 return -EINVAL;
2093
2094 seq->offset_after =
2095 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
2096
2097 return 0;
2098 }
2099
2100 static int
2101 ctnetlink_change_seq_adj(struct nf_conn *ct,
2102 const struct nlattr * const cda[])
2103 {
2104 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
2105 int ret = 0;
2106
2107 if (!seqadj)
2108 return 0;
2109
2110 spin_lock_bh(&ct->lock);
2111 if (cda[CTA_SEQ_ADJ_ORIG]) {
2112 ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
2113 cda[CTA_SEQ_ADJ_ORIG]);
2114 if (ret < 0)
2115 goto err;
2116
2117 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
2118 }
2119
2120 if (cda[CTA_SEQ_ADJ_REPLY]) {
2121 ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
2122 cda[CTA_SEQ_ADJ_REPLY]);
2123 if (ret < 0)
2124 goto err;
2125
2126 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
2127 }
2128
2129 spin_unlock_bh(&ct->lock);
2130 return 0;
2131 err:
2132 spin_unlock_bh(&ct->lock);
2133 return ret;
2134 }
2135
2136 static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
2137 [CTA_SYNPROXY_ISN] = { .type = NLA_U32 },
2138 [CTA_SYNPROXY_ITS] = { .type = NLA_U32 },
2139 [CTA_SYNPROXY_TSOFF] = { .type = NLA_U32 },
2140 };
2141
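/* Update the SYNPROXY state (ISN, initial timestamp and timestamp
 * offset) of a conntrack from the CTA_SYNPROXY nest.  A conntrack
 * without the synproxy extension silently ignores the attribute.
 */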
2142 static int ctnetlink_change_synproxy(struct nf_conn *ct,
2143 const struct nlattr * const cda[])
2144 {
2145 struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
2146 struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
2147 int err;
2148
2149 if (!synproxy)
2150 return 0;
2151
2152 err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
2153 cda[CTA_SYNPROXY], synproxy_policy,
2154 NULL);
2155 if (err < 0)
2156 return err;
2157
2158 if (!tb[CTA_SYNPROXY_ISN] ||
2159 !tb[CTA_SYNPROXY_ITS] ||
2160 !tb[CTA_SYNPROXY_TSOFF])
2161 return -EINVAL;
2162
2163 synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
2164 synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
2165 synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
2166
2167 return 0;
2168 }
2169
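/* Copy CTA_LABELS (optionally filtered by CTA_LABELS_MASK) into the
 * conntrack's label extension.  The label blob must be a multiple of
 * 32 bits and the mask, when present, must be non-empty and of the
 * same length.
 */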
2170 static int
2171 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
2172 {
2173 #ifdef CONFIG_NF_CONNTRACK_LABELS
2174 size_t len = nla_len(cda[CTA_LABELS]);
2175 const void *mask = cda[CTA_LABELS_MASK];
2176
2177 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
2178 return -EINVAL;
2179
2180 if (mask) {
2181 if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
2182 nla_len(cda[CTA_LABELS_MASK]) != len)
2183 return -EINVAL;
2184 mask = nla_data(cda[CTA_LABELS_MASK]);
2185 }
2186
2187 len /= sizeof(u32);
2188
2189 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
2190 #else
2191 return -EOPNOTSUPP;
2192 #endif
2193 }
2194
2195 static int
2196 ctnetlink_change_conntrack(struct nf_conn *ct,
2197 const struct nlattr * const cda[])
2198 {
2199 int err;
2200
2201 /* only allow NAT changes and master assignment for new conntracks */
2202 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
2203 return -EOPNOTSUPP;
2204
2205 if (cda[CTA_HELP]) {
2206 err = ctnetlink_change_helper(ct, cda);
2207 if (err < 0)
2208 return err;
2209 }
2210
2211 if (cda[CTA_TIMEOUT]) {
2212 err = ctnetlink_change_timeout(ct, cda);
2213 if (err < 0)
2214 return err;
2215 }
2216
2217 if (cda[CTA_STATUS]) {
2218 err = ctnetlink_change_status(ct, cda);
2219 if (err < 0)
2220 return err;
2221 }
2222
2223 if (cda[CTA_PROTOINFO]) {
2224 err = ctnetlink_change_protoinfo(ct, cda);
2225 if (err < 0)
2226 return err;
2227 }
2228
2229 #if defined(CONFIG_NF_CONNTRACK_MARK)
2230 if (cda[CTA_MARK])
2231 ctnetlink_change_mark(ct, cda);
2232 #endif
2233
2234 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2235 err = ctnetlink_change_seq_adj(ct, cda);
2236 if (err < 0)
2237 return err;
2238 }
2239
2240 if (cda[CTA_SYNPROXY]) {
2241 err = ctnetlink_change_synproxy(ct, cda);
2242 if (err < 0)
2243 return err;
2244 }
2245
2246 if (cda[CTA_LABELS]) {
2247 err = ctnetlink_attach_labels(ct, cda);
2248 if (err < 0)
2249 return err;
2250 }
2251
2252 return 0;
2253 }
2254
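/* Allocate and insert a new conntrack described entirely by netlink
 * attributes: CTA_TIMEOUT is mandatory, an optional helper is looked up
 * (loading its module if needed), NAT setup and the usual extensions
 * are attached, and CTA_TUPLE_MASTER turns the entry into a confirmed
 * expectation child before it is inserted into the hash table.
 */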
2255 static struct nf_conn *
2256 ctnetlink_create_conntrack(struct net *net,
2257 const struct nf_conntrack_zone *zone,
2258 const struct nlattr * const cda[],
2259 struct nf_conntrack_tuple *otuple,
2260 struct nf_conntrack_tuple *rtuple,
2261 u8 u3)
2262 {
2263 struct nf_conn *ct;
2264 int err = -EINVAL;
2265 struct nf_conntrack_helper *helper;
2266 struct nf_conn_tstamp *tstamp;
2267 u64 timeout;
2268
2269 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
2270 if (IS_ERR(ct))
2271 return ERR_PTR(-ENOMEM);
2272
2273 if (!cda[CTA_TIMEOUT])
2274 goto err1;
2275
2276 rcu_read_lock();
2277 if (cda[CTA_HELP]) {
2278 char *helpname = NULL;
2279 struct nlattr *helpinfo = NULL;
2280
2281 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
2282 if (err < 0)
2283 goto err2;
2284
2285 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2286 nf_ct_protonum(ct));
2287 if (helper == NULL) {
2288 rcu_read_unlock();
2289 #ifdef CONFIG_MODULES
2290 if (request_module("nfct-helper-%s", helpname) < 0) {
2291 err = -EOPNOTSUPP;
2292 goto err1;
2293 }
2294
2295 rcu_read_lock();
2296 helper = __nf_conntrack_helper_find(helpname,
2297 nf_ct_l3num(ct),
2298 nf_ct_protonum(ct));
2299 if (helper) {
2300 err = -EAGAIN;
2301 goto err2;
2302 }
2303 rcu_read_unlock();
2304 #endif
2305 err = -EOPNOTSUPP;
2306 goto err1;
2307 } else {
2308 struct nf_conn_help *help;
2309
2310 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
2311 if (help == NULL) {
2312 err = -ENOMEM;
2313 goto err2;
2314 }
2315 /* set private helper data if allowed. */
2316 if (helper->from_nlattr)
2317 helper->from_nlattr(helpinfo, ct);
2318
2319 /* disable helper auto-assignment for this entry */
2320 ct->status |= IPS_HELPER;
2321 RCU_INIT_POINTER(help->helper, helper);
2322 }
2323 }
2324
2325 err = ctnetlink_setup_nat(ct, cda);
2326 if (err < 0)
2327 goto err2;
2328
2329 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
2330 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
2331 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
2332 nf_ct_labels_ext_add(ct);
2333 nfct_seqadj_ext_add(ct);
2334 nfct_synproxy_ext_add(ct);
2335
2336 /* we must add conntrack extensions before confirmation. */
2337 ct->status |= IPS_CONFIRMED;
2338
2339 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
2340 __nf_ct_set_timeout(ct, timeout);
2341
2342 if (cda[CTA_STATUS]) {
2343 err = ctnetlink_change_status(ct, cda);
2344 if (err < 0)
2345 goto err2;
2346 }
2347
2348 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2349 err = ctnetlink_change_seq_adj(ct, cda);
2350 if (err < 0)
2351 goto err2;
2352 }
2353
2354 memset(&ct->proto, 0, sizeof(ct->proto));
2355 if (cda[CTA_PROTOINFO]) {
2356 err = ctnetlink_change_protoinfo(ct, cda);
2357 if (err < 0)
2358 goto err2;
2359 }
2360
2361 if (cda[CTA_SYNPROXY]) {
2362 err = ctnetlink_change_synproxy(ct, cda);
2363 if (err < 0)
2364 goto err2;
2365 }
2366
2367 #if defined(CONFIG_NF_CONNTRACK_MARK)
2368 if (cda[CTA_MARK])
2369 ctnetlink_change_mark(ct, cda);
2370 #endif
2371
2372 /* setup master conntrack: this is a confirmed expectation */
2373 if (cda[CTA_TUPLE_MASTER]) {
2374 struct nf_conntrack_tuple master;
2375 struct nf_conntrack_tuple_hash *master_h;
2376 struct nf_conn *master_ct;
2377
2378 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
2379 u3, NULL);
2380 if (err < 0)
2381 goto err2;
2382
2383 master_h = nf_conntrack_find_get(net, zone, &master);
2384 if (master_h == NULL) {
2385 err = -ENOENT;
2386 goto err2;
2387 }
2388 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
2389 __set_bit(IPS_EXPECTED_BIT, &ct->status);
2390 ct->master = master_ct;
2391 }
2392 tstamp = nf_conn_tstamp_find(ct);
2393 if (tstamp)
2394 tstamp->start = ktime_get_real_ns();
2395
2396 err = nf_conntrack_hash_check_insert(ct);
2397 if (err < 0)
2398 goto err3;
2399
2400 rcu_read_unlock();
2401
2402 return ct;
2403
2404 err3:
2405 if (ct->master)
2406 nf_ct_put(ct->master);
2407 err2:
2408 rcu_read_unlock();
2409 err1:
2410 nf_conntrack_free(ct);
2411 return ERR_PTR(err);
2412 }
2413
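/* IPCTNL_MSG_CT_NEW handler.  If no matching entry exists and
 * NLM_F_CREATE is set, a new conntrack is created and an IPCT_NEW (or
 * IPCT_RELATED) event is reported; an existing entry is updated in
 * place unless NLM_F_EXCL is set, in which case -EEXIST is returned.
 */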
2414 static int ctnetlink_new_conntrack(struct sk_buff *skb,
2415 const struct nfnl_info *info,
2416 const struct nlattr * const cda[])
2417 {
2418 struct nf_conntrack_tuple otuple, rtuple;
2419 struct nf_conntrack_tuple_hash *h = NULL;
2420 u_int8_t u3 = info->nfmsg->nfgen_family;
2421 struct nf_conntrack_zone zone;
2422 struct nf_conn *ct;
2423 int err;
2424
2425 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
2426 if (err < 0)
2427 return err;
2428
2429 if (cda[CTA_TUPLE_ORIG]) {
2430 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
2431 u3, &zone);
2432 if (err < 0)
2433 return err;
2434 }
2435
2436 if (cda[CTA_TUPLE_REPLY]) {
2437 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
2438 u3, &zone);
2439 if (err < 0)
2440 return err;
2441 }
2442
2443 if (cda[CTA_TUPLE_ORIG])
2444 h = nf_conntrack_find_get(info->net, &zone, &otuple);
2445 else if (cda[CTA_TUPLE_REPLY])
2446 h = nf_conntrack_find_get(info->net, &zone, &rtuple);
2447
2448 if (h == NULL) {
2449 err = -ENOENT;
2450 if (info->nlh->nlmsg_flags & NLM_F_CREATE) {
2451 enum ip_conntrack_events events;
2452
2453 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
2454 return -EINVAL;
2455 if (otuple.dst.protonum != rtuple.dst.protonum)
2456 return -EINVAL;
2457
2458 ct = ctnetlink_create_conntrack(info->net, &zone, cda,
2459 &otuple, &rtuple, u3);
2460 if (IS_ERR(ct))
2461 return PTR_ERR(ct);
2462
2463 err = 0;
2464 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
2465 events = 1 << IPCT_RELATED;
2466 else
2467 events = 1 << IPCT_NEW;
2468
2469 if (cda[CTA_LABELS] &&
2470 ctnetlink_attach_labels(ct, cda) == 0)
2471 events |= (1 << IPCT_LABEL);
2472
2473 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2474 (1 << IPCT_ASSURED) |
2475 (1 << IPCT_HELPER) |
2476 (1 << IPCT_PROTOINFO) |
2477 (1 << IPCT_SEQADJ) |
2478 (1 << IPCT_MARK) |
2479 (1 << IPCT_SYNPROXY) |
2480 events,
2481 ct, NETLINK_CB(skb).portid,
2482 nlmsg_report(info->nlh));
2483 nf_ct_put(ct);
2484 }
2485
2486 return err;
2487 }
2488 /* implicit 'else' */
2489
2490 err = -EEXIST;
2491 ct = nf_ct_tuplehash_to_ctrack(h);
2492 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) {
2493 err = ctnetlink_change_conntrack(ct, cda);
2494 if (err == 0) {
2495 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2496 (1 << IPCT_ASSURED) |
2497 (1 << IPCT_HELPER) |
2498 (1 << IPCT_LABEL) |
2499 (1 << IPCT_PROTOINFO) |
2500 (1 << IPCT_SEQADJ) |
2501 (1 << IPCT_MARK) |
2502 (1 << IPCT_SYNPROXY),
2503 ct, NETLINK_CB(skb).portid,
2504 nlmsg_report(info->nlh));
2505 }
2506 }
2507
2508 nf_ct_put(ct);
2509 return err;
2510 }
2511
2512 static int
2513 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2514 __u16 cpu, const struct ip_conntrack_stat *st)
2515 {
2516 struct nlmsghdr *nlh;
2517 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2518
2519 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
2520 IPCTNL_MSG_CT_GET_STATS_CPU);
2521 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
2522 NFNETLINK_V0, htons(cpu));
2523 if (!nlh)
2524 goto nlmsg_failure;
2525
2526 if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
2527 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
2528 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
2529 nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
2530 htonl(st->insert_failed)) ||
2531 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
2532 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
2533 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
2534 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
2535 htonl(st->search_restart)) ||
2536 nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE,
2537 htonl(st->clash_resolve)) ||
2538 nla_put_be32(skb, CTA_STATS_CHAIN_TOOLONG,
2539 htonl(st->chaintoolong)))
2540 goto nla_put_failure;
2541
2542 nlmsg_end(skb, nlh);
2543 return skb->len;
2544
2545 nla_put_failure:
2546 nlmsg_failure:
2547 nlmsg_cancel(skb, nlh);
2548 return -1;
2549 }
2550
2551 static int
2552 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2553 {
2554 int cpu;
2555 struct net *net = sock_net(skb->sk);
2556
2557 if (cb->args[0] == nr_cpu_ids)
2558 return 0;
2559
2560 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2561 const struct ip_conntrack_stat *st;
2562
2563 if (!cpu_possible(cpu))
2564 continue;
2565
2566 st = per_cpu_ptr(net->ct.stat, cpu);
2567 if (ctnetlink_ct_stat_cpu_fill_info(skb,
2568 NETLINK_CB(cb->skb).portid,
2569 cb->nlh->nlmsg_seq,
2570 cpu, st) < 0)
2571 break;
2572 }
2573 cb->args[0] = cpu;
2574
2575 return skb->len;
2576 }
2577
2578 static int ctnetlink_stat_ct_cpu(struct sk_buff *skb,
2579 const struct nfnl_info *info,
2580 const struct nlattr * const cda[])
2581 {
2582 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
2583 struct netlink_dump_control c = {
2584 .dump = ctnetlink_ct_stat_cpu_dump,
2585 };
2586 return netlink_dump_start(info->sk, skb, info->nlh, &c);
2587 }
2588
2589 return 0;
2590 }
2591
2592 static int
2593 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
2594 struct net *net)
2595 {
2596 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2597 unsigned int nr_conntracks;
2598 struct nlmsghdr *nlh;
2599
2600 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS);
2601 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
2602 NFNETLINK_V0, 0);
2603 if (!nlh)
2604 goto nlmsg_failure;
2605
2606 nr_conntracks = nf_conntrack_count(net);
2607 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
2608 goto nla_put_failure;
2609
2610 if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
2611 goto nla_put_failure;
2612
2613 nlmsg_end(skb, nlh);
2614 return skb->len;
2615
2616 nla_put_failure:
2617 nlmsg_failure:
2618 nlmsg_cancel(skb, nlh);
2619 return -1;
2620 }
2621
2622 static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info,
2623 const struct nlattr * const cda[])
2624 {
2625 struct sk_buff *skb2;
2626 int err;
2627
2628 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2629 if (skb2 == NULL)
2630 return -ENOMEM;
2631
2632 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
2633 info->nlh->nlmsg_seq,
2634 NFNL_MSG_TYPE(info->nlh->nlmsg_type),
2635 sock_net(skb->sk));
2636 if (err <= 0) {
2637 kfree_skb(skb2);
2638 return -ENOMEM;
2639 }
2640
2641 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
2642 }
2643
2644 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2645 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2646 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2647 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2648 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2649 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2650 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2651 .len = NF_CT_HELPER_NAME_LEN - 1 },
2652 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2653 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2654 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2655 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2656 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2657 };
2658
2659 static struct nf_conntrack_expect *
2660 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2661 struct nf_conntrack_helper *helper,
2662 struct nf_conntrack_tuple *tuple,
2663 struct nf_conntrack_tuple *mask);
2664
2665 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
2666 static size_t
2667 ctnetlink_glue_build_size(const struct nf_conn *ct)
2668 {
2669 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2670 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2671 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2672 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2673 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2674 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2675 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2676 + nla_total_size(0) /* CTA_PROTOINFO */
2677 + nla_total_size(0) /* CTA_HELP */
2678 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2679 + ctnetlink_secctx_size(ct)
2680 + ctnetlink_acct_size(ct)
2681 + ctnetlink_timestamp_size(ct)
2682 #if IS_ENABLED(CONFIG_NF_NAT)
2683 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2684 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2685 #endif
2686 #ifdef CONFIG_NF_CONNTRACK_MARK
2687 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2688 #endif
2689 #ifdef CONFIG_NF_CONNTRACK_ZONES
2690 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
2691 #endif
2692 + ctnetlink_proto_size(ct)
2693 ;
2694 }
2695
2696 static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
2697 {
2698 const struct nf_conntrack_zone *zone;
2699 struct nlattr *nest_parms;
2700
2701 zone = nf_ct_zone(ct);
2702
2703 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
2704 if (!nest_parms)
2705 goto nla_put_failure;
2706 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2707 goto nla_put_failure;
2708 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2709 NF_CT_ZONE_DIR_ORIG) < 0)
2710 goto nla_put_failure;
2711 nla_nest_end(skb, nest_parms);
2712
2713 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
2714 if (!nest_parms)
2715 goto nla_put_failure;
2716 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2717 goto nla_put_failure;
2718 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2719 NF_CT_ZONE_DIR_REPL) < 0)
2720 goto nla_put_failure;
2721 nla_nest_end(skb, nest_parms);
2722
2723 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
2724 NF_CT_DEFAULT_ZONE_DIR) < 0)
2725 goto nla_put_failure;
2726
2727 if (ctnetlink_dump_id(skb, ct) < 0)
2728 goto nla_put_failure;
2729
2730 if (ctnetlink_dump_status(skb, ct) < 0)
2731 goto nla_put_failure;
2732
2733 if (ctnetlink_dump_timeout(skb, ct, false) < 0)
2734 goto nla_put_failure;
2735
2736 if (ctnetlink_dump_protoinfo(skb, ct, false) < 0)
2737 goto nla_put_failure;
2738
2739 if (ctnetlink_dump_acct(skb, ct, IPCTNL_MSG_CT_GET) < 0 ||
2740 ctnetlink_dump_timestamp(skb, ct) < 0)
2741 goto nla_put_failure;
2742
2743 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2744 goto nla_put_failure;
2745
2746 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2747 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2748 goto nla_put_failure;
2749 #endif
2750 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2751 goto nla_put_failure;
2752
2753 if ((ct->status & IPS_SEQ_ADJUST) &&
2754 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2755 goto nla_put_failure;
2756
2757 if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
2758 goto nla_put_failure;
2759
2760 #ifdef CONFIG_NF_CONNTRACK_MARK
2761 if (ctnetlink_dump_mark(skb, ct, true) < 0)
2762 goto nla_put_failure;
2763 #endif
2764 if (ctnetlink_dump_labels(skb, ct) < 0)
2765 goto nla_put_failure;
2766 return 0;
2767
2768 nla_put_failure:
2769 return -ENOSPC;
2770 }
2771
2772 static int
2773 ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
2774 enum ip_conntrack_info ctinfo,
2775 u_int16_t ct_attr, u_int16_t ct_info_attr)
2776 {
2777 struct nlattr *nest_parms;
2778
2779 nest_parms = nla_nest_start(skb, ct_attr);
2780 if (!nest_parms)
2781 goto nla_put_failure;
2782
2783 if (__ctnetlink_glue_build(skb, ct) < 0)
2784 goto nla_put_failure;
2785
2786 nla_nest_end(skb, nest_parms);
2787
2788 if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
2789 goto nla_put_failure;
2790
2791 return 0;
2792
2793 nla_put_failure:
2794 return -ENOSPC;
2795 }
2796
2797 static int
2798 ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
2799 {
2800 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
2801 unsigned long d = ct->status ^ status;
2802
2803 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
2804 /* SEEN_REPLY bit can only be set */
2805 return -EBUSY;
2806
2807 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
2808 /* ASSURED bit can only be set */
2809 return -EBUSY;
2810
2811 /* This check is less strict than ctnetlink_change_status()
2812 * because callers often flip IPS_EXPECTED bits when sending
2813 * an NFQA_CT attribute to the kernel. So ignore the
2814 * unchangeable bits but do not error out. Also user programs
2815 * are allowed to clear the bits that they are allowed to change.
2816 */
2817 __nf_ct_change_status(ct, status, ~status);
2818 return 0;
2819 }
2820
2821 static int
2822 ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2823 {
2824 int err;
2825
2826 if (cda[CTA_TIMEOUT]) {
2827 err = ctnetlink_change_timeout(ct, cda);
2828 if (err < 0)
2829 return err;
2830 }
2831 if (cda[CTA_STATUS]) {
2832 err = ctnetlink_update_status(ct, cda);
2833 if (err < 0)
2834 return err;
2835 }
2836 if (cda[CTA_HELP]) {
2837 err = ctnetlink_change_helper(ct, cda);
2838 if (err < 0)
2839 return err;
2840 }
2841 if (cda[CTA_LABELS]) {
2842 err = ctnetlink_attach_labels(ct, cda);
2843 if (err < 0)
2844 return err;
2845 }
2846 #if defined(CONFIG_NF_CONNTRACK_MARK)
2847 if (cda[CTA_MARK]) {
2848 ctnetlink_change_mark(ct, cda);
2849 }
2850 #endif
2851 return 0;
2852 }
2853
2854 static int
2855 ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
2856 {
2857 struct nlattr *cda[CTA_MAX+1];
2858 int ret;
2859
2860 ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy,
2861 NULL);
2862 if (ret < 0)
2863 return ret;
2864
2865 return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
2866 }
2867
2868 static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
2869 const struct nf_conn *ct,
2870 struct nf_conntrack_tuple *tuple,
2871 struct nf_conntrack_tuple *mask)
2872 {
2873 int err;
2874
2875 err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2876 nf_ct_l3num(ct), NULL);
2877 if (err < 0)
2878 return err;
2879
2880 return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2881 nf_ct_l3num(ct), NULL);
2882 }
2883
2884 static int
2885 ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2886 u32 portid, u32 report)
2887 {
2888 struct nlattr *cda[CTA_EXPECT_MAX+1];
2889 struct nf_conntrack_tuple tuple, mask;
2890 struct nf_conntrack_helper *helper = NULL;
2891 struct nf_conntrack_expect *exp;
2892 int err;
2893
2894 err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr,
2895 exp_nla_policy, NULL);
2896 if (err < 0)
2897 return err;
2898
2899 err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
2900 ct, &tuple, &mask);
2901 if (err < 0)
2902 return err;
2903
2904 if (cda[CTA_EXPECT_HELP_NAME]) {
2905 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2906
2907 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2908 nf_ct_protonum(ct));
2909 if (helper == NULL)
2910 return -EOPNOTSUPP;
2911 }
2912
2913 exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2914 helper, &tuple, &mask);
2915 if (IS_ERR(exp))
2916 return PTR_ERR(exp);
2917
2918 err = nf_ct_expect_related_report(exp, portid, report, 0);
2919 nf_ct_expect_put(exp);
2920 return err;
2921 }
2922
2923 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
2924 enum ip_conntrack_info ctinfo, int diff)
2925 {
2926 if (!(ct->status & IPS_NAT_MASK))
2927 return;
2928
2929 nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
2930 }
2931
2932 static const struct nfnl_ct_hook ctnetlink_glue_hook = {
2933 .build_size = ctnetlink_glue_build_size,
2934 .build = ctnetlink_glue_build,
2935 .parse = ctnetlink_glue_parse,
2936 .attach_expect = ctnetlink_glue_attach_expect,
2937 .seq_adjust = ctnetlink_glue_seqadj,
2938 };
2939 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
2940
2941 /***********************************************************************
2942 * EXPECT
2943 ***********************************************************************/
2944
2945 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2946 const struct nf_conntrack_tuple *tuple,
2947 u32 type)
2948 {
2949 struct nlattr *nest_parms;
2950
2951 nest_parms = nla_nest_start(skb, type);
2952 if (!nest_parms)
2953 goto nla_put_failure;
2954 if (ctnetlink_dump_tuples(skb, tuple) < 0)
2955 goto nla_put_failure;
2956 nla_nest_end(skb, nest_parms);
2957
2958 return 0;
2959
2960 nla_put_failure:
2961 return -1;
2962 }
2963
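/* Dump an expectation mask as a CTA_EXPECT_MASK nest.  The sparse
 * nf_conntrack_tuple_mask is expanded into a full tuple (all remaining
 * fields wildcarded to 0xff) so the ordinary tuple dump helpers can be
 * reused.
 */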
2964 static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
2965 const struct nf_conntrack_tuple *tuple,
2966 const struct nf_conntrack_tuple_mask *mask)
2967 {
2968 const struct nf_conntrack_l4proto *l4proto;
2969 struct nf_conntrack_tuple m;
2970 struct nlattr *nest_parms;
2971 int ret;
2972
2973 memset(&m, 0xFF, sizeof(m));
2974 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2975 m.src.u.all = mask->src.u.all;
2976 m.src.l3num = tuple->src.l3num;
2977 m.dst.protonum = tuple->dst.protonum;
2978
2979 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
2980 if (!nest_parms)
2981 goto nla_put_failure;
2982
2983 rcu_read_lock();
2984 ret = ctnetlink_dump_tuples_ip(skb, &m);
2985 if (ret >= 0) {
2986 l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
2987 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2988 }
2989 rcu_read_unlock();
2990
2991 if (unlikely(ret < 0))
2992 goto nla_put_failure;
2993
2994 nla_nest_end(skb, nest_parms);
2995
2996 return 0;
2997
2998 nla_put_failure:
2999 return -1;
3000 }
3001
3002 #if IS_ENABLED(CONFIG_NF_NAT)
3003 static const union nf_inet_addr any_addr;
3004 #endif
3005
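/* Derive an opaque, non-reversible expectation ID from kernel pointers
 * and the expected tuple, keyed with a boot-time random siphash seed so
 * that kernel addresses are not leaked to user space.
 */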
3006 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
3007 {
3008 static siphash_aligned_key_t exp_id_seed;
3009 unsigned long a, b, c, d;
3010
3011 net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
3012
3013 a = (unsigned long)exp;
3014 b = (unsigned long)exp->helper;
3015 c = (unsigned long)exp->master;
3016 d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
3017
3018 #ifdef CONFIG_64BIT
3019 return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
3020 #else
3021 return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
3022 #endif
3023 }
3024
3025 static int
3026 ctnetlink_exp_dump_expect(struct sk_buff *skb,
3027 const struct nf_conntrack_expect *exp)
3028 {
3029 struct nf_conn *master = exp->master;
3030 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
3031 struct nf_conn_help *help;
3032 #if IS_ENABLED(CONFIG_NF_NAT)
3033 struct nlattr *nest_parms;
3034 struct nf_conntrack_tuple nat_tuple = {};
3035 #endif
3036 struct nf_ct_helper_expectfn *expfn;
3037
3038 if (timeout < 0)
3039 timeout = 0;
3040
3041 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
3042 goto nla_put_failure;
3043 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
3044 goto nla_put_failure;
3045 if (ctnetlink_exp_dump_tuple(skb,
3046 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
3047 CTA_EXPECT_MASTER) < 0)
3048 goto nla_put_failure;
3049
3050 #if IS_ENABLED(CONFIG_NF_NAT)
3051 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
3052 exp->saved_proto.all) {
3053 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT);
3054 if (!nest_parms)
3055 goto nla_put_failure;
3056
3057 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
3058 goto nla_put_failure;
3059
3060 nat_tuple.src.l3num = nf_ct_l3num(master);
3061 nat_tuple.src.u3 = exp->saved_addr;
3062 nat_tuple.dst.protonum = nf_ct_protonum(master);
3063 nat_tuple.src.u = exp->saved_proto;
3064
3065 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
3066 CTA_EXPECT_NAT_TUPLE) < 0)
3067 goto nla_put_failure;
3068 nla_nest_end(skb, nest_parms);
3069 }
3070 #endif
3071 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
3072 nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
3073 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
3074 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
3075 goto nla_put_failure;
3076 help = nfct_help(master);
3077 if (help) {
3078 struct nf_conntrack_helper *helper;
3079
3080 helper = rcu_dereference(help->helper);
3081 if (helper &&
3082 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
3083 goto nla_put_failure;
3084 }
3085 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
3086 if (expfn != NULL &&
3087 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
3088 goto nla_put_failure;
3089
3090 return 0;
3091
3092 nla_put_failure:
3093 return -1;
3094 }
3095
3096 static int
3097 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
3098 int event, const struct nf_conntrack_expect *exp)
3099 {
3100 struct nlmsghdr *nlh;
3101 unsigned int flags = portid ? NLM_F_MULTI : 0;
3102
3103 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event);
3104 nlh = nfnl_msg_put(skb, portid, seq, event, flags,
3105 exp->tuple.src.l3num, NFNETLINK_V0, 0);
3106 if (!nlh)
3107 goto nlmsg_failure;
3108
3109 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3110 goto nla_put_failure;
3111
3112 nlmsg_end(skb, nlh);
3113 return skb->len;
3114
3115 nlmsg_failure:
3116 nla_put_failure:
3117 nlmsg_cancel(skb, nlh);
3118 return -1;
3119 }
3120
3121 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3122 static int
3123 ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item)
3124 {
3125 struct nf_conntrack_expect *exp = item->exp;
3126 struct net *net = nf_ct_exp_net(exp);
3127 struct nlmsghdr *nlh;
3128 struct sk_buff *skb;
3129 unsigned int type, group;
3130 int flags = 0;
3131
3132 if (events & (1 << IPEXP_DESTROY)) {
3133 type = IPCTNL_MSG_EXP_DELETE;
3134 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
3135 } else if (events & (1 << IPEXP_NEW)) {
3136 type = IPCTNL_MSG_EXP_NEW;
3137 flags = NLM_F_CREATE|NLM_F_EXCL;
3138 group = NFNLGRP_CONNTRACK_EXP_NEW;
3139 } else
3140 return 0;
3141
3142 if (!item->report && !nfnetlink_has_listeners(net, group))
3143 return 0;
3144
3145 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3146 if (skb == NULL)
3147 goto errout;
3148
3149 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type);
3150 nlh = nfnl_msg_put(skb, item->portid, 0, type, flags,
3151 exp->tuple.src.l3num, NFNETLINK_V0, 0);
3152 if (!nlh)
3153 goto nlmsg_failure;
3154
3155 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3156 goto nla_put_failure;
3157
3158 nlmsg_end(skb, nlh);
3159 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
3160 return 0;
3161
3162 nla_put_failure:
3163 nlmsg_cancel(skb, nlh);
3164 nlmsg_failure:
3165 kfree_skb(skb);
3166 errout:
3167 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
3168 return 0;
3169 }
3170 #endif
3171 static int ctnetlink_exp_done(struct netlink_callback *cb)
3172 {
3173 if (cb->args[1])
3174 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
3175 return 0;
3176 }
3177
3178 static int
3179 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3180 {
3181 struct net *net = sock_net(skb->sk);
3182 struct nf_conntrack_expect *exp, *last;
3183 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
3184 u_int8_t l3proto = nfmsg->nfgen_family;
3185
3186 rcu_read_lock();
3187 last = (struct nf_conntrack_expect *)cb->args[1];
3188 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
3189 restart:
3190 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
3191 hnode) {
3192 if (l3proto && exp->tuple.src.l3num != l3proto)
3193 continue;
3194
3195 if (!net_eq(nf_ct_net(exp->master), net))
3196 continue;
3197
3198 if (cb->args[1]) {
3199 if (exp != last)
3200 continue;
3201 cb->args[1] = 0;
3202 }
3203 if (ctnetlink_exp_fill_info(skb,
3204 NETLINK_CB(cb->skb).portid,
3205 cb->nlh->nlmsg_seq,
3206 IPCTNL_MSG_EXP_NEW,
3207 exp) < 0) {
3208 if (!refcount_inc_not_zero(&exp->use))
3209 continue;
3210 cb->args[1] = (unsigned long)exp;
3211 goto out;
3212 }
3213 }
3214 if (cb->args[1]) {
3215 cb->args[1] = 0;
3216 goto restart;
3217 }
3218 }
3219 out:
3220 rcu_read_unlock();
3221 if (last)
3222 nf_ct_expect_put(last);
3223
3224 return skb->len;
3225 }
3226
3227 static int
3228 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3229 {
3230 struct nf_conntrack_expect *exp, *last;
3231 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
3232 struct nf_conn *ct = cb->data;
3233 struct nf_conn_help *help = nfct_help(ct);
3234 u_int8_t l3proto = nfmsg->nfgen_family;
3235
3236 if (cb->args[0])
3237 return 0;
3238
3239 rcu_read_lock();
3240 last = (struct nf_conntrack_expect *)cb->args[1];
3241 restart:
3242 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
3243 if (l3proto && exp->tuple.src.l3num != l3proto)
3244 continue;
3245 if (cb->args[1]) {
3246 if (exp != last)
3247 continue;
3248 cb->args[1] = 0;
3249 }
3250 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
3251 cb->nlh->nlmsg_seq,
3252 IPCTNL_MSG_EXP_NEW,
3253 exp) < 0) {
3254 if (!refcount_inc_not_zero(&exp->use))
3255 continue;
3256 cb->args[1] = (unsigned long)exp;
3257 goto out;
3258 }
3259 }
3260 if (cb->args[1]) {
3261 cb->args[1] = 0;
3262 goto restart;
3263 }
3264 cb->args[0] = 1;
3265 out:
3266 rcu_read_unlock();
3267 if (last)
3268 nf_ct_expect_put(last);
3269
3270 return skb->len;
3271 }
3272
3273 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
3274 struct sk_buff *skb,
3275 const struct nlmsghdr *nlh,
3276 const struct nlattr * const cda[],
3277 struct netlink_ext_ack *extack)
3278 {
3279 int err;
3280 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3281 u_int8_t u3 = nfmsg->nfgen_family;
3282 struct nf_conntrack_tuple tuple;
3283 struct nf_conntrack_tuple_hash *h;
3284 struct nf_conn *ct;
3285 struct nf_conntrack_zone zone;
3286 struct netlink_dump_control c = {
3287 .dump = ctnetlink_exp_ct_dump_table,
3288 .done = ctnetlink_exp_done,
3289 };
3290
3291 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3292 u3, NULL);
3293 if (err < 0)
3294 return err;
3295
3296 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3297 if (err < 0)
3298 return err;
3299
3300 h = nf_conntrack_find_get(net, &zone, &tuple);
3301 if (!h)
3302 return -ENOENT;
3303
3304 ct = nf_ct_tuplehash_to_ctrack(h);
3305 /* No expectation linked to this connection tracking. */
3306 if (!nfct_help(ct)) {
3307 nf_ct_put(ct);
3308 return 0;
3309 }
3310
3311 c.data = ct;
3312
3313 err = netlink_dump_start(ctnl, skb, nlh, &c);
3314 nf_ct_put(ct);
3315
3316 return err;
3317 }
3318
3319 static int ctnetlink_get_expect(struct sk_buff *skb,
3320 const struct nfnl_info *info,
3321 const struct nlattr * const cda[])
3322 {
3323 u_int8_t u3 = info->nfmsg->nfgen_family;
3324 struct nf_conntrack_tuple tuple;
3325 struct nf_conntrack_expect *exp;
3326 struct nf_conntrack_zone zone;
3327 struct sk_buff *skb2;
3328 int err;
3329
3330 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
3331 if (cda[CTA_EXPECT_MASTER])
3332 return ctnetlink_dump_exp_ct(info->net, info->sk, skb,
3333 info->nlh, cda,
3334 info->extack);
3335 else {
3336 struct netlink_dump_control c = {
3337 .dump = ctnetlink_exp_dump_table,
3338 .done = ctnetlink_exp_done,
3339 };
3340 return netlink_dump_start(info->sk, skb, info->nlh, &c);
3341 }
3342 }
3343
3344 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3345 if (err < 0)
3346 return err;
3347
3348 if (cda[CTA_EXPECT_TUPLE])
3349 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3350 u3, NULL);
3351 else if (cda[CTA_EXPECT_MASTER])
3352 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3353 u3, NULL);
3354 else
3355 return -EINVAL;
3356
3357 if (err < 0)
3358 return err;
3359
3360 exp = nf_ct_expect_find_get(info->net, &zone, &tuple);
3361 if (!exp)
3362 return -ENOENT;
3363
3364 if (cda[CTA_EXPECT_ID]) {
3365 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3366
3367 if (id != nf_expect_get_id(exp)) {
3368 nf_ct_expect_put(exp);
3369 return -ENOENT;
3370 }
3371 }
3372
3373 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3374 if (!skb2) {
3375 nf_ct_expect_put(exp);
3376 return -ENOMEM;
3377 }
3378
3379 rcu_read_lock();
3380 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
3381 info->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW,
3382 exp);
3383 rcu_read_unlock();
3384 nf_ct_expect_put(exp);
3385 if (err <= 0) {
3386 kfree_skb(skb2);
3387 return -ENOMEM;
3388 }
3389
3390 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
3391 }
3392
3393 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
3394 {
3395 struct nf_conntrack_helper *helper;
3396 const struct nf_conn_help *m_help;
3397 const char *name = data;
3398
3399 m_help = nfct_help(exp->master);
3400
3401 helper = rcu_dereference(m_help->helper);
3402 if (!helper)
3403 return false;
3404
3405 return strcmp(helper->name, name) == 0;
3406 }
3407
3408 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
3409 {
3410 return true;
3411 }
3412
3413 static int ctnetlink_del_expect(struct sk_buff *skb,
3414 const struct nfnl_info *info,
3415 const struct nlattr * const cda[])
3416 {
3417 u_int8_t u3 = info->nfmsg->nfgen_family;
3418 struct nf_conntrack_expect *exp;
3419 struct nf_conntrack_tuple tuple;
3420 struct nf_conntrack_zone zone;
3421 int err;
3422
3423 if (cda[CTA_EXPECT_TUPLE]) {
3424 /* delete a single expect by tuple */
3425 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3426 if (err < 0)
3427 return err;
3428
3429 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3430 u3, NULL);
3431 if (err < 0)
3432 return err;
3433
3434 /* bump usage count to 2 */
3435 exp = nf_ct_expect_find_get(info->net, &zone, &tuple);
3436 if (!exp)
3437 return -ENOENT;
3438
3439 if (cda[CTA_EXPECT_ID]) {
3440 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3441
3442 if (id != nf_expect_get_id(exp)) {
3443 nf_ct_expect_put(exp);
3444 return -ENOENT;
3445 }
3446 }
3447
3448 /* after list removal, usage count == 1 */
3449 spin_lock_bh(&nf_conntrack_expect_lock);
3450 if (timer_delete(&exp->timeout)) {
3451 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
3452 nlmsg_report(info->nlh));
3453 nf_ct_expect_put(exp);
3454 }
3455 spin_unlock_bh(&nf_conntrack_expect_lock);
3456 /* have to put what we 'get' above.
3457 * after this line usage count == 0 */
3458 nf_ct_expect_put(exp);
3459 } else if (cda[CTA_EXPECT_HELP_NAME]) {
3460 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3461
3462 nf_ct_expect_iterate_net(info->net, expect_iter_name, name,
3463 NETLINK_CB(skb).portid,
3464 nlmsg_report(info->nlh));
3465 } else {
3466 /* This basically means we have to flush everything */
3467 nf_ct_expect_iterate_net(info->net, expect_iter_all, NULL,
3468 NETLINK_CB(skb).portid,
3469 nlmsg_report(info->nlh));
3470 }
3471
3472 return 0;
3473 }
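
/* Update an existing expectation.  Only CTA_EXPECT_TIMEOUT can be
 * changed; the timer must still be pending, otherwise -ETIME is
 * returned because the expectation is already on its way out.
 */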
3474 static int
3475 ctnetlink_change_expect(struct nf_conntrack_expect *x,
3476 const struct nlattr * const cda[])
3477 {
3478 if (cda[CTA_EXPECT_TIMEOUT]) {
3479 if (!timer_delete(&x->timeout))
3480 return -ETIME;
3481
3482 x->timeout.expires = jiffies +
3483 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
3484 add_timer(&x->timeout);
3485 }
3486 return 0;
3487 }
3488
3489 #if IS_ENABLED(CONFIG_NF_NAT)
3490 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
3491 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
3492 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
3493 };
3494 #endif
3495
3496 static int
3497 ctnetlink_parse_expect_nat(const struct nlattr *attr,
3498 struct nf_conntrack_expect *exp,
3499 u_int8_t u3)
3500 {
3501 #if IS_ENABLED(CONFIG_NF_NAT)
3502 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
3503 struct nf_conntrack_tuple nat_tuple = {};
3504 int err;
3505
3506 err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr,
3507 exp_nat_nla_policy, NULL);
3508 if (err < 0)
3509 return err;
3510
3511 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
3512 return -EINVAL;
3513
3514 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
3515 &nat_tuple, CTA_EXPECT_NAT_TUPLE,
3516 u3, NULL);
3517 if (err < 0)
3518 return err;
3519
3520 exp->saved_addr = nat_tuple.src.u3;
3521 exp->saved_proto = nat_tuple.src.u;
3522 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
3523
3524 return 0;
3525 #else
3526 return -EOPNOTSUPP;
3527 #endif
3528 }
3529
3530 static struct nf_conntrack_expect *
3531 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
3532 struct nf_conntrack_helper *helper,
3533 struct nf_conntrack_tuple *tuple,
3534 struct nf_conntrack_tuple *mask)
3535 {
3536 u_int32_t class = 0;
3537 struct nf_conntrack_expect *exp;
3538 struct nf_conn_help *help;
3539 int err;
3540
3541 help = nfct_help(ct);
3542 if (!help)
3543 return ERR_PTR(-EOPNOTSUPP);
3544
3545 if (cda[CTA_EXPECT_CLASS] && helper) {
3546 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
3547 if (class > helper->expect_class_max)
3548 return ERR_PTR(-EINVAL);
3549 }
3550 exp = nf_ct_expect_alloc(ct);
3551 if (!exp)
3552 return ERR_PTR(-ENOMEM);
3553
3554 if (cda[CTA_EXPECT_FLAGS]) {
3555 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
3556 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
3557 } else {
3558 exp->flags = 0;
3559 }
3560 if (cda[CTA_EXPECT_FN]) {
3561 const char *name = nla_data(cda[CTA_EXPECT_FN]);
3562 struct nf_ct_helper_expectfn *expfn;
3563
3564 expfn = nf_ct_helper_expectfn_find_by_name(name);
3565 if (expfn == NULL) {
3566 err = -EINVAL;
3567 goto err_out;
3568 }
3569 exp->expectfn = expfn->expectfn;
3570 } else
3571 exp->expectfn = NULL;
3572
3573 exp->class = class;
3574 exp->master = ct;
3575 exp->helper = helper;
3576 exp->tuple = *tuple;
3577 exp->mask.src.u3 = mask->src.u3;
3578 exp->mask.src.u.all = mask->src.u.all;
3579
3580 if (cda[CTA_EXPECT_NAT]) {
3581 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
3582 exp, nf_ct_l3num(ct));
3583 if (err < 0)
3584 goto err_out;
3585 }
3586 return exp;
3587 err_out:
3588 nf_ct_expect_put(exp);
3589 return ERR_PTR(err);
3590 }
3591
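/* Create a new expectation from CTA_EXPECT_TUPLE, CTA_EXPECT_MASK and
 * CTA_EXPECT_MASTER.  The master conntrack must already exist, an
 * optional helper is looked up (its module is loaded on demand, with
 * -EAGAIN asking the caller to retry), and the result is registered via
 * nf_ct_expect_related_report().
 */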
3592 static int
3593 ctnetlink_create_expect(struct net *net,
3594 const struct nf_conntrack_zone *zone,
3595 const struct nlattr * const cda[],
3596 u_int8_t u3, u32 portid, int report)
3597 {
3598 struct nf_conntrack_tuple tuple, mask, master_tuple;
3599 struct nf_conntrack_tuple_hash *h = NULL;
3600 struct nf_conntrack_helper *helper = NULL;
3601 struct nf_conntrack_expect *exp;
3602 struct nf_conn *ct;
3603 int err;
3604
3605 /* caller guarantees that those three CTA_EXPECT_* exist */
3606 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3607 u3, NULL);
3608 if (err < 0)
3609 return err;
3610 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
3611 u3, NULL);
3612 if (err < 0)
3613 return err;
3614 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
3615 u3, NULL);
3616 if (err < 0)
3617 return err;
3618
3619 /* Look for master conntrack of this expectation */
3620 h = nf_conntrack_find_get(net, zone, &master_tuple);
3621 if (!h)
3622 return -ENOENT;
3623 ct = nf_ct_tuplehash_to_ctrack(h);
3624
3625 rcu_read_lock();
3626 if (cda[CTA_EXPECT_HELP_NAME]) {
3627 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3628
3629 helper = __nf_conntrack_helper_find(helpname, u3,
3630 nf_ct_protonum(ct));
3631 if (helper == NULL) {
3632 rcu_read_unlock();
3633 #ifdef CONFIG_MODULES
3634 if (request_module("nfct-helper-%s", helpname) < 0) {
3635 err = -EOPNOTSUPP;
3636 goto err_ct;
3637 }
3638 rcu_read_lock();
3639 helper = __nf_conntrack_helper_find(helpname, u3,
3640 nf_ct_protonum(ct));
3641 if (helper) {
3642 err = -EAGAIN;
3643 goto err_rcu;
3644 }
3645 rcu_read_unlock();
3646 #endif
3647 err = -EOPNOTSUPP;
3648 goto err_ct;
3649 }
3650 }
3651
3652 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
3653 if (IS_ERR(exp)) {
3654 err = PTR_ERR(exp);
3655 goto err_rcu;
3656 }
3657
3658 err = nf_ct_expect_related_report(exp, portid, report, 0);
3659 nf_ct_expect_put(exp);
3660 err_rcu:
3661 rcu_read_unlock();
3662 err_ct:
3663 nf_ct_put(ct);
3664 return err;
3665 }
3666
3667 static int ctnetlink_new_expect(struct sk_buff *skb,
3668 const struct nfnl_info *info,
3669 const struct nlattr * const cda[])
3670 {
3671 u_int8_t u3 = info->nfmsg->nfgen_family;
3672 struct nf_conntrack_tuple tuple;
3673 struct nf_conntrack_expect *exp;
3674 struct nf_conntrack_zone zone;
3675 int err;
3676
3677 if (!cda[CTA_EXPECT_TUPLE]
3678 || !cda[CTA_EXPECT_MASK]
3679 || !cda[CTA_EXPECT_MASTER])
3680 return -EINVAL;
3681
3682 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3683 if (err < 0)
3684 return err;
3685
3686 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3687 u3, NULL);
3688 if (err < 0)
3689 return err;
3690
3691 spin_lock_bh(&nf_conntrack_expect_lock);
3692 exp = __nf_ct_expect_find(info->net, &zone, &tuple);
3693 if (!exp) {
3694 spin_unlock_bh(&nf_conntrack_expect_lock);
3695 err = -ENOENT;
3696 if (info->nlh->nlmsg_flags & NLM_F_CREATE) {
3697 err = ctnetlink_create_expect(info->net, &zone, cda, u3,
3698 NETLINK_CB(skb).portid,
3699 nlmsg_report(info->nlh));
3700 }
3701 return err;
3702 }
3703
3704 err = -EEXIST;
3705 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL))
3706 err = ctnetlink_change_expect(exp, cda);
3707 spin_unlock_bh(&nf_conntrack_expect_lock);
3708
3709 return err;
3710 }
3711
3712 static int
3713 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3714 const struct ip_conntrack_stat *st)
3715 {
3716 struct nlmsghdr *nlh;
3717 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3718
3719 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
3720 IPCTNL_MSG_EXP_GET_STATS_CPU);
3721 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
3722 NFNETLINK_V0, htons(cpu));
3723 if (!nlh)
3724 goto nlmsg_failure;
3725
3726 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3727 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3728 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3729 goto nla_put_failure;
3730
3731 nlmsg_end(skb, nlh);
3732 return skb->len;
3733
3734 nla_put_failure:
3735 nlmsg_failure:
3736 nlmsg_cancel(skb, nlh);
3737 return -1;
3738 }
3739
3740 static int
3741 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3742 {
3743 int cpu;
3744 struct net *net = sock_net(skb->sk);
3745
3746 if (cb->args[0] == nr_cpu_ids)
3747 return 0;
3748
3749 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3750 const struct ip_conntrack_stat *st;
3751
3752 if (!cpu_possible(cpu))
3753 continue;
3754
3755 st = per_cpu_ptr(net->ct.stat, cpu);
3756 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3757 cb->nlh->nlmsg_seq,
3758 cpu, st) < 0)
3759 break;
3760 }
3761 cb->args[0] = cpu;
3762
3763 return skb->len;
3764 }
3765
3766 static int ctnetlink_stat_exp_cpu(struct sk_buff *skb,
3767 const struct nfnl_info *info,
3768 const struct nlattr * const cda[])
3769 {
3770 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
3771 struct netlink_dump_control c = {
3772 .dump = ctnetlink_exp_stat_cpu_dump,
3773 };
3774 return netlink_dump_start(info->sk, skb, info->nlh, &c);
3775 }
3776
3777 return 0;
3778 }
3779
3780 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3781 static struct nf_ct_event_notifier ctnl_notifier = {
3782 .ct_event = ctnetlink_conntrack_event,
3783 .exp_event = ctnetlink_expect_event,
3784 };
3785 #endif
3786
static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
	[IPCTNL_MSG_CT_NEW] = {
		.call = ctnetlink_new_conntrack,
		.type = NFNL_CB_MUTEX,
		.attr_count = CTA_MAX,
		.policy = ct_nla_policy
	},
	[IPCTNL_MSG_CT_GET] = {
		.call = ctnetlink_get_conntrack,
		.type = NFNL_CB_MUTEX,
		.attr_count = CTA_MAX,
		.policy = ct_nla_policy
	},
	[IPCTNL_MSG_CT_DELETE] = {
		.call = ctnetlink_del_conntrack,
		.type = NFNL_CB_MUTEX,
		.attr_count = CTA_MAX,
		.policy = ct_nla_policy
	},
	[IPCTNL_MSG_CT_GET_CTRZERO] = {
		.call = ctnetlink_get_conntrack,
		.type = NFNL_CB_MUTEX,
		.attr_count = CTA_MAX,
		.policy = ct_nla_policy
	},
	[IPCTNL_MSG_CT_GET_STATS_CPU] = {
		.call = ctnetlink_stat_ct_cpu,
		.type = NFNL_CB_MUTEX,
	},
	[IPCTNL_MSG_CT_GET_STATS] = {
		.call = ctnetlink_stat_ct,
		.type = NFNL_CB_MUTEX,
	},
	[IPCTNL_MSG_CT_GET_DYING] = {
		.call = ctnetlink_get_ct_dying,
		.type = NFNL_CB_MUTEX,
	},
	[IPCTNL_MSG_CT_GET_UNCONFIRMED] = {
		.call = ctnetlink_get_ct_unconfirmed,
		.type = NFNL_CB_MUTEX,
	},
};

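/* Message handlers for the expectation subsystem (NFNL_SUBSYS_CTNETLINK_EXP). */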
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
	[IPCTNL_MSG_EXP_GET] = {
		.call = ctnetlink_get_expect,
		.type = NFNL_CB_MUTEX,
		.attr_count = CTA_EXPECT_MAX,
		.policy = exp_nla_policy
	},
	[IPCTNL_MSG_EXP_NEW] = {
		.call = ctnetlink_new_expect,
		.type = NFNL_CB_MUTEX,
		.attr_count = CTA_EXPECT_MAX,
		.policy = exp_nla_policy
	},
	[IPCTNL_MSG_EXP_DELETE] = {
		.call = ctnetlink_del_expect,
		.type = NFNL_CB_MUTEX,
		.attr_count = CTA_EXPECT_MAX,
		.policy = exp_nla_policy
	},
	[IPCTNL_MSG_EXP_GET_STATS_CPU] = {
		.call = ctnetlink_stat_exp_cpu,
		.type = NFNL_CB_MUTEX,
	},
};

static const struct nfnetlink_subsystem ctnl_subsys = {
	.name = "conntrack",
	.subsys_id = NFNL_SUBSYS_CTNETLINK,
	.cb_count = IPCTNL_MSG_MAX,
	.cb = ctnl_cb,
};

static const struct nfnetlink_subsystem ctnl_exp_subsys = {
	.name = "conntrack_expect",
	.subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
	.cb_count = IPCTNL_MSG_EXP_MAX,
	.cb = ctnl_exp_cb,
};

MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);

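/* Per-netns setup/teardown: (un)register the conntrack event notifier
 * when event support is compiled in.
 */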
static int __net_init ctnetlink_net_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	nf_conntrack_register_notifier(net, &ctnl_notifier);
#endif
	return 0;
}

static void ctnetlink_net_pre_exit(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	nf_conntrack_unregister_notifier(net);
#endif
}

static struct pernet_operations ctnetlink_net_ops = {
	.init = ctnetlink_net_init,
	.pre_exit = ctnetlink_net_pre_exit,
};

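/* Register both nfnetlink subsystems, the pernet operations and, if
 * enabled, the nf_queue glue hook.
 */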
static int __init ctnetlink_init(void)
{
	int ret;

	NL_ASSERT_CTX_FITS(struct ctnetlink_list_dump_ctx);

	ret = nfnetlink_subsys_register(&ctnl_subsys);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
		goto err_out;
	}

	ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
		goto err_unreg_subsys;
	}

	ret = register_pernet_subsys(&ctnetlink_net_ops);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register pernet operations\n");
		goto err_unreg_exp_subsys;
	}
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
	/* setup interaction between nf_queue and nf_conntrack_netlink. */
	RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
#endif
	return 0;

err_unreg_exp_subsys:
	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
err_unreg_subsys:
	nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
	return ret;
}

static void __exit ctnetlink_exit(void)
{
	unregister_pernet_subsys(&ctnetlink_net_ops);
	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
	nfnetlink_subsys_unregister(&ctnl_subsys);
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
	RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
	synchronize_rcu();
}

module_init(ctnetlink_init);
module_exit(ctnetlink_exit);