1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * x_tables core - Backend for {ip,ip6,arp}_tables
4 *
5 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
6 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
7 *
8 * Based on existing ip_tables code which is
9 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
10 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/socket.h>
16 #include <linux/net.h>
17 #include <linux/proc_fs.h>
18 #include <linux/seq_file.h>
19 #include <linux/string.h>
20 #include <linux/vmalloc.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/audit.h>
25 #include <linux/user_namespace.h>
26 #include <net/net_namespace.h>
27 #include <net/netns/generic.h>
28
29 #include <linux/netfilter/x_tables.h>
30 #include <linux/netfilter_arp.h>
31 #include <linux/netfilter_ipv4/ip_tables.h>
32 #include <linux/netfilter_ipv6/ip6_tables.h>
33 #include <linux/netfilter_arp/arp_tables.h>
34
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
37 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
38
39 #define XT_PCPU_BLOCK_SIZE 4096
40 #define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
41
42 struct xt_template {
43 struct list_head list;
44
45 /* called when table is needed in the given netns */
46 int (*table_init)(struct net *net);
47
48 struct module *me;
49
50 /* A unique name... */
51 char name[XT_TABLE_MAXNAMELEN];
52 };
53
54 static struct list_head xt_templates[NFPROTO_NUMPROTO];
55
56 struct xt_pernet {
57 struct list_head tables[NFPROTO_NUMPROTO];
58 };
59
60 struct compat_delta {
61 unsigned int offset; /* offset in kernel */
62 int delta; /* delta in 32bit user land */
63 };
64
65 struct xt_af {
66 struct mutex mutex;
67 struct list_head match;
68 struct list_head target;
69 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
70 struct mutex compat_mutex;
71 struct compat_delta *compat_tab;
72 unsigned int number; /* number of slots in compat_tab[] */
73 unsigned int cur; /* number of used slots in compat_tab[] */
74 #endif
75 };
76
77 static unsigned int xt_pernet_id __read_mostly;
78 static struct xt_af *xt __read_mostly;
79
80 static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
81 [NFPROTO_UNSPEC] = "x",
82 [NFPROTO_IPV4] = "ip",
83 [NFPROTO_ARP] = "arp",
84 [NFPROTO_BRIDGE] = "eb",
85 [NFPROTO_IPV6] = "ip6",
86 };
87
88 /* Registration hooks for targets. */
89 int xt_register_target(struct xt_target *target)
90 {
91 u_int8_t af = target->family;
92
93 mutex_lock(&xt[af].mutex);
94 list_add(&target->list, &xt[af].target);
95 mutex_unlock(&xt[af].mutex);
96 return 0;
97 }
98 EXPORT_SYMBOL(xt_register_target);
99
100 void
101 xt_unregister_target(struct xt_target *target)
102 {
103 u_int8_t af = target->family;
104
105 mutex_lock(&xt[af].mutex);
106 list_del(&target->list);
107 mutex_unlock(&xt[af].mutex);
108 }
109 EXPORT_SYMBOL(xt_unregister_target);
110
111 int
112 xt_register_targets(struct xt_target *target, unsigned int n)
113 {
114 unsigned int i;
115 int err = 0;
116
117 for (i = 0; i < n; i++) {
118 err = xt_register_target(&target[i]);
119 if (err)
120 goto err;
121 }
122 return err;
123
124 err:
125 if (i > 0)
126 xt_unregister_targets(target, i);
127 return err;
128 }
129 EXPORT_SYMBOL(xt_register_targets);
130
131 void
132 xt_unregister_targets(struct xt_target *target, unsigned int n)
133 {
134 while (n-- > 0)
135 xt_unregister_target(&target[n]);
136 }
137 EXPORT_SYMBOL(xt_unregister_targets);
138
139 int xt_register_match(struct xt_match *match)
140 {
141 u_int8_t af = match->family;
142
143 mutex_lock(&xt[af].mutex);
144 list_add(&match->list, &xt[af].match);
145 mutex_unlock(&xt[af].mutex);
146 return 0;
147 }
148 EXPORT_SYMBOL(xt_register_match);
149
150 void
151 xt_unregister_match(struct xt_match *match)
152 {
153 u_int8_t af = match->family;
154
155 mutex_lock(&xt[af].mutex);
156 list_del(&match->list);
157 mutex_unlock(&xt[af].mutex);
158 }
159 EXPORT_SYMBOL(xt_unregister_match);
160
161 int
162 xt_register_matches(struct xt_match *match, unsigned int n)
163 {
164 unsigned int i;
165 int err = 0;
166
167 for (i = 0; i < n; i++) {
168 err = xt_register_match(&match[i]);
169 if (err)
170 goto err;
171 }
172 return err;
173
174 err:
175 if (i > 0)
176 xt_unregister_matches(match, i);
177 return err;
178 }
179 EXPORT_SYMBOL(xt_register_matches);
180
181 void
182 xt_unregister_matches(struct xt_match *match, unsigned int n)
183 {
184 while (n-- > 0)
185 xt_unregister_match(&match[n]);
186 }
187 EXPORT_SYMBOL(xt_unregister_matches);
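
/*
 * Illustrative usage (not part of this file): extension modules normally
 * register an array of matches and/or targets from module_init and drop
 * them again on exit. A minimal sketch for a hypothetical "foo" match:
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{
 *			.name      = "foo",
 *			.revision  = 0,
 *			.family    = NFPROTO_UNSPEC,
 *			.match     = foo_mt,
 *			.matchsize = sizeof(struct xt_foo_info),
 *			.me        = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */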
188
189
190 /*
191 * These are weird, but module loading must not be done with mutex
192 * held (since they will register), and we have to have a single
193 * function to use.
194 */
195
196 /* Find match, grabs ref. Returns ERR_PTR() on error. */
197 struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
198 {
199 struct xt_match *m;
200 int err = -ENOENT;
201
202 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
203 return ERR_PTR(-EINVAL);
204
205 mutex_lock(&xt[af].mutex);
206 list_for_each_entry(m, &xt[af].match, list) {
207 if (strcmp(m->name, name) == 0) {
208 if (m->revision == revision) {
209 if (try_module_get(m->me)) {
210 mutex_unlock(&xt[af].mutex);
211 return m;
212 }
213 } else
214 err = -EPROTOTYPE; /* Found something. */
215 }
216 }
217 mutex_unlock(&xt[af].mutex);
218
219 if (af != NFPROTO_UNSPEC)
220 /* Try searching again in the family-independent list */
221 return xt_find_match(NFPROTO_UNSPEC, name, revision);
222
223 return ERR_PTR(err);
224 }
225 EXPORT_SYMBOL(xt_find_match);
226
227 struct xt_match *
228 xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
229 {
230 struct xt_match *match;
231
232 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
233 return ERR_PTR(-EINVAL);
234
235 match = xt_find_match(nfproto, name, revision);
236 if (IS_ERR(match)) {
237 request_module("%st_%s", xt_prefix[nfproto], name);
238 match = xt_find_match(nfproto, name, revision);
239 }
240
241 return match;
242 }
243 EXPORT_SYMBOL_GPL(xt_request_find_match);
244
245 /* Find target, grabs ref. Returns ERR_PTR() on error. */
246 static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
247 {
248 struct xt_target *t;
249 int err = -ENOENT;
250
251 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
252 return ERR_PTR(-EINVAL);
253
254 mutex_lock(&xt[af].mutex);
255 list_for_each_entry(t, &xt[af].target, list) {
256 if (strcmp(t->name, name) == 0) {
257 if (t->revision == revision) {
258 if (try_module_get(t->me)) {
259 mutex_unlock(&xt[af].mutex);
260 return t;
261 }
262 } else
263 err = -EPROTOTYPE; /* Found something. */
264 }
265 }
266 mutex_unlock(&xt[af].mutex);
267
268 if (af != NFPROTO_UNSPEC)
269 /* Try searching again in the family-independent list */
270 return xt_find_target(NFPROTO_UNSPEC, name, revision);
271
272 return ERR_PTR(err);
273 }
274
275 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
276 {
277 struct xt_target *target;
278
279 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
280 return ERR_PTR(-EINVAL);
281
282 target = xt_find_target(af, name, revision);
283 if (IS_ERR(target)) {
284 request_module("%st_%s", xt_prefix[af], name);
285 target = xt_find_target(af, name, revision);
286 }
287
288 return target;
289 }
290 EXPORT_SYMBOL_GPL(xt_request_find_target);
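
/*
 * Illustrative usage (not part of this file): table translation code looks
 * extensions up by name and revision; the *_request_* variants additionally
 * try to autoload the backing module (e.g. "ipt_REJECT"). A hedged sketch:
 *
 *	struct xt_target *tg;
 *
 *	tg = xt_request_find_target(NFPROTO_IPV4, "REJECT", 0);
 *	if (IS_ERR(tg))
 *		return PTR_ERR(tg);
 *	...
 *	module_put(tg->me);	// drop the reference taken by the lookup
 */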
291
292
293 static int xt_obj_to_user(u16 __user *psize, u16 size,
294 void __user *pname, const char *name,
295 u8 __user *prev, u8 rev)
296 {
297 if (put_user(size, psize))
298 return -EFAULT;
299 if (copy_to_user(pname, name, strlen(name) + 1))
300 return -EFAULT;
301 if (put_user(rev, prev))
302 return -EFAULT;
303
304 return 0;
305 }
306
307 #define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \
308 xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \
309 U->u.user.name, K->u.kernel.TYPE->name, \
310 &U->u.user.revision, K->u.kernel.TYPE->revision)
311
312 int xt_data_to_user(void __user *dst, const void *src,
313 int usersize, int size, int aligned_size)
314 {
315 usersize = usersize ? : size;
316 if (copy_to_user(dst, src, usersize))
317 return -EFAULT;
318 if (usersize != aligned_size &&
319 clear_user(dst + usersize, aligned_size - usersize))
320 return -EFAULT;
321
322 return 0;
323 }
324 EXPORT_SYMBOL_GPL(xt_data_to_user);
325
326 #define XT_DATA_TO_USER(U, K, TYPE) \
327 xt_data_to_user(U->data, K->data, \
328 K->u.kernel.TYPE->usersize, \
329 K->u.kernel.TYPE->TYPE##size, \
330 XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
331
332 int xt_match_to_user(const struct xt_entry_match *m,
333 struct xt_entry_match __user *u)
334 {
335 return XT_OBJ_TO_USER(u, m, match, 0) ||
336 XT_DATA_TO_USER(u, m, match);
337 }
338 EXPORT_SYMBOL_GPL(xt_match_to_user);
339
340 int xt_target_to_user(const struct xt_entry_target *t,
341 struct xt_entry_target __user *u)
342 {
343 return XT_OBJ_TO_USER(u, t, target, 0) ||
344 XT_DATA_TO_USER(u, t, target);
345 }
346 EXPORT_SYMBOL_GPL(xt_target_to_user);
347
348 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
349 {
350 const struct xt_match *m;
351 int have_rev = 0;
352
353 mutex_lock(&xt[af].mutex);
354 list_for_each_entry(m, &xt[af].match, list) {
355 if (strcmp(m->name, name) == 0) {
356 if (m->revision > *bestp)
357 *bestp = m->revision;
358 if (m->revision == revision)
359 have_rev = 1;
360 }
361 }
362 mutex_unlock(&xt[af].mutex);
363
364 if (af != NFPROTO_UNSPEC && !have_rev)
365 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
366
367 return have_rev;
368 }
369
370 static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
371 {
372 const struct xt_target *t;
373 int have_rev = 0;
374
375 mutex_lock(&xt[af].mutex);
376 list_for_each_entry(t, &xt[af].target, list) {
377 if (strcmp(t->name, name) == 0) {
378 if (t->revision > *bestp)
379 *bestp = t->revision;
380 if (t->revision == revision)
381 have_rev = 1;
382 }
383 }
384 mutex_unlock(&xt[af].mutex);
385
386 if (af != NFPROTO_UNSPEC && !have_rev)
387 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
388
389 return have_rev;
390 }
391
392 /* Returns true or false (if no such extension at all) */
393 int xt_find_revision(u8 af, const char *name, u8 revision, int target,
394 int *err)
395 {
396 int have_rev, best = -1;
397
398 if (target == 1)
399 have_rev = target_revfn(af, name, revision, &best);
400 else
401 have_rev = match_revfn(af, name, revision, &best);
402
403 /* Nothing at all? Return 0 to try loading module. */
404 if (best == -1) {
405 *err = -ENOENT;
406 return 0;
407 }
408
409 *err = best;
410 if (!have_rev)
411 *err = -EPROTONOSUPPORT;
412 return 1;
413 }
414 EXPORT_SYMBOL_GPL(xt_find_revision);
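
/*
 * Illustrative usage (not part of this file): this is what backs the
 * revision-probing getsockopts (e.g. IPT_SO_GET_REVISION_MATCH) that
 * iptables uses to ask which revision of an extension the kernel supports.
 * A hedged sketch of a caller, probing a match (target == 0):
 *
 *	int err;
 *
 *	if (!xt_find_revision(NFPROTO_IPV4, "conntrack", rev, 0, &err))
 *		;	// nothing registered under that name, may try request_module()
 *	else if (err < 0)
 *		;	// name exists but not in this revision (-EPROTONOSUPPORT)
 *	else
 *		;	// err is the highest revision currently registered
 */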
415
416 static char *
417 textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
418 {
419 static const char *const inetbr_names[] = {
420 "PREROUTING", "INPUT", "FORWARD",
421 "OUTPUT", "POSTROUTING", "BROUTING",
422 };
423 static const char *const arp_names[] = {
424 "INPUT", "FORWARD", "OUTPUT",
425 };
426 const char *const *names;
427 unsigned int i, max;
428 char *p = buf;
429 bool np = false;
430 int res;
431
432 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
433 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
434 ARRAY_SIZE(inetbr_names);
435 *p = '\0';
436 for (i = 0; i < max; ++i) {
437 if (!(mask & (1 << i)))
438 continue;
439 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
440 if (res > 0) {
441 size -= res;
442 p += res;
443 }
444 np = true;
445 }
446
447 return buf;
448 }
449
450 /**
451 * xt_check_proc_name - check that name is suitable for /proc file creation
452 *
453 * @name: file name candidate
454 * @size: length of buffer
455 *
456 * Some x_tables modules wish to create a file in /proc.
457 * This function makes sure that the name is suitable for this
458 * purpose: it checks that the name is NUL terminated and isn't a
459 * 'special' name, like "..".
460 *
461 * Returns a negative number on error or 0 if the name is usable.
462 */
463 int xt_check_proc_name(const char *name, unsigned int size)
464 {
465 if (name[0] == '\0')
466 return -EINVAL;
467
468 if (strnlen(name, size) == size)
469 return -ENAMETOOLONG;
470
471 if (strcmp(name, ".") == 0 ||
472 strcmp(name, "..") == 0 ||
473 strchr(name, '/'))
474 return -EINVAL;
475
476 return 0;
477 }
478 EXPORT_SYMBOL(xt_check_proc_name);
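
/*
 * Illustrative usage (not part of this file): extensions that create a
 * /proc entry from a user-supplied name (xt_recent/xt_hashlimit style
 * modules) are expected to validate it in their checkentry hook, roughly:
 *
 *	ret = xt_check_proc_name(info->name, sizeof(info->name));
 *	if (ret)
 *		return ret;
 */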
479
480 static int xt_check_match_common(struct xt_mtchk_param *par,
481 unsigned int size, u16 proto, bool inv_proto)
482 {
483 if (XT_ALIGN(par->match->matchsize) != size &&
484 par->match->matchsize != -1) {
485 /*
486 * ebt_among is exempt from centralized matchsize checking
487 * because it uses a dynamic-size data set.
488 */
489 pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
490 xt_prefix[par->family], par->match->name,
491 par->match->revision,
492 XT_ALIGN(par->match->matchsize), size);
493 return -EINVAL;
494 }
495 if (par->match->table != NULL &&
496 strcmp(par->match->table, par->table) != 0) {
497 pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
498 xt_prefix[par->family], par->match->name,
499 par->match->table, par->table);
500 return -EINVAL;
501 }
502
503 /* NFPROTO_UNSPEC implies NF_INET_* hooks which do not overlap with
504 * NF_ARP_IN,OUT,FORWARD, allow explicit extensions with NFPROTO_ARP
505 * support.
506 */
507 if (par->family == NFPROTO_ARP &&
508 par->match->family != NFPROTO_ARP) {
509 pr_info_ratelimited("%s_tables: %s match: not valid for this family\n",
510 xt_prefix[par->family], par->match->name);
511 return -EINVAL;
512 }
513 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
514 char used[64], allow[64];
515
516 pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
517 xt_prefix[par->family], par->match->name,
518 textify_hooks(used, sizeof(used),
519 par->hook_mask, par->family),
520 textify_hooks(allow, sizeof(allow),
521 par->match->hooks,
522 par->family));
523 return -EINVAL;
524 }
525 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
526 pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
527 xt_prefix[par->family], par->match->name,
528 par->match->proto);
529 return -EINVAL;
530 }
531
532 return 0;
533 }
534
535 static int xt_checkentry_match(struct xt_mtchk_param *par)
536 {
537 int ret;
538
539 if (par->match->checkentry != NULL) {
540 ret = par->match->checkentry(par);
541 if (ret < 0)
542 return ret;
543 else if (ret > 0)
544 /* Flag up potential errors. */
545 return -EIO;
546 }
547
548 return 0;
549 }
550
551 int xt_check_hooks_match(struct xt_mtchk_param *par)
552 {
553 if (par->match->check_hooks != NULL)
554 return par->match->check_hooks(par);
555
556 return 0;
557 }
558 EXPORT_SYMBOL_GPL(xt_check_hooks_match);
559
560 int xt_check_match(struct xt_mtchk_param *par,
561 unsigned int size, u16 proto, bool inv_proto)
562 {
563 int ret;
564
565 ret = xt_check_match_common(par, size, proto, inv_proto);
566 if (ret < 0)
567 return ret;
568
569 ret = xt_check_hooks_match(par);
570 if (ret < 0)
571 return ret;
572
573 return xt_checkentry_match(par);
574 }
575 EXPORT_SYMBOL_GPL(xt_check_match);
576
577 /** xt_check_entry_match - check that matches end before start of target
578 *
579 * @match: beginning of xt_entry_match
580 * @target: beginning of this rules target (alleged end of matches)
581 * @alignment: alignment requirement of match structures
582 *
583 * Validates that all matches add up to the beginning of the target,
584 * and that each match covers at least the base structure size.
585 *
586 * Return: 0 on success, negative errno on failure.
587 */
588 static int xt_check_entry_match(const char *match, const char *target,
589 const size_t alignment)
590 {
591 const struct xt_entry_match *pos;
592 int length = target - match;
593
594 if (length == 0) /* no matches */
595 return 0;
596
597 pos = (struct xt_entry_match *)match;
598 do {
599 if ((unsigned long)pos % alignment)
600 return -EINVAL;
601
602 if (length < (int)sizeof(struct xt_entry_match))
603 return -EINVAL;
604
605 if (pos->u.match_size < sizeof(struct xt_entry_match))
606 return -EINVAL;
607
608 if (pos->u.match_size > length)
609 return -EINVAL;
610
611 length -= pos->u.match_size;
612 pos = ((void *)((char *)(pos) + (pos)->u.match_size));
613 } while (length > 0);
614
615 return 0;
616 }
617
618 /** xt_check_table_hooks - check hook entry points are sane
619 *
620 * @info: xt_table_info to check
621 * @valid_hooks: hook entry points that we can enter from
622 *
623 * Validates that the hook entry and underflows points are set up.
624 *
625 * Return: 0 on success, negative errno on failure.
626 */
627 int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
628 {
629 const char *err = "unsorted underflow";
630 unsigned int i, max_uflow, max_entry;
631 bool check_hooks = false;
632
633 BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
634
635 max_entry = 0;
636 max_uflow = 0;
637
638 for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
639 if (!(valid_hooks & (1 << i)))
640 continue;
641
642 if (info->hook_entry[i] == 0xFFFFFFFF)
643 return -EINVAL;
644 if (info->underflow[i] == 0xFFFFFFFF)
645 return -EINVAL;
646
647 if (check_hooks) {
648 if (max_uflow > info->underflow[i])
649 goto error;
650
651 if (max_uflow == info->underflow[i]) {
652 err = "duplicate underflow";
653 goto error;
654 }
655 if (max_entry > info->hook_entry[i]) {
656 err = "unsorted entry";
657 goto error;
658 }
659 if (max_entry == info->hook_entry[i]) {
660 err = "duplicate entry";
661 goto error;
662 }
663 }
664 max_entry = info->hook_entry[i];
665 max_uflow = info->underflow[i];
666 check_hooks = true;
667 }
668
669 return 0;
670 error:
671 pr_err_ratelimited("%s at hook %d\n", err, i);
672 return -EINVAL;
673 }
674 EXPORT_SYMBOL(xt_check_table_hooks);
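
/*
 * Worked example (illustrative): for a filter-style table with
 * valid_hooks covering LOCAL_IN, FORWARD and LOCAL_OUT, userspace must
 * fill in both arrays for every valid hook, e.g.
 *
 *	hook_entry[NF_INET_LOCAL_IN]  = 0      underflow[NF_INET_LOCAL_IN]  = off_a
 *	hook_entry[NF_INET_FORWARD]   = off_b  underflow[NF_INET_FORWARD]   = off_c
 *	hook_entry[NF_INET_LOCAL_OUT] = off_d  underflow[NF_INET_LOCAL_OUT] = off_e
 *
 * where both the hook_entry[] and the underflow[] sequences are strictly
 * increasing over the valid hooks; 0xFFFFFFFF (unset), duplicate or
 * unsorted offsets are rejected above.
 */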
675
676 static bool verdict_ok(int verdict)
677 {
678 if (verdict > 0)
679 return true;
680
681 if (verdict < 0) {
682 int v = -verdict - 1;
683
684 if (verdict == XT_RETURN)
685 return true;
686
687 switch (v) {
688 case NF_ACCEPT: return true;
689 case NF_DROP: return true;
690 case NF_QUEUE: return true;
691 default:
692 break;
693 }
694
695 return false;
696 }
697
698 return false;
699 }
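
/*
 * Background (illustrative): userspace encodes standard verdicts in the
 * same field that holds jump offsets. With the usual uapi values, a
 * positive verdict is a jump to that offset inside the blob, while
 * built-in verdicts are stored as -(NF_xxx) - 1, e.g.
 *
 *	-j DROP    ->  -1  (-NF_DROP  - 1)
 *	-j ACCEPT  ->  -2  (-NF_ACCEPT - 1)
 *	-j QUEUE   ->  -4  (-NF_QUEUE - 1)
 *	RETURN     ->  XT_RETURN (-NF_REPEAT - 1)
 *
 * verdict_ok() above accepts exactly these plus positive jump offsets.
 */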
700
701 static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
702 const char *msg, unsigned int msglen)
703 {
704 return usersize == kernsize && strnlen(msg, msglen) < msglen;
705 }
706
707 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
708 int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
709 {
710 struct xt_af *xp = &xt[af];
711
712 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
713
714 if (WARN_ON(!xp->compat_tab))
715 return -ENOMEM;
716
717 if (xp->cur >= xp->number)
718 return -EINVAL;
719
720 if (xp->cur)
721 delta += xp->compat_tab[xp->cur - 1].delta;
722 xp->compat_tab[xp->cur].offset = offset;
723 xp->compat_tab[xp->cur].delta = delta;
724 xp->cur++;
725 return 0;
726 }
727 EXPORT_SYMBOL_GPL(xt_compat_add_offset);
728
729 void xt_compat_flush_offsets(u_int8_t af)
730 {
731 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
732
733 if (xt[af].compat_tab) {
734 vfree(xt[af].compat_tab);
735 xt[af].compat_tab = NULL;
736 xt[af].number = 0;
737 xt[af].cur = 0;
738 }
739 }
740 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
741
742 int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
743 {
744 struct compat_delta *tmp = xt[af].compat_tab;
745 int mid, left = 0, right = xt[af].cur - 1;
746
747 while (left <= right) {
748 mid = (left + right) >> 1;
749 if (offset > tmp[mid].offset)
750 left = mid + 1;
751 else if (offset < tmp[mid].offset)
752 right = mid - 1;
753 else
754 return mid ? tmp[mid - 1].delta : 0;
755 }
756 return left ? tmp[left - 1].delta : 0;
757 }
758 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
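
/*
 * Worked example (illustrative): during a compat translation each rule's
 * kernel offset and its size delta (kernel layout minus 32bit layout) are
 * recorded via xt_compat_add_offset(); deltas are stored cumulatively. If
 * rule 0 grows by 8 bytes and rule 1 by a further 4, the table holds
 *
 *	{ .offset = off0, .delta = 8 }, { .offset = off1, .delta = 12 }
 *
 * and xt_compat_calc_jump(af, off) binary-searches for the last entry
 * before 'off', so a compat jump target can be shifted by the total
 * growth of all preceding rules.
 */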
759
760 int xt_compat_init_offsets(u8 af, unsigned int number)
761 {
762 size_t mem;
763
764 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
765
766 if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
767 return -EINVAL;
768
769 if (WARN_ON(xt[af].compat_tab))
770 return -EINVAL;
771
772 mem = sizeof(struct compat_delta) * number;
773 if (mem > XT_MAX_TABLE_SIZE)
774 return -ENOMEM;
775
776 xt[af].compat_tab = vmalloc(mem);
777 if (!xt[af].compat_tab)
778 return -ENOMEM;
779
780 xt[af].number = number;
781 xt[af].cur = 0;
782
783 return 0;
784 }
785 EXPORT_SYMBOL(xt_compat_init_offsets);
786
787 int xt_compat_match_offset(const struct xt_match *match)
788 {
789 u_int16_t csize = match->compatsize ? : match->matchsize;
790 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
791 }
792 EXPORT_SYMBOL_GPL(xt_compat_match_offset);
793
794 void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
795 unsigned int *size)
796 {
797 const struct xt_match *match = m->u.kernel.match;
798 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
799 int off = xt_compat_match_offset(match);
800 u_int16_t msize = cm->u.user.match_size;
801 char name[sizeof(m->u.user.name)];
802
803 m = *dstptr;
804 memcpy(m, cm, sizeof(*cm));
805 if (match->compat_from_user)
806 match->compat_from_user(m->data, cm->data);
807 else
808 memcpy(m->data, cm->data, msize - sizeof(*cm));
809
810 msize += off;
811 m->u.user.match_size = msize;
812 strscpy(name, match->name, sizeof(name));
813 module_put(match->me);
814 strscpy_pad(m->u.user.name, name, sizeof(m->u.user.name));
815
816 *size += off;
817 *dstptr += msize;
818 }
819 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
820
821 #define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \
822 xt_data_to_user(U->data, K->data, \
823 K->u.kernel.TYPE->usersize, \
824 C_SIZE, \
825 COMPAT_XT_ALIGN(C_SIZE))
826
827 int xt_compat_match_to_user(const struct xt_entry_match *m,
828 void __user **dstptr, unsigned int *size)
829 {
830 const struct xt_match *match = m->u.kernel.match;
831 struct compat_xt_entry_match __user *cm = *dstptr;
832 int off = xt_compat_match_offset(match);
833 u_int16_t msize = m->u.user.match_size - off;
834
835 if (XT_OBJ_TO_USER(cm, m, match, msize))
836 return -EFAULT;
837
838 if (match->compat_to_user) {
839 if (match->compat_to_user((void __user *)cm->data, m->data))
840 return -EFAULT;
841 } else {
842 if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
843 return -EFAULT;
844 }
845
846 *size -= off;
847 *dstptr += msize;
848 return 0;
849 }
850 EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
851
852 /* non-compat version may have padding after verdict */
853 struct compat_xt_standard_target {
854 /* Must be last as it ends in a flexible-array member. */
855 TRAILING_OVERLAP(struct compat_xt_entry_target, t, data,
856 compat_uint_t verdict;
857 );
858 };
859
860 struct compat_xt_error_target {
861 /* Must be last as it ends in a flexible-array member. */
862 TRAILING_OVERLAP(struct compat_xt_entry_target, t, data,
863 char errorname[XT_FUNCTION_MAXNAMELEN];
864 );
865 };
866
867 int xt_compat_check_entry_offsets(const void *base, const char *elems,
868 unsigned int target_offset,
869 unsigned int next_offset)
870 {
871 long size_of_base_struct = elems - (const char *)base;
872 const struct compat_xt_entry_target *t;
873 const char *e = base;
874
875 if (target_offset < size_of_base_struct)
876 return -EINVAL;
877
878 if (target_offset + sizeof(*t) > next_offset)
879 return -EINVAL;
880
881 t = (void *)(e + target_offset);
882 if (t->u.target_size < sizeof(*t))
883 return -EINVAL;
884
885 if (target_offset + t->u.target_size > next_offset)
886 return -EINVAL;
887
888 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
889 const struct compat_xt_standard_target *st = (const void *)t;
890
891 if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
892 return -EINVAL;
893
894 if (!verdict_ok(st->verdict))
895 return -EINVAL;
896 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
897 const struct compat_xt_error_target *et = (const void *)t;
898
899 if (!error_tg_ok(t->u.target_size, sizeof(*et),
900 et->errorname, sizeof(et->errorname)))
901 return -EINVAL;
902 }
903
904 /* compat_xt_entry match has less strict alignment requirements,
905 * otherwise they are identical. In case of padding differences
906 * we need to add compat version of xt_check_entry_match.
907 */
908 BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
909
910 return xt_check_entry_match(elems, base + target_offset,
911 __alignof__(struct compat_xt_entry_match));
912 }
913 EXPORT_SYMBOL(xt_compat_check_entry_offsets);
914 #endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
915
916 /**
917 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
918 *
919 * @base: pointer to arp/ip/ip6t_entry
920 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
921 * @target_offset: the arp/ip/ip6_t->target_offset
922 * @next_offset: the arp/ip/ip6_t->next_offset
923 *
924 * validates that target_offset and next_offset are sane and that all
925 * match sizes (if any) align with the target offset.
926 *
927 * This function does not validate the targets or matches themselves, it
928 * only tests that all the offsets and sizes are correct, that all
929 * match structures are aligned, and that the last structure ends where
930 * the target structure begins.
931 *
932 * Also see xt_compat_check_entry_offsets for CONFIG_NETFILTER_XTABLES_COMPAT version.
933 *
934 * The arp/ip/ip6t_entry structure @base must have passed following tests:
935 * - it must point to a valid memory location
936 * - base to base + next_offset must be accessible, i.e. not exceed allocated
937 * length.
938 *
939 * A well-formed entry looks like this:
940 *
941 * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
942 * e->elems[]-----' | |
943 * matchsize | |
944 * matchsize | |
945 * | |
946 * target_offset---------------------------------' |
947 * next_offset---------------------------------------------------'
948 *
949 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
950 * This is where matches (if any) and the target reside.
951 * target_offset: beginning of target.
952 * next_offset: start of the next rule; also: size of this rule.
953 * Since targets have a minimum size, target_offset + minlen <= next_offset.
954 *
955 * Every match stores its size, sum of sizes must not exceed target_offset.
956 *
957 * Return: 0 on success, negative errno on failure.
958 */
959 int xt_check_entry_offsets(const void *base,
960 const char *elems,
961 unsigned int target_offset,
962 unsigned int next_offset)
963 {
964 long size_of_base_struct = elems - (const char *)base;
965 const struct xt_entry_target *t;
966 const char *e = base;
967
968 /* target start is within the ip/ip6/arpt_entry struct */
969 if (target_offset < size_of_base_struct)
970 return -EINVAL;
971
972 if (target_offset + sizeof(*t) > next_offset)
973 return -EINVAL;
974
975 t = (void *)(e + target_offset);
976 if (t->u.target_size < sizeof(*t))
977 return -EINVAL;
978
979 if (target_offset + t->u.target_size > next_offset)
980 return -EINVAL;
981
982 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
983 const struct xt_standard_target *st = (const void *)t;
984
985 if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
986 return -EINVAL;
987
988 if (!verdict_ok(st->verdict))
989 return -EINVAL;
990 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
991 const struct xt_error_target *et = (const void *)t;
992
993 if (!error_tg_ok(t->u.target_size, sizeof(*et),
994 et->errorname, sizeof(et->errorname)))
995 return -EINVAL;
996 }
997
998 return xt_check_entry_match(elems, base + target_offset,
999 __alignof__(struct xt_entry_match));
1000 }
1001 EXPORT_SYMBOL(xt_check_entry_offsets);
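
/*
 * Worked example (illustrative): for an IPv4 rule carrying one match and
 * the standard target, userspace is expected to lay the blob out as
 *
 *	target_offset = sizeof(struct ipt_entry)
 *			+ XT_ALIGN(sizeof(struct xt_entry_match) + matchsize);
 *	next_offset   = XT_ALIGN(target_offset
 *			+ sizeof(struct xt_standard_target));
 *
 * xt_check_entry_offsets() then verifies that the aligned match sizes add
 * up exactly to the start of the target and that the target fits in front
 * of next_offset.
 */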
1002
1003 /**
1004 * xt_alloc_entry_offsets - allocate array to store rule head offsets
1005 *
1006 * @size: number of entries
1007 *
1008 * Return: NULL or zeroed kmalloc'd or vmalloc'd array
1009 */
1010 unsigned int *xt_alloc_entry_offsets(unsigned int size)
1011 {
1012 if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
1013 return NULL;
1014
1015 return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
1016
1017 }
1018 EXPORT_SYMBOL(xt_alloc_entry_offsets);
1019
1020 /**
1021 * xt_find_jump_offset - check if target is a valid jump offset
1022 *
1023 * @offsets: array containing all valid rule start offsets of a rule blob
1024 * @target: the jump target to search for
1025 * @size: entries in @offset
1026 */
1027 bool xt_find_jump_offset(const unsigned int *offsets,
1028 unsigned int target, unsigned int size)
1029 {
1030 int m, low = 0, hi = size;
1031
1032 while (hi > low) {
1033 m = (low + hi) / 2u;
1034
1035 if (offsets[m] > target)
1036 hi = m;
1037 else if (offsets[m] < target)
1038 low = m + 1;
1039 else
1040 return true;
1041 }
1042
1043 return false;
1044 }
1045 EXPORT_SYMBOL(xt_find_jump_offset);
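
/*
 * Illustrative usage (not part of this file): the table translation code
 * typically records every rule head while walking the blob and later
 * validates user-supplied jump verdicts against that sorted array:
 *
 *	offsets = xt_alloc_entry_offsets(newinfo->number);
 *	...
 *	offsets[n++] = (unsigned char *)e - base;	// per parsed rule
 *	...
 *	if (!xt_find_jump_offset(offsets, newpos, newinfo->number))
 *		return -EINVAL;				// jump into mid-rule
 */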
1046
1047 static int xt_check_target_common(struct xt_tgchk_param *par,
1048 unsigned int size, u16 proto, bool inv_proto)
1049 {
1050 if (XT_ALIGN(par->target->targetsize) != size) {
1051 pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
1052 xt_prefix[par->family], par->target->name,
1053 par->target->revision,
1054 XT_ALIGN(par->target->targetsize), size);
1055 return -EINVAL;
1056 }
1057 if (par->target->table != NULL &&
1058 strcmp(par->target->table, par->table) != 0) {
1059 pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
1060 xt_prefix[par->family], par->target->name,
1061 par->target->table, par->table);
1062 return -EINVAL;
1063 }
1064
1065 /* NFPROTO_UNSPEC implies NF_INET_* hooks which do not overlap with
1066 * NF_ARP_IN,OUT,FORWARD, allow explicit extensions with NFPROTO_ARP
1067 * support.
1068 */
1069 if (par->family == NFPROTO_ARP &&
1070 par->target->family != NFPROTO_ARP) {
1071 pr_info_ratelimited("%s_tables: %s target: not valid for this family\n",
1072 xt_prefix[par->family], par->target->name);
1073 return -EINVAL;
1074 }
1075
1076 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1077 char used[64], allow[64];
1078
1079 pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1080 xt_prefix[par->family], par->target->name,
1081 textify_hooks(used, sizeof(used),
1082 par->hook_mask, par->family),
1083 textify_hooks(allow, sizeof(allow),
1084 par->target->hooks,
1085 par->family));
1086 return -EINVAL;
1087 }
1088 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1089 pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1090 xt_prefix[par->family], par->target->name,
1091 par->target->proto);
1092 return -EINVAL;
1093 }
1094
1095 return 0;
1096 }
1097
1098 int xt_check_hooks_target(struct xt_tgchk_param *par)
1099 {
1100 if (par->target->check_hooks != NULL)
1101 return par->target->check_hooks(par);
1102
1103 return 0;
1104 }
1105 EXPORT_SYMBOL_GPL(xt_check_hooks_target);
1106
1107 static int xt_checkentry_target(struct xt_tgchk_param *par)
1108 {
1109 int ret;
1110
1111 if (par->target->checkentry != NULL) {
1112 ret = par->target->checkentry(par);
1113 if (ret < 0)
1114 return ret;
1115 else if (ret > 0)
1116 /* Flag up potential errors. */
1117 return -EIO;
1118 }
1119 return 0;
1120 }
1121
1122 int xt_check_target(struct xt_tgchk_param *par,
1123 unsigned int size, u16 proto, bool inv_proto)
1124 {
1125 int ret;
1126
1127 ret = xt_check_target_common(par, size, proto, inv_proto);
1128 if (ret < 0)
1129 return ret;
1130
1131 ret = xt_check_hooks_target(par);
1132 if (ret < 0)
1133 return ret;
1134
1135 return xt_checkentry_target(par);
1136 }
1137 EXPORT_SYMBOL_GPL(xt_check_target);
1138
1139 /**
1140 * xt_copy_counters - copy counters and metadata from a sockptr_t
1141 *
1142 * @arg: src sockptr
1143 * @len: alleged size of userspace memory
1144 * @info: where to store the xt_counters_info metadata
1145 *
1146 * Copies the counter metadata from @arg and stores it in @info.
1147 *
1148 * vmallocs memory to hold the counters, then copies the counter data
1149 * from @user to the new memory and returns a pointer to it.
1150 *
1151 * If called from a compat syscall, @info gets converted automatically to the
1152 * 64bit representation.
1153 *
1154 * The metadata associated with the counters is stored in @info.
1155 *
1156 * Return: returns pointer that caller has to test via IS_ERR().
1157 * If IS_ERR is false, caller has to vfree the pointer.
1158 */
1159 void *xt_copy_counters(sockptr_t arg, unsigned int len,
1160 struct xt_counters_info *info)
1161 {
1162 size_t offset;
1163 void *mem;
1164 u64 size;
1165
1166 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1167 if (in_compat_syscall()) {
1168 /* structures only differ in size due to alignment */
1169 struct compat_xt_counters_info compat_tmp;
1170
1171 if (len <= sizeof(compat_tmp))
1172 return ERR_PTR(-EINVAL);
1173
1174 len -= sizeof(compat_tmp);
1175 if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
1176 return ERR_PTR(-EFAULT);
1177
1178 memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1179 info->num_counters = compat_tmp.num_counters;
1180 offset = sizeof(compat_tmp);
1181 } else
1182 #endif
1183 {
1184 if (len <= sizeof(*info))
1185 return ERR_PTR(-EINVAL);
1186
1187 len -= sizeof(*info);
1188 if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
1189 return ERR_PTR(-EFAULT);
1190
1191 offset = sizeof(*info);
1192 }
1193 info->name[sizeof(info->name) - 1] = '\0';
1194
1195 size = sizeof(struct xt_counters);
1196 size *= info->num_counters;
1197
1198 if (size != (u64)len)
1199 return ERR_PTR(-EINVAL);
1200
1201 mem = vmalloc(len);
1202 if (!mem)
1203 return ERR_PTR(-ENOMEM);
1204
1205 if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
1206 return mem;
1207
1208 vfree(mem);
1209 return ERR_PTR(-EFAULT);
1210 }
1211 EXPORT_SYMBOL_GPL(xt_copy_counters);
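
/*
 * Illustrative usage (not part of this file): the do_add_counters() paths
 * in {ip,ip6,arp}_tables consume this roughly as
 *
 *	struct xt_counters_info tmp;
 *	struct xt_counters *paddc;
 *
 *	paddc = xt_copy_counters(arg, len, &tmp);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *	...add tmp.num_counters entries from paddc to the table...
 *	vfree(paddc);
 */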
1212
1213 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1214 int xt_compat_target_offset(const struct xt_target *target)
1215 {
1216 u_int16_t csize = target->compatsize ? : target->targetsize;
1217 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1218 }
1219 EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1220
1221 void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1222 unsigned int *size)
1223 {
1224 const struct xt_target *target = t->u.kernel.target;
1225 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1226 int off = xt_compat_target_offset(target);
1227 u_int16_t tsize = ct->u.user.target_size;
1228 char name[sizeof(t->u.user.name)];
1229
1230 t = *dstptr;
1231 memcpy(t, ct, sizeof(*ct));
1232 if (target->compat_from_user)
1233 target->compat_from_user(t->data, ct->data);
1234 else
1235 unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
1236 /* UAPI 0-sized destination */);
1237
1238 tsize += off;
1239 t->u.user.target_size = tsize;
1240 strscpy(name, target->name, sizeof(name));
1241 module_put(target->me);
1242 strscpy_pad(t->u.user.name, name, sizeof(t->u.user.name));
1243
1244 *size += off;
1245 *dstptr += tsize;
1246 }
1247 EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1248
1249 int xt_compat_target_to_user(const struct xt_entry_target *t,
1250 void __user **dstptr, unsigned int *size)
1251 {
1252 const struct xt_target *target = t->u.kernel.target;
1253 struct compat_xt_entry_target __user *ct = *dstptr;
1254 int off = xt_compat_target_offset(target);
1255 u_int16_t tsize = t->u.user.target_size - off;
1256
1257 if (XT_OBJ_TO_USER(ct, t, target, tsize))
1258 return -EFAULT;
1259
1260 if (target->compat_to_user) {
1261 if (target->compat_to_user((void __user *)ct->data, t->data))
1262 return -EFAULT;
1263 } else {
1264 if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1265 return -EFAULT;
1266 }
1267
1268 *size -= off;
1269 *dstptr += tsize;
1270 return 0;
1271 }
1272 EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1273 #endif
1274
1275 struct xt_table_info *xt_alloc_table_info(unsigned int size)
1276 {
1277 struct xt_table_info *info = NULL;
1278 size_t sz = sizeof(*info) + size;
1279
1280 if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1281 return NULL;
1282
1283 info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1284 if (!info)
1285 return NULL;
1286
1287 memset(info, 0, sizeof(*info));
1288 info->size = size;
1289 return info;
1290 }
1291 EXPORT_SYMBOL(xt_alloc_table_info);
1292
1293 void xt_free_table_info(struct xt_table_info *info)
1294 {
1295 int cpu;
1296
1297 if (info->jumpstack != NULL) {
1298 for_each_possible_cpu(cpu)
1299 kvfree(info->jumpstack[cpu]);
1300 kvfree(info->jumpstack);
1301 }
1302
1303 kvfree(info);
1304 }
1305 EXPORT_SYMBOL(xt_free_table_info);
1306
1307 struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
1308 {
1309 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1310 struct xt_table *t;
1311
1312 mutex_lock(&xt[af].mutex);
1313 list_for_each_entry(t, &xt_net->tables[af], list) {
1314 if (strcmp(t->name, name) == 0) {
1315 mutex_unlock(&xt[af].mutex);
1316 return t;
1317 }
1318 }
1319 mutex_unlock(&xt[af].mutex);
1320 return NULL;
1321 }
1322 EXPORT_SYMBOL(xt_find_table);
1323
1324 /* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. */
1325 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1326 const char *name)
1327 {
1328 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1329 struct module *owner = NULL;
1330 struct xt_template *tmpl;
1331 struct xt_table *t;
1332
1333 mutex_lock(&xt[af].mutex);
1334 list_for_each_entry(t, &xt_net->tables[af], list)
1335 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1336 return t;
1337
1338 /* Table doesn't exist in this netns, check larval list */
1339 list_for_each_entry(tmpl, &xt_templates[af], list) {
1340 int err;
1341
1342 if (strcmp(tmpl->name, name))
1343 continue;
1344 if (!try_module_get(tmpl->me))
1345 goto out;
1346
1347 owner = tmpl->me;
1348
1349 mutex_unlock(&xt[af].mutex);
1350 err = tmpl->table_init(net);
1351 if (err < 0) {
1352 module_put(owner);
1353 return ERR_PTR(err);
1354 }
1355
1356 mutex_lock(&xt[af].mutex);
1357 break;
1358 }
1359
1360 /* and once again: */
1361 list_for_each_entry(t, &xt_net->tables[af], list)
1362 if (strcmp(t->name, name) == 0 && owner == t->me)
1363 return t;
1364
1365 module_put(owner);
1366 out:
1367 mutex_unlock(&xt[af].mutex);
1368 return ERR_PTR(-ENOENT);
1369 }
1370 EXPORT_SYMBOL_GPL(xt_find_table_lock);
1371
1372 struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1373 const char *name)
1374 {
1375 struct xt_table *t = xt_find_table_lock(net, af, name);
1376
1377 #ifdef CONFIG_MODULES
1378 if (IS_ERR(t)) {
1379 int err = request_module("%stable_%s", xt_prefix[af], name);
1380 if (err < 0)
1381 return ERR_PTR(err);
1382 t = xt_find_table_lock(net, af, name);
1383 }
1384 #endif
1385
1386 return t;
1387 }
1388 EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1389
1390 void xt_table_unlock(struct xt_table *table)
1391 {
1392 mutex_unlock(&xt[table->af].mutex);
1393 }
1394 EXPORT_SYMBOL_GPL(xt_table_unlock);
1395
1396 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1397 void xt_compat_lock(u_int8_t af)
1398 {
1399 mutex_lock(&xt[af].compat_mutex);
1400 }
1401 EXPORT_SYMBOL_GPL(xt_compat_lock);
1402
1403 void xt_compat_unlock(u_int8_t af)
1404 {
1405 mutex_unlock(&xt[af].compat_mutex);
1406 }
1407 EXPORT_SYMBOL_GPL(xt_compat_unlock);
1408 #endif
1409
1410 struct static_key xt_tee_enabled __read_mostly;
1411 EXPORT_SYMBOL_GPL(xt_tee_enabled);
1412
1413 #ifdef CONFIG_NETFILTER_XTABLES_LEGACY
1414 DEFINE_PER_CPU(seqcount_t, xt_recseq);
1415 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1416
1417 static int xt_jumpstack_alloc(struct xt_table_info *i)
1418 {
1419 unsigned int size;
1420 int cpu;
1421
1422 size = sizeof(void **) * nr_cpu_ids;
1423 if (size > PAGE_SIZE)
1424 i->jumpstack = kvzalloc(size, GFP_KERNEL);
1425 else
1426 i->jumpstack = kzalloc(size, GFP_KERNEL);
1427 if (i->jumpstack == NULL)
1428 return -ENOMEM;
1429
1430 /* ruleset without jumps -- no stack needed */
1431 if (i->stacksize == 0)
1432 return 0;
1433
1434 /* Jumpstack needs to be able to record two full callchains, one
1435 * from the first rule set traversal, plus one table reentrancy
1436 * via -j TEE without clobbering the callchain that brought us to
1437 * TEE target.
1438 *
1439 * This is done by allocating two jumpstacks per cpu, on reentry
1440 * the upper half of the stack is used.
1441 *
1442 * see the jumpstack setup in ipt_do_table() for more details.
1443 */
1444 size = sizeof(void *) * i->stacksize * 2u;
1445 for_each_possible_cpu(cpu) {
1446 i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1447 cpu_to_node(cpu));
1448 if (i->jumpstack[cpu] == NULL)
1449 /*
1450 * Freeing will be done later on by the callers. The
1451 * chain is: xt_replace_table -> __do_replace ->
1452 * do_replace -> xt_free_table_info.
1453 */
1454 return -ENOMEM;
1455 }
1456
1457 return 0;
1458 }
1459
1460 struct xt_counters *xt_counters_alloc(unsigned int counters)
1461 {
1462 struct xt_counters *mem;
1463
1464 if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1465 return NULL;
1466
1467 counters *= sizeof(*mem);
1468 if (counters > XT_MAX_TABLE_SIZE)
1469 return NULL;
1470
1471 return vzalloc(counters);
1472 }
1473 EXPORT_SYMBOL(xt_counters_alloc);
1474
1475 struct xt_table_info *
1476 xt_replace_table(struct xt_table *table,
1477 unsigned int num_counters,
1478 struct xt_table_info *newinfo,
1479 int *error)
1480 {
1481 struct xt_table_info *private;
1482 unsigned int cpu;
1483 int ret;
1484
1485 ret = xt_jumpstack_alloc(newinfo);
1486 if (ret < 0) {
1487 *error = ret;
1488 return NULL;
1489 }
1490
1491 /* Do the substitution. */
1492 local_bh_disable();
1493 private = table->private;
1494
1495 /* Check inside lock: is the old number correct? */
1496 if (num_counters != private->number) {
1497 pr_debug("num_counters != table->private->number (%u/%u)\n",
1498 num_counters, private->number);
1499 local_bh_enable();
1500 *error = -EAGAIN;
1501 return NULL;
1502 }
1503
1504 newinfo->initial_entries = private->initial_entries;
1505 /*
1506 * Ensure contents of newinfo are visible before assigning to
1507 * private.
1508 */
1509 smp_wmb();
1510 table->private = newinfo;
1511
1512 /* make sure all cpus see new ->private value */
1513 smp_mb();
1514
1515 /*
1516 * Even though table entries have now been swapped, other CPU's
1517 * may still be using the old entries...
1518 */
1519 local_bh_enable();
1520
1521 /* ... so wait for even xt_recseq on all cpus */
1522 for_each_possible_cpu(cpu) {
1523 seqcount_t *s = &per_cpu(xt_recseq, cpu);
1524 u32 seq = raw_read_seqcount(s);
1525
1526 if (seq & 1) {
1527 do {
1528 cond_resched();
1529 cpu_relax();
1530 } while (seq == raw_read_seqcount(s));
1531 }
1532 }
1533
1534 audit_log_nfcfg(table->name, table->af, private->number,
1535 !private->number ? AUDIT_XT_OP_REGISTER :
1536 AUDIT_XT_OP_REPLACE,
1537 GFP_KERNEL);
1538 return private;
1539 }
1540 EXPORT_SYMBOL_GPL(xt_replace_table);
1541
1542 struct xt_table *xt_register_table(struct net *net,
1543 const struct xt_table *input_table,
1544 struct xt_table_info *bootstrap,
1545 struct xt_table_info *newinfo)
1546 {
1547 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1548 struct xt_table_info *private;
1549 struct xt_table *t, *table;
1550 int ret;
1551
1552 /* Don't add one object to multiple lists. */
1553 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1554 if (!table) {
1555 ret = -ENOMEM;
1556 goto out;
1557 }
1558
1559 mutex_lock(&xt[table->af].mutex);
1560 /* Don't autoload: we'd eat our tail... */
1561 list_for_each_entry(t, &xt_net->tables[table->af], list) {
1562 if (strcmp(t->name, table->name) == 0) {
1563 ret = -EEXIST;
1564 goto unlock;
1565 }
1566 }
1567
1568 /* Simplifies replace_table code. */
1569 table->private = bootstrap;
1570
1571 if (!xt_replace_table(table, 0, newinfo, &ret))
1572 goto unlock;
1573
1574 private = table->private;
1575 pr_debug("table->private->number = %u\n", private->number);
1576
1577 /* save number of initial entries */
1578 private->initial_entries = private->number;
1579
1580 list_add(&table->list, &xt_net->tables[table->af]);
1581 mutex_unlock(&xt[table->af].mutex);
1582 return table;
1583
1584 unlock:
1585 mutex_unlock(&xt[table->af].mutex);
1586 kfree(table);
1587 out:
1588 return ERR_PTR(ret);
1589 }
1590 EXPORT_SYMBOL_GPL(xt_register_table);
1591
1592 void *xt_unregister_table(struct xt_table *table)
1593 {
1594 struct xt_table_info *private;
1595
1596 mutex_lock(&xt[table->af].mutex);
1597 private = table->private;
1598 list_del(&table->list);
1599 mutex_unlock(&xt[table->af].mutex);
1600 audit_log_nfcfg(table->name, table->af, private->number,
1601 AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1602 kfree(table->ops);
1603 kfree(table);
1604
1605 return private;
1606 }
1607 EXPORT_SYMBOL_GPL(xt_unregister_table);
1608 #endif
1609
1610 #ifdef CONFIG_PROC_FS
1611 static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1612 {
1613 u8 af = (unsigned long)pde_data(file_inode(seq->file));
1614 struct net *net = seq_file_net(seq);
1615 struct xt_pernet *xt_net;
1616
1617 xt_net = net_generic(net, xt_pernet_id);
1618
1619 mutex_lock(&xt[af].mutex);
1620 return seq_list_start(&xt_net->tables[af], *pos);
1621 }
1622
1623 static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1624 {
1625 u8 af = (unsigned long)pde_data(file_inode(seq->file));
1626 struct net *net = seq_file_net(seq);
1627 struct xt_pernet *xt_net;
1628
1629 xt_net = net_generic(net, xt_pernet_id);
1630
1631 return seq_list_next(v, &xt_net->tables[af], pos);
1632 }
1633
1634 static void xt_table_seq_stop(struct seq_file *seq, void *v)
1635 {
1636 u_int8_t af = (unsigned long)pde_data(file_inode(seq->file));
1637
1638 mutex_unlock(&xt[af].mutex);
1639 }
1640
1641 static int xt_table_seq_show(struct seq_file *seq, void *v)
1642 {
1643 struct xt_table *table = list_entry(v, struct xt_table, list);
1644
1645 if (*table->name)
1646 seq_printf(seq, "%s\n", table->name);
1647 return 0;
1648 }
1649
1650 static const struct seq_operations xt_table_seq_ops = {
1651 .start = xt_table_seq_start,
1652 .next = xt_table_seq_next,
1653 .stop = xt_table_seq_stop,
1654 .show = xt_table_seq_show,
1655 };
1656
1657 /*
1658 * Traverse state for ip{,6}_{tables,matches}, used to help cross
1659 * the multi-AF mutexes.
1660 */
1661 struct nf_mttg_trav {
1662 struct list_head *head, *curr;
1663 uint8_t class;
1664 };
1665
1666 enum {
1667 MTTG_TRAV_INIT,
1668 MTTG_TRAV_NFP_UNSPEC,
1669 MTTG_TRAV_NFP_SPEC,
1670 MTTG_TRAV_DONE,
1671 };
1672
1673 static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1674 bool is_target)
1675 {
1676 static const uint8_t next_class[] = {
1677 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1678 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
1679 };
1680 uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
1681 struct nf_mttg_trav *trav = seq->private;
1682
1683 if (ppos != NULL)
1684 ++(*ppos);
1685
1686 switch (trav->class) {
1687 case MTTG_TRAV_INIT:
1688 trav->class = MTTG_TRAV_NFP_UNSPEC;
1689 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1690 trav->head = trav->curr = is_target ?
1691 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1692 break;
1693 case MTTG_TRAV_NFP_UNSPEC:
1694 trav->curr = trav->curr->next;
1695 if (trav->curr != trav->head)
1696 break;
1697 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1698 mutex_lock(&xt[nfproto].mutex);
1699 trav->head = trav->curr = is_target ?
1700 &xt[nfproto].target : &xt[nfproto].match;
1701 trav->class = next_class[trav->class];
1702 break;
1703 case MTTG_TRAV_NFP_SPEC:
1704 trav->curr = trav->curr->next;
1705 if (trav->curr != trav->head)
1706 break;
1707 fallthrough;
1708 default:
1709 return NULL;
1710 }
1711 return trav;
1712 }
1713
1714 static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1715 bool is_target)
1716 {
1717 struct nf_mttg_trav *trav = seq->private;
1718 unsigned int j;
1719
1720 trav->class = MTTG_TRAV_INIT;
1721 for (j = 0; j < *pos; ++j)
1722 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1723 return NULL;
1724 return trav;
1725 }
1726
1727 static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1728 {
1729 uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
1730 struct nf_mttg_trav *trav = seq->private;
1731
1732 switch (trav->class) {
1733 case MTTG_TRAV_NFP_UNSPEC:
1734 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1735 break;
1736 case MTTG_TRAV_NFP_SPEC:
1737 mutex_unlock(&xt[nfproto].mutex);
1738 break;
1739 }
1740 }
1741
1742 static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1743 {
1744 return xt_mttg_seq_start(seq, pos, false);
1745 }
1746
1747 static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1748 {
1749 return xt_mttg_seq_next(seq, v, ppos, false);
1750 }
1751
1752 static int xt_match_seq_show(struct seq_file *seq, void *v)
1753 {
1754 const struct nf_mttg_trav *trav = seq->private;
1755 const struct xt_match *match;
1756
1757 switch (trav->class) {
1758 case MTTG_TRAV_NFP_UNSPEC:
1759 case MTTG_TRAV_NFP_SPEC:
1760 if (trav->curr == trav->head)
1761 return 0;
1762 match = list_entry(trav->curr, struct xt_match, list);
1763 if (*match->name)
1764 seq_printf(seq, "%s\n", match->name);
1765 }
1766 return 0;
1767 }
1768
1769 static const struct seq_operations xt_match_seq_ops = {
1770 .start = xt_match_seq_start,
1771 .next = xt_match_seq_next,
1772 .stop = xt_mttg_seq_stop,
1773 .show = xt_match_seq_show,
1774 };
1775
1776 static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1777 {
1778 return xt_mttg_seq_start(seq, pos, true);
1779 }
1780
1781 static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1782 {
1783 return xt_mttg_seq_next(seq, v, ppos, true);
1784 }
1785
1786 static int xt_target_seq_show(struct seq_file *seq, void *v)
1787 {
1788 const struct nf_mttg_trav *trav = seq->private;
1789 const struct xt_target *target;
1790
1791 switch (trav->class) {
1792 case MTTG_TRAV_NFP_UNSPEC:
1793 case MTTG_TRAV_NFP_SPEC:
1794 if (trav->curr == trav->head)
1795 return 0;
1796 target = list_entry(trav->curr, struct xt_target, list);
1797 if (*target->name)
1798 seq_printf(seq, "%s\n", target->name);
1799 }
1800 return 0;
1801 }
1802
1803 static const struct seq_operations xt_target_seq_ops = {
1804 .start = xt_target_seq_start,
1805 .next = xt_target_seq_next,
1806 .stop = xt_mttg_seq_stop,
1807 .show = xt_target_seq_show,
1808 };
1809
1810 #define FORMAT_TABLES "_tables_names"
1811 #define FORMAT_MATCHES "_tables_matches"
1812 #define FORMAT_TARGETS "_tables_targets"
1813
1814 #endif /* CONFIG_PROC_FS */
1815
1816 /**
1817 * xt_hook_ops_alloc - set up hooks for a new table
1818 * @table: table with metadata needed to set up hooks
1819 * @fn: Hook function
1820 *
1821 * This function will create the nf_hook_ops that the x_table needs
1822 * to hand to xt_hook_link_net().
1823 */
1824 struct nf_hook_ops *
1825 xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1826 {
1827 unsigned int hook_mask = table->valid_hooks;
1828 uint8_t i, num_hooks = hweight32(hook_mask);
1829 uint8_t hooknum;
1830 struct nf_hook_ops *ops;
1831
1832 if (!num_hooks)
1833 return ERR_PTR(-EINVAL);
1834
1835 ops = kzalloc_objs(*ops, num_hooks);
1836 if (ops == NULL)
1837 return ERR_PTR(-ENOMEM);
1838
1839 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1840 hook_mask >>= 1, ++hooknum) {
1841 if (!(hook_mask & 1))
1842 continue;
1843 ops[i].hook = fn;
1844 ops[i].pf = table->af;
1845 ops[i].hooknum = hooknum;
1846 ops[i].priority = table->priority;
1847 ++i;
1848 }
1849
1850 return ops;
1851 }
1852 EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
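
/*
 * Illustrative usage (not part of this file): a table module allocates its
 * hook ops once and hands them to the table registration path when the
 * table is brought up, e.g. (sketch, error handling trimmed, exact
 * ipt_register_table() signature may differ by kernel version):
 *
 *	ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	...
 *	ret = ipt_register_table(net, &packet_filter, repl, ops);
 */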
1853
1854 int xt_register_template(const struct xt_table *table,
1855 int (*table_init)(struct net *net))
1856 {
1857 int ret = -EBUSY, af = table->af;
1858 struct xt_template *t;
1859
1860 mutex_lock(&xt[af].mutex);
1861
1862 list_for_each_entry(t, &xt_templates[af], list) {
1863 if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0))
1864 goto out_unlock;
1865 }
1866
1867 ret = -ENOMEM;
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		goto out_unlock;

	BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name));

	strscpy(t->name, table->name, sizeof(t->name));
	t->table_init = table_init;
	t->me = table->me;
	list_add(&t->list, &xt_templates[af]);
	ret = 0;
out_unlock:
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xt_register_template);

void xt_unregister_template(const struct xt_table *table)
{
	struct xt_template *t;
	int af = table->af;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt_templates[af], list) {
		if (strcmp(table->name, t->name))
			continue;

		list_del(&t->list);
		mutex_unlock(&xt[af].mutex);
		kfree(t);
		return;
	}

	mutex_unlock(&xt[af].mutex);
	WARN_ON_ONCE(1);
}
EXPORT_SYMBOL_GPL(xt_unregister_template);

int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
			sizeof(struct seq_net_private),
			(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
			&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
			(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
			&xt_target_seq_ops, sizeof(struct nf_mttg_trav),
			(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

#ifdef CONFIG_NETFILTER_XTABLES_LEGACY
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.  Freeing any counter may free an entire block, so all
 * counters allocated using the same state must be freed at the same
 * time.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);

void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
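
/*
 * Illustrative sketch (not compiled; the "example_" names are assumptions,
 * not part of this file): allocating per-rule counters while walking a
 * table's entries, as described in the kernel-doc above.  All counters
 * share one allocation state, so on failure every already-allocated
 * counter is freed again; only block-aligned entries actually release
 * a percpu block.
 */
#if 0
static int example_alloc_rule_counters(struct ipt_entry **rules,
				       unsigned int nrules)
{
	struct xt_percpu_counter_alloc_state state = {};
	unsigned int i;

	for (i = 0; i < nrules; i++) {
		if (!xt_percpu_counter_alloc(&state, &rules[i]->counters)) {
			while (i-- > 0)
				xt_percpu_counter_free(&rules[i]->counters);
			return -ENOMEM;
		}
	}
	return 0;
}
#endif /* example sketch */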
#endif

static int __net_init xt_net_init(struct net *net)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&xt_net->tables[i]);
	return 0;
}

static void __net_exit xt_net_exit(struct net *net)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
	.exit = xt_net_exit,
	.id   = &xt_pernet_id,
	.size = sizeof(struct xt_pernet),
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	if (IS_ENABLED(CONFIG_NETFILTER_XTABLES_LEGACY)) {
		for_each_possible_cpu(i) {
			seqcount_init(&per_cpu(xt_recseq, i));
		}
	}

	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
		INIT_LIST_HEAD(&xt_templates[i]);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);