// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/siphash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <uapi/linux/netfilter/nf_nat.h>

#include "nf_internals.h"

#define NF_NAT_MAX_ATTEMPTS	128
#define NF_NAT_HARDER_THRESH	(NF_NAT_MAX_ATTEMPTS / 4)

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static siphash_aligned_key_t nf_nat_hash_rnd;

struct nf_nat_lookup_hook_priv {
	struct nf_hook_entries __rcu *entries;

	struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
	struct nf_hook_ops *nat_hook_ops;
	unsigned int users;
};

struct nat_net {
	struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};
#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi4 *fl4 = &fl->u.ip4;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.all;
	}
}

static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi6 *fl6 = &fl->u.ip6;

	if (ct->status & statusbit) {
		fl6->daddr = t->dst.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl6->saddr = t->src.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_sport = t->src.u.all;
	}
#endif
}

static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	switch (family) {
	case NFPROTO_IPV4:
		nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
		return;
	case NFPROTO_IPV6:
		nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
		return;
	}
}
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *net,
	    const struct nf_conntrack_zone *zone,
	    const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;
	struct {
		struct nf_conntrack_man src;
		u32 net_mix;
		u32 protonum;
		u32 zone;
	} __aligned(SIPHASH_ALIGNMENT) combined;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	memset(&combined, 0, sizeof(combined));

	/* Original src, to ensure we map it consistently if possible. */
	combined.src = tuple->src;
	combined.net_mix = net_hash_mix(net);
	combined.protonum = tuple->dst.protonum;

	/* Zone ID can be used provided it's valid for both directions. */
	if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
		combined.zone = zone->id;

	hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);

	return reciprocal_scale(hash, nf_nat_htable_size);
}
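
/* Worked example (illustrative, not part of the upstream code): with a
 * hash table of nf_nat_htable_size = 4096 buckets, reciprocal_scale()
 * maps the 32-bit siphash value h to a bucket via ((u64)h * 4096) >> 32,
 * avoiding a modulo; e.g. h = 0x80000000 lands in bucket 2048. The
 * boot-time random key nf_nat_hash_rnd keeps bucket placement
 * unpredictable to remote senders.
 */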

/**
 * nf_nat_used_tuple - check if proposed nat tuple clashes with existing entry
 * @tuple: proposed NAT binding
 * @ignored_conntrack: our (unconfirmed) conntrack entry
 *
 * A conntrack entry can be inserted into the connection tracking table
 * if there is no existing entry with an identical tuple in either direction.
 *
 * Example:
 * INITIATOR -> NAT/PAT -> RESPONDER
 *
 * INITIATOR passes through NAT/PAT ("us") and SNAT is done (saddr rewrite).
 * Then, later, NAT/PAT itself also connects to RESPONDER.
 *
 * This will not work if the SNAT done earlier has the same IP:PORT source
 * pair.
 *
 * Conntrack table has:
 * ORIGINAL: $IP_INITIATOR:$SPORT -> $IP_RESPONDER:$DPORT
 * REPLY:    $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
 *
 * and the new locally originating connection wants:
 * ORIGINAL: $IP_NAT:$SPORT -> $IP_RESPONDER:$DPORT
 * REPLY:    $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
 *
 * ... which would mean incoming packets cannot be distinguished between
 * the existing and the newly added entry (identical IP_CT_DIR_REPLY tuple).
 *
 * Return: true if the proposed NAT mapping collides with an existing entry.
 */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuple(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}

static bool nf_nat_allow_clash(const struct nf_conn *ct)
{
	return nf_ct_l4proto_find(nf_ct_protonum(ct))->allow_clash;
}

/**
 * nf_nat_used_tuple_new - check if to-be-inserted conntrack collides with existing entry
 * @tuple: proposed NAT binding
 * @ignored_ct: our (unconfirmed) conntrack entry
 *
 * Same as nf_nat_used_tuple, but also check for rare clash in reverse
 * direction. Should be called only when @tuple has not been altered, i.e.
 * @ignored_ct will not be subject to NAT.
 *
 * Return: true if the proposed NAT mapping collides with an existing entry.
 */
static noinline bool
nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
		      const struct nf_conn *ignored_ct)
{
	static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT;
	const struct nf_conntrack_tuple_hash *thash;
	const struct nf_conntrack_zone *zone;
	struct nf_conn *ct;
	bool taken = true;
	struct net *net;

	if (!nf_nat_used_tuple(tuple, ignored_ct))
		return false;

	if (!nf_nat_allow_clash(ignored_ct))
		return true;

	/* Initial choice clashes with existing conntrack.
	 * Check for (rare) reverse collision.
	 *
	 * This can happen when new packets are received in both directions
	 * at the exact same time on different CPUs.
	 *
	 * Without SMP, the first packet creates a new conntrack entry and
	 * the second packet is resolved as an established reply packet.
	 *
	 * With parallel processing, both packets could be picked up as
	 * new and both get their own ct entry allocated.
	 *
	 * If @ignored_ct and the colliding ct are not subject to NAT then
	 * pretend the tuple is available and let later clash resolution
	 * handle this at insertion time.
	 *
	 * Without this, the 'reply' packet would have its source port
	 * rewritten by the NAT engine.
	 */
	if (READ_ONCE(ignored_ct->status) & uses_nat)
		return true;

	net = nf_ct_net(ignored_ct);
	zone = nf_ct_zone(ignored_ct);

	thash = nf_conntrack_find_get(net, zone, tuple);
	if (unlikely(!thash)) /* clashing entry went away */
		return false;

	ct = nf_ct_tuplehash_to_ctrack(thash);

	/* NB: IP_CT_DIR_ORIGINAL should be impossible because
	 * nf_nat_used_tuple() handles origin collisions.
	 *
	 * Handle the remote chance that another CPU confirmed its ct
	 * right after.
	 */
	if (thash->tuple.dst.dir != IP_CT_DIR_REPLY)
		goto out;

	/* clashing connection subject to NAT? Retry with new tuple. */
	if (READ_ONCE(ct->status) & uses_nat)
		goto out;

	if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			      &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) &&
	    nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
			      &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) {
		taken = false;
		goto out;
	}
out:
	nf_ct_put(ct);
	return taken;
}

static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags)
{
	static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT |
						  IPS_DYING;
	static const unsigned long flags_needed = IPS_SRC_NAT;
	enum tcp_conntrack old_state;

	old_state = READ_ONCE(ct->proto.tcp.state);
	if (old_state < TCP_CONNTRACK_TIME_WAIT)
		return false;

	if (flags & flags_refuse)
		return false;

	return (flags & flags_needed) == flags_needed;
}

/* The reverse direction will send packets to the new source, so
 * make sure such packets are invalid.
 */
static bool nf_seq_has_advanced(const struct nf_conn *old, const struct nf_conn *new)
{
	return (__s32)(new->proto.tcp.seen[0].td_end -
		       old->proto.tcp.seen[0].td_end) > 0;
}
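
/* Worked example (illustrative only): td_end is a 32-bit sequence number,
 * so the comparison must be wraparound-safe. With old td_end = 0xffffff00
 * and new td_end = 0x00000010, the unsigned difference is 0x00000110;
 * cast to __s32 it is positive, so the new connection has advanced past
 * the old one even though the counter wrapped.
 */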

static int
nf_nat_used_tuple_harder(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack,
			 unsigned int attempts_left)
{
	static const unsigned long flags_offload = IPS_OFFLOAD | IPS_HW_OFFLOAD;
	struct nf_conntrack_tuple_hash *thash;
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple reply;
	unsigned long flags;
	struct nf_conn *ct;
	bool taken = true;
	struct net *net;

	nf_ct_invert_tuple(&reply, tuple);

	if (attempts_left > NF_NAT_HARDER_THRESH ||
	    tuple->dst.protonum != IPPROTO_TCP ||
	    ignored_conntrack->proto.tcp.state != TCP_CONNTRACK_SYN_SENT)
		return nf_conntrack_tuple_taken(&reply, ignored_conntrack);

	/* Last few attempts to find a free tcp port. Destructive
	 * action: evict the colliding entry if it's in TIME_WAIT state
	 * and the tcp sequence number has advanced past the one used by
	 * the old entry.
	 */
	net = nf_ct_net(ignored_conntrack);
	zone = nf_ct_zone(ignored_conntrack);

	thash = nf_conntrack_find_get(net, zone, &reply);
	if (!thash)
		return false;

	ct = nf_ct_tuplehash_to_ctrack(thash);

	if (thash->tuple.dst.dir == IP_CT_DIR_ORIGINAL)
		goto out;

	if (WARN_ON_ONCE(ct == ignored_conntrack))
		goto out;

	flags = READ_ONCE(ct->status);
	if (!nf_nat_may_kill(ct, flags))
		goto out;

	if (!nf_seq_has_advanced(ct, ignored_conntrack))
		goto out;

	/* Even if we can evict, do not reuse if the entry is offloaded. */
	if (nf_ct_kill(ct))
		taken = flags & flags_offload;
out:
	nf_ct_put(ct);
	return taken;
}

static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range2 *range)
{
	if (t->src.l3num == NFPROTO_IPV4)
		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}

/* Is the manipulable part of the tuple between min and max incl? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
			     enum nf_nat_manip_type maniptype,
			     const union nf_conntrack_man_proto *min,
			     const union nf_conntrack_man_proto *max)
{
	__be16 port;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
		       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
	case IPPROTO_GRE: /* all fall through */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_DCCP:
	case IPPROTO_SCTP:
		if (maniptype == NF_NAT_MANIP_SRC)
			port = tuple->src.u.all;
		else
			port = tuple->dst.u.all;

		return ntohs(port) >= ntohs(min->all) &&
		       ntohs(port) <= ntohs(max->all);
	default:
		return true;
	}
}

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int nf_in_range(const struct nf_conntrack_tuple *tuple,
		       const struct nf_nat_range2 *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !nf_nat_inet_in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
		return 1;

	return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
				&range->min_proto, &range->max_proto);
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip. */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range2 *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuple(result,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (nf_in_range(result, range))
				return 1;
		}
	}
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping? Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
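
/* Worked example (illustrative only): for an IPv4 SNAT range of
 * 10.0.0.1-10.0.0.4, max is 0 and dist is 4, so the selected source
 * becomes 10.0.0.1 + reciprocal_scale(j, 4), where j is the jhash of
 * the original source address (mixed with the destination and zone
 * unless NF_NAT_RANGE_PERSISTENT is set). The hash has no random seed,
 * so the same inputs always pick the same address in the range.
 */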

/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
					const struct nf_nat_range2 *range,
					enum nf_nat_manip_type maniptype,
					const struct nf_conn *ct)
{
	unsigned int range_size, min, max, i, attempts;
	__be16 *keyptr;
	u16 off;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		/* id is same for either direction... */
		keyptr = &tuple->src.u.icmp.id;
		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 0;
			range_size = 65536;
		} else {
			min = ntohs(range->min_proto.icmp.id);
			range_size = ntohs(range->max_proto.icmp.id) -
				     ntohs(range->min_proto.icmp.id) + 1;
		}
		goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
	case IPPROTO_GRE:
		/* If there is no master conntrack we are not PPTP,
		 * do not change tuples.
		 */
		if (!ct->master)
			return;

		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.gre.key;
		else
			keyptr = &tuple->dst.u.gre.key;

		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 1;
			range_size = 65535;
		} else {
			min = ntohs(range->min_proto.gre.key);
			range_size = ntohs(range->max_proto.gre.key) - min + 1;
		}
		goto find_free_id;
#endif
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_TCP:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.all;
		else
			keyptr = &tuple->dst.u.all;

		break;
	default:
		return;
	}

	/* If no range specified... */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == NF_NAT_MANIP_DST)
			return;

		if (ntohs(*keyptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*keyptr) < 512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min_proto.all);
		max = ntohs(range->max_proto.all);
		if (unlikely(max < min))
			swap(max, min);
		range_size = max - min + 1;
	}

find_free_id:
	if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
		off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
	else if ((range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL) ||
		 maniptype != NF_NAT_MANIP_DST)
		off = get_random_u16();
	else
		off = 0;

	attempts = range_size;
	if (attempts > NF_NAT_MAX_ATTEMPTS)
		attempts = NF_NAT_MAX_ATTEMPTS;

	/* We are in softirq; doing a search of the entire range risks
	 * soft lockup when all tuples are already used.
	 *
	 * If we can't find any free port from first offset, pick a new
	 * one and try again, with ever smaller search window.
	 */
another_round:
	for (i = 0; i < attempts; i++, off++) {
		*keyptr = htons(min + off % range_size);
		if (!nf_nat_used_tuple_harder(tuple, ct, attempts - i))
			return;
	}

	if (attempts >= range_size || attempts < 16)
		return;
	attempts /= 2;
	off = get_random_u16();
	goto another_round;
}
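
/* Worked example (illustrative only): for an unrestricted TCP SNAT on a
 * source port >= 1024, min = 1024 and range_size = 64512. The search
 * starts at a random offset and probes up to NF_NAT_MAX_ATTEMPTS (128)
 * candidate ports; on failure the window is halved and restarted at a
 * fresh random offset, so at most 128 + 64 + 32 + 16 + 8 = 248 tuples
 * are ever tested before giving up and leaving the clash to be resolved
 * (or the packet dropped) at confirmation time.
 */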

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __nf_conntrack_confirm and drop the packet.
 */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range2 *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (i.e. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips are not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (nf_in_range(orig_tuple, range)) {
			if (!nf_nat_used_tuple_new(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				return;
			}
		} else if (find_appropriate_src(net, zone,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
			    l4proto_in_range(tuple, maniptype,
					     &range->min_proto,
					     &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				return;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			return;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range2 *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
		maniptype != NF_NAT_MANIP_DST);

	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
		return NF_DROP;

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuple(&curr_tuple,
			   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuple(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;
		spinlock_t *lock;

		srchash = hash_by_src(net, nf_ct_zone(ct),
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
		spin_lock_bh(lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
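
/* Usage sketch (hedged, not part of this file): a masquerade-style
 * caller, running from its ruleset hook, might build a single-address
 * range and install it on the still-unconfirmed conntrack. "newsrc"
 * below is a hypothetical address chosen by the caller:
 *
 *	struct nf_nat_range2 range = {
 *		.flags		= NF_NAT_RANGE_MAP_IPS,
 *		.min_addr.ip	= newsrc,
 *		.max_addr.ip	= newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 */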

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (e.g. a local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range2 range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int verdict = NF_ACCEPT;
	unsigned long statusbit;

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit)
		verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

	return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
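
/* Worked example (illustrative only): IPS_NAT_MASK is
 * IPS_SRC_NAT | IPS_DST_NAT, so the XOR in nf_nat_packet() flips which
 * status bit is tested in the reply direction. For an SNAT connection
 * (IPS_SRC_NAT set): an ORIGINAL-direction packet at POST_ROUTING
 * (mtype SRC) tests IPS_SRC_NAT and has its source rewritten, while the
 * REPLY packet at PRE_ROUTING (mtype DST) also ends up testing
 * IPS_SRC_NAT after the flip and has its destination mapped back.
 */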

static bool in_vrf_postrouting(const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (state->hook == NF_INET_POST_ROUTING &&
	    netif_is_l3_master(state->out))
		return true;
#endif
	return false;
}

unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct || in_vrf_postrouting(state))
		return NF_ACCEPT;

	nat = nfct_nat(ct);

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* Only ICMPs can be IP_CT_IS_REPLY.  Fallthrough */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			struct nf_nat_lookup_hook_priv *lpriv = priv;
			struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
			unsigned int ret;
			int i;

			if (!e)
				goto null_bind;

			for (i = 0; i < e->num_hook_entries; i++) {
				ret = e->hooks[i].hook(e->hooks[i].priv, skb,
						       state);
				if (ret != NF_ACCEPT)
					return ret;
				if (nf_nat_initialized(ct, maniptype))
					goto do_nat;
			}
null_bind:
			ret = nf_nat_alloc_null_binding(ct, state->hook);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct, ct->status);
			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
					       state->out))
				goto oif_changed;
		}
		break;
	default:
		/* ESTABLISHED */
		WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
			ctinfo != IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
			goto oif_changed;
	}
do_nat:
	return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);

struct nf_nat_proto_clean {
	u8 l3proto;
	u8 l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	unsigned int h;

	h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	/* This module is being removed and the conntrack has a nat null
	 * binding.  Remove it from the bysource hash, as the table will
	 * be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * would delete the entry from the already-freed table.
	 */
	if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
		nf_nat_cleanup_conntrack(ct);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					  struct nf_nat_range2 *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max_proto.all = range->min_proto.all;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	int err;

	err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr,
					  protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	return nf_nat_l4proto_nlattr_to_range(tb, range);
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V4_MINIP]) {
		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	range->max_addr.ip = nla_get_be32_default(tb[CTA_NAT_V4_MAXIP],
						  range->min_addr.ip);

	return 0;
}

static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V6_MINIP]) {
		nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
			   sizeof(struct in6_addr));
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V6_MAXIP])
		nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
			   sizeof(struct in6_addr));
	else
		range->max_addr = range->min_addr;

	return 0;
}

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat,
					  nat_nla_policy, NULL);
	if (err < 0)
		return err;

	switch (nf_ct_l3num(ct)) {
	case NFPROTO_IPV4:
		err = nf_nat_ipv4_nlattr_to_range(tb, range);
		break;
	case NFPROTO_IPV6:
		err = nf_nat_ipv6_nlattr_to_range(tb, range);
		break;
	default:
		err = -EPROTONOSUPPORT;
		break;
	}

	if (err)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}
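
/* Attribute layout sketch (derived from the parsers above, hedged): a
 * ctnetlink request carries the NAT specification as nested attributes,
 * e.g. for IPv4 with a port range:
 *
 *	CTA_NAT_SRC (or CTA_NAT_DST)
 *	  CTA_NAT_V4_MINIP		first address of the range
 *	  CTA_NAT_V4_MAXIP		last address (defaults to MINIP)
 *	  CTA_NAT_PROTO
 *	    CTA_PROTONAT_PORT_MIN	first port (sets PROTO_SPECIFIED)
 *	    CTA_PROTONAT_PORT_MAX	last port (defaults to PORT_MIN)
 */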

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range2 range;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	unsigned int hooknum = ops->hooknum;
	struct nf_hook_ops *nat_ops;
	int i, ret;

	if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
		return -EINVAL;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	for (i = 0; i < ops_count; i++) {
		if (orig_nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}

	if (WARN_ON_ONCE(i == ops_count))
		return -EINVAL;

	mutex_lock(&nf_nat_proto_mutex);
	if (!nat_proto_net->nat_hook_ops) {
		WARN_ON(nat_proto_net->users != 0);

		nat_ops = kmemdup_array(orig_nat_ops, ops_count, sizeof(*orig_nat_ops), GFP_KERNEL);
		if (!nat_ops) {
			mutex_unlock(&nf_nat_proto_mutex);
			return -ENOMEM;
		}

		for (i = 0; i < ops_count; i++) {
			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (priv) {
				nat_ops[i].priv = priv;
				continue;
			}
			mutex_unlock(&nf_nat_proto_mutex);
			while (i)
				kfree(nat_ops[--i].priv);
			kfree(nat_ops);
			return -ENOMEM;
		}

		ret = nf_register_net_hooks(net, nat_ops, ops_count);
		if (ret < 0) {
			mutex_unlock(&nf_nat_proto_mutex);
			for (i = 0; i < ops_count; i++)
				kfree(nat_ops[i].priv);
			kfree(nat_ops);
			return ret;
		}

		nat_proto_net->nat_hook_ops = nat_ops;
	}

	nat_ops = nat_proto_net->nat_hook_ops;
	priv = nat_ops[hooknum].priv;
	if (WARN_ON_ONCE(!priv)) {
		mutex_unlock(&nf_nat_proto_mutex);
		return -EOPNOTSUPP;
	}

	ret = nf_hook_entries_insert_raw(&priv->entries, ops);
	if (ret == 0)
		nat_proto_net->users++;

	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}

void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	struct nf_hook_ops *nat_ops;
	int hooknum = ops->hooknum;
	int i;

	if (pf >= ARRAY_SIZE(nat_net->nat_proto_net))
		return;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	mutex_lock(&nf_nat_proto_mutex);
	if (WARN_ON(nat_proto_net->users == 0))
		goto unlock;

	nat_proto_net->users--;

	nat_ops = nat_proto_net->nat_hook_ops;
	for (i = 0; i < ops_count; i++) {
		if (nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}
	if (WARN_ON_ONCE(i == ops_count))
		goto unlock;
	priv = nat_ops[hooknum].priv;
	nf_hook_entries_delete_raw(&priv->entries, ops);

	if (nat_proto_net->users == 0) {
		nf_unregister_net_hooks(net, nat_ops, ops_count);

		for (i = 0; i < ops_count; i++) {
			priv = nat_ops[i].priv;
			kfree_rcu(priv, rcu_head);
		}

		nat_proto_net->nat_hook_ops = NULL;
		kfree(nat_ops);
	}
unlock:
	mutex_unlock(&nf_nat_proto_mutex);
}

static struct pernet_operations nat_net_ops = {
	.id	= &nat_net_id,
	.size	= sizeof(struct nat_net),
};

static const struct nf_nat_hook nat_hook = {
	.parse_nat_setup	= nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
	.decode_session		= __nf_nat_decode_session,
#endif
	.remove_nat_bysrc	= nf_nat_cleanup_conntrack,
};

static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	ret = register_pernet_subsys(&nat_net_ops);
	if (ret < 0) {
		kvfree(nf_nat_bysource);
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	WARN_ON(nf_nat_hook != NULL);
	RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

	ret = register_nf_nat_bpf();
	if (ret < 0) {
		RCU_INIT_POINTER(nf_nat_hook, NULL);
		nf_ct_helper_expectfn_unregister(&follow_master_nat);
		synchronize_net();
		unregister_pernet_subsys(&nat_net_ops);
		kvfree(nf_nat_bysource);
	}

	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_hook, NULL);

	synchronize_net();
	kvfree(nf_nat_bysource);
	unregister_pernet_subsys(&nat_net_ops);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Network address translation core");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);