/*
 * Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>


static unsigned int nat_net_id;
static struct tc_action_ops act_nat_ops;

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
	[TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
};

static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
			struct tc_action **a, int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);
	struct nlattr *tb[TCA_NAT_MAX + 1];
	struct tc_nat *parm;
	int ret = 0, err;
	struct tcf_nat *p;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_NAT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_NAT_PARMS]);

	if (!tcf_idr_check(tn, parm->index, a, bind)) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_nat_ops, bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;
		tcf_idr_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}
	p = to_tcf_nat(*a);

	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;

	p->tcf_action = parm->action;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}

static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = to_tcf_nat(a);
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;
	int noff;

	spin_lock(&p->tcf_lock);

	tcf_lastuse_update(&p->tcf_tm);
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;

	bstats_update(&p->tcf_bstats, skb);

	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	noff = skb_network_offset(skb);
	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
		goto drop;

	iph = ip_hdr(skb);

	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

	if (!((old_addr ^ addr) & mask)) {
		if (skb_try_make_writable(skb, sizeof(*iph) + noff))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		csum_replace4(&iph->check, addr, new_addr);
	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
		   iph->protocol != IPPROTO_ICMP) {
		goto out;
	}

	ihl = iph->ihl * 4;

	/* It would be nice to share code with stateful NAT. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
		    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
					 true);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
		    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, true);
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
				   noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
					  sizeof(*iph) + noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 false);
		break;
	}
	default:
		break;
	}

out:
	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = to_tcf_nat(a);
	struct tc_nat opt = {
		.old_addr = p->old_addr,
		.new_addr = p->new_addr,
		.mask = p->mask,
		.flags = p->flags,

		.index = p->tcf_index,
		.action = p->tcf_action,
		.refcnt = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_nat_ops = {
	.kind = "nat",
	.type = TCA_ACT_NAT,
	.owner = THIS_MODULE,
	.act = tcf_nat,
	.dump = tcf_nat_dump,
	.init = tcf_nat_init,
	.walk = tcf_nat_walker,
	.lookup = tcf_nat_search,
	.size = sizeof(struct tcf_nat),
};

static __net_init int nat_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);

	return tc_action_net_init(tn, &act_nat_ops);
}

static void __net_exit nat_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, nat_net_id);
}

static struct pernet_operations nat_net_ops = {
	.init = nat_init_net,
	.exit_batch = nat_exit_net,
	.id = &nat_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops, &nat_net_ops);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops, &nat_net_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);
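For readers tracing the translation step in tcf_nat() above, the following is a minimal, self-contained user-space sketch of the same logic: the masked address match, the rewrite that keeps the host bits outside the mask, and an RFC 1624-style incremental checksum adjustment in the spirit of what csum_replace4()/inet_proto_csum_replace4() do in the kernel. It is an illustration only, not part of this module; the helper names (nat_match, nat_rewrite, csum_update32) and the sample values are invented for the example.

/* Illustrative user-space sketch; names and values are made up for the example. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Match test used by tcf_nat(): the address equals old_addr under the mask. */
static bool nat_match(uint32_t addr, uint32_t old_addr, uint32_t mask)
{
	return !((old_addr ^ addr) & mask);
}

/* Rewrite: network part taken from new_addr, host part preserved from addr. */
static uint32_t nat_rewrite(uint32_t addr, uint32_t new_addr, uint32_t mask)
{
	return (new_addr & mask) | (addr & ~mask);
}

/* Incremental one's-complement checksum update after a 32-bit field changed
 * from 'from' to 'to' (RFC 1624, HC' = ~(~HC + ~m + m')). */
static uint16_t csum_update32(uint16_t check, uint32_t from, uint32_t to)
{
	uint32_t sum = (uint16_t)~check;

	sum += (~from >> 16) & 0xffff;	/* ~m, high half */
	sum += ~from & 0xffff;		/* ~m, low half */
	sum += (to >> 16) & 0xffff;	/* m', high half */
	sum += to & 0xffff;		/* m', low half */
	while (sum >> 16)		/* fold end-around carries */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}

int main(void)
{
	/* Addresses treated as opaque 32-bit values, much like __be32 in the
	 * kernel; the XOR/mask logic is byte-order agnostic as long as all
	 * values use the same representation. */
	uint32_t old_addr = 0xc0000201;	/* 192.0.2.1 */
	uint32_t new_addr = 0x0a000001;	/* 10.0.0.1 */
	uint32_t mask     = 0xffffffff;	/* /32 */
	uint32_t addr     = 0xc0000201;	/* address seen in the packet */
	uint16_t check    = 0x1c46;	/* arbitrary example checksum */

	if (nat_match(addr, old_addr, mask)) {
		uint32_t rewritten = nat_rewrite(addr, new_addr, mask);

		check = csum_update32(check, addr, rewritten);
		printf("rewritten: 0x%08x, adjusted checksum: 0x%04x\n",
		       (unsigned int)rewritten, (unsigned int)check);
	}
	return 0;
}

For reference, the action is normally attached from user space with iproute2, for example (illustrative device and addresses): "tc qdisc add dev eth0 ingress" followed by "tc filter add dev eth0 parent ffff: protocol ip u32 match ip dst 192.0.2.1/32 action nat ingress 192.0.2.1/32 10.0.0.1". The "egress" direction rewrites the source address instead, which corresponds to TCA_NAT_FLAG_EGRESS in the code above.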