/*
 * IPv6 specific functions of netfilter core
 *
 * Rusty Russell (C) 2000 -- This code is GPL.
 * Patrick McHardy (C) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/addrconf.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
#include <net/netfilter/nf_queue.h>
#include <net/netfilter/nf_conntrack_bridge.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include "../bridge/br_private.h"

/*
 * ip6_route_me_harder - redo the route lookup for a (possibly mangled) skb
 * @net:	namespace to route in
 * @sk_partial:	socket attached to the skb; may be a request/timewait sock,
 *		it is upgraded to the full socket via sk_to_full_sk()
 * @skb:	packet whose addresses/mark may have been rewritten
 *
 * Rebuilds a flowi6 from the current IPv6 header and skb->mark, performs a
 * fresh ip6_route_output() lookup, and replaces the skb's cached dst with
 * the result.  For multicast/link-local destinations the lookup is pinned
 * to the current output device (strict) unless the socket is bound to a
 * device.  Under CONFIG_XFRM, packets not already transformed are passed
 * through xfrm_lookup() so policy routing applies to the new route too.
 *
 * Returns 0 on success, a negative errno if no route exists, the xfrm
 * lookup fails, or headroom for the new device cannot be expanded.
 */
int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *sk = sk_to_full_sk(sk_partial);
	unsigned int hh_len;
	struct dst_entry *dst;
	int strict = (ipv6_addr_type(&iph->daddr) &
		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
	struct flowi6 fl6 = {
		/* Prefer the socket's bound device; otherwise pin the lookup
		 * to the current output device only for scoped destinations.
		 */
		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
			strict ? skb_dst(skb)->dev->ifindex : 0,
		.flowi6_mark = skb->mark,
		.flowi6_uid = sock_net_uid(net, sk),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};
	int err;

	dst = ip6_route_output(net, sk, &fl6);
	err = dst->error;
	if (err) {
		/* ip6_route_output() never returns NULL; on failure the
		 * error is carried in dst->error and the dst must still
		 * be released.
		 */
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		/* Detach dst from the skb before handing it to xfrm_lookup(),
		 * which consumes the reference (and may return an ERR_PTR).
		 */
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);

/*
 * nf_ip6_reroute - reroute a packet after it was reinjected from a queue
 * @skb:	the reinjected packet
 * @entry:	queue entry holding the pre-queue routing state
 *
 * For LOCAL_OUT packets only: if userspace (or a verdict handler) changed
 * the saddr, daddr or mark while the packet sat in the queue, the cached
 * route is stale and ip6_route_me_harder() is invoked to recompute it.
 * Returns 0 when no reroute is needed, otherwise the result of the
 * reroute.  Wired into nf_ipv6_ops below.
 */
static int nf_ip6_reroute(struct sk_buff *skb,
			  const struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);
		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
		    skb->mark != rt_info->mark)
			return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
	}
	return 0;
}

/*
 * __nf_ip6_route - route lookup helper for netfilter users (e.g. xtables)
 * @net:	namespace to route in
 * @dst:	out: the resulting dst_entry on success
 * @fl:		generic flow; only the ip6 member is used
 * @strict:	force an interface-restricted lookup
 *
 * When @strict is set, a static fake inet_sock with sk_bound_dev_if = 1 is
 * passed to ip6_route_output() purely so it sets RT6_LOOKUP_F_IFACE (see
 * the comment in the initializer); the fake socket is never dereferenced
 * beyond that.  Returns 0 and stores the dst, or a negative errno (taken
 * from dst->error) after releasing the failed dst.
 */
int __nf_ip6_route(struct net *net, struct dst_entry **dst,
		   struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;
	struct dst_entry *result;
	int err;

	result = ip6_route_output(net, sk, &fl->u.ip6);
	err = result->error;
	if (err)
		dst_release(result);
	else
		*dst = result;
	return err;
}
EXPORT_SYMBOL_GPL(__nf_ip6_route);

/*
 * br_ip6_fragment - fragment an IPv6 skb on behalf of bridge netfilter
 * @net:	namespace (for fragment id selection)
 * @sk:		socket passed through to @output
 * @skb:	over-sized packet; ownership is consumed on all paths
 * @data:	opaque bridge state forwarded to @output for each fragment
 * @output:	callback that transmits each fragment
 *
 * Mirrors ip6_fragment(): a fast path reuses an existing frag list when
 * every fragment already fits the mtu and has enough headroom (falling
 * back to the slow path for cloned/shared skbs), and a slow path that
 * carves fragments out of a linear skb via ip6_frag_state.  The mtu is
 * taken from the conntrack-recorded frag_max_size, bounded by the device
 * mtu and IPV6_MIN_MTU.  skb->tstamp is preserved on each fragment.
 *
 * Returns 0 on success.  Note the blackhole path frees the skb and also
 * returns 0 — unfragmentable packets are silently dropped, not reported.
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	ktime_t tstamp = skb->tstamp;
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;	/* unfragmentable header length */
	nexthdr = *prevhdr;

	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

	mtu = frag_max_size;
	/* Need room for the fragment header plus at least 8 bytes payload. */
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	/* Checksum must be resolved before the payload is split up. */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		/* Fast path is only valid if the head and every frag-list
		 * member individually fit the mtu and have headroom for
		 * the fragment header.
		 */
		if (first_len - hlen > mtu ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto blackhole;

		if (skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb->tstamp = tstamp;
			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		/* Transmit failed mid-list: free the not-yet-sent tail. */
		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		skb2->tstamp = tstamp;
		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	consume_skb(skb);
	return err;

blackhole:
	/* Silent drop: the skb is freed and 0 is returned deliberately. */
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);

/*
 * Indirection table consumed by the netfilter core through the RCU
 * pointer nf_ipv6_ops.  When IPv6 is built-in, callers can reach the
 * exported symbols directly and only the entries that have no built-in
 * alternative are filled in; when IPv6 is a module, every hook must be
 * reached through this table.
 */
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr		= ipv6_chk_addr,
	.route_me_harder	= ip6_route_me_harder,
	.dev_get_saddr		= ipv6_dev_get_saddr,
	.route			= __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
	.cookie_init_sequence	= __cookie_v6_init_sequence,
	.cookie_v6_check	= __cookie_v6_check,
#endif
#endif
	.route_input		= ip6_route_input,
	.fragment		= ip6_fragment,
	.reroute		= nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment		= br_ip6_fragment,
#endif
};

/* Publish the ops table; called once from IPv6 init. */
int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return 0;
}

/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
}