// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm6_input.c: based on net/ipv4/xfrm4_input.c
 *
 * Authors:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	YOSHIFUJI Hideaki @USAGI
 *		IPv6 support
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/gro.h>

/* Enter the generic xfrm input path for an IPv6 packet with an explicit
 * SPI and an optional ip6_tnl context @t.  Stashes the tunnel pointer
 * and the address-family metadata (family, offset of the destination
 * address within the IPv6 header) in the skb control blocks that
 * xfrm_input() consumes.
 */
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
		  struct ip6_tnl *t)
{
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	return xfrm_input(skb, nexthdr, spi, 0);
}
EXPORT_SYMBOL(xfrm6_rcv_spi);

/* okfn for the NF_INET_PRE_ROUTING hook taken in
 * xfrm6_transport_finish(): requeue the decapsulated packet for IPv6
 * receive processing via the xfrm trans queue.
 */
static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	/* A non-zero return from xfrm_trans_queue() means the packet
	 * could not be queued for ip6_rcv_finish(); drop it here.
	 */
	if (xfrm_trans_queue(skb, ip6_rcv_finish)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return 0;
}

/* Finish transport-mode input processing: restore the inner protocol
 * number in the extension-header chain, rebuild the IPv6 header in
 * front of the payload, and re-inject the packet — through GRO when
 * the transform was GRO-offloaded, otherwise through the
 * NF_INET_PRE_ROUTING netfilter hook.
 *
 * Returns 1 on the synchronous non-netfilter shortcut, 0 otherwise.
 */
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	/* number of header bytes currently in front of skb->data */
	int nhlen = -skb_network_offset(skb);

	/* Overwrite the nexthdr slot recorded at nhoff with the inner
	 * protocol now that the transform header has been removed.
	 */
	skb_network_header(skb)[IP6CB(skb)->nhoff] =
		XFRM_MODE_SKB_CB(skb)->protocol;

#ifndef CONFIG_NETFILTER
	/* Without netfilter there is no PRE_ROUTING hook to traverse
	 * again, so the synchronous path can return to the caller
	 * directly; only async completions fall through.
	 */
	if (!async)
		return 1;
#endif

	/* Re-push the network header, fix payload_len for the new
	 * length, and patch the checksum for the re-pushed bytes.
	 */
	__skb_push(skb, nhlen);
	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_postpush_rcsum(skb, skb_network_header(skb), nhlen);

	/* GRO path: the packet is handed back to the GRO engine, so
	 * only the mac/transport header pointers need rebuilding here.
	 */
	if (xo && (xo->flags & XFRM_GRO)) {
		skb_mac_header_rebuild(skb);
		skb_reset_transport_header(skb);
		return 0;
	}

	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		xfrm6_transport_finish2);
	return 0;
}

/* Classify and (optionally) strip an ESP-in-UDP encapsulation
 * (RFC 3948) on an encap-enabled UDP socket.
 *
 * Returns:
 *   1       - not for xfrm: socket has no encap, packet is IKE, or the
 *             header could not be pulled; pass to normal UDP receive.
 *   0       - genuine ESP-in-UDP; transport header now points at ESP.
 *   -EINVAL - keepalive eaten, unclone failed, or malformed length;
 *             caller should drop.
 *
 * @pull: true on the normal receive path (UDP header is pulled off the
 *        skb); false on the GRO path (header left in place, only the
 *        transport header offset is set).
 */
static int __xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb, bool pull)
{
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh;
	struct ipv6hdr *ip6h;
	int len;
	int ip6hlen = sizeof(struct ipv6hdr);
	__u8 *udpdata;
	__be32 *udpdata32;
	u16 encap_type;

	/* READ_ONCE: encap_type may be changed concurrently via
	 * setsockopt on the socket.
	 */
	encap_type = READ_ONCE(up->encap_type);
	/* if this is not encapsulated socket, then just return now */
	if (!encap_type)
		return 1;

	/* If this is a paged skb, make sure we pull up
	 * whatever data we need to look at. */
	len = skb->len - sizeof(struct udphdr);
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
		return 1;

	/* Now we can get the pointers */
	uh = udp_hdr(skb);
	udpdata = (__u8 *)uh + sizeof(struct udphdr);
	udpdata32 = (__be32 *)udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet. If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return -EINVAL;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_unclone(skb, GFP_ATOMIC))
		return -EINVAL;

	/* Now we can update and verify the packet length... */
	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(ntohs(ip6h->payload_len) - len);
	if (skb->len < ip6hlen + len) {
		/* packet is too small!?! */
		return -EINVAL;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	if (pull) {
		__skb_pull(skb, len);
		skb_reset_transport_header(skb);
	} else {
		skb_set_transport_header(skb, len);
	}

	/* process ESP */
	return 0;
}

/* If it's a keepalive packet, then just eat it.
 * If it's an encapsulated packet, then pass it to the
 * IPsec xfrm input.
 * Returns 0 if skb passed to xfrm or was dropped.
 * Returns >0 if skb should be passed to UDP.
 * Returns <0 if skb should be resubmitted (-ret is protocol)
 */
int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;

	/* IPv4-mapped traffic on a dual-stack socket: defer to the
	 * IPv4 handler.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return xfrm4_udp_encap_rcv(sk, skb);

	ret = __xfrm6_udp_encap_rcv(sk, skb, true);
	if (!ret)
		return xfrm6_rcv_encap(skb, IPPROTO_ESP, 0,
				       udp_sk(sk)->encap_type);

	if (ret < 0) {
		kfree_skb(skb);
		return 0;
	}

	return ret;
}

/* GRO counterpart of xfrm6_udp_encap_rcv(): peel the skb back to the
 * UDP header, classify it, and hand genuine ESP-in-UDP packets to the
 * registered ESP gro_receive callback.  Anything that is not
 * aggregatable is flagged for flush so it takes the normal receive
 * path instead.
 */
struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	int ret;

	if (skb->protocol == htons(ETH_P_IP))
		return xfrm4_gro_udp_encap_rcv(sk, head, skb);

	/* step back from the GRO offset to the start of the UDP header */
	offset = offset - sizeof(struct udphdr);

	if (!pskb_pull(skb, offset))
		return NULL;

	rcu_read_lock();
	ops = rcu_dereference(inet6_offloads[IPPROTO_ESP]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out;

	/* pull == false: keep the UDP header in place for GRO */
	ret = __xfrm6_udp_encap_rcv(sk, skb, false);
	if (ret)
		goto out;

	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
	rcu_read_unlock();

	return pp;

out:
	rcu_read_unlock();
	skb_push(skb, offset);
	/* cannot aggregate: force this packet out of the GRO engine */
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

/* Tunnel receive entry: the next-header value recorded at nhoff selects
 * the transform protocol; SPI 0 tells xfrm_input() to parse the SPI
 * from the packet itself.
 */
int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
{
	return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
			     0, t);
}
EXPORT_SYMBOL(xfrm6_rcv_tnl);

/* Plain IPv6 xfrm receive entry (no tunnel context). */
int xfrm6_rcv(struct sk_buff *skb)
{
	return xfrm6_rcv_tnl(skb, NULL);
}
EXPORT_SYMBOL(xfrm6_rcv);

/* Address-based inbound state lookup: find a matching xfrm state for
 * (@daddr, @saddr, @proto) and run its type->input() handler.  Used by
 * transforms looked up by address rather than SPI (presumably IPcomp —
 * confirm against callers).
 *
 * Returns 1 when a valid state accepted the packet, -1 on drop.
 */
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
		     xfrm_address_t *saddr, u8 proto)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_state *x = NULL;
	struct sec_path *sp;
	int i = 0;

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	/* refuse to nest one more transform beyond the maximum depth */
	if (1 + sp->len == XFRM_MAX_DEPTH) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
		goto drop;
	}

	/* Three lookup passes, most specific first: exact addresses,
	 * then wildcard source, then fully wildcard.
	 */
	for (i = 0; i < 3; i++) {
		xfrm_address_t *dst, *src;

		switch (i) {
		case 0:
			dst = daddr;
			src = saddr;
			break;
		case 1:
			/* lookup state with wild-card source address */
			dst = daddr;
			src = (xfrm_address_t *)&in6addr_any;
			break;
		default:
			/* lookup state with wild-card addresses */
			dst = (xfrm_address_t *)&in6addr_any;
			src = (xfrm_address_t *)&in6addr_any;
			break;
		}

		x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
		if (!x)
			continue;

		/* An SA with an explicit non-input direction must not
		 * match inbound traffic.
		 */
		if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}

		spin_lock(&x->lock);

		/* Wildcard matches (i != 0) are accepted only when the
		 * state opted in via XFRM_STATE_WILDRECV; the state must
		 * also be valid and not expired.
		 */
		if ((!i || (x->props.flags & XFRM_STATE_WILDRECV)) &&
		    likely(x->km.state == XFRM_STATE_VALID) &&
		    !xfrm_state_check_expire(x)) {
			spin_unlock(&x->lock);
			if (x->type->input(x, skb) > 0) {
				/* found a valid state */
				break;
			}
		} else
			spin_unlock(&x->lock);

		xfrm_state_put(x);
		x = NULL;
	}

	if (!x) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
		xfrm_audit_state_notfound_simple(skb, AF_INET6);
		goto drop;
	}

	/* record the state in the secpath; the loop's final reference
	 * is transferred here
	 */
	sp->xvec[sp->len++] = x;

	spin_lock(&x->lock);

	/* account this packet against the state's lifetime counters */
	x->curlft.bytes += skb->len;
	x->curlft.packets++;

	spin_unlock(&x->lock);

	return 1;

drop:
	return -1;
}
EXPORT_SYMBOL(xfrm6_input_addr);