// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	int encap_type;
	struct udphdr *uh;
	__be32 *udpdata32;
	__be16 sport, dport;
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph = esp->esph;
	unsigned int len;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
		return -EMSGSIZE;

	uh = (struct udphdr *)esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = (struct ip_esp_hdr *)(uh + 1);
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		esph = (struct ip_esp_hdr *)(udpdata32 + 2);
		break;
	}

	*skb_mac_header(skb) = IPPROTO_UDP;
	esp->esph = esph;

	return 0;
}

int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_udp_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);

int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}

static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
"(" : "", 9020dc49e9bSSteffen Klassert x->aalg ? x->aalg->alg_name : "digest_null", 9037021b2e1SHerbert Xu x->ealg->alg_name, 9047021b2e1SHerbert Xu x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) 9050dc49e9bSSteffen Klassert goto error; 9060dc49e9bSSteffen Klassert } 90738320c70SHerbert Xu 908f58869c4SSteffen Klassert aead = crypto_alloc_aead(authenc_name, 0, 0); 90938320c70SHerbert Xu err = PTR_ERR(aead); 91038320c70SHerbert Xu if (IS_ERR(aead)) 91138320c70SHerbert Xu goto error; 91238320c70SHerbert Xu 9131c5ad13fSMathias Krause x->data = aead; 91438320c70SHerbert Xu 91538320c70SHerbert Xu keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) + 91638320c70SHerbert Xu (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param)); 91738320c70SHerbert Xu err = -ENOMEM; 91838320c70SHerbert Xu key = kmalloc(keylen, GFP_KERNEL); 91938320c70SHerbert Xu if (!key) 92038320c70SHerbert Xu goto error; 92138320c70SHerbert Xu 92238320c70SHerbert Xu p = key; 92338320c70SHerbert Xu rta = (void *)p; 92438320c70SHerbert Xu rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; 92538320c70SHerbert Xu rta->rta_len = RTA_LENGTH(sizeof(*param)); 92638320c70SHerbert Xu param = RTA_DATA(rta); 92738320c70SHerbert Xu p += RTA_SPACE(sizeof(*param)); 92838320c70SHerbert Xu 9291da177e4SLinus Torvalds if (x->aalg) { 9301da177e4SLinus Torvalds struct xfrm_algo_desc *aalg_desc; 9311da177e4SLinus Torvalds 93238320c70SHerbert Xu memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8); 93338320c70SHerbert Xu p += (x->aalg->alg_key_len + 7) / 8; 9341da177e4SLinus Torvalds 9351da177e4SLinus Torvalds aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 9361da177e4SLinus Torvalds BUG_ON(!aalg_desc); 9371da177e4SLinus Torvalds 93838320c70SHerbert Xu err = -EINVAL; 9391da177e4SLinus Torvalds if (aalg_desc->uinfo.auth.icv_fullbits / 8 != 94038320c70SHerbert Xu crypto_aead_authsize(aead)) { 94145083497SJoe Perches pr_info("ESP: %s digestsize %u != %hu\n", 9421da177e4SLinus Torvalds x->aalg->alg_name, 94338320c70SHerbert Xu crypto_aead_authsize(aead), 94464ce2073SPatrick McHardy aalg_desc->uinfo.auth.icv_fullbits / 8); 94538320c70SHerbert Xu goto free_key; 9461da177e4SLinus Torvalds } 9471da177e4SLinus Torvalds 94838320c70SHerbert Xu err = crypto_aead_setauthsize( 9498f8a088cSMartin Willi aead, x->aalg->alg_trunc_len / 8); 95038320c70SHerbert Xu if (err) 95138320c70SHerbert Xu goto free_key; 9521da177e4SLinus Torvalds } 9534b7137ffSHerbert Xu 95438320c70SHerbert Xu param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); 95538320c70SHerbert Xu memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8); 95638320c70SHerbert Xu 95738320c70SHerbert Xu err = crypto_aead_setkey(aead, key, keylen); 95838320c70SHerbert Xu 95938320c70SHerbert Xu free_key: 96038320c70SHerbert Xu kfree(key); 96138320c70SHerbert Xu 9621a6509d9SHerbert Xu error: 9631a6509d9SHerbert Xu return err; 9641a6509d9SHerbert Xu } 9651a6509d9SHerbert Xu 9661a6509d9SHerbert Xu static int esp_init_state(struct xfrm_state *x) 9671a6509d9SHerbert Xu { 9681a6509d9SHerbert Xu struct crypto_aead *aead; 9691a6509d9SHerbert Xu u32 align; 9701a6509d9SHerbert Xu int err; 9711a6509d9SHerbert Xu 9721c5ad13fSMathias Krause x->data = NULL; 9731a6509d9SHerbert Xu 9741a6509d9SHerbert Xu if (x->aead) 9751a6509d9SHerbert Xu err = esp_init_aead(x); 9761a6509d9SHerbert Xu else 9771a6509d9SHerbert Xu err = esp_init_authenc(x); 9781a6509d9SHerbert Xu 97938320c70SHerbert Xu if (err) 9801da177e4SLinus Torvalds goto error; 98138320c70SHerbert Xu 9821c5ad13fSMathias Krause aead = x->data; 

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.input		= esp_input,
	.output		= esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp4_rcv_cb,
	.err_handler	= esp4_err,
	.priority	= 0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp_type, AF_INET);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);