xref: /linux/net/ipv4/ipcomp.c (revision ed3174d93c342b8b2eeba6bbd124707d55304a7b)
/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */
#include <linux/module.h>
#include <asm/semaphore.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/pfkeyv2.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ipcomp.h>
#include <net/protocol.h>

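/*
 * One ipcomp_tfms entry exists per compression algorithm in use.  It holds
 * a per-CPU array of crypto_comp transforms that is shared, refcounted via
 * 'users', by every IPComp state using that algorithm.
 */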
struct ipcomp_tfms {
	struct list_head list;
	struct crypto_comp **tfms;
	int users;
};

static DEFINE_MUTEX(ipcomp_resource_mutex);
static void **ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

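/*
 * Decompress the IPComp payload in place: inflate skb->data into this CPU's
 * scratch buffer, grow the skb by the size difference and copy the result
 * back.  Runs with preemption disabled via get_cpu() so the per-CPU scratch
 * buffer and transform stay stable.
 */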
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
	if (err)
		goto out;

	skb->truesize += dlen - plen;
	__skb_put(skb, dlen - plen);
	skb_copy_to_linear_data(skb, scratch, dlen);
out:
	put_cpu();
	return err;
}

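/*
 * xfrm input handler: strip the IPComp header, decompress the payload and
 * return the inner protocol number so xfrm can continue parsing.
 */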
static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int nexthdr;
	int err = -ENOMEM;
	struct ip_comp_hdr *ipch;

	if (skb_linearize_cow(skb))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	/* Remove ipcomp header and decompress original payload */
	ipch = (void *)skb->data;
	nexthdr = ipch->nexthdr;

	skb->transport_header = skb->network_header + sizeof(*ipch);
	__skb_pull(skb, sizeof(*ipch));
	err = ipcomp_decompress(x, skb);
	if (err)
		goto out;

	err = nexthdr;

out:
	return err;
}

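/*
 * Compress skb->data into this CPU's scratch buffer.  Only worthwhile if
 * the result plus the IPComp header is smaller than the original payload;
 * otherwise return -EMSGSIZE and leave the packet untouched.  On success
 * the compressed data is copied back after room left for the header, which
 * is written later by ipcomp_output().
 */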
static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);

	if (err)
		goto out;

	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	put_cpu();

	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
	return 0;

out:
	put_cpu();
	return err;
}

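/*
 * xfrm output handler: skip packets below the algorithm's threshold or ones
 * that fail to compress, otherwise install the IPComp header and rewrite
 * the outer protocol to IPPROTO_COMP.  Uncompressible packets are passed on
 * unchanged, which RFC 3173 permits.
 */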
static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);
	if (err)
		goto out_ok;

	/* Install ipcomp header, convert into ipcomp datagram. */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16)ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}

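/*
 * ICMP error handler: on a "fragmentation needed" error destined for an
 * IPComp SA, look the state up by CPI (carried in the low 16 bits of the
 * SPI) and log the PMTU discovery event.
 */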
static void ipcomp4_err(struct sk_buff *skb, u32 info)
{
	__be32 spi;
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	spi = htonl(ntohs(ipch->cpi));
	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
			      spi, IPPROTO_COMP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
		 spi, NIPQUAD(iph->daddr));
	xfrm_state_put(x);
}

/* We always hold one tunnel user reference to indicate a tunnel */
static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
	struct xfrm_state *t;

	t = xfrm_state_alloc();
	if (t == NULL)
		goto out;

	t->id.proto = IPPROTO_IPIP;
	t->id.spi = x->props.saddr.a4;
	t->id.daddr.a4 = x->id.daddr.a4;
	memcpy(&t->sel, &x->sel, sizeof(t->sel));
	t->props.family = AF_INET;
	t->props.mode = x->props.mode;
	t->props.saddr.a4 = x->props.saddr.a4;
	t->props.flags = x->props.flags;

	if (xfrm_init_state(t))
		goto error;

	atomic_set(&t->tunnel_users, 1);
out:
	return t;

error:
	t->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(t);
	t = NULL;
	goto out;
}

/*
 * Must be protected by xfrm_cfg_mutex.  State and tunnel user references are
 * always incremented on success.
 */
static int ipcomp_tunnel_attach(struct xfrm_state *x)
{
	int err = 0;
	struct xfrm_state *t;

	t = xfrm_state_lookup((xfrm_address_t *)&x->id.daddr.a4,
			      x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
	if (!t) {
		t = ipcomp_tunnel_create(x);
		if (!t) {
			err = -EINVAL;
			goto out;
		}
		xfrm_state_insert(t);
		xfrm_state_hold(t);
	}
	x->tunnel = t;
	atomic_inc(&t->tunnel_users);
out:
	return err;
}

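/*
 * Scratch buffers: one IPCOMP_SCRATCH_SIZE vmalloc'ed buffer per possible
 * CPU, shared by all IPComp states and refcounted by ipcomp_scratch_users.
 * Both helpers below are called under ipcomp_resource_mutex.
 */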
static void ipcomp_free_scratches(void)
{
	int i;
	void **scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void **ipcomp_alloc_scratches(void)
{
	int i;
	void **scratches;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}

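/*
 * Compression transforms are likewise allocated per possible CPU and shared
 * between states using the same algorithm, tracked on ipcomp_tfms_list.
 * Both helpers below are called under ipcomp_resource_mutex.
 */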
static void ipcomp_free_tfms(struct crypto_comp **tfms)
{
	struct ipcomp_tfms *pos;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		if (pos->tfms == tfms)
			break;
	}

	BUG_TRAP(pos);

	if (--pos->users)
		return;

	list_del(&pos->list);
	kfree(pos);

	if (!tfms)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
		crypto_free_comp(tfm);
	}
	free_percpu(tfms);
}

static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
{
	struct ipcomp_tfms *pos;
	struct crypto_comp **tfms;
	int cpu;

	/* This can be any valid CPU ID so we don't need locking. */
	cpu = raw_smp_processor_id();

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		struct crypto_comp *tfm;

		tfms = pos->tfms;
		tfm = *per_cpu_ptr(tfms, cpu);

		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
			pos->users++;
			return tfms;
		}
	}

	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
	if (!pos)
		return NULL;

	pos->users = 1;
	INIT_LIST_HEAD(&pos->list);
	list_add(&pos->list, &ipcomp_tfms_list);

	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		goto error;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto error;
		*per_cpu_ptr(tfms, cpu) = tfm;
	}

	return tfms;

error:
	ipcomp_free_tfms(tfms);
	return NULL;
}

static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfms)
		ipcomp_free_tfms(ipcd->tfms);
	ipcomp_free_scratches();
}

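/*
 * Tear down an IPComp state: drop its tunnel, release the shared per-CPU
 * resources under ipcomp_resource_mutex and free the private data.
 */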
static void ipcomp_destroy(struct xfrm_state *x)
{
	struct ipcomp_data *ipcd = x->data;
	if (!ipcd)
		return;
	xfrm_state_delete_tunnel(x);
	mutex_lock(&ipcomp_resource_mutex);
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
}

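/*
 * Initialise an IPComp state: validate the configuration (a compression
 * algorithm is mandatory, encapsulation is not supported), size the header
 * for the chosen mode, allocate the shared scratch buffers and transforms,
 * and attach the IPIP tunnel state in tunnel mode.
 */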
static int ipcomp_init_state(struct xfrm_state *x)
{
	int err;
	struct ipcomp_data *ipcd;
	struct xfrm_algo_desc *calg_desc;

	err = -EINVAL;
	if (!x->calg)
		goto out;

	if (x->encap)
		goto out;

	x->props.header_len = 0;
	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct iphdr);
		break;
	default:
		goto out;
	}

	err = -ENOMEM;
	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
	if (!ipcd)
		goto out;

	mutex_lock(&ipcomp_resource_mutex);
	if (!ipcomp_alloc_scratches())
		goto error;

	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
	if (!ipcd->tfms)
		goto error;
	mutex_unlock(&ipcomp_resource_mutex);

	if (x->props.mode == XFRM_MODE_TUNNEL) {
		err = ipcomp_tunnel_attach(x);
		if (err)
			goto error_tunnel;
	}

	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
	BUG_ON(!calg_desc);
	ipcd->threshold = calg_desc->uinfo.comp.threshold;
	x->data = ipcd;
	err = 0;
out:
	return err;

error_tunnel:
	mutex_lock(&ipcomp_resource_mutex);
error:
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
	goto out;
}

static const struct xfrm_type ipcomp_type = {
	.description	= "IPCOMP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_COMP,
	.init_state	= ipcomp_init_state,
	.destructor	= ipcomp_destroy,
	.input		= ipcomp_input,
	.output		= ipcomp_output
};

static struct net_protocol ipcomp4_protocol = {
	.handler	=	xfrm4_rcv,
	.err_handler	=	ipcomp4_err,
	.no_policy	=	1,
};

static int __init ipcomp4_init(void)
{
	if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) {
		printk(KERN_INFO "ipcomp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
		printk(KERN_INFO "ipcomp init: can't add protocol\n");
		xfrm_unregister_type(&ipcomp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ipcomp4_fini(void)
{
	if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0)
		printk(KERN_INFO "ip ipcomp close: can't remove protocol\n");
	if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0)
		printk(KERN_INFO "ip ipcomp close: can't remove xfrm type\n");
}

module_init(ipcomp4_init);
module_exit(ipcomp4_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");

MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP);