// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "messages.h"
#include "compression.h"
#include "ctree.h"
#include "super.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     the data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For an inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, thus it's possible
 *     to have at most 3 padding zeros at the end of a sector.
 *
 * 2.2 Data payload
 *     Variable size. The upper size limit is lzo1x_worst_compress(sectorsize),
 *     i.e. sectorsize + sectorsize / 16 + 67, which is 4419 for a 4KiB
 *     sectorsize.
 *
 * Example with 4KiB sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */
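
/*
 * Illustrative sketch (not part of the build; for documentation only) of how
 * a reader could walk the format described above, assuming the whole
 * compressed extent has been copied into a flat buffer @data.  The variable
 * names and the decompress() call are hypothetical; get_unaligned_le32() and
 * round_up() are the usual kernel helpers.
 *
 *	u32 total = get_unaligned_le32(data);	// 1. Header: total size
 *	u32 cur = LZO_LEN;
 *
 *	while (cur < total) {
 *		// 2.1 Segment header: size of the following payload
 *		u32 seg_len = get_unaligned_le32(data + cur);
 *
 *		cur += LZO_LEN;
 *		decompress(data + cur, seg_len);	// 2.2 Data payload
 *		cur += seg_len;
 *
 *		// Skip the padding zeros if the rest of the sector cannot
 *		// hold another segment header
 *		if (sectorsize - (cur % sectorsize) < LZO_LEN)
 *			cur = round_up(cur, sectorsize);
 *	}
 */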

#define WORKSPACE_BUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
#define WORKSPACE_CBUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))

struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL);
	workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

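/*
 * Note: the two helpers below copy the LE32 length fields with memcpy()
 * because a segment header can start at any byte offset inside a page
 * (payload sizes are arbitrary), so the access may be unaligned.
 */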
static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}

/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header.
 *   If not, pad the remaining (at most LZO_LEN - 1, i.e. 3) bytes with zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header to cross a sector boundary; the
	 * previous run should have ensured there is enough space left inside
	 * the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap_local_page(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap_local(kaddr);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap_local_page(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining space is not enough; pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0,
	       sector_bytes_left);
	*cur_out += sector_bytes_left;

out:
	kunmap_local(kaddr);
	return 0;
}
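
/*
 * Worked example of the padding rule above (illustrative numbers, 4KiB
 * sectors): if a data payload ends at output offset 0x0ffd, only 3 bytes
 * are left in the sector, which cannot hold another LZO_LEN (4) byte
 * segment header, so 3 zero bytes are written and the next segment header
 * starts at offset 0x1000.
 */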

int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* The current file offset of the input data */
	u64 cur_in = start;
	/* The current output byte offset */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now; we will come back later and write the
	 * total compressed size.
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap_local_page(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap_local(data_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if we're making the data bigger after two sectors;
		 * if so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached a page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Go back and write the total compressed size into the header */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * The payload has no padding; we only need to handle page switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		memcpy_from_page(dest + *cur_in - orig_in, cur_page,
				 offset_in_page(*cur_in), copy_len);

		*cur_in += copy_len;
	}
}

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap_local_page(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap_local(kaddr);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.  If either condition is violated,
	 * the compressed extent is corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside the current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap_local_page(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap_local(kaddr);
		cur_in += LZO_LEN;

		if (seg_len > WORKSPACE_CBUF_LENGTH) {
			/*
			 * seg_len should never be larger than what we have
			 * allocated for workspace->cbuf.
			 */
			btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
					seg_len);
			ret = -EIO;
			goto out;
		}

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

int lzo_decompress(struct list_head *ws, const u8 *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	/* The first LE32 is the total (header-included) compressed size */
	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	/*
	 * The second LE32 is the lone segment header; an inlined extent has
	 * exactly one segment, so its payload must fill the rest of the data.
	 */
	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's
	 * move this check closer to the memcpy/memset.
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock is doing a zero on the tail of the page too,
	 * but this will cover anything missing from the decompressed
	 * data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};