xref: /linux/fs/btrfs/lzo.c (revision f3827213abae9291b7525b05e6fd29b1f0536ce6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2008 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/slab.h>
8 #include <linux/mm.h>
9 #include <linux/init.h>
10 #include <linux/err.h>
11 #include <linux/sched.h>
12 #include <linux/pagemap.h>
13 #include <linux/bio.h>
14 #include <linux/lzo.h>
15 #include <linux/refcount.h>
16 #include "messages.h"
17 #include "compression.h"
18 #include "ctree.h"
19 #include "super.h"
20 #include "btrfs_inode.h"
21 
22 #define LZO_LEN	4
23 
24 /*
25  * Btrfs LZO compression format
26  *
27  * Regular and inlined LZO compressed data extents consist of:
28  *
29  * 1.  Header
30  *     Fixed size. LZO_LEN (4) bytes long, LE32.
31  *     Records the total size (including the header) of compressed data.
32  *
33  * 2.  Segment(s)
34  *     Variable size. Each segment includes one segment header, followed by data
35  *     payload.
36  *     One regular LZO compressed extent can have one or more segments.
37  *     For an inlined LZO compressed extent, only one segment is allowed.
38  *     One segment represents at most one sector of uncompressed data.
39  *
40  * 2.1 Segment header
41  *     Fixed size. LZO_LEN (4) bytes long, LE32.
42  *     Records the total size of the segment (not including the header).
43  *     A segment header never crosses a sector boundary, thus a sector can
44  *     end with at most 3 (LZO_LEN - 1) padding zeros.
45  *
46  * 2.2 Data Payload
47  *     Variable size. The upper size limit is lzo1x_worst_compress(sectorsize),
48  *     which is 4419 bytes for a 4KiB sectorsize.
49  *
50  * Example with 4K sectorsize:
51  * Page 1:
52  *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
53  * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
54  * ...
55  * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
56  *                                                          ^^ padding zeros
57  * Page 2:
58  * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
59  */
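
/*
 * Worked size example (the values are purely illustrative): with a 4K
 * sectorsize, two sectors that compress to 100 and 200 bytes are written as
 *
 *   Header   = 4 + (4 + 100) + (4 + 200) = 312, stored as LE32
 *   SegHdr 1 = 100 (LE32), followed by 100 bytes of payload
 *   SegHdr 2 = 200 (LE32), followed by 200 bytes of payload
 *
 * Both segment headers land inside the first sector, so no padding is needed
 * in this case.
 */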
60 
61 struct workspace {
62 	void *mem;	/* scratch memory for lzo1x_1_compress() */
63 	void *buf;	/* where decompressed data goes */
64 	void *cbuf;	/* where compressed data goes */
65 	struct list_head list;
66 };
67 
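/*
 * Both workspace buffers are sized for the worst-case LZO output of a single
 * sector, since one segment never represents more than one sector of
 * uncompressed data.
 */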
68 static u32 workspace_buf_length(const struct btrfs_fs_info *fs_info)
69 {
70 	return lzo1x_worst_compress(fs_info->sectorsize);
71 }
72 static u32 workspace_cbuf_length(const struct btrfs_fs_info *fs_info)
73 {
74 	return lzo1x_worst_compress(fs_info->sectorsize);
75 }
76 
77 void lzo_free_workspace(struct list_head *ws)
78 {
79 	struct workspace *workspace = list_entry(ws, struct workspace, list);
80 
81 	kvfree(workspace->buf);
82 	kvfree(workspace->cbuf);
83 	kvfree(workspace->mem);
84 	kfree(workspace);
85 }
86 
87 struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info)
88 {
89 	struct workspace *workspace;
90 
91 	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
92 	if (!workspace)
93 		return ERR_PTR(-ENOMEM);
94 
95 	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
96 	workspace->buf = kvmalloc(workspace_buf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
97 	workspace->cbuf = kvmalloc(workspace_cbuf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
98 	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
99 		goto fail;
100 
101 	INIT_LIST_HEAD(&workspace->list);
102 
103 	return &workspace->list;
104 fail:
105 	lzo_free_workspace(&workspace->list);
106 	return ERR_PTR(-ENOMEM);
107 }
108 
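/*
 * The header and segment headers are raw little-endian u32 values (see the
 * format description above); these helpers convert between the on-disk
 * representation and CPU byte order.
 */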
109 static inline void write_compress_length(char *buf, size_t len)
110 {
111 	__le32 dlen;
112 
113 	dlen = cpu_to_le32(len);
114 	memcpy(buf, &dlen, LZO_LEN);
115 }
116 
117 static inline size_t read_compress_length(const char *buf)
118 {
119 	__le32 dlen;
120 
121 	memcpy(&dlen, buf, LZO_LEN);
122 	return le32_to_cpu(dlen);
123 }
124 
125 /*
126  * Will do:
127  *
128  * - Write a segment header into the destination
129  * - Copy the compressed buffer into the destination
130  * - Make sure we have enough space in the last sector to fit a segment header
131  *   If not, we will pad the sector with at most LZO_LEN - 1 (i.e. 3) zero bytes.
132  *
133  * Will allocate new folios when needed.
134  */
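
/*
 * A purely illustrative example of the padding rule: if a payload ends at
 * byte 0x0ffd of a 4K sector, only 3 bytes remain in that sector, which is
 * less than LZO_LEN, so those 3 bytes are zeroed and the next segment header
 * starts at the following sector boundary (0x1000).
 */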
135 static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
136 					char *compressed_data,
137 					size_t compressed_size,
138 					struct folio **out_folios,
139 					unsigned long max_nr_folio,
140 					u32 *cur_out)
141 {
142 	const u32 sectorsize = fs_info->sectorsize;
143 	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
144 	u32 sector_bytes_left;
145 	u32 orig_out;
146 	struct folio *cur_folio;
147 	char *kaddr;
148 
149 	if ((*cur_out >> min_folio_shift) >= max_nr_folio)
150 		return -E2BIG;
151 
152 	/*
153 	 * We never allow a segment header to cross a sector boundary; the
154 	 * previous run should have left enough space inside the sector.
155 	 */
156 	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
157 
158 	cur_folio = out_folios[*cur_out >> min_folio_shift];
159 	/* Allocate a new folio */
160 	if (!cur_folio) {
161 		cur_folio = btrfs_alloc_compr_folio(fs_info);
162 		if (!cur_folio)
163 			return -ENOMEM;
164 		out_folios[*cur_out >> min_folio_shift] = cur_folio;
165 	}
166 
167 	kaddr = kmap_local_folio(cur_folio, offset_in_folio(cur_folio, *cur_out));
168 	write_compress_length(kaddr, compressed_size);
169 	*cur_out += LZO_LEN;
170 
171 	orig_out = *cur_out;
172 
173 	/* Copy compressed data */
174 	while (*cur_out - orig_out < compressed_size) {
175 		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
176 				     orig_out + compressed_size - *cur_out);
177 
178 		kunmap_local(kaddr);
179 
180 		if ((*cur_out >> min_folio_shift) >= max_nr_folio)
181 			return -E2BIG;
182 
183 		cur_folio = out_folios[*cur_out >> min_folio_shift];
184 		/* Allocate a new folio */
185 		if (!cur_folio) {
186 			cur_folio = btrfs_alloc_compr_folio(fs_info);
187 			if (!cur_folio)
188 				return -ENOMEM;
189 			out_folios[*cur_out >> min_folio_shift] = cur_folio;
190 		}
191 		kaddr = kmap_local_folio(cur_folio, 0);
192 
193 		memcpy(kaddr + offset_in_folio(cur_folio, *cur_out),
194 		       compressed_data + *cur_out - orig_out, copy_len);
195 
196 		*cur_out += copy_len;
197 	}
198 
199 	/*
200 	 * Check if we can fit the next segment header into the remaining space
201 	 * of the sector.
202 	 */
203 	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
204 	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
205 		goto out;
206 
207 	/* The remaining size is not enough, pad it with zeros */
208 	memset(kaddr + offset_in_page(*cur_out), 0,
209 	       sector_bytes_left);
210 	*cur_out += sector_bytes_left;
211 
212 out:
213 	kunmap_local(kaddr);
214 	return 0;
215 }
216 
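/*
 * Compress a file range into the format described at the top of this file.
 *
 * On entry *total_out holds the length of the input range starting at @start
 * and *out_folios the maximum number of output folios that may be used; on
 * success *total_out and *total_in are updated to the compressed size and the
 * number of input bytes consumed, and *out_folios to the folios used.
 */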
217 int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
218 			u64 start, struct folio **folios, unsigned long *out_folios,
219 			unsigned long *total_in, unsigned long *total_out)
220 {
221 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
222 	struct workspace *workspace = list_entry(ws, struct workspace, list);
223 	const u32 sectorsize = fs_info->sectorsize;
224 	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
225 	struct address_space *mapping = inode->vfs_inode.i_mapping;
226 	struct folio *folio_in = NULL;
227 	char *sizes_ptr;
228 	const unsigned long max_nr_folio = *out_folios;
229 	int ret = 0;
230 	/* Current file offset of the input data */
231 	u64 cur_in = start;
232 	/* Current byte offset in the output */
233 	u32 cur_out = 0;
234 	u32 len = *total_out;
235 
236 	ASSERT(max_nr_folio > 0);
237 	*out_folios = 0;
238 	*total_out = 0;
239 	*total_in = 0;
240 
241 	/*
242 	 * Skip the header for now; we will come back later and write the total
243 	 * compressed size.
244 	 */
245 	cur_out += LZO_LEN;
246 	while (cur_in < start + len) {
247 		char *data_in;
248 		const u32 sectorsize_mask = sectorsize - 1;
249 		u32 sector_off = (cur_in - start) & sectorsize_mask;
250 		u32 in_len;
251 		size_t out_len;
252 
253 		/* Get the input folio first */
254 		if (!folio_in) {
255 			ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
256 			if (ret < 0)
257 				goto out;
258 		}
259 
260 		/* Compress at most one sector of data each time */
261 		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
262 		ASSERT(in_len);
263 		data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in));
264 		ret = lzo1x_1_compress(data_in, in_len,
265 				       workspace->cbuf, &out_len,
266 				       workspace->mem);
267 		kunmap_local(data_in);
268 		if (unlikely(ret < 0)) {
269 			/* lzo1x_1_compress never fails. */
270 			ret = -EIO;
271 			goto out;
272 		}
273 
274 		ret = copy_compressed_data_to_page(fs_info, workspace->cbuf, out_len,
275 						   folios, max_nr_folio,
276 						   &cur_out);
277 		if (ret < 0)
278 			goto out;
279 
280 		cur_in += in_len;
281 
282 		/*
283 		 * Check if compression is making the data bigger after two
284 		 * sectors; if so, give up.
285 		 */
286 		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
287 			ret = -E2BIG;
288 			goto out;
289 		}
290 
291 		/* Check if we have reached the folio boundary. */
292 		if (IS_ALIGNED(cur_in, min_folio_size)) {
293 			folio_put(folio_in);
294 			folio_in = NULL;
295 		}
296 	}
297 
298 	/* Store the total compressed size into the header */
299 	sizes_ptr = kmap_local_folio(folios[0], 0);
300 	write_compress_length(sizes_ptr, cur_out);
301 	kunmap_local(sizes_ptr);
302 
303 	ret = 0;
304 	*total_out = cur_out;
305 	*total_in = cur_in - start;
306 out:
307 	if (folio_in)
308 		folio_put(folio_in);
309 	*out_folios = DIV_ROUND_UP(cur_out, min_folio_size);
310 	return ret;
311 }
312 
313 /*
314  * Copy the compressed segment payload into @dest.
315  *
316  * For the payload there will be no padding, just need to do page switching.
317  */
318 static void copy_compressed_segment(struct compressed_bio *cb,
319 				    char *dest, u32 len, u32 *cur_in)
320 {
321 	struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
322 	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
323 	u32 orig_in = *cur_in;
324 
325 	while (*cur_in < orig_in + len) {
326 		struct folio *cur_folio = cb->compressed_folios[*cur_in >> min_folio_shift];
327 		u32 copy_len = min_t(u32, orig_in + len - *cur_in,
328 				     folio_size(cur_folio) - offset_in_folio(cur_folio, *cur_in));
329 
330 		ASSERT(copy_len);
331 
332 		memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
333 				  offset_in_folio(cur_folio, *cur_in), copy_len);
334 
335 		*cur_in += copy_len;
336 	}
337 }
338 
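/*
 * Decompress a whole compressed bio: validate the header, then walk the
 * segments one by one, decompressing each into the workspace buffer and
 * copying the result into the inode's pages.
 */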
339 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
340 {
341 	struct workspace *workspace = list_entry(ws, struct workspace, list);
342 	const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
343 	const u32 sectorsize = fs_info->sectorsize;
344 	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
345 	char *kaddr;
346 	int ret;
347 	/* Compressed data length, can be unaligned */
348 	u32 len_in;
349 	/* Offset inside the compressed data */
350 	u32 cur_in = 0;
351 	/* Bytes decompressed so far */
352 	u32 cur_out = 0;
353 
354 	kaddr = kmap_local_folio(cb->compressed_folios[0], 0);
355 	len_in = read_compress_length(kaddr);
356 	kunmap_local(kaddr);
357 	cur_in += LZO_LEN;
358 
359 	/*
360 	 * LZO header length check
361 	 *
362 	 * The total length should not exceed the maximum extent length,
363 	 * and all sectors of the compressed extent should be used.
364 	 * If either check fails, the compressed extent is corrupted.
365 	 */
366 	if (unlikely(len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
367 		     round_up(len_in, sectorsize) < cb->compressed_len)) {
368 		struct btrfs_inode *inode = cb->bbio.inode;
369 
370 		btrfs_err(fs_info,
371 "lzo header invalid, root %llu inode %llu offset %llu lzo len %u compressed len %u",
372 			  btrfs_root_id(inode->root), btrfs_ino(inode),
373 			  cb->start, len_in, cb->compressed_len);
374 		return -EUCLEAN;
375 	}
376 
377 	/* Go through each lzo segment */
378 	while (cur_in < len_in) {
379 		struct folio *cur_folio;
380 		/* Length of the compressed segment */
381 		u32 seg_len;
382 		u32 sector_bytes_left;
383 		size_t out_len = lzo1x_worst_compress(sectorsize);
384 
385 		/*
386 		 * We should always have enough space for one segment header
387 		 * inside current sector.
388 		 */
389 		ASSERT(cur_in / sectorsize ==
390 		       (cur_in + LZO_LEN - 1) / sectorsize);
391 		cur_folio = cb->compressed_folios[cur_in >> min_folio_shift];
392 		ASSERT(cur_folio);
393 		kaddr = kmap_local_folio(cur_folio, 0);
394 		seg_len = read_compress_length(kaddr + offset_in_folio(cur_folio, cur_in));
395 		kunmap_local(kaddr);
396 		cur_in += LZO_LEN;
397 
398 		if (unlikely(seg_len > workspace_cbuf_length(fs_info))) {
399 			struct btrfs_inode *inode = cb->bbio.inode;
400 
401 			/*
402 			 * seg_len shouldn't be larger than we have allocated
403 			 * for workspace->cbuf
404 			 */
405 			btrfs_err(fs_info,
406 			"lzo segment too big, root %llu inode %llu offset %llu len %u",
407 				  btrfs_root_id(inode->root), btrfs_ino(inode),
408 				  cb->start, seg_len);
409 			return -EIO;
410 		}
411 
412 		/* Copy the compressed segment payload into workspace */
413 		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
414 
415 		/* Decompress the data */
416 		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
417 					    workspace->buf, &out_len);
418 		if (unlikely(ret != LZO_E_OK)) {
419 			struct btrfs_inode *inode = cb->bbio.inode;
420 
421 			btrfs_err(fs_info,
422 		"lzo decompression failed, error %d root %llu inode %llu offset %llu",
423 				  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
424 				  cb->start);
425 			return -EIO;
426 		}
427 
428 		/* Copy the data into inode pages */
429 		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
430 		cur_out += out_len;
431 
432 		/* All data read, exit */
433 		if (ret == 0)
434 			return 0;
435 		ret = 0;
436 
437 		/* Check if the sector has enough space for a segment header */
438 		sector_bytes_left = sectorsize - (cur_in % sectorsize);
439 		if (sector_bytes_left >= LZO_LEN)
440 			continue;
441 
442 		/* Skip the padding zeros */
443 		cur_in += sector_bytes_left;
444 	}
445 
446 	return 0;
447 }
448 
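/*
 * Decompress a buffer holding the header plus exactly one segment (e.g. an
 * inlined extent, which is limited to a single segment) into one block of
 * @dest_folio at @dest_pgoff.
 */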
449 int lzo_decompress(struct list_head *ws, const u8 *data_in,
450 		struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
451 		size_t destlen)
452 {
453 	struct workspace *workspace = list_entry(ws, struct workspace, list);
454 	struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
455 	const u32 sectorsize = fs_info->sectorsize;
456 	size_t in_len;
457 	size_t out_len;
458 	size_t max_segment_len = workspace_buf_length(fs_info);
459 	int ret = 0;
460 
461 	if (unlikely(srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2))
462 		return -EUCLEAN;
463 
464 	in_len = read_compress_length(data_in);
465 	if (unlikely(in_len != srclen))
466 		return -EUCLEAN;
467 	data_in += LZO_LEN;
468 
469 	in_len = read_compress_length(data_in);
470 	if (unlikely(in_len != srclen - LZO_LEN * 2)) {
471 		ret = -EUCLEAN;
472 		goto out;
473 	}
474 	data_in += LZO_LEN;
475 
476 	out_len = sectorsize;
477 	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
478 	if (unlikely(ret != LZO_E_OK)) {
479 		struct btrfs_inode *inode = folio_to_inode(dest_folio);
480 
481 		btrfs_err(fs_info,
482 		"lzo decompression failed, error %d root %llu inode %llu offset %llu",
483 			  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
484 			  folio_pos(dest_folio));
485 		ret = -EIO;
486 		goto out;
487 	}
488 
489 	ASSERT(out_len <= sectorsize);
490 	memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, out_len);
491 	/* The decompressed data ending early is considered an error. */
492 	if (unlikely(out_len < destlen)) {
493 		ret = -EIO;
494 		folio_zero_range(dest_folio, dest_pgoff + out_len, destlen - out_len);
495 	}
496 out:
497 	return ret;
498 }
499 
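/* LZO has no tunable compression levels, so only a single level is exposed. */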
500 const struct btrfs_compress_levels  btrfs_lzo_compress = {
501 	.max_level		= 1,
502 	.default_level		= 1,
503 };
504