// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "messages.h"
#include "compression.h"
#include "ctree.h"
#include "super.h"
#include "btrfs_inode.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by the
 *     data payload.
 *     A regular LZO compressed extent can have one or more segments.
 *     An inlined LZO compressed extent is allowed only one segment.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, thus a sector can end
 *     with at most LZO_LEN - 1 (3) padding zeros.
 *
 * 2.2 Data payload
 *     Variable size. The upper size limit is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example with 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */

#define WORKSPACE_BUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
#define WORKSPACE_CBUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))

struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;
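
/*
 * Workspaces are handed out and recycled by the generic workspace manager in
 * compression.c, which calls lzo_alloc_workspace() and lzo_free_workspace()
 * below as the LZO-specific hooks.
 */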

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
	workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL | __GFP_NOWARN);
	workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL | __GFP_NOWARN);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}
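
/*
 * Illustrative sketch only (not part of the original btrfs code and never
 * called): shows how the on-disk format described at the top of this file is
 * walked with the helper above, assuming the whole compressed extent sits in
 * one contiguous buffer.  The real code below works folio by folio instead.
 * Returns the number of segments, or -1 if a segment header would run past
 * the total length recorded in the extent header.
 */
static inline int lzo_walk_segments_sketch(const char *buf, u32 sectorsize)
{
	/* Extent header: total size of the compressed data, header included. */
	u32 total_len = read_compress_length(buf);
	u32 cur = LZO_LEN;
	int nr_segments = 0;

	while (cur < total_len) {
		u32 seg_len;
		u32 sector_bytes_left;

		if (cur + LZO_LEN > total_len)
			return -1;
		/* Segment header: size of the payload that follows it. */
		seg_len = read_compress_length(buf + cur);
		cur += LZO_LEN + seg_len;
		nr_segments++;

		/*
		 * A segment header never crosses a sector boundary, so if
		 * fewer than LZO_LEN bytes remain in this sector they are
		 * padding zeros and must be skipped.
		 */
		sector_bytes_left = sectorsize - (cur % sectorsize);
		if (sector_bytes_left < LZO_LEN)
			cur += sector_bytes_left;
	}
	return nr_segments;
}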

/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure there is enough space left in the last sector to fit the next
 *   segment header.  If not, pad the sector with at most LZO_LEN (4) - 1
 *   bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct folio **out_folios,
					unsigned long max_nr_folio,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct folio *cur_folio;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
		return -E2BIG;

	/*
	 * We never allow a segment header to cross a sector boundary; the
	 * previous run should have left enough space inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_folio = out_folios[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_folio) {
		cur_folio = btrfs_alloc_compr_folio();
		if (!cur_folio)
			return -ENOMEM;
		out_folios[*cur_out / PAGE_SIZE] = cur_folio;
	}

	kaddr = kmap_local_folio(cur_folio, 0);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap_local(kaddr);

		if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
			return -E2BIG;

		cur_folio = out_folios[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_folio) {
			cur_folio = btrfs_alloc_compr_folio();
			if (!cur_folio)
				return -ENOMEM;
			out_folios[*cur_out / PAGE_SIZE] = cur_folio;
		}
		kaddr = kmap_local_folio(cur_folio, 0);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0,
	       sector_bytes_left);
	*cur_out += sector_bytes_left;

out:
	kunmap_local(kaddr);
	return 0;
}

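/*
 * Compress the page cache range [@start, @start + *total_out) of @mapping
 * into @folios using the LZO format described at the top of this file.
 *
 * On entry *total_out holds the number of bytes to compress and *out_folios
 * the maximum number of folios that may be used.  On success *total_in is
 * set to the number of input bytes consumed, *total_out to the size of the
 * compressed data (header included) and *out_folios to the number of folios
 * filled.  Returns -E2BIG if the result would not fit into the given folios
 * or does not shrink the data, in which case the caller is expected to fall
 * back to writing the range uncompressed.
 */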
int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
			u64 start, struct folio **folios, unsigned long *out_folios,
			unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = inode_to_fs_info(mapping->host)->sectorsize;
	struct folio *folio_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_folio = *out_folios;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_folio > 0);
	*out_folios = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the total
	 * compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!folio_in) {
			ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
			if (ret < 0)
				goto out;
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap_local_folio(folio_in, 0);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap_local(data_in);
		if (unlikely(ret < 0)) {
			/* lzo1x_1_compress never fails. */
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   folios, max_nr_folio,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if we are making the data bigger after two sectors.
		 * If so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached the page boundary */
		if (PAGE_ALIGNED(cur_in)) {
			folio_put(folio_in);
			folio_in = NULL;
		}
	}

	/* Store the total size of the compressed data */
	sizes_ptr = kmap_local_folio(folios[0], 0);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (folio_in)
		folio_put(folio_in);
	*out_folios = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * The payload itself has no padding; we only need to switch pages at page
 * boundaries.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		struct folio *cur_folio;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_folio = cb->compressed_folios[*cur_in / PAGE_SIZE];

		memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
				  offset_in_folio(cur_folio, *cur_in), copy_len);

		*cur_in += copy_len;
	}
}

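/*
 * Decompress a whole LZO compressed extent described by @cb.
 *
 * Each segment is copied into the workspace, decompressed there, and the
 * result is then copied into the pages of the original bio with
 * btrfs_decompress_buf2page().
 */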
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap_local_folio(cb->compressed_folios[0], 0);
	len_in = read_compress_length(kaddr);
	kunmap_local(kaddr);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If either check fails, the compressed extent is corrupted.
	 */
	if (unlikely(len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
		     round_up(len_in, sectorsize) < cb->compressed_len)) {
		struct btrfs_inode *inode = cb->bbio.inode;

		btrfs_err(fs_info,
"lzo header invalid, root %llu inode %llu offset %llu lzo len %u compressed len %u",
			  btrfs_root_id(inode->root), btrfs_ino(inode),
			  cb->start, len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct folio *cur_folio;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside the current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_folio = cb->compressed_folios[cur_in / PAGE_SIZE];
		ASSERT(cur_folio);
		kaddr = kmap_local_folio(cur_folio, 0);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap_local(kaddr);
		cur_in += LZO_LEN;

		if (unlikely(seg_len > WORKSPACE_CBUF_LENGTH)) {
			struct btrfs_inode *inode = cb->bbio.inode;

			/*
			 * seg_len should not be larger than the space we have
			 * allocated for workspace->cbuf.
			 */
			btrfs_err(fs_info,
			"lzo segment too big, root %llu inode %llu offset %llu len %u",
				  btrfs_root_id(inode->root), btrfs_ino(inode),
				  cb->start, seg_len);
			return -EIO;
		}

		/* Copy the compressed segment payload into the workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (unlikely(ret != LZO_E_OK)) {
			struct btrfs_inode *inode = cb->bbio.inode;

			btrfs_err(fs_info,
		"lzo decompression failed, error %d root %llu inode %llu offset %llu",
				  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
				  cb->start);
			return -EIO;
		}

		/* Copy the data into the inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			return 0;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}

	return 0;
}

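/*
 * Decompress a single-segment LZO extent (e.g. an inline extent) from
 * @data_in into @dest_folio at offset @dest_pgoff.
 *
 * @data_in starts with the extent header followed by exactly one segment
 * header and its payload, so at most one sector of data is produced.
 */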
int lzo_decompress(struct list_head *ws, const u8 *data_in,
		struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
	const u32 sectorsize = fs_info->sectorsize;
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
	int ret = 0;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = sectorsize;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (unlikely(ret != LZO_E_OK)) {
		struct btrfs_inode *inode = folio_to_inode(dest_folio);

		btrfs_err(fs_info,
		"lzo decompression failed, error %d root %llu inode %llu offset %llu",
			  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
			  folio_pos(dest_folio));
		ret = -EIO;
		goto out;
	}

	ASSERT(out_len <= sectorsize);
	memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, out_len);
	/* Early end, considered as an error. */
	if (unlikely(out_len < destlen)) {
		ret = -EIO;
		folio_zero_range(dest_folio, dest_pgoff + out_len, destlen - out_len);
	}
out:
	return ret;
}

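/*
 * LZO has no tunable compression levels, so both the maximum and the default
 * level are 1.
 */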
const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};