// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
#include "compression.h"

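/*
 * Per-workspace state reused across (de)compression calls: the zlib
 * stream, a one-page scratch buffer for inflate output, and the deflate
 * level selected by zlib_set_level().
 */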
struct workspace {
	z_stream strm;
	char *buf;
	struct list_head list;
	int level;
};

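/* Tear down a workspace created by zlib_alloc_workspace(). */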
static void zlib_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->strm.workspace);
	kfree(workspace->buf);
	kfree(workspace);
}

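/*
 * Allocate a workspace sized for the larger of deflate and inflate, plus a
 * page of scratch space.  Returns the embedded list head, or an ERR_PTR on
 * allocation failure.
 */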
static struct list_head *zlib_alloc_workspace(void)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

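/*
 * Compress up to *total_out bytes starting at @start in @mapping.  Output
 * pages are allocated here and stored in @pages, at most *out_pages of
 * them.  On return *out_pages, *total_in and *total_out hold the number of
 * pages used and the bytes consumed and produced; -E2BIG means the data
 * did not compress well enough to be worth storing.
 */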
static int zlib_compress_pages(struct list_head *ws,
			       struct address_space *mapping,
			       u64 start,
			       struct page **pages,
			       unsigned long *out_pages,
			       unsigned long *total_in,
			       unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
		pr_warn("BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	data_in = kmap(in_page);

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

	workspace->strm.next_in = data_in;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.avail_in = min(len, PAGE_SIZE);

	while (workspace->strm.total_in < len) {
		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			pr_debug("BTRFS: deflate in loop returned %d\n",
			       ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/* we're making it bigger, give up */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out.  Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;

		/* we've read in a full page, get a new one */
		if (workspace->strm.avail_in == 0) {
			if (workspace->strm.total_out > max_out)
				break;

			bytes_left = len - workspace->strm.total_in;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			in_page = find_get_page(mapping,
						start >> PAGE_SHIFT);
			data_in = kmap(in_page);
			workspace->strm.avail_in = min(bytes_left,
							   PAGE_SIZE);
			workspace->strm.next_in = data_in;
		}
	}
	workspace->strm.avail_in = 0;
	ret = zlib_deflate(&workspace->strm, Z_FINISH);
	zlib_deflateEnd(&workspace->strm);

	if (ret != Z_STREAM_END) {
		ret = -EIO;
		goto out;
	}

	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	return ret;
}

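/*
 * Decompress an entire compressed extent described by @cb and copy the
 * inflated data into the pages of cb->orig_bio one PAGE_SIZE chunk at a
 * time via btrfs_decompress_buf2page().
 */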
static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	size_t srclen = cb->compressed_len;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		kunmap(pages_in[page_in_index]);
		return -EIO;
	}
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 orig_bio);
		if (ret2 == 0) {
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;

		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;
			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
							   PAGE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		zero_fill_bio(orig_bio);
	return ret;
}

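/*
 * Decompress a single chunk into @dest_page: inflate @srclen bytes from
 * @data_in, skip output up to @start_byte and copy at most @destlen bytes
 * (capped to one page) into the destination page, zero-filling any
 * shortfall at the end.
 */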
static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
			   struct page *dest_page,
			   unsigned long start_byte,
			   size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		return -EIO;
	}

	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		if (total_out <= start_byte)
			goto next;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_SIZE - pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected.  btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}

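/*
 * Set the deflate level from the value encoded in the high nibble of
 * @type: 0 selects the default level 3, and anything above 9 is clamped
 * to 9.
 */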
static void zlib_set_level(struct list_head *ws, unsigned int type)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	unsigned level = (type & 0xF0) >> 4;

	if (level > 9)
		level = 9;

	workspace->level = level > 0 ? level : 3;
}

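/* zlib hooks exported to the generic btrfs compression code. */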
const struct btrfs_compress_op btrfs_zlib_compress = {
	.alloc_workspace	= zlib_alloc_workspace,
	.free_workspace		= zlib_free_workspace,
	.compress_pages		= zlib_compress_pages,
	.decompress_bio		= zlib_decompress_bio,
	.decompress		= zlib_decompress,
	.set_level		= zlib_set_level,
};