xref: /linux/drivers/block/zram/zcomp.c (revision 903a7d37d9ea03cfed21040467d3d345d1e6fc76)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>

#include "zcomp.h"

#include "backend_lzo.h"
#include "backend_lzorle.h"
#include "backend_lz4.h"
#include "backend_lz4hc.h"
#include "backend_zstd.h"
#include "backend_deflate.h"
#include "backend_842.h"

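/*
 * NULL-terminated table of the compression backends built into this
 * kernel; the CONFIG_ZRAM_BACKEND_* options decide which entries are
 * present.
 */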
static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};

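/* Tear down one stream: backend context plus the scratch buffer. */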
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->ops->destroy_ctx(&zstrm->ctx);
	vfree(zstrm->buffer);
	zstrm->buffer = NULL;
}

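/* Set up one stream: backend context plus a two-page scratch buffer. */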
static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	int ret;

	ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
	if (ret)
		return ret;

	/*
	 * Allocate two pages: one for compressed data, plus one extra in
	 * case the compressed size ends up larger than the original.
	 */
	zstrm->buffer = vzalloc(2 * PAGE_SIZE);
	if (!zstrm->buffer) {
		zcomp_strm_free(comp, zstrm);
		return -ENOMEM;
	}
	return 0;
}

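/*
 * Find the backend whose name matches @comp; returns the NULL
 * sentinel entry when no backend matches.
 */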
static const struct zcomp_ops *lookup_backend_ops(const char *comp)
{
	int i = 0;

	while (backends[i]) {
		if (sysfs_streq(comp, backends[i]->name))
			break;
		i++;
	}
	return backends[i];
}

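/* True if @comp names a compression backend built into this kernel. */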
bool zcomp_available_algorithm(const char *comp)
{
	return lookup_backend_ops(comp) != NULL;
}

/* Show the available compressors; the currently selected one is bracketed. */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	ssize_t sz = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
		if (!strcmp(comp, backends[i]->name)) {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]->name);
		} else {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]->name);
		}
	}

	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}

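/*
 * Take this CPU's stream for a compress/decompress call. The local
 * lock pins the caller to this CPU, so the returned pointer stays
 * valid until zcomp_stream_put() is called.
 */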
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}

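/* Release the stream taken by zcomp_stream_get(). */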
void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}

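/*
 * Compress one PAGE_SIZE page from @src into zstrm->buffer. On
 * success *dst_len holds the compressed length.
 */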
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		   const void *src, unsigned int *dst_len)
{
	struct zcomp_req req = {
		.src = src,
		.dst = zstrm->buffer,
		.src_len = PAGE_SIZE,
		.dst_len = 2 * PAGE_SIZE,
	};
	int ret;

	ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
	if (!ret)
		*dst_len = req.dst_len;
	return ret;
}

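/* Decompress @src_len bytes from @src into the PAGE_SIZE buffer @dst. */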
int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
		     const void *src, unsigned int src_len, void *dst)
{
	struct zcomp_req req = {
		.src = src,
		.dst = dst,
		.src_len = src_len,
		.dst_len = PAGE_SIZE,
	};

	return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}

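/* CPU hotplug callback: set up the compression stream of an onlining CPU. */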
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	local_lock_init(&zstrm->lock);

	ret = zcomp_strm_init(comp, zstrm);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

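/* CPU hotplug callback: tear down the compression stream of a dead CPU. */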
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	zcomp_strm_free(comp, zstrm);
	return 0;
}

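/*
 * Allocate the per-CPU streams, apply the backend parameters and
 * register with the CPU hotplug state machine, which calls
 * zcomp_cpu_up_prepare() for every online CPU.
 */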
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
	int ret;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	comp->params = params;
	ret = comp->ops->setup_params(comp->params);
	if (ret)
		goto cleanup;

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;

	return 0;

cleanup:
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	return ret;
}

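/* Undo zcomp_init() and free the zcomp instance itself. */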
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	kfree(comp);
}

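/*
 * Allocate a zcomp instance bound to the backend named by @alg;
 * returns an ERR_PTR() on failure. A sketch of typical use (the
 * variable names below are illustrative):
 *
 *	comp = zcomp_create("lzo-rle", &params);
 *	zstrm = zcomp_stream_get(comp);
 *	ret = zcomp_compress(comp, zstrm, page_addr, &comp_len);
 *	zcomp_stream_put(comp);
 */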
struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
{
	struct zcomp *comp;
	int error;

	/*
	 * The backends array has a sentinel NULL value, so its minimum
	 * size is 1. To be valid it must, apart from the sentinel NULL
	 * element, contain at least one compression backend.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->ops = lookup_backend_ops(alg);
	if (!comp->ops) {
		kfree(comp);
		return ERR_PTR(-EINVAL);
	}

	error = zcomp_init(comp, params);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}