xref: /linux/drivers/block/zram/zcomp.c (revision 1d3100cf148de1afb7b2282dbc5941fdc4722949)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>

#include "zcomp.h"

#include "backend_lzo.h"
#include "backend_lzorle.h"
#include "backend_lz4.h"
#include "backend_lz4hc.h"
#include "backend_zstd.h"
#include "backend_deflate.h"
#include "backend_842.h"

static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};

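/*
 * Free the backend context and the scratch buffer of one per-CPU
 * stream. Safe to call on a partially initialised stream.
 */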
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	if (zstrm->ctx)
		comp->ops->destroy_ctx(zstrm->ctx);
	vfree(zstrm->buffer);
	zstrm->ctx = NULL;
	zstrm->buffer = NULL;
}

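/*
 * Allocate the backend compression context and the scratch buffer
 * for one per-CPU stream.
 */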
static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	zstrm->ctx = comp->ops->create_ctx();

	/*
	 * Allocate 2 pages: one for the compressed data plus one extra
	 * for the case when the compressed size is larger than the
	 * original one.
	 */
	zstrm->buffer = vzalloc(2 * PAGE_SIZE);
	if (!zstrm->ctx || !zstrm->buffer) {
		zcomp_strm_free(comp, zstrm);
		return -ENOMEM;
	}
	return 0;
}

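/*
 * Walk the NULL-terminated backends[] table and return the ops of the
 * backend whose name matches @comp (sysfs_streq() tolerates a trailing
 * newline from sysfs writes), or NULL if no compiled-in backend matches.
 */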
static const struct zcomp_ops *lookup_backend_ops(const char *comp)
{
	int i = 0;

	while (backends[i]) {
		if (sysfs_streq(comp, backends[i]->name))
			break;
		i++;
	}
	return backends[i];
}

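/* true if @comp names a compiled-in compression backend */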
bool zcomp_available_algorithm(const char *comp)
{
	return lookup_backend_ops(comp) != NULL;
}

/* show available compressors, marking the currently selected one with [] */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	ssize_t sz = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
		if (!strcmp(comp, backends[i]->name)) {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]->name);
		} else {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]->name);
		}
	}

	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}

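/*
 * Return this CPU's compression stream with its local lock held; the
 * caller is pinned to that stream until zcomp_stream_put() releases it.
 *
 * Typical calling pattern (an illustrative sketch only, not taken from
 * this file -- the real callers live in zram_drv.c and add error
 * handling and zsmalloc bookkeeping; page_addr and comp_len are
 * placeholder names):
 *
 *	zstrm = zcomp_stream_get(comp);
 *	ret = zcomp_compress(comp, zstrm, page_addr, &comp_len);
 *	... on success, zstrm->buffer holds comp_len bytes of output ...
 *	zcomp_stream_put(comp);
 */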
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}

void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}

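/*
 * Compress one PAGE_SIZE source page into zstrm->buffer. On success,
 * store the compressed length in *dst_len; otherwise return the
 * backend's error code.
 */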
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		   const void *src, unsigned int *dst_len)
{
	/* The dst buffer should always be 2 * PAGE_SIZE */
	size_t dlen = 2 * PAGE_SIZE;
	int ret;

	ret = comp->ops->compress(zstrm->ctx, src, PAGE_SIZE,
				  zstrm->buffer, &dlen);
	if (!ret)
		*dst_len = dlen;
	return ret;
}

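/* Decompress @src_len bytes from @src into the PAGE_SIZE buffer @dst. */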
int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
		     const void *src, unsigned int src_len, void *dst)
{
	return comp->ops->decompress(zstrm->ctx, src, src_len,
				     dst, PAGE_SIZE);
}

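/*
 * CPU hotplug (CPUHP_ZCOMP_PREPARE) callback: allocate the compression
 * stream for a CPU that is coming online.
 */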
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	local_lock_init(&zstrm->lock);

	ret = zcomp_strm_init(comp, zstrm);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

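/*
 * CPU hotplug teardown counterpart: free the stream of a CPU that is
 * going offline.
 */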
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	zcomp_strm_free(comp, zstrm);
	return 0;
}

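/*
 * Allocate the per-CPU streams and register with the CPUHP_ZCOMP_PREPARE
 * hotplug state, which also runs zcomp_cpu_up_prepare() for CPUs that
 * are already online.
 */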
static int zcomp_init(struct zcomp *comp)
{
	int ret;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;
	return 0;

cleanup:
	free_percpu(comp->stream);
	return ret;
}

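/*
 * Unregister from CPU hotplug (freeing every per-CPU stream via
 * zcomp_cpu_dead()), then release the per-CPU array and the zcomp itself.
 */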
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	free_percpu(comp->stream);
	kfree(comp);
}

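/*
 * Allocate a zcomp, resolve @alg against the compiled-in backends and
 * set up the per-CPU streams. Returns an ERR_PTR() on failure.
 */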
struct zcomp *zcomp_create(const char *alg)
{
	struct zcomp *comp;
	int error;

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->ops = lookup_backend_ops(alg);
	if (!comp->ops) {
		kfree(comp);
		return ERR_PTR(-EINVAL);
	}

	error = zcomp_init(comp);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}