xref: /linux/drivers/block/zram/zram_drv.c (revision 4aa748dd1abf337426b4c941ae1b606ed0e2a5aa)
1 /*
2  * Compressed RAM block device
3  *
4  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
5  *               2012, 2013 Minchan Kim
6  *
7  * This code is released using a dual license strategy: BSD/GPL
8  * You can choose the license that better fits your requirements.
9  *
10  * Released under the terms of 3-clause BSD License
11  * Released under the terms of GNU General Public License Version 2.0
12  *
13  */
14 
15 #define KMSG_COMPONENT "zram"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/bitops.h>
22 #include <linux/blkdev.h>
23 #include <linux/buffer_head.h>
24 #include <linux/device.h>
25 #include <linux/highmem.h>
26 #include <linux/slab.h>
27 #include <linux/backing-dev.h>
28 #include <linux/string.h>
29 #include <linux/vmalloc.h>
30 #include <linux/err.h>
31 #include <linux/idr.h>
32 #include <linux/sysfs.h>
33 #include <linux/debugfs.h>
34 #include <linux/cpuhotplug.h>
35 #include <linux/part_stat.h>
36 #include <linux/kernel_read_file.h>
37 
38 #include "zram_drv.h"
39 
40 static DEFINE_IDR(zram_index_idr);
41 /* zram_index_idr must be accessed under zram_index_mutex */
42 static DEFINE_MUTEX(zram_index_mutex);
43 
44 static int zram_major;
45 static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
46 
47 /* Module params (documentation at end) */
48 static unsigned int num_devices = 1;
49 /*
50  * Pages that compress to a size equal to or greater than this are stored
51  * uncompressed in memory.
52  */
53 static size_t huge_class_size;
54 
55 static const struct block_device_operations zram_devops;
56 
57 static void zram_free_page(struct zram *zram, size_t index);
58 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
59 			  struct bio *parent);
60 
61 static int zram_slot_trylock(struct zram *zram, u32 index)
62 {
63 	return spin_trylock(&zram->table[index].lock);
64 }
65 
66 static void zram_slot_lock(struct zram *zram, u32 index)
67 {
68 	spin_lock(&zram->table[index].lock);
69 }
70 
71 static void zram_slot_unlock(struct zram *zram, u32 index)
72 {
73 	spin_unlock(&zram->table[index].lock);
74 }
75 
76 static inline bool init_done(struct zram *zram)
77 {
78 	return zram->disksize;
79 }
80 
81 static inline struct zram *dev_to_zram(struct device *dev)
82 {
83 	return (struct zram *)dev_to_disk(dev)->private_data;
84 }
85 
86 static unsigned long zram_get_handle(struct zram *zram, u32 index)
87 {
88 	return zram->table[index].handle;
89 }
90 
91 static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
92 {
93 	zram->table[index].handle = handle;
94 }
95 
96 /* flag operations require the table entry's lock (zram_slot_lock()) to be held */
97 static bool zram_test_flag(struct zram *zram, u32 index,
98 			enum zram_pageflags flag)
99 {
100 	return zram->table[index].flags & BIT(flag);
101 }
102 
103 static void zram_set_flag(struct zram *zram, u32 index,
104 			enum zram_pageflags flag)
105 {
106 	zram->table[index].flags |= BIT(flag);
107 }
108 
109 static void zram_clear_flag(struct zram *zram, u32 index,
110 			enum zram_pageflags flag)
111 {
112 	zram->table[index].flags &= ~BIT(flag);
113 }
114 
115 static inline void zram_set_element(struct zram *zram, u32 index,
116 			unsigned long element)
117 {
118 	zram->table[index].element = element;
119 }
120 
121 static unsigned long zram_get_element(struct zram *zram, u32 index)
122 {
123 	return zram->table[index].element;
124 }
125 
126 static size_t zram_get_obj_size(struct zram *zram, u32 index)
127 {
128 	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
129 }
130 
131 static void zram_set_obj_size(struct zram *zram,
132 					u32 index, size_t size)
133 {
134 	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
135 
136 	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
137 }
138 
139 static inline bool zram_allocated(struct zram *zram, u32 index)
140 {
141 	return zram_get_obj_size(zram, index) ||
142 			zram_test_flag(zram, index, ZRAM_SAME) ||
143 			zram_test_flag(zram, index, ZRAM_WB);
144 }
145 
146 #if PAGE_SIZE != 4096
147 static inline bool is_partial_io(struct bio_vec *bvec)
148 {
149 	return bvec->bv_len != PAGE_SIZE;
150 }
151 #define ZRAM_PARTIAL_IO		1
152 #else
153 static inline bool is_partial_io(struct bio_vec *bvec)
154 {
155 	return false;
156 }
157 #endif
158 
159 static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
160 {
161 	prio &= ZRAM_COMP_PRIORITY_MASK;
162 	/*
163 	 * Clear the previous priority value first, in case we recompress
164 	 * an already recompressed page further.
165 	 */
166 	zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
167 				      ZRAM_COMP_PRIORITY_BIT1);
168 	zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
169 }
170 
171 static inline u32 zram_get_priority(struct zram *zram, u32 index)
172 {
173 	u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
174 
175 	return prio & ZRAM_COMP_PRIORITY_MASK;
176 }
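/*
 * A sketch of the per-slot "flags" word layout, as inferred from the
 * accessors above (not a normative description): the low ZRAM_FLAG_SHIFT
 * bits hold the compressed object size (zram_get_obj_size() masks with
 * BIT(ZRAM_FLAG_SHIFT) - 1), the zram_pageflags bits sit above that, and
 * the compression priority is a small field starting at
 * ZRAM_COMP_PRIORITY_BIT1, masked by ZRAM_COMP_PRIORITY_MASK.
 */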
177 
178 static void zram_accessed(struct zram *zram, u32 index)
179 {
180 	zram_clear_flag(zram, index, ZRAM_IDLE);
181 	zram_clear_flag(zram, index, ZRAM_PP_SLOT);
182 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
183 	zram->table[index].ac_time = ktime_get_boottime();
184 #endif
185 }
186 
187 #if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP
188 struct zram_pp_slot {
189 	unsigned long		index;
190 	struct list_head	entry;
191 };
192 
193 /*
194  * A post-processing bucket is, essentially, a size class; this defines
195  * the range (in bytes) of pp-slot sizes in a particular bucket.
196  */
197 #define PP_BUCKET_SIZE_RANGE	64
198 #define NUM_PP_BUCKETS		((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1)
199 
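/*
 * Worked example (assuming PAGE_SIZE == 4096): a PP_BUCKET_SIZE_RANGE of 64
 * bytes gives 4096 / 64 + 1 = 65 buckets. place_pp_slot() below indexes a
 * bucket as obj_size / PP_BUCKET_SIZE_RANGE, so a slot whose compressed
 * object is 1000 bytes lands in bucket 15, while an incompressible
 * PAGE_SIZE object lands in the last bucket, 64.
 */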
200 struct zram_pp_ctl {
201 	struct list_head	pp_buckets[NUM_PP_BUCKETS];
202 };
203 
204 static struct zram_pp_ctl *init_pp_ctl(void)
205 {
206 	struct zram_pp_ctl *ctl;
207 	u32 idx;
208 
209 	ctl = kmalloc(sizeof(*ctl), GFP_KERNEL);
210 	if (!ctl)
211 		return NULL;
212 
213 	for (idx = 0; idx < NUM_PP_BUCKETS; idx++)
214 		INIT_LIST_HEAD(&ctl->pp_buckets[idx]);
215 	return ctl;
216 }
217 
218 static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps)
219 {
220 	list_del_init(&pps->entry);
221 
222 	zram_slot_lock(zram, pps->index);
223 	zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT);
224 	zram_slot_unlock(zram, pps->index);
225 
226 	kfree(pps);
227 }
228 
229 static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
230 {
231 	u32 idx;
232 
233 	if (!ctl)
234 		return;
235 
236 	for (idx = 0; idx < NUM_PP_BUCKETS; idx++) {
237 		while (!list_empty(&ctl->pp_buckets[idx])) {
238 			struct zram_pp_slot *pps;
239 
240 			pps = list_first_entry(&ctl->pp_buckets[idx],
241 					       struct zram_pp_slot,
242 					       entry);
243 			release_pp_slot(zram, pps);
244 		}
245 	}
246 
247 	kfree(ctl);
248 }
249 
250 static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
251 			  struct zram_pp_slot *pps)
252 {
253 	u32 idx;
254 
255 	idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
256 	list_add(&pps->entry, &ctl->pp_buckets[idx]);
257 
258 	zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
259 }
260 
261 static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
262 {
263 	struct zram_pp_slot *pps = NULL;
264 	s32 idx = NUM_PP_BUCKETS - 1;
265 
266 	/* The higher the bucket id, the better the slot is for post-processing */
267 	while (idx >= 0) {
268 		pps = list_first_entry_or_null(&ctl->pp_buckets[idx],
269 					       struct zram_pp_slot,
270 					       entry);
271 		if (pps)
272 			break;
273 
274 		idx--;
275 	}
276 	return pps;
277 }
278 #endif
279 
280 static inline void update_used_max(struct zram *zram,
281 					const unsigned long pages)
282 {
283 	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
284 
285 	do {
286 		if (cur_max >= pages)
287 			return;
288 	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
289 					  &cur_max, pages));
290 }
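/*
 * The loop above is the usual lock-free "racy maximum" pattern: keep
 * retrying the compare-and-exchange until either the stored maximum is
 * already >= pages or our value wins the race. A minimal stand-alone
 * sketch of the same idea:
 *
 *	long cur = atomic_long_read(&max);
 *	do {
 *		if (cur >= val)
 *			break;
 *	} while (!atomic_long_try_cmpxchg(&max, &cur, val));
 *
 * atomic_long_try_cmpxchg() refreshes 'cur' with the latest value on
 * failure, so the loop converges without an explicit re-read.
 */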
291 
292 static inline void zram_fill_page(void *ptr, unsigned long len,
293 					unsigned long value)
294 {
295 	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
296 	memset_l(ptr, value, len / sizeof(unsigned long));
297 }
298 
299 static bool page_same_filled(void *ptr, unsigned long *element)
300 {
301 	unsigned long *page;
302 	unsigned long val;
303 	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
304 
305 	page = (unsigned long *)ptr;
306 	val = page[0];
307 
308 	if (val != page[last_pos])
309 		return false;
310 
311 	for (pos = 1; pos < last_pos; pos++) {
312 		if (val != page[pos])
313 			return false;
314 	}
315 
316 	*element = val;
317 
318 	return true;
319 }
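/*
 * Example: a page of all zeroes, or one filled with a repeating
 * unsigned-long pattern, passes the check above; only the single word
 * value is then kept in the table entry (ZRAM_SAME + element) and no
 * zsmalloc memory is allocated for it. Comparing the first word against
 * the last word up front cheaply rejects many non-uniform pages before
 * the full scan.
 */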
320 
321 static ssize_t initstate_show(struct device *dev,
322 		struct device_attribute *attr, char *buf)
323 {
324 	u32 val;
325 	struct zram *zram = dev_to_zram(dev);
326 
327 	down_read(&zram->init_lock);
328 	val = init_done(zram);
329 	up_read(&zram->init_lock);
330 
331 	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
332 }
333 
334 static ssize_t disksize_show(struct device *dev,
335 		struct device_attribute *attr, char *buf)
336 {
337 	struct zram *zram = dev_to_zram(dev);
338 
339 	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
340 }
341 
342 static ssize_t mem_limit_store(struct device *dev,
343 		struct device_attribute *attr, const char *buf, size_t len)
344 {
345 	u64 limit;
346 	char *tmp;
347 	struct zram *zram = dev_to_zram(dev);
348 
349 	limit = memparse(buf, &tmp);
350 	if (buf == tmp) /* no chars parsed, invalid input */
351 		return -EINVAL;
352 
353 	down_write(&zram->init_lock);
354 	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
355 	up_write(&zram->init_lock);
356 
357 	return len;
358 }
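/*
 * Usage sketch (assuming the device is zram0): memparse() accepts the
 * usual size suffixes, and writing 0 removes the limit, e.g.:
 *
 *	echo 512M > /sys/block/zram0/mem_limit
 *	echo 0 > /sys/block/zram0/mem_limit
 */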
359 
360 static ssize_t mem_used_max_store(struct device *dev,
361 		struct device_attribute *attr, const char *buf, size_t len)
362 {
363 	int err;
364 	unsigned long val;
365 	struct zram *zram = dev_to_zram(dev);
366 
367 	err = kstrtoul(buf, 10, &val);
368 	if (err || val != 0)
369 		return -EINVAL;
370 
371 	down_read(&zram->init_lock);
372 	if (init_done(zram)) {
373 		atomic_long_set(&zram->stats.max_used_pages,
374 				zs_get_total_pages(zram->mem_pool));
375 	}
376 	up_read(&zram->init_lock);
377 
378 	return len;
379 }
380 
381 /*
382  * Mark all pages that are older than or equal to cutoff as IDLE.
383  * Callers should hold the zram init lock in read mode.
384  */
385 static void mark_idle(struct zram *zram, ktime_t cutoff)
386 {
387 	int is_idle = 1;
388 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
389 	int index;
390 
391 	for (index = 0; index < nr_pages; index++) {
392 		/*
393 		 * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
394 		 * post-processing (recompress, writeback) happens to the
395 		 * ZRAM_SAME slot.
396 		 *
397 		 * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
398 		 */
399 		zram_slot_lock(zram, index);
400 		if (!zram_allocated(zram, index) ||
401 		    zram_test_flag(zram, index, ZRAM_WB) ||
402 		    zram_test_flag(zram, index, ZRAM_SAME)) {
403 			zram_slot_unlock(zram, index);
404 			continue;
405 		}
406 
407 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
408 		is_idle = !cutoff ||
409 			ktime_after(cutoff, zram->table[index].ac_time);
410 #endif
411 		if (is_idle)
412 			zram_set_flag(zram, index, ZRAM_IDLE);
413 		else
414 			zram_clear_flag(zram, index, ZRAM_IDLE);
415 		zram_slot_unlock(zram, index);
416 	}
417 }
418 
419 static ssize_t idle_store(struct device *dev,
420 		struct device_attribute *attr, const char *buf, size_t len)
421 {
422 	struct zram *zram = dev_to_zram(dev);
423 	ktime_t cutoff_time = 0;
424 	ssize_t rv = -EINVAL;
425 
426 	if (!sysfs_streq(buf, "all")) {
427 		/*
428 		 * If it did not parse as 'all', try to treat it as an integer
429 		 * when we have memory tracking enabled.
430 		 */
431 		u64 age_sec;
432 
433 		if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
434 			cutoff_time = ktime_sub(ktime_get_boottime(),
435 					ns_to_ktime(age_sec * NSEC_PER_SEC));
436 		else
437 			goto out;
438 	}
439 
440 	down_read(&zram->init_lock);
441 	if (!init_done(zram))
442 		goto out_unlock;
443 
444 	/*
445 	 * A cutoff_time of 0 marks everything as idle; this is the
446 	 * "all" behavior.
447 	 */
448 	mark_idle(zram, cutoff_time);
449 	rv = len;
450 
451 out_unlock:
452 	up_read(&zram->init_lock);
453 out:
454 	return rv;
455 }
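/*
 * Usage sketch (assuming zram0): mark every eligible slot idle, or, with
 * CONFIG_ZRAM_TRACK_ENTRY_ACTIME, only slots not accessed for N seconds:
 *
 *	echo all > /sys/block/zram0/idle
 *	echo 3600 > /sys/block/zram0/idle
 */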
456 
457 #ifdef CONFIG_ZRAM_WRITEBACK
458 static ssize_t writeback_limit_enable_store(struct device *dev,
459 		struct device_attribute *attr, const char *buf, size_t len)
460 {
461 	struct zram *zram = dev_to_zram(dev);
462 	u64 val;
463 	ssize_t ret = -EINVAL;
464 
465 	if (kstrtoull(buf, 10, &val))
466 		return ret;
467 
468 	down_read(&zram->init_lock);
469 	spin_lock(&zram->wb_limit_lock);
470 	zram->wb_limit_enable = val;
471 	spin_unlock(&zram->wb_limit_lock);
472 	up_read(&zram->init_lock);
473 	ret = len;
474 
475 	return ret;
476 }
477 
478 static ssize_t writeback_limit_enable_show(struct device *dev,
479 		struct device_attribute *attr, char *buf)
480 {
481 	bool val;
482 	struct zram *zram = dev_to_zram(dev);
483 
484 	down_read(&zram->init_lock);
485 	spin_lock(&zram->wb_limit_lock);
486 	val = zram->wb_limit_enable;
487 	spin_unlock(&zram->wb_limit_lock);
488 	up_read(&zram->init_lock);
489 
490 	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
491 }
492 
493 static ssize_t writeback_limit_store(struct device *dev,
494 		struct device_attribute *attr, const char *buf, size_t len)
495 {
496 	struct zram *zram = dev_to_zram(dev);
497 	u64 val;
498 	ssize_t ret = -EINVAL;
499 
500 	if (kstrtoull(buf, 10, &val))
501 		return ret;
502 
503 	down_read(&zram->init_lock);
504 	spin_lock(&zram->wb_limit_lock);
505 	zram->bd_wb_limit = val;
506 	spin_unlock(&zram->wb_limit_lock);
507 	up_read(&zram->init_lock);
508 	ret = len;
509 
510 	return ret;
511 }
512 
513 static ssize_t writeback_limit_show(struct device *dev,
514 		struct device_attribute *attr, char *buf)
515 {
516 	u64 val;
517 	struct zram *zram = dev_to_zram(dev);
518 
519 	down_read(&zram->init_lock);
520 	spin_lock(&zram->wb_limit_lock);
521 	val = zram->bd_wb_limit;
522 	spin_unlock(&zram->wb_limit_lock);
523 	up_read(&zram->init_lock);
524 
525 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
526 }
527 
528 static void reset_bdev(struct zram *zram)
529 {
530 	if (!zram->backing_dev)
531 		return;
532 
533 	/* hope that filp_close flushes all outstanding IO */
534 	filp_close(zram->backing_dev, NULL);
535 	zram->backing_dev = NULL;
536 	zram->bdev = NULL;
537 	zram->disk->fops = &zram_devops;
538 	kvfree(zram->bitmap);
539 	zram->bitmap = NULL;
540 }
541 
542 static ssize_t backing_dev_show(struct device *dev,
543 		struct device_attribute *attr, char *buf)
544 {
545 	struct file *file;
546 	struct zram *zram = dev_to_zram(dev);
547 	char *p;
548 	ssize_t ret;
549 
550 	down_read(&zram->init_lock);
551 	file = zram->backing_dev;
552 	if (!file) {
553 		memcpy(buf, "none\n", 5);
554 		up_read(&zram->init_lock);
555 		return 5;
556 	}
557 
558 	p = file_path(file, buf, PAGE_SIZE - 1);
559 	if (IS_ERR(p)) {
560 		ret = PTR_ERR(p);
561 		goto out;
562 	}
563 
564 	ret = strlen(p);
565 	memmove(buf, p, ret);
566 	buf[ret++] = '\n';
567 out:
568 	up_read(&zram->init_lock);
569 	return ret;
570 }
571 
572 static ssize_t backing_dev_store(struct device *dev,
573 		struct device_attribute *attr, const char *buf, size_t len)
574 {
575 	char *file_name;
576 	size_t sz;
577 	struct file *backing_dev = NULL;
578 	struct inode *inode;
579 	unsigned int bitmap_sz;
580 	unsigned long nr_pages, *bitmap = NULL;
581 	int err;
582 	struct zram *zram = dev_to_zram(dev);
583 
584 	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
585 	if (!file_name)
586 		return -ENOMEM;
587 
588 	down_write(&zram->init_lock);
589 	if (init_done(zram)) {
590 		pr_info("Can't setup backing device for initialized device\n");
591 		err = -EBUSY;
592 		goto out;
593 	}
594 
595 	strscpy(file_name, buf, PATH_MAX);
596 	/* ignore trailing newline */
597 	sz = strlen(file_name);
598 	if (sz > 0 && file_name[sz - 1] == '\n')
599 		file_name[sz - 1] = 0x00;
600 
601 	backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
602 	if (IS_ERR(backing_dev)) {
603 		err = PTR_ERR(backing_dev);
604 		backing_dev = NULL;
605 		goto out;
606 	}
607 
608 	inode = backing_dev->f_mapping->host;
609 
610 	/* Only block devices are supported at the moment */
611 	if (!S_ISBLK(inode->i_mode)) {
612 		err = -ENOTBLK;
613 		goto out;
614 	}
615 
616 	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
617 	/* Refuse to use a zero-sized device (also prevents self-reference) */
618 	if (!nr_pages) {
619 		err = -EINVAL;
620 		goto out;
621 	}
622 
623 	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
624 	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
625 	if (!bitmap) {
626 		err = -ENOMEM;
627 		goto out;
628 	}
629 
630 	reset_bdev(zram);
631 
632 	zram->bdev = I_BDEV(inode);
633 	zram->backing_dev = backing_dev;
634 	zram->bitmap = bitmap;
635 	zram->nr_pages = nr_pages;
636 	up_write(&zram->init_lock);
637 
638 	pr_info("setup backing device %s\n", file_name);
639 	kfree(file_name);
640 
641 	return len;
642 out:
643 	kvfree(bitmap);
644 
645 	if (backing_dev)
646 		filp_close(backing_dev, NULL);
647 
648 	up_write(&zram->init_lock);
649 
650 	kfree(file_name);
651 
652 	return err;
653 }
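/*
 * Usage sketch: the backing device must be configured while the zram
 * device is uninitialized (before disksize is set), and it must be a
 * block device; the partition name below is just an example:
 *
 *	echo /dev/nvme0n1p3 > /sys/block/zram0/backing_dev
 *	cat /sys/block/zram0/backing_dev
 */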
654 
655 static unsigned long alloc_block_bdev(struct zram *zram)
656 {
657 	unsigned long blk_idx = 1;
658 retry:
659 	/* skip bit 0 to avoid being confused with zram.handle == 0 */
660 	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
661 	if (blk_idx == zram->nr_pages)
662 		return 0;
663 
664 	if (test_and_set_bit(blk_idx, zram->bitmap))
665 		goto retry;
666 
667 	atomic64_inc(&zram->stats.bd_count);
668 	return blk_idx;
669 }
670 
671 static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
672 {
673 	int was_set;
674 
675 	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
676 	WARN_ON_ONCE(!was_set);
677 	atomic64_dec(&zram->stats.bd_count);
678 }
679 
680 static void read_from_bdev_async(struct zram *zram, struct page *page,
681 			unsigned long entry, struct bio *parent)
682 {
683 	struct bio *bio;
684 
685 	bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
686 	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
687 	__bio_add_page(bio, page, PAGE_SIZE, 0);
688 	bio_chain(bio, parent);
689 	submit_bio(bio);
690 }
691 
692 #define PAGE_WB_SIG "page_index="
693 
694 #define PAGE_WRITEBACK			0
695 #define HUGE_WRITEBACK			(1<<0)
696 #define IDLE_WRITEBACK			(1<<1)
697 #define INCOMPRESSIBLE_WRITEBACK	(1<<2)
698 
699 static int scan_slots_for_writeback(struct zram *zram, u32 mode,
700 				    unsigned long nr_pages,
701 				    unsigned long index,
702 				    struct zram_pp_ctl *ctl)
703 {
704 	struct zram_pp_slot *pps = NULL;
705 
706 	for (; nr_pages != 0; index++, nr_pages--) {
707 		if (!pps)
708 			pps = kmalloc(sizeof(*pps), GFP_KERNEL);
709 		if (!pps)
710 			return -ENOMEM;
711 
712 		INIT_LIST_HEAD(&pps->entry);
713 
714 		zram_slot_lock(zram, index);
715 		if (!zram_allocated(zram, index))
716 			goto next;
717 
718 		if (zram_test_flag(zram, index, ZRAM_WB) ||
719 		    zram_test_flag(zram, index, ZRAM_SAME))
720 			goto next;
721 
722 		if (mode & IDLE_WRITEBACK &&
723 		    !zram_test_flag(zram, index, ZRAM_IDLE))
724 			goto next;
725 		if (mode & HUGE_WRITEBACK &&
726 		    !zram_test_flag(zram, index, ZRAM_HUGE))
727 			goto next;
728 		if (mode & INCOMPRESSIBLE_WRITEBACK &&
729 		    !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
730 			goto next;
731 
732 		pps->index = index;
733 		place_pp_slot(zram, ctl, pps);
734 		pps = NULL;
735 next:
736 		zram_slot_unlock(zram, index);
737 	}
738 
739 	kfree(pps);
740 	return 0;
741 }
742 
743 static ssize_t writeback_store(struct device *dev,
744 		struct device_attribute *attr, const char *buf, size_t len)
745 {
746 	struct zram *zram = dev_to_zram(dev);
747 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
748 	struct zram_pp_ctl *ctl = NULL;
749 	struct zram_pp_slot *pps;
750 	unsigned long index = 0;
751 	struct bio bio;
752 	struct bio_vec bio_vec;
753 	struct page *page;
754 	ssize_t ret = len;
755 	int mode, err;
756 	unsigned long blk_idx = 0;
757 
758 	if (sysfs_streq(buf, "idle"))
759 		mode = IDLE_WRITEBACK;
760 	else if (sysfs_streq(buf, "huge"))
761 		mode = HUGE_WRITEBACK;
762 	else if (sysfs_streq(buf, "huge_idle"))
763 		mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
764 	else if (sysfs_streq(buf, "incompressible"))
765 		mode = INCOMPRESSIBLE_WRITEBACK;
766 	else {
767 		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
768 			return -EINVAL;
769 
770 		if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
771 				index >= nr_pages)
772 			return -EINVAL;
773 
774 		nr_pages = 1;
775 		mode = PAGE_WRITEBACK;
776 	}
777 
778 	down_read(&zram->init_lock);
779 	if (!init_done(zram)) {
780 		ret = -EINVAL;
781 		goto release_init_lock;
782 	}
783 
784 	/* Do not permit concurrent post-processing actions. */
785 	if (atomic_xchg(&zram->pp_in_progress, 1)) {
786 		up_read(&zram->init_lock);
787 		return -EAGAIN;
788 	}
789 
790 	if (!zram->backing_dev) {
791 		ret = -ENODEV;
792 		goto release_init_lock;
793 	}
794 
795 	page = alloc_page(GFP_KERNEL);
796 	if (!page) {
797 		ret = -ENOMEM;
798 		goto release_init_lock;
799 	}
800 
801 	ctl = init_pp_ctl();
802 	if (!ctl) {
803 		ret = -ENOMEM;
804 		goto release_init_lock;
805 	}
806 
807 	scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);
808 
809 	while ((pps = select_pp_slot(ctl))) {
810 		spin_lock(&zram->wb_limit_lock);
811 		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
812 			spin_unlock(&zram->wb_limit_lock);
813 			ret = -EIO;
814 			break;
815 		}
816 		spin_unlock(&zram->wb_limit_lock);
817 
818 		if (!blk_idx) {
819 			blk_idx = alloc_block_bdev(zram);
820 			if (!blk_idx) {
821 				ret = -ENOSPC;
822 				break;
823 			}
824 		}
825 
826 		index = pps->index;
827 		zram_slot_lock(zram, index);
828 		/*
829 		 * scan_slots() sets ZRAM_PP_SLOT and releases the slot lock, so
830 		 * slots can change in the meantime. If slots are accessed or
831 		 * freed they lose the ZRAM_PP_SLOT flag and hence we don't
832 		 * post-process them.
833 		 */
834 		if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
835 			goto next;
836 		zram_slot_unlock(zram, index);
837 
838 		if (zram_read_page(zram, page, index, NULL)) {
839 			release_pp_slot(zram, pps);
840 			continue;
841 		}
842 
843 		bio_init(&bio, zram->bdev, &bio_vec, 1,
844 			 REQ_OP_WRITE | REQ_SYNC);
845 		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
846 		__bio_add_page(&bio, page, PAGE_SIZE, 0);
847 
848 		/*
849 		 * XXX: A single page IO would be inefficient for write,
850 		 * but it is not a bad starter.
851 		 */
852 		err = submit_bio_wait(&bio);
853 		if (err) {
854 			release_pp_slot(zram, pps);
855 			/*
856 			 * BIO errors are not fatal, we continue and simply
857 			 * attempt to write back the remaining objects (pages).
858 			 * At the same time we need to signal user-space that
859 			 * some writes (at least one, but also could be all of
860 			 * them) were not successful and we do so by returning
861 			 * the most recent BIO error.
862 			 */
863 			ret = err;
864 			continue;
865 		}
866 
867 		atomic64_inc(&zram->stats.bd_writes);
868 		zram_slot_lock(zram, index);
869 		/*
870 		 * Same as above: we release the slot lock during writeback, so
871 		 * the slot can change under us: slot_free(), or slot_free()
872 		 * followed by reallocation (zram_write_page()). In both cases
873 		 * the slot loses the ZRAM_PP_SLOT flag. No concurrent
874 		 * post-processing can set ZRAM_PP_SLOT on such slots until
875 		 * the current post-processing finishes.
876 		 */
877 		if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
878 			goto next;
879 
880 		zram_free_page(zram, index);
881 		zram_set_flag(zram, index, ZRAM_WB);
882 		zram_set_element(zram, index, blk_idx);
883 		blk_idx = 0;
884 		atomic64_inc(&zram->stats.pages_stored);
885 		spin_lock(&zram->wb_limit_lock);
886 		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
887 			zram->bd_wb_limit -=  1UL << (PAGE_SHIFT - 12);
888 		spin_unlock(&zram->wb_limit_lock);
889 next:
890 		zram_slot_unlock(zram, index);
891 		release_pp_slot(zram, pps);
892 	}
893 
894 	if (blk_idx)
895 		free_block_bdev(zram, blk_idx);
896 	__free_page(page);
897 release_init_lock:
898 	release_pp_ctl(zram, ctl);
899 	atomic_set(&zram->pp_in_progress, 0);
900 	up_read(&zram->init_lock);
901 
902 	return ret;
903 }
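/*
 * Usage sketch (assuming zram0 with a backing_dev configured), matching
 * the modes parsed above:
 *
 *	echo idle > /sys/block/zram0/writeback
 *	echo huge > /sys/block/zram0/writeback
 *	echo huge_idle > /sys/block/zram0/writeback
 *	echo incompressible > /sys/block/zram0/writeback
 *	echo page_index=26 > /sys/block/zram0/writeback
 */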
904 
905 struct zram_work {
906 	struct work_struct work;
907 	struct zram *zram;
908 	unsigned long entry;
909 	struct page *page;
910 	int error;
911 };
912 
913 static void zram_sync_read(struct work_struct *work)
914 {
915 	struct zram_work *zw = container_of(work, struct zram_work, work);
916 	struct bio_vec bv;
917 	struct bio bio;
918 
919 	bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
920 	bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
921 	__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
922 	zw->error = submit_bio_wait(&bio);
923 }
924 
925 /*
926  * The block layer wants one ->submit_bio to be active at a time, so if we
927  * use chained IO with the parent IO in the same context, it deadlocks. To
928  * avoid that, use a worker thread context.
929  */
930 static int read_from_bdev_sync(struct zram *zram, struct page *page,
931 				unsigned long entry)
932 {
933 	struct zram_work work;
934 
935 	work.page = page;
936 	work.zram = zram;
937 	work.entry = entry;
938 
939 	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
940 	queue_work(system_unbound_wq, &work.work);
941 	flush_work(&work.work);
942 	destroy_work_on_stack(&work.work);
943 
944 	return work.error;
945 }
946 
947 static int read_from_bdev(struct zram *zram, struct page *page,
948 			unsigned long entry, struct bio *parent)
949 {
950 	atomic64_inc(&zram->stats.bd_reads);
951 	if (!parent) {
952 		if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
953 			return -EIO;
954 		return read_from_bdev_sync(zram, page, entry);
955 	}
956 	read_from_bdev_async(zram, page, entry, parent);
957 	return 0;
958 }
959 #else
960 static inline void reset_bdev(struct zram *zram) {};
961 static int read_from_bdev(struct zram *zram, struct page *page,
962 			unsigned long entry, struct bio *parent)
963 {
964 	return -EIO;
965 }
966 
967 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
968 #endif
969 
970 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
971 
972 static struct dentry *zram_debugfs_root;
973 
974 static void zram_debugfs_create(void)
975 {
976 	zram_debugfs_root = debugfs_create_dir("zram", NULL);
977 }
978 
979 static void zram_debugfs_destroy(void)
980 {
981 	debugfs_remove_recursive(zram_debugfs_root);
982 }
983 
984 static ssize_t read_block_state(struct file *file, char __user *buf,
985 				size_t count, loff_t *ppos)
986 {
987 	char *kbuf;
988 	ssize_t index, written = 0;
989 	struct zram *zram = file->private_data;
990 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
991 	struct timespec64 ts;
992 
993 	kbuf = kvmalloc(count, GFP_KERNEL);
994 	if (!kbuf)
995 		return -ENOMEM;
996 
997 	down_read(&zram->init_lock);
998 	if (!init_done(zram)) {
999 		up_read(&zram->init_lock);
1000 		kvfree(kbuf);
1001 		return -EINVAL;
1002 	}
1003 
1004 	for (index = *ppos; index < nr_pages; index++) {
1005 		int copied;
1006 
1007 		zram_slot_lock(zram, index);
1008 		if (!zram_allocated(zram, index))
1009 			goto next;
1010 
1011 		ts = ktime_to_timespec64(zram->table[index].ac_time);
1012 		copied = snprintf(kbuf + written, count,
1013 			"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
1014 			index, (s64)ts.tv_sec,
1015 			ts.tv_nsec / NSEC_PER_USEC,
1016 			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
1017 			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
1018 			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
1019 			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
1020 			zram_get_priority(zram, index) ? 'r' : '.',
1021 			zram_test_flag(zram, index,
1022 				       ZRAM_INCOMPRESSIBLE) ? 'n' : '.');
1023 
1024 		if (count <= copied) {
1025 			zram_slot_unlock(zram, index);
1026 			break;
1027 		}
1028 		written += copied;
1029 		count -= copied;
1030 next:
1031 		zram_slot_unlock(zram, index);
1032 		*ppos += 1;
1033 	}
1034 
1035 	up_read(&zram->init_lock);
1036 	if (copy_to_user(buf, kbuf, written))
1037 		written = -EFAULT;
1038 	kvfree(kbuf);
1039 
1040 	return written;
1041 }
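/*
 * Output sketch: each line printed above is "<index> <sec>.<usec> <flags>",
 * where the flag letters are s (same), w (written back), h (huge),
 * i (idle), r (recompressed) and n (incompressible), e.g. (made-up
 * values):
 *
 *	  300       75.033841 .wh...
 */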
1042 
1043 static const struct file_operations proc_zram_block_state_op = {
1044 	.open = simple_open,
1045 	.read = read_block_state,
1046 	.llseek = default_llseek,
1047 };
1048 
1049 static void zram_debugfs_register(struct zram *zram)
1050 {
1051 	if (!zram_debugfs_root)
1052 		return;
1053 
1054 	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
1055 						zram_debugfs_root);
1056 	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
1057 				zram, &proc_zram_block_state_op);
1058 }
1059 
1060 static void zram_debugfs_unregister(struct zram *zram)
1061 {
1062 	debugfs_remove_recursive(zram->debugfs_dir);
1063 }
1064 #else
1065 static void zram_debugfs_create(void) {};
1066 static void zram_debugfs_destroy(void) {};
1067 static void zram_debugfs_register(struct zram *zram) {};
1068 static void zram_debugfs_unregister(struct zram *zram) {};
1069 #endif
1070 
1071 /*
1072  * We switched to per-cpu streams and this attr is not needed anymore.
1073  * However, we will keep it around for some time, because:
1074  * a) we may revert per-cpu streams in the future
1075  * b) it's visible to user space and we need to follow our two-year
1076  *    retirement rule; but we already have a number of 'soon to be
1077  *    altered' attrs, so max_comp_streams needs to wait for the next
1078  *    layoff cycle.
1079  */
1080 static ssize_t max_comp_streams_show(struct device *dev,
1081 		struct device_attribute *attr, char *buf)
1082 {
1083 	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
1084 }
1085 
1086 static ssize_t max_comp_streams_store(struct device *dev,
1087 		struct device_attribute *attr, const char *buf, size_t len)
1088 {
1089 	return len;
1090 }
1091 
1092 static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
1093 {
1094 	/* Do not free statically defined compression algorithms */
1095 	if (zram->comp_algs[prio] != default_compressor)
1096 		kfree(zram->comp_algs[prio]);
1097 
1098 	zram->comp_algs[prio] = alg;
1099 }
1100 
1101 static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
1102 {
1103 	ssize_t sz;
1104 
1105 	down_read(&zram->init_lock);
1106 	sz = zcomp_available_show(zram->comp_algs[prio], buf);
1107 	up_read(&zram->init_lock);
1108 
1109 	return sz;
1110 }
1111 
1112 static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
1113 {
1114 	char *compressor;
1115 	size_t sz;
1116 
1117 	sz = strlen(buf);
1118 	if (sz >= CRYPTO_MAX_ALG_NAME)
1119 		return -E2BIG;
1120 
1121 	compressor = kstrdup(buf, GFP_KERNEL);
1122 	if (!compressor)
1123 		return -ENOMEM;
1124 
1125 	/* ignore trailing newline */
1126 	if (sz > 0 && compressor[sz - 1] == '\n')
1127 		compressor[sz - 1] = 0x00;
1128 
1129 	if (!zcomp_available_algorithm(compressor)) {
1130 		kfree(compressor);
1131 		return -EINVAL;
1132 	}
1133 
1134 	down_write(&zram->init_lock);
1135 	if (init_done(zram)) {
1136 		up_write(&zram->init_lock);
1137 		kfree(compressor);
1138 		pr_info("Can't change algorithm for initialized device\n");
1139 		return -EBUSY;
1140 	}
1141 
1142 	comp_algorithm_set(zram, prio, compressor);
1143 	up_write(&zram->init_lock);
1144 	return 0;
1145 }
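/*
 * Usage sketch (assuming zram0, not yet initialized): pick any algorithm
 * reported by reading the attribute, e.g.:
 *
 *	cat /sys/block/zram0/comp_algorithm
 *	echo zstd > /sys/block/zram0/comp_algorithm
 */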
1146 
1147 static void comp_params_reset(struct zram *zram, u32 prio)
1148 {
1149 	struct zcomp_params *params = &zram->params[prio];
1150 
1151 	vfree(params->dict);
1152 	params->level = ZCOMP_PARAM_NO_LEVEL;
1153 	params->dict_sz = 0;
1154 	params->dict = NULL;
1155 }
1156 
1157 static int comp_params_store(struct zram *zram, u32 prio, s32 level,
1158 			     const char *dict_path)
1159 {
1160 	ssize_t sz = 0;
1161 
1162 	comp_params_reset(zram, prio);
1163 
1164 	if (dict_path) {
1165 		sz = kernel_read_file_from_path(dict_path, 0,
1166 						&zram->params[prio].dict,
1167 						INT_MAX,
1168 						NULL,
1169 						READING_POLICY);
1170 		if (sz < 0)
1171 			return -EINVAL;
1172 	}
1173 
1174 	zram->params[prio].dict_sz = sz;
1175 	zram->params[prio].level = level;
1176 	return 0;
1177 }
1178 
1179 static ssize_t algorithm_params_store(struct device *dev,
1180 				      struct device_attribute *attr,
1181 				      const char *buf,
1182 				      size_t len)
1183 {
1184 	s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
1185 	char *args, *param, *val, *algo = NULL, *dict_path = NULL;
1186 	struct zram *zram = dev_to_zram(dev);
1187 	int ret;
1188 
1189 	args = skip_spaces(buf);
1190 	while (*args) {
1191 		args = next_arg(args, &param, &val);
1192 
1193 		if (!val || !*val)
1194 			return -EINVAL;
1195 
1196 		if (!strcmp(param, "priority")) {
1197 			ret = kstrtoint(val, 10, &prio);
1198 			if (ret)
1199 				return ret;
1200 			continue;
1201 		}
1202 
1203 		if (!strcmp(param, "level")) {
1204 			ret = kstrtoint(val, 10, &level);
1205 			if (ret)
1206 				return ret;
1207 			continue;
1208 		}
1209 
1210 		if (!strcmp(param, "algo")) {
1211 			algo = val;
1212 			continue;
1213 		}
1214 
1215 		if (!strcmp(param, "dict")) {
1216 			dict_path = val;
1217 			continue;
1218 		}
1219 	}
1220 
1221 	/* Look up the priority by algorithm name */
1222 	if (algo) {
1223 		s32 p;
1224 
1225 		prio = -EINVAL;
1226 		for (p = ZRAM_PRIMARY_COMP; p < ZRAM_MAX_COMPS; p++) {
1227 			if (!zram->comp_algs[p])
1228 				continue;
1229 
1230 			if (!strcmp(zram->comp_algs[p], algo)) {
1231 				prio = p;
1232 				break;
1233 			}
1234 		}
1235 	}
1236 
1237 	if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
1238 		return -EINVAL;
1239 
1240 	ret = comp_params_store(zram, prio, level, dict_path);
1241 	return ret ? ret : len;
1242 }
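/*
 * Usage sketch, matching the "param=val" parser above. The target
 * algorithm can be selected either by priority or by name; the dictionary
 * path is only an example:
 *
 *	echo "priority=0 level=3" > /sys/block/zram0/algorithm_params
 *	echo "algo=zstd level=9 dict=/etc/zram/zstd.dict" \
 *		> /sys/block/zram0/algorithm_params
 */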
1243 
1244 static ssize_t comp_algorithm_show(struct device *dev,
1245 				   struct device_attribute *attr,
1246 				   char *buf)
1247 {
1248 	struct zram *zram = dev_to_zram(dev);
1249 
1250 	return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
1251 }
1252 
1253 static ssize_t comp_algorithm_store(struct device *dev,
1254 				    struct device_attribute *attr,
1255 				    const char *buf,
1256 				    size_t len)
1257 {
1258 	struct zram *zram = dev_to_zram(dev);
1259 	int ret;
1260 
1261 	ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
1262 	return ret ? ret : len;
1263 }
1264 
1265 #ifdef CONFIG_ZRAM_MULTI_COMP
1266 static ssize_t recomp_algorithm_show(struct device *dev,
1267 				     struct device_attribute *attr,
1268 				     char *buf)
1269 {
1270 	struct zram *zram = dev_to_zram(dev);
1271 	ssize_t sz = 0;
1272 	u32 prio;
1273 
1274 	for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
1275 		if (!zram->comp_algs[prio])
1276 			continue;
1277 
1278 		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
1279 		sz += __comp_algorithm_show(zram, prio, buf + sz);
1280 	}
1281 
1282 	return sz;
1283 }
1284 
1285 static ssize_t recomp_algorithm_store(struct device *dev,
1286 				      struct device_attribute *attr,
1287 				      const char *buf,
1288 				      size_t len)
1289 {
1290 	struct zram *zram = dev_to_zram(dev);
1291 	int prio = ZRAM_SECONDARY_COMP;
1292 	char *args, *param, *val;
1293 	char *alg = NULL;
1294 	int ret;
1295 
1296 	args = skip_spaces(buf);
1297 	while (*args) {
1298 		args = next_arg(args, &param, &val);
1299 
1300 		if (!val || !*val)
1301 			return -EINVAL;
1302 
1303 		if (!strcmp(param, "algo")) {
1304 			alg = val;
1305 			continue;
1306 		}
1307 
1308 		if (!strcmp(param, "priority")) {
1309 			ret = kstrtoint(val, 10, &prio);
1310 			if (ret)
1311 				return ret;
1312 			continue;
1313 		}
1314 	}
1315 
1316 	if (!alg)
1317 		return -EINVAL;
1318 
1319 	if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS)
1320 		return -EINVAL;
1321 
1322 	ret = __comp_algorithm_store(zram, prio, alg);
1323 	return ret ? ret : len;
1324 }
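/*
 * Usage sketch: register a secondary (recompression) algorithm at a given
 * priority, e.g.:
 *
 *	echo "algo=zstd priority=1" > /sys/block/zram0/recomp_algorithm
 */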
1325 #endif
1326 
1327 static ssize_t compact_store(struct device *dev,
1328 		struct device_attribute *attr, const char *buf, size_t len)
1329 {
1330 	struct zram *zram = dev_to_zram(dev);
1331 
1332 	down_read(&zram->init_lock);
1333 	if (!init_done(zram)) {
1334 		up_read(&zram->init_lock);
1335 		return -EINVAL;
1336 	}
1337 
1338 	zs_compact(zram->mem_pool);
1339 	up_read(&zram->init_lock);
1340 
1341 	return len;
1342 }
1343 
1344 static ssize_t io_stat_show(struct device *dev,
1345 		struct device_attribute *attr, char *buf)
1346 {
1347 	struct zram *zram = dev_to_zram(dev);
1348 	ssize_t ret;
1349 
1350 	down_read(&zram->init_lock);
1351 	ret = scnprintf(buf, PAGE_SIZE,
1352 			"%8llu %8llu 0 %8llu\n",
1353 			(u64)atomic64_read(&zram->stats.failed_reads),
1354 			(u64)atomic64_read(&zram->stats.failed_writes),
1355 			(u64)atomic64_read(&zram->stats.notify_free));
1356 	up_read(&zram->init_lock);
1357 
1358 	return ret;
1359 }
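/*
 * Column sketch for io_stat, in the order printed above: failed_reads,
 * failed_writes, a literal 0 placeholder (kept for format compatibility),
 * and notify_free.
 */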
1360 
1361 static ssize_t mm_stat_show(struct device *dev,
1362 		struct device_attribute *attr, char *buf)
1363 {
1364 	struct zram *zram = dev_to_zram(dev);
1365 	struct zs_pool_stats pool_stats;
1366 	u64 orig_size, mem_used = 0;
1367 	long max_used;
1368 	ssize_t ret;
1369 
1370 	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
1371 
1372 	down_read(&zram->init_lock);
1373 	if (init_done(zram)) {
1374 		mem_used = zs_get_total_pages(zram->mem_pool);
1375 		zs_pool_stats(zram->mem_pool, &pool_stats);
1376 	}
1377 
1378 	orig_size = atomic64_read(&zram->stats.pages_stored);
1379 	max_used = atomic_long_read(&zram->stats.max_used_pages);
1380 
1381 	ret = scnprintf(buf, PAGE_SIZE,
1382 			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
1383 			orig_size << PAGE_SHIFT,
1384 			(u64)atomic64_read(&zram->stats.compr_data_size),
1385 			mem_used << PAGE_SHIFT,
1386 			zram->limit_pages << PAGE_SHIFT,
1387 			max_used << PAGE_SHIFT,
1388 			(u64)atomic64_read(&zram->stats.same_pages),
1389 			atomic_long_read(&pool_stats.pages_compacted),
1390 			(u64)atomic64_read(&zram->stats.huge_pages),
1391 			(u64)atomic64_read(&zram->stats.huge_pages_since));
1392 	up_read(&zram->init_lock);
1393 
1394 	return ret;
1395 }
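/*
 * Column sketch for mm_stat, in the order printed above: orig_data_size,
 * compr_data_size, mem_used_total, mem_limit, mem_used_max, same_pages,
 * pages_compacted, huge_pages and huge_pages_since (the first five are in
 * bytes, the rest are page counts).
 */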
1396 
1397 #ifdef CONFIG_ZRAM_WRITEBACK
1398 #define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
1399 static ssize_t bd_stat_show(struct device *dev,
1400 		struct device_attribute *attr, char *buf)
1401 {
1402 	struct zram *zram = dev_to_zram(dev);
1403 	ssize_t ret;
1404 
1405 	down_read(&zram->init_lock);
1406 	ret = scnprintf(buf, PAGE_SIZE,
1407 		"%8llu %8llu %8llu\n",
1408 			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
1409 			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
1410 			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
1411 	up_read(&zram->init_lock);
1412 
1413 	return ret;
1414 }
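/*
 * Note on units: bd_count, bd_reads and bd_writes above are scaled by
 * FOUR_K(), i.e. reported in 4K units regardless of PAGE_SIZE.
 */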
1415 #endif
1416 
1417 static ssize_t debug_stat_show(struct device *dev,
1418 		struct device_attribute *attr, char *buf)
1419 {
1420 	int version = 1;
1421 	struct zram *zram = dev_to_zram(dev);
1422 	ssize_t ret;
1423 
1424 	down_read(&zram->init_lock);
1425 	ret = scnprintf(buf, PAGE_SIZE,
1426 			"version: %d\n%8llu %8llu\n",
1427 			version,
1428 			(u64)atomic64_read(&zram->stats.writestall),
1429 			(u64)atomic64_read(&zram->stats.miss_free));
1430 	up_read(&zram->init_lock);
1431 
1432 	return ret;
1433 }
1434 
1435 static DEVICE_ATTR_RO(io_stat);
1436 static DEVICE_ATTR_RO(mm_stat);
1437 #ifdef CONFIG_ZRAM_WRITEBACK
1438 static DEVICE_ATTR_RO(bd_stat);
1439 #endif
1440 static DEVICE_ATTR_RO(debug_stat);
1441 
1442 static void zram_meta_free(struct zram *zram, u64 disksize)
1443 {
1444 	size_t num_pages = disksize >> PAGE_SHIFT;
1445 	size_t index;
1446 
1447 	if (!zram->table)
1448 		return;
1449 
1450 	/* Free all pages that are still in this zram device */
1451 	for (index = 0; index < num_pages; index++)
1452 		zram_free_page(zram, index);
1453 
1454 	zs_destroy_pool(zram->mem_pool);
1455 	vfree(zram->table);
1456 	zram->table = NULL;
1457 }
1458 
1459 static bool zram_meta_alloc(struct zram *zram, u64 disksize)
1460 {
1461 	size_t num_pages, index;
1462 
1463 	num_pages = disksize >> PAGE_SHIFT;
1464 	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
1465 	if (!zram->table)
1466 		return false;
1467 
1468 	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
1469 	if (!zram->mem_pool) {
1470 		vfree(zram->table);
1471 		return false;
1472 	}
1473 
1474 	if (!huge_class_size)
1475 		huge_class_size = zs_huge_class_size(zram->mem_pool);
1476 
1477 	for (index = 0; index < num_pages; index++)
1478 		spin_lock_init(&zram->table[index].lock);
1479 	return true;
1480 }
1481 
1482 /*
1483  * To protect against concurrent access to the same index entry, the
1484  * caller should hold this table index entry's lock (zram_slot_lock())
1485  * while the entry is being accessed.
1486  */
1487 static void zram_free_page(struct zram *zram, size_t index)
1488 {
1489 	unsigned long handle;
1490 
1491 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
1492 	zram->table[index].ac_time = 0;
1493 #endif
1494 
1495 	zram_clear_flag(zram, index, ZRAM_IDLE);
1496 	zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1497 	zram_clear_flag(zram, index, ZRAM_PP_SLOT);
1498 	zram_set_priority(zram, index, 0);
1499 
1500 	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
1501 		zram_clear_flag(zram, index, ZRAM_HUGE);
1502 		atomic64_dec(&zram->stats.huge_pages);
1503 	}
1504 
1505 	if (zram_test_flag(zram, index, ZRAM_WB)) {
1506 		zram_clear_flag(zram, index, ZRAM_WB);
1507 		free_block_bdev(zram, zram_get_element(zram, index));
1508 		goto out;
1509 	}
1510 
1511 	/*
1512 	 * No memory is allocated for same-element-filled pages.
1513 	 * Simply clear the same-page flag.
1514 	 */
1515 	if (zram_test_flag(zram, index, ZRAM_SAME)) {
1516 		zram_clear_flag(zram, index, ZRAM_SAME);
1517 		atomic64_dec(&zram->stats.same_pages);
1518 		goto out;
1519 	}
1520 
1521 	handle = zram_get_handle(zram, index);
1522 	if (!handle)
1523 		return;
1524 
1525 	zs_free(zram->mem_pool, handle);
1526 
1527 	atomic64_sub(zram_get_obj_size(zram, index),
1528 		     &zram->stats.compr_data_size);
1529 out:
1530 	atomic64_dec(&zram->stats.pages_stored);
1531 	zram_set_handle(zram, index, 0);
1532 	zram_set_obj_size(zram, index, 0);
1533 }
1534 
1535 /*
1536  * Reads (decompresses if needed) a page from zspool (zsmalloc).
1537  * Corresponding ZRAM slot should be locked.
1538  */
1539 static int zram_read_from_zspool(struct zram *zram, struct page *page,
1540 				 u32 index)
1541 {
1542 	struct zcomp_strm *zstrm;
1543 	unsigned long handle;
1544 	unsigned int size;
1545 	void *src, *dst;
1546 	u32 prio;
1547 	int ret;
1548 
1549 	handle = zram_get_handle(zram, index);
1550 	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1551 		unsigned long value;
1552 		void *mem;
1553 
1554 		value = handle ? zram_get_element(zram, index) : 0;
1555 		mem = kmap_local_page(page);
1556 		zram_fill_page(mem, PAGE_SIZE, value);
1557 		kunmap_local(mem);
1558 		return 0;
1559 	}
1560 
1561 	size = zram_get_obj_size(zram, index);
1562 
1563 	if (size != PAGE_SIZE) {
1564 		prio = zram_get_priority(zram, index);
1565 		zstrm = zcomp_stream_get(zram->comps[prio]);
1566 	}
1567 
1568 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1569 	if (size == PAGE_SIZE) {
1570 		dst = kmap_local_page(page);
1571 		copy_page(dst, src);
1572 		kunmap_local(dst);
1573 		ret = 0;
1574 	} else {
1575 		dst = kmap_local_page(page);
1576 		ret = zcomp_decompress(zram->comps[prio], zstrm,
1577 				       src, size, dst);
1578 		kunmap_local(dst);
1579 		zcomp_stream_put(zram->comps[prio]);
1580 	}
1581 	zs_unmap_object(zram->mem_pool, handle);
1582 	return ret;
1583 }
1584 
1585 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
1586 			  struct bio *parent)
1587 {
1588 	int ret;
1589 
1590 	zram_slot_lock(zram, index);
1591 	if (!zram_test_flag(zram, index, ZRAM_WB)) {
1592 		/* The slot should be locked throughout the function call */
1593 		ret = zram_read_from_zspool(zram, page, index);
1594 		zram_slot_unlock(zram, index);
1595 	} else {
1596 		/*
1597 		 * The slot should be unlocked before reading from the backing
1598 		 * device.
1599 		 */
1600 		zram_slot_unlock(zram, index);
1601 
1602 		ret = read_from_bdev(zram, page, zram_get_element(zram, index),
1603 				     parent);
1604 	}
1605 
1606 	/* Should NEVER happen. Return bio error if it does. */
1607 	if (WARN_ON(ret < 0))
1608 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
1609 
1610 	return ret;
1611 }
1612 
1613 /*
1614  * Use a temporary buffer to decompress the page, as the decompressor
1615  * always expects a full page for the output.
1616  */
1617 static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
1618 				  u32 index, int offset)
1619 {
1620 	struct page *page = alloc_page(GFP_NOIO);
1621 	int ret;
1622 
1623 	if (!page)
1624 		return -ENOMEM;
1625 	ret = zram_read_page(zram, page, index, NULL);
1626 	if (likely(!ret))
1627 		memcpy_to_bvec(bvec, page_address(page) + offset);
1628 	__free_page(page);
1629 	return ret;
1630 }
1631 
1632 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
1633 			  u32 index, int offset, struct bio *bio)
1634 {
1635 	if (is_partial_io(bvec))
1636 		return zram_bvec_read_partial(zram, bvec, index, offset);
1637 	return zram_read_page(zram, bvec->bv_page, index, bio);
1638 }
1639 
1640 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
1641 {
1642 	int ret = 0;
1643 	unsigned long alloced_pages;
1644 	unsigned long handle = -ENOMEM;
1645 	unsigned int comp_len = 0;
1646 	void *src, *dst, *mem;
1647 	struct zcomp_strm *zstrm;
1648 	unsigned long element = 0;
1649 	enum zram_pageflags flags = 0;
1650 
1651 	mem = kmap_local_page(page);
1652 	if (page_same_filled(mem, &element)) {
1653 		kunmap_local(mem);
1654 		/* Free memory associated with this sector now. */
1655 		flags = ZRAM_SAME;
1656 		atomic64_inc(&zram->stats.same_pages);
1657 		goto out;
1658 	}
1659 	kunmap_local(mem);
1660 
1661 compress_again:
1662 	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1663 	src = kmap_local_page(page);
1664 	ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
1665 			     src, &comp_len);
1666 	kunmap_local(src);
1667 
1668 	if (unlikely(ret)) {
1669 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1670 		pr_err("Compression failed! err=%d\n", ret);
1671 		zs_free(zram->mem_pool, handle);
1672 		return ret;
1673 	}
1674 
1675 	if (comp_len >= huge_class_size)
1676 		comp_len = PAGE_SIZE;
1677 	/*
1678 	 * Handle allocation has 2 paths:
1679 	 * a) the fast path is executed with preemption disabled (for
1680 	 *  per-cpu streams) and has the __GFP_DIRECT_RECLAIM bit clear,
1681 	 *  since we can't sleep;
1682 	 * b) the slow path enables preemption and attempts to allocate
1683 	 *  the page with the __GFP_DIRECT_RECLAIM bit set. We have to
1684 	 *  put the per-cpu compression stream and, thus, redo the
1685 	 *  compression once the handle is allocated.
1686 	 *
1687 	 * If we have a 'non-null' handle here then we are coming
1688 	 * from the slow path and the handle has already been allocated.
1689 	 */
1690 	if (IS_ERR_VALUE(handle))
1691 		handle = zs_malloc(zram->mem_pool, comp_len,
1692 				__GFP_KSWAPD_RECLAIM |
1693 				__GFP_NOWARN |
1694 				__GFP_HIGHMEM |
1695 				__GFP_MOVABLE);
1696 	if (IS_ERR_VALUE(handle)) {
1697 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1698 		atomic64_inc(&zram->stats.writestall);
1699 		handle = zs_malloc(zram->mem_pool, comp_len,
1700 				GFP_NOIO | __GFP_HIGHMEM |
1701 				__GFP_MOVABLE);
1702 		if (IS_ERR_VALUE(handle))
1703 			return PTR_ERR((void *)handle);
1704 
1705 		if (comp_len != PAGE_SIZE)
1706 			goto compress_again;
1707 		/*
1708 		 * If the page is not compressible, we need to re-acquire the
1709 		 * per-cpu stream and execute the code below. The
1710 		 * zcomp_stream_get() call disables cpu hotplug and grabs the
1711 		 * zstrm buffer back, so that the dereference of the zstrm
1712 		 * variable below is valid.
1713 		 */
1714 		zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1715 	}
1716 
1717 	alloced_pages = zs_get_total_pages(zram->mem_pool);
1718 	update_used_max(zram, alloced_pages);
1719 
1720 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
1721 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1722 		zs_free(zram->mem_pool, handle);
1723 		return -ENOMEM;
1724 	}
1725 
1726 	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
1727 
1728 	src = zstrm->buffer;
1729 	if (comp_len == PAGE_SIZE)
1730 		src = kmap_local_page(page);
1731 	memcpy(dst, src, comp_len);
1732 	if (comp_len == PAGE_SIZE)
1733 		kunmap_local(src);
1734 
1735 	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1736 	zs_unmap_object(zram->mem_pool, handle);
1737 	atomic64_add(comp_len, &zram->stats.compr_data_size);
1738 out:
1739 	/*
1740 	 * Free memory associated with this sector
1741 	 * before overwriting unused sectors.
1742 	 */
1743 	zram_slot_lock(zram, index);
1744 	zram_free_page(zram, index);
1745 
1746 	if (comp_len == PAGE_SIZE) {
1747 		zram_set_flag(zram, index, ZRAM_HUGE);
1748 		atomic64_inc(&zram->stats.huge_pages);
1749 		atomic64_inc(&zram->stats.huge_pages_since);
1750 	}
1751 
1752 	if (flags) {
1753 		zram_set_flag(zram, index, flags);
1754 		zram_set_element(zram, index, element);
1755 	}  else {
1756 		zram_set_handle(zram, index, handle);
1757 		zram_set_obj_size(zram, index, comp_len);
1758 	}
1759 	zram_slot_unlock(zram, index);
1760 
1761 	/* Update stats */
1762 	atomic64_inc(&zram->stats.pages_stored);
1763 	return ret;
1764 }
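/*
 * Summary of the three store outcomes above: a same-filled page is
 * recorded as ZRAM_SAME plus its fill value (no zsmalloc allocation); a
 * page whose compressed size reaches huge_class_size is stored
 * uncompressed (comp_len == PAGE_SIZE, flagged ZRAM_HUGE); everything
 * else is stored as a compressed zsmalloc object of comp_len bytes.
 */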
1765 
1766 /*
1767  * This is a partial IO. Read the full page before writing the changes.
1768  */
1769 static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
1770 				   u32 index, int offset, struct bio *bio)
1771 {
1772 	struct page *page = alloc_page(GFP_NOIO);
1773 	int ret;
1774 
1775 	if (!page)
1776 		return -ENOMEM;
1777 
1778 	ret = zram_read_page(zram, page, index, bio);
1779 	if (!ret) {
1780 		memcpy_from_bvec(page_address(page) + offset, bvec);
1781 		ret = zram_write_page(zram, page, index);
1782 	}
1783 	__free_page(page);
1784 	return ret;
1785 }
1786 
1787 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
1788 			   u32 index, int offset, struct bio *bio)
1789 {
1790 	if (is_partial_io(bvec))
1791 		return zram_bvec_write_partial(zram, bvec, index, offset, bio);
1792 	return zram_write_page(zram, bvec->bv_page, index);
1793 }
1794 
1795 #ifdef CONFIG_ZRAM_MULTI_COMP
1796 #define RECOMPRESS_IDLE		(1 << 0)
1797 #define RECOMPRESS_HUGE		(1 << 1)
1798 
1799 static int scan_slots_for_recompress(struct zram *zram, u32 mode,
1800 				     struct zram_pp_ctl *ctl)
1801 {
1802 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
1803 	struct zram_pp_slot *pps = NULL;
1804 	unsigned long index;
1805 
1806 	for (index = 0; index < nr_pages; index++) {
1807 		if (!pps)
1808 			pps = kmalloc(sizeof(*pps), GFP_KERNEL);
1809 		if (!pps)
1810 			return -ENOMEM;
1811 
1812 		INIT_LIST_HEAD(&pps->entry);
1813 
1814 		zram_slot_lock(zram, index);
1815 		if (!zram_allocated(zram, index))
1816 			goto next;
1817 
1818 		if (mode & RECOMPRESS_IDLE &&
1819 		    !zram_test_flag(zram, index, ZRAM_IDLE))
1820 			goto next;
1821 
1822 		if (mode & RECOMPRESS_HUGE &&
1823 		    !zram_test_flag(zram, index, ZRAM_HUGE))
1824 			goto next;
1825 
1826 		if (zram_test_flag(zram, index, ZRAM_WB) ||
1827 		    zram_test_flag(zram, index, ZRAM_SAME) ||
1828 		    zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1829 			goto next;
1830 
1831 		pps->index = index;
1832 		place_pp_slot(zram, ctl, pps);
1833 		pps = NULL;
1834 next:
1835 		zram_slot_unlock(zram, index);
1836 	}
1837 
1838 	kfree(pps);
1839 	return 0;
1840 }
1841 
1842 /*
1843  * This function will decompress the page (unless it's ZRAM_HUGE) and then
1844  * attempt to re-compress it using a secondary compression algorithm,
1845  * starting at the provided priority (which is potentially more effective).
1846  *
1847  * The corresponding ZRAM slot must be locked by the caller.
1848  */
1849 static int recompress_slot(struct zram *zram, u32 index, struct page *page,
1850 			   u64 *num_recomp_pages, u32 threshold, u32 prio,
1851 			   u32 prio_max)
1852 {
1853 	struct zcomp_strm *zstrm = NULL;
1854 	unsigned long handle_old;
1855 	unsigned long handle_new;
1856 	unsigned int comp_len_old;
1857 	unsigned int comp_len_new;
1858 	unsigned int class_index_old;
1859 	unsigned int class_index_new;
1860 	u32 num_recomps = 0;
1861 	void *src, *dst;
1862 	int ret;
1863 
1864 	handle_old = zram_get_handle(zram, index);
1865 	if (!handle_old)
1866 		return -EINVAL;
1867 
1868 	comp_len_old = zram_get_obj_size(zram, index);
1869 	/*
1870 	 * Do not recompress objects that are already "small enough".
1871 	 */
1872 	if (comp_len_old < threshold)
1873 		return 0;
1874 
1875 	ret = zram_read_from_zspool(zram, page, index);
1876 	if (ret)
1877 		return ret;
1878 
1879 	/*
1880 	 * We touched this entry, so mark it as non-IDLE. This makes sure that
1881 	 * we don't preserve the IDLE flag and don't incorrectly pick this entry
1882 	 * for a different post-processing type (e.g. writeback).
1883 	 */
1884 	zram_clear_flag(zram, index, ZRAM_IDLE);
1885 
1886 	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
1887 	/*
1888 	 * Iterate the secondary comp algorithms list (in order of priority)
1889 	 * and try to recompress the page.
1890 	 */
1891 	for (; prio < prio_max; prio++) {
1892 		if (!zram->comps[prio])
1893 			continue;
1894 
1895 		/*
1896 		 * Skip if the object is already re-compressed with a higher
1897 		 * priority algorithm (or same algorithm).
1898 		 */
1899 		if (prio <= zram_get_priority(zram, index))
1900 			continue;
1901 
1902 		num_recomps++;
1903 		zstrm = zcomp_stream_get(zram->comps[prio]);
1904 		src = kmap_local_page(page);
1905 		ret = zcomp_compress(zram->comps[prio], zstrm,
1906 				     src, &comp_len_new);
1907 		kunmap_local(src);
1908 
1909 		if (ret) {
1910 			zcomp_stream_put(zram->comps[prio]);
1911 			return ret;
1912 		}
1913 
1914 		class_index_new = zs_lookup_class_index(zram->mem_pool,
1915 							comp_len_new);
1916 
1917 		/* Continue until we make progress */
1918 		if (class_index_new >= class_index_old ||
1919 		    (threshold && comp_len_new >= threshold)) {
1920 			zcomp_stream_put(zram->comps[prio]);
1921 			continue;
1922 		}
1923 
1924 		/* Recompression was successful so break out */
1925 		break;
1926 	}
1927 
1928 	/*
1929 	 * We did not try to recompress, e.g. when we have only one
1930 	 * secondary algorithm and the page is already recompressed
1931 	 * using that algorithm.
1932 	 */
1933 	if (!zstrm)
1934 		return 0;
1935 
1936 	/*
1937 	 * Decrement the limit (if set) on pages we can recompress, even
1938 	 * when current recompression was unsuccessful or did not compress
1939 	 * the page below the threshold, because we still spent resources
1940 	 * on it.
1941 	 */
1942 	if (*num_recomp_pages)
1943 		*num_recomp_pages -= 1;
1944 
1945 	if (class_index_new >= class_index_old) {
1946 		/*
1947 		 * Secondary algorithms failed to re-compress the page
1948 		 * in a way that would save memory, mark the object as
1949 		 * incompressible so that we will not try to compress
1950 		 * it again.
1951 		 *
1952 		 * We need to make sure that all secondary algorithms have
1953 		 * failed, so we test if the number of recompressions matches
1954 		 * the number of active secondary algorithms.
1955 		 */
1956 		if (num_recomps == zram->num_active_comps - 1)
1957 			zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1958 		return 0;
1959 	}
1960 
1961 	/* Successful recompression but above threshold */
1962 	if (threshold && comp_len_new >= threshold)
1963 		return 0;
1964 
1965 	/*
1966 	 * No direct reclaim (slow path) for handle allocation and no
1967 	 * re-compression attempt (unlike in zram_bvec_write()) since
1968 	 * we have already stored that object in zsmalloc. If we cannot
1969 	 * allocate memory for the recompressed object then we bail out
1970 	 * and simply keep the old (existing) object in zsmalloc.
1971 	 */
1972 	handle_new = zs_malloc(zram->mem_pool, comp_len_new,
1973 			       __GFP_KSWAPD_RECLAIM |
1974 			       __GFP_NOWARN |
1975 			       __GFP_HIGHMEM |
1976 			       __GFP_MOVABLE);
1977 	if (IS_ERR_VALUE(handle_new)) {
1978 		zcomp_stream_put(zram->comps[prio]);
1979 		return PTR_ERR((void *)handle_new);
1980 	}
1981 
1982 	dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
1983 	memcpy(dst, zstrm->buffer, comp_len_new);
1984 	zcomp_stream_put(zram->comps[prio]);
1985 
1986 	zs_unmap_object(zram->mem_pool, handle_new);
1987 
1988 	zram_free_page(zram, index);
1989 	zram_set_handle(zram, index, handle_new);
1990 	zram_set_obj_size(zram, index, comp_len_new);
1991 	zram_set_priority(zram, index, prio);
1992 
1993 	atomic64_add(comp_len_new, &zram->stats.compr_data_size);
1994 	atomic64_inc(&zram->stats.pages_stored);
1995 
1996 	return 0;
1997 }
1998 
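/*
 * recompress_store() accepts a space-separated list of key=value
 * parameters: "type", "max_pages", "threshold", "algo" and "priority".
 * A usage sketch, based on the zram admin guide:
 *
 *	echo "type=huge_idle threshold=3000 max_pages=1000" > \
 *		/sys/block/zramX/recompress
 */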
1999 static ssize_t recompress_store(struct device *dev,
2000 				struct device_attribute *attr,
2001 				const char *buf, size_t len)
2002 {
2003 	u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
2004 	struct zram *zram = dev_to_zram(dev);
2005 	char *args, *param, *val, *algo = NULL;
2006 	u64 num_recomp_pages = ULLONG_MAX;
2007 	struct zram_pp_ctl *ctl = NULL;
2008 	struct zram_pp_slot *pps;
2009 	u32 mode = 0, threshold = 0;
2010 	struct page *page;
2011 	ssize_t ret;
2012 
2013 	args = skip_spaces(buf);
2014 	while (*args) {
2015 		args = next_arg(args, &param, &val);
2016 
2017 		if (!val || !*val)
2018 			return -EINVAL;
2019 
2020 		if (!strcmp(param, "type")) {
2021 			if (!strcmp(val, "idle"))
2022 				mode = RECOMPRESS_IDLE;
2023 			if (!strcmp(val, "huge"))
2024 				mode = RECOMPRESS_HUGE;
2025 			if (!strcmp(val, "huge_idle"))
2026 				mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE;
2027 			continue;
2028 		}
2029 
2030 		if (!strcmp(param, "max_pages")) {
2031 			/*
2032 			 * Limit the number of entries (pages) we attempt to
2033 			 * recompress.
2034 			 */
2035 			ret = kstrtoull(val, 10, &num_recomp_pages);
2036 			if (ret)
2037 				return ret;
2038 			continue;
2039 		}
2040 
2041 		if (!strcmp(param, "threshold")) {
2042 			/*
2043 			 * We will re-compress only objects equal to or
2044 			 * greater in size than the watermark.
2045 			 */
2046 			ret = kstrtouint(val, 10, &threshold);
2047 			if (ret)
2048 				return ret;
2049 			continue;
2050 		}
2051 
2052 		if (!strcmp(param, "algo")) {
2053 			algo = val;
2054 			continue;
2055 		}
2056 
2057 		if (!strcmp(param, "priority")) {
2058 			ret = kstrtouint(val, 10, &prio);
2059 			if (ret)
2060 				return ret;
2061 
2062 			if (prio == ZRAM_PRIMARY_COMP)
2063 				prio = ZRAM_SECONDARY_COMP;
2064 
2065 			prio_max = min(prio + 1, ZRAM_MAX_COMPS);
2066 			continue;
2067 		}
2068 	}
2069 
2070 	if (threshold >= huge_class_size)
2071 		return -EINVAL;
2072 
2073 	down_read(&zram->init_lock);
2074 	if (!init_done(zram)) {
2075 		ret = -EINVAL;
2076 		goto release_init_lock;
2077 	}
2078 
2079 	/* Do not permit concurrent post-processing actions. */
2080 	if (atomic_xchg(&zram->pp_in_progress, 1)) {
2081 		up_read(&zram->init_lock);
2082 		return -EAGAIN;
2083 	}
2084 
2085 	if (algo) {
2086 		bool found = false;
2087 
2088 		for (; prio < ZRAM_MAX_COMPS; prio++) {
2089 			if (!zram->comp_algs[prio])
2090 				continue;
2091 
2092 			if (!strcmp(zram->comp_algs[prio], algo)) {
2093 				prio_max = min(prio + 1, ZRAM_MAX_COMPS);
2094 				found = true;
2095 				break;
2096 			}
2097 		}
2098 
2099 		if (!found) {
2100 			ret = -EINVAL;
2101 			goto release_init_lock;
2102 		}
2103 	}
2104 
2105 	page = alloc_page(GFP_KERNEL);
2106 	if (!page) {
2107 		ret = -ENOMEM;
2108 		goto release_init_lock;
2109 	}
2110 
2111 	ctl = init_pp_ctl();
2112 	if (!ctl) {
2113 		ret = -ENOMEM;
2114 		goto release_init_lock;
2115 	}
2116 
2117 	scan_slots_for_recompress(zram, mode, ctl);
2118 
2119 	ret = len;
2120 	while ((pps = select_pp_slot(ctl))) {
2121 		int err = 0;
2122 
2123 		if (!num_recomp_pages)
2124 			break;
2125 
2126 		zram_slot_lock(zram, pps->index);
2127 		if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
2128 			goto next;
2129 
2130 		err = recompress_slot(zram, pps->index, page,
2131 				      &num_recomp_pages, threshold,
2132 				      prio, prio_max);
2133 next:
2134 		zram_slot_unlock(zram, pps->index);
2135 		release_pp_slot(zram, pps);
2136 
2137 		if (err) {
2138 			ret = err;
2139 			break;
2140 		}
2141 
2142 		cond_resched();
2143 	}
2144 
2145 	__free_page(page);
2146 
2147 release_init_lock:
2148 	release_pp_ctl(zram, ctl);
2149 	atomic_set(&zram->pp_in_progress, 0);
2150 	up_read(&zram->init_lock);
2151 	return ret;
2152 }
2153 #endif
2154 
2155 static void zram_bio_discard(struct zram *zram, struct bio *bio)
2156 {
2157 	size_t n = bio->bi_iter.bi_size;
2158 	u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
2159 	u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
2160 			SECTOR_SHIFT;
2161 
2162 	/*
2163 	 * zram manages data in physical block size units. Because the logical
2164 	 * block size isn't identical to the physical block size on some
2165 	 * architectures, we could get a discard request pointing to a specific
2166 	 * offset within a certain physical block.  Although we could handle such
2167 	 * a request by reading that physical block, decompressing, partially
2168 	 * zeroing, re-compressing and then re-storing it, this isn't reasonable
2169 	 * because our intent with a discard request is to save memory.  So
2170 	 * skipping this logical block is appropriate here.
2171 	 */
2172 	if (offset) {
2173 		if (n <= (PAGE_SIZE - offset))
2174 			return;
2175 
2176 		n -= (PAGE_SIZE - offset);
2177 		index++;
2178 	}
2179 
2180 	while (n >= PAGE_SIZE) {
2181 		zram_slot_lock(zram, index);
2182 		zram_free_page(zram, index);
2183 		zram_slot_unlock(zram, index);
2184 		atomic64_inc(&zram->stats.notify_free);
2185 		index++;
2186 		n -= PAGE_SIZE;
2187 	}
2188 
2189 	bio_endio(bio);
2190 }
2191 
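/*
 * The read and write paths below split a bio into page-sized chunks:
 * each iteration clamps the bvec to the end of the current PAGE_SIZE
 * block before passing it to zram_bvec_read()/zram_bvec_write().
 */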
2192 static void zram_bio_read(struct zram *zram, struct bio *bio)
2193 {
2194 	unsigned long start_time = bio_start_io_acct(bio);
2195 	struct bvec_iter iter = bio->bi_iter;
2196 
2197 	do {
2198 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
2199 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
2200 				SECTOR_SHIFT;
2201 		struct bio_vec bv = bio_iter_iovec(bio, iter);
2202 
2203 		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
2204 
2205 		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
2206 			atomic64_inc(&zram->stats.failed_reads);
2207 			bio->bi_status = BLK_STS_IOERR;
2208 			break;
2209 		}
2210 		flush_dcache_page(bv.bv_page);
2211 
2212 		zram_slot_lock(zram, index);
2213 		zram_accessed(zram, index);
2214 		zram_slot_unlock(zram, index);
2215 
2216 		bio_advance_iter_single(bio, &iter, bv.bv_len);
2217 	} while (iter.bi_size);
2218 
2219 	bio_end_io_acct(bio, start_time);
2220 	bio_endio(bio);
2221 }
2222 
2223 static void zram_bio_write(struct zram *zram, struct bio *bio)
2224 {
2225 	unsigned long start_time = bio_start_io_acct(bio);
2226 	struct bvec_iter iter = bio->bi_iter;
2227 
2228 	do {
2229 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
2230 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
2231 				SECTOR_SHIFT;
2232 		struct bio_vec bv = bio_iter_iovec(bio, iter);
2233 
2234 		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
2235 
2236 		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
2237 			atomic64_inc(&zram->stats.failed_writes);
2238 			bio->bi_status = BLK_STS_IOERR;
2239 			break;
2240 		}
2241 
2242 		zram_slot_lock(zram, index);
2243 		zram_accessed(zram, index);
2244 		zram_slot_unlock(zram, index);
2245 
2246 		bio_advance_iter_single(bio, &iter, bv.bv_len);
2247 	} while (iter.bi_size);
2248 
2249 	bio_end_io_acct(bio, start_time);
2250 	bio_endio(bio);
2251 }
2252 
2253 /*
2254  * Handler function for all zram I/O requests.
2255  */
2256 static void zram_submit_bio(struct bio *bio)
2257 {
2258 	struct zram *zram = bio->bi_bdev->bd_disk->private_data;
2259 
2260 	switch (bio_op(bio)) {
2261 	case REQ_OP_READ:
2262 		zram_bio_read(zram, bio);
2263 		break;
2264 	case REQ_OP_WRITE:
2265 		zram_bio_write(zram, bio);
2266 		break;
2267 	case REQ_OP_DISCARD:
2268 	case REQ_OP_WRITE_ZEROES:
2269 		zram_bio_discard(zram, bio);
2270 		break;
2271 	default:
2272 		WARN_ON_ONCE(1);
2273 		bio_endio(bio);
2274 	}
2275 }
2276 
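/*
 * Invoked through block_device_operations->swap_slot_free_notify when a
 * swap slot backed by this device is freed: drop the page eagerly if the
 * slot lock can be taken, otherwise account the miss in miss_free.
 */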
2277 static void zram_slot_free_notify(struct block_device *bdev,
2278 				unsigned long index)
2279 {
2280 	struct zram *zram;
2281 
2282 	zram = bdev->bd_disk->private_data;
2283 
2284 	atomic64_inc(&zram->stats.notify_free);
2285 	if (!zram_slot_trylock(zram, index)) {
2286 		atomic64_inc(&zram->stats.miss_free);
2287 		return;
2288 	}
2289 
2290 	zram_free_page(zram, index);
2291 	zram_slot_unlock(zram, index);
2292 }
2293 
2294 static void zram_comp_params_reset(struct zram *zram)
2295 {
2296 	u32 prio;
2297 
2298 	for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2299 		comp_params_reset(zram, prio);
2300 	}
2301 }
2302 
2303 static void zram_destroy_comps(struct zram *zram)
2304 {
2305 	u32 prio;
2306 
2307 	for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2308 		struct zcomp *comp = zram->comps[prio];
2309 
2310 		zram->comps[prio] = NULL;
2311 		if (!comp)
2312 			continue;
2313 		zcomp_destroy(comp);
2314 		zram->num_active_comps--;
2315 	}
2316 
2317 	for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2318 		/* Do not free statically defined compression algorithms */
2319 		if (zram->comp_algs[prio] != default_compressor)
2320 			kfree(zram->comp_algs[prio]);
2321 		zram->comp_algs[prio] = NULL;
2322 	}
2323 
2324 	zram_comp_params_reset(zram);
2325 }
2326 
2327 static void zram_reset_device(struct zram *zram)
2328 {
2329 	down_write(&zram->init_lock);
2330 
2331 	zram->limit_pages = 0;
2332 
2333 	set_capacity_and_notify(zram->disk, 0);
2334 	part_stat_set_all(zram->disk->part0, 0);
2335 
2336 	/* I/O operations on all CPUs are done, so it's safe to free */
2337 	zram_meta_free(zram, zram->disksize);
2338 	zram->disksize = 0;
2339 	zram_destroy_comps(zram);
2340 	memset(&zram->stats, 0, sizeof(zram->stats));
2341 	atomic_set(&zram->pp_in_progress, 0);
2342 	reset_bdev(zram);
2343 
2344 	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2345 	up_write(&zram->init_lock);
2346 }
2347 
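/*
 * A device must be given a size before it can be used. A usage sketch,
 * based on the zram admin guide:
 *
 *	echo 4G > /sys/block/zram0/disksize
 *
 * This also instantiates the configured compression backends.
 */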
2348 static ssize_t disksize_store(struct device *dev,
2349 		struct device_attribute *attr, const char *buf, size_t len)
2350 {
2351 	u64 disksize;
2352 	struct zcomp *comp;
2353 	struct zram *zram = dev_to_zram(dev);
2354 	int err;
2355 	u32 prio;
2356 
2357 	disksize = memparse(buf, NULL);
2358 	if (!disksize)
2359 		return -EINVAL;
2360 
2361 	down_write(&zram->init_lock);
2362 	if (init_done(zram)) {
2363 		pr_info("Cannot change disksize for initialized device\n");
2364 		err = -EBUSY;
2365 		goto out_unlock;
2366 	}
2367 
2368 	disksize = PAGE_ALIGN(disksize);
2369 	if (!zram_meta_alloc(zram, disksize)) {
2370 		err = -ENOMEM;
2371 		goto out_unlock;
2372 	}
2373 
2374 	for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2375 		if (!zram->comp_algs[prio])
2376 			continue;
2377 
2378 		comp = zcomp_create(zram->comp_algs[prio],
2379 				    &zram->params[prio]);
2380 		if (IS_ERR(comp)) {
2381 			pr_err("Cannot initialise %s compressing backend\n",
2382 			       zram->comp_algs[prio]);
2383 			err = PTR_ERR(comp);
2384 			goto out_free_comps;
2385 		}
2386 
2387 		zram->comps[prio] = comp;
2388 		zram->num_active_comps++;
2389 	}
2390 	zram->disksize = disksize;
2391 	set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
2392 	up_write(&zram->init_lock);
2393 
2394 	return len;
2395 
2396 out_free_comps:
2397 	zram_destroy_comps(zram);
2398 	zram_meta_free(zram, disksize);
2399 out_unlock:
2400 	up_write(&zram->init_lock);
2401 	return err;
2402 }
2403 
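/*
 * Writing a non-zero value resets the device, freeing all stored data.
 * A usage sketch, based on the zram admin guide:
 *
 *	echo 1 > /sys/block/zram0/reset
 *
 * The write fails with -EBUSY while the device is open or claimed.
 */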
2404 static ssize_t reset_store(struct device *dev,
2405 		struct device_attribute *attr, const char *buf, size_t len)
2406 {
2407 	int ret;
2408 	unsigned short do_reset;
2409 	struct zram *zram;
2410 	struct gendisk *disk;
2411 
2412 	ret = kstrtou16(buf, 10, &do_reset);
2413 	if (ret)
2414 		return ret;
2415 
2416 	if (!do_reset)
2417 		return -EINVAL;
2418 
2419 	zram = dev_to_zram(dev);
2420 	disk = zram->disk;
2421 
2422 	mutex_lock(&disk->open_mutex);
2423 	/* Do not reset an active or claimed device */
2424 	if (disk_openers(disk) || zram->claim) {
2425 		mutex_unlock(&disk->open_mutex);
2426 		return -EBUSY;
2427 	}
2428 
2429 	/* From now on, no one can open /dev/zram[0-9] */
2430 	zram->claim = true;
2431 	mutex_unlock(&disk->open_mutex);
2432 
2433 	/* Make sure all pending I/O is finished */
2434 	sync_blockdev(disk->part0);
2435 	zram_reset_device(zram);
2436 
2437 	mutex_lock(&disk->open_mutex);
2438 	zram->claim = false;
2439 	mutex_unlock(&disk->open_mutex);
2440 
2441 	return len;
2442 }
2443 
2444 static int zram_open(struct gendisk *disk, blk_mode_t mode)
2445 {
2446 	struct zram *zram = disk->private_data;
2447 
2448 	WARN_ON(!mutex_is_locked(&disk->open_mutex));
2449 
2450 	/* zram was claimed for reset, so fail the open request */
2451 	if (zram->claim)
2452 		return -EBUSY;
2453 	return 0;
2454 }
2455 
2456 static const struct block_device_operations zram_devops = {
2457 	.open = zram_open,
2458 	.submit_bio = zram_submit_bio,
2459 	.swap_slot_free_notify = zram_slot_free_notify,
2460 	.owner = THIS_MODULE
2461 };
2462 
2463 static DEVICE_ATTR_WO(compact);
2464 static DEVICE_ATTR_RW(disksize);
2465 static DEVICE_ATTR_RO(initstate);
2466 static DEVICE_ATTR_WO(reset);
2467 static DEVICE_ATTR_WO(mem_limit);
2468 static DEVICE_ATTR_WO(mem_used_max);
2469 static DEVICE_ATTR_WO(idle);
2470 static DEVICE_ATTR_RW(max_comp_streams);
2471 static DEVICE_ATTR_RW(comp_algorithm);
2472 #ifdef CONFIG_ZRAM_WRITEBACK
2473 static DEVICE_ATTR_RW(backing_dev);
2474 static DEVICE_ATTR_WO(writeback);
2475 static DEVICE_ATTR_RW(writeback_limit);
2476 static DEVICE_ATTR_RW(writeback_limit_enable);
2477 #endif
2478 #ifdef CONFIG_ZRAM_MULTI_COMP
2479 static DEVICE_ATTR_RW(recomp_algorithm);
2480 static DEVICE_ATTR_WO(recompress);
2481 #endif
2482 static DEVICE_ATTR_WO(algorithm_params);
2483 
2484 static struct attribute *zram_disk_attrs[] = {
2485 	&dev_attr_disksize.attr,
2486 	&dev_attr_initstate.attr,
2487 	&dev_attr_reset.attr,
2488 	&dev_attr_compact.attr,
2489 	&dev_attr_mem_limit.attr,
2490 	&dev_attr_mem_used_max.attr,
2491 	&dev_attr_idle.attr,
2492 	&dev_attr_max_comp_streams.attr,
2493 	&dev_attr_comp_algorithm.attr,
2494 #ifdef CONFIG_ZRAM_WRITEBACK
2495 	&dev_attr_backing_dev.attr,
2496 	&dev_attr_writeback.attr,
2497 	&dev_attr_writeback_limit.attr,
2498 	&dev_attr_writeback_limit_enable.attr,
2499 #endif
2500 	&dev_attr_io_stat.attr,
2501 	&dev_attr_mm_stat.attr,
2502 #ifdef CONFIG_ZRAM_WRITEBACK
2503 	&dev_attr_bd_stat.attr,
2504 #endif
2505 	&dev_attr_debug_stat.attr,
2506 #ifdef CONFIG_ZRAM_MULTI_COMP
2507 	&dev_attr_recomp_algorithm.attr,
2508 	&dev_attr_recompress.attr,
2509 #endif
2510 	&dev_attr_algorithm_params.attr,
2511 	NULL,
2512 };
2513 
2514 ATTRIBUTE_GROUPS(zram_disk);
2515 
2516 /*
2517  * Allocate and initialize a new zram device. The function returns
2518  * a '>= 0' device_id upon success, and a negative value otherwise.
2519  */
2520 static int zram_add(void)
2521 {
2522 	struct queue_limits lim = {
2523 		.logical_block_size		= ZRAM_LOGICAL_BLOCK_SIZE,
2524 		/*
2525 		 * Ensure that we always get PAGE_SIZE-aligned and
2526 		 * n*PAGE_SIZE-sized I/O requests.
2527 		 */
2528 		.physical_block_size		= PAGE_SIZE,
2529 		.io_min				= PAGE_SIZE,
2530 		.io_opt				= PAGE_SIZE,
2531 		.max_hw_discard_sectors		= UINT_MAX,
2532 		/*
2533 		 * zram_bio_discard() will clear all logical blocks if the logical
2534 		 * block size is identical to the physical block size (PAGE_SIZE).
2535 		 * But if they differ, we will skip discarding the parts of
2536 		 * logical blocks in the request range which aren't
2537 		 * aligned to the physical block size.  So we can't ensure that all
2538 		 * discarded logical blocks are zeroed.
2539 		 */
2540 #if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
2541 		.max_write_zeroes_sectors	= UINT_MAX,
2542 #endif
2543 		.features			= BLK_FEAT_STABLE_WRITES |
2544 						  BLK_FEAT_SYNCHRONOUS,
2545 	};
2546 	struct zram *zram;
2547 	int ret, device_id;
2548 
2549 	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
2550 	if (!zram)
2551 		return -ENOMEM;
2552 
2553 	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
2554 	if (ret < 0)
2555 		goto out_free_dev;
2556 	device_id = ret;
2557 
2558 	init_rwsem(&zram->init_lock);
2559 #ifdef CONFIG_ZRAM_WRITEBACK
2560 	spin_lock_init(&zram->wb_limit_lock);
2561 #endif
2562 
2563 	/* gendisk structure */
2564 	zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
2565 	if (IS_ERR(zram->disk)) {
2566 		pr_err("Error allocating disk structure for device %d\n",
2567 			device_id);
2568 		ret = PTR_ERR(zram->disk);
2569 		goto out_free_idr;
2570 	}
2571 
2572 	zram->disk->major = zram_major;
2573 	zram->disk->first_minor = device_id;
2574 	zram->disk->minors = 1;
2575 	zram->disk->flags |= GENHD_FL_NO_PART;
2576 	zram->disk->fops = &zram_devops;
2577 	zram->disk->private_data = zram;
2578 	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
2579 	atomic_set(&zram->pp_in_progress, 0);
2580 	zram_comp_params_reset(zram);
2581 	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2582 
2583 	/* Actual capacity is set using sysfs (/sys/block/zram<id>/disksize) */
2584 	set_capacity(zram->disk, 0);
2585 	ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
2586 	if (ret)
2587 		goto out_cleanup_disk;
2588 
2589 	zram_debugfs_register(zram);
2590 	pr_info("Added device: %s\n", zram->disk->disk_name);
2591 	return device_id;
2592 
2593 out_cleanup_disk:
2594 	put_disk(zram->disk);
2595 out_free_idr:
2596 	idr_remove(&zram_index_idr, device_id);
2597 out_free_dev:
2598 	kfree(zram);
2599 	return ret;
2600 }
2601 
2602 static int zram_remove(struct zram *zram)
2603 {
2604 	bool claimed;
2605 
2606 	mutex_lock(&zram->disk->open_mutex);
2607 	if (disk_openers(zram->disk)) {
2608 		mutex_unlock(&zram->disk->open_mutex);
2609 		return -EBUSY;
2610 	}
2611 
2612 	claimed = zram->claim;
2613 	if (!claimed)
2614 		zram->claim = true;
2615 	mutex_unlock(&zram->disk->open_mutex);
2616 
2617 	zram_debugfs_unregister(zram);
2618 
2619 	if (claimed) {
2620 		/*
2621 		 * If we were claimed by reset_store(), del_gendisk() will
2622 		 * wait until reset_store() is done, so there is nothing to do.
2623 		 */
2624 		;
2625 	} else {
2626 		/* Make sure all pending I/O is finished */
2627 		sync_blockdev(zram->disk->part0);
2628 		zram_reset_device(zram);
2629 	}
2630 
2631 	pr_info("Removed device: %s\n", zram->disk->disk_name);
2632 
2633 	del_gendisk(zram->disk);
2634 
2635 	/* del_gendisk drains pending reset_store */
2636 	WARN_ON_ONCE(claimed && zram->claim);
2637 
2638 	/*
2639 	 * disksize_store() may be called in between zram_reset_device()
2640 	 * and del_gendisk(), so run the last reset to avoid leaking
2641 	 * anything allocated with disksize_store().
2642 	 */
2643 	zram_reset_device(zram);
2644 
2645 	put_disk(zram->disk);
2646 	kfree(zram);
2647 	return 0;
2648 }
2649 
2650 /* zram-control sysfs attributes */
2651 
2652 /*
2653  * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
2654  * the sense that reading from this file does alter the state of the system:
2655  * it creates a new un-initialized zram device and returns this device's
2656  * device_id (or an error code if it fails to create a new device).
2657  */
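/*
 * A usage sketch, based on the zram admin guide: reading the attribute
 * prints the new device's id, so
 *
 *	cat /sys/class/zram-control/hot_add
 *
 * might print "1" and create /dev/zram1.
 */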
2658 static ssize_t hot_add_show(const struct class *class,
2659 			const struct class_attribute *attr,
2660 			char *buf)
2661 {
2662 	int ret;
2663 
2664 	mutex_lock(&zram_index_mutex);
2665 	ret = zram_add();
2666 	mutex_unlock(&zram_index_mutex);
2667 
2668 	if (ret < 0)
2669 		return ret;
2670 	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
2671 }
2672 /* This attribute must be set to 0400, so CLASS_ATTR_RO() cannot be used */
2673 static struct class_attribute class_attr_hot_add =
2674 	__ATTR(hot_add, 0400, hot_add_show, NULL);
2675 
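/*
 * A usage sketch, based on the zram admin guide: writing a device id
 * removes that device, e.g.
 *
 *	echo 1 > /sys/class/zram-control/hot_remove
 */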
2676 static ssize_t hot_remove_store(const struct class *class,
2677 			const struct class_attribute *attr,
2678 			const char *buf,
2679 			size_t count)
2680 {
2681 	struct zram *zram;
2682 	int ret, dev_id;
2683 
2684 	/* dev_id is gendisk->first_minor, which is `int' */
2685 	ret = kstrtoint(buf, 10, &dev_id);
2686 	if (ret)
2687 		return ret;
2688 	if (dev_id < 0)
2689 		return -EINVAL;
2690 
2691 	mutex_lock(&zram_index_mutex);
2692 
2693 	zram = idr_find(&zram_index_idr, dev_id);
2694 	if (zram) {
2695 		ret = zram_remove(zram);
2696 		if (!ret)
2697 			idr_remove(&zram_index_idr, dev_id);
2698 	} else {
2699 		ret = -ENODEV;
2700 	}
2701 
2702 	mutex_unlock(&zram_index_mutex);
2703 	return ret ? ret : count;
2704 }
2705 static CLASS_ATTR_WO(hot_remove);
2706 
2707 static struct attribute *zram_control_class_attrs[] = {
2708 	&class_attr_hot_add.attr,
2709 	&class_attr_hot_remove.attr,
2710 	NULL,
2711 };
2712 ATTRIBUTE_GROUPS(zram_control_class);
2713 
2714 static struct class zram_control_class = {
2715 	.name		= "zram-control",
2716 	.class_groups	= zram_control_class_groups,
2717 };
2718 
2719 static int zram_remove_cb(int id, void *ptr, void *data)
2720 {
2721 	WARN_ON_ONCE(zram_remove(ptr));
2722 	return 0;
2723 }
2724 
2725 static void destroy_devices(void)
2726 {
2727 	class_unregister(&zram_control_class);
2728 	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
2729 	zram_debugfs_destroy();
2730 	idr_destroy(&zram_index_idr);
2731 	unregister_blkdev(zram_major, "zram");
2732 	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2733 }
2734 
2735 static int __init zram_init(void)
2736 {
2737 	struct zram_table_entry zram_te;
2738 	int ret;
2739 
2740 	BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
2741 
2742 	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
2743 				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
2744 	if (ret < 0)
2745 		return ret;
2746 
2747 	ret = class_register(&zram_control_class);
2748 	if (ret) {
2749 		pr_err("Unable to register zram-control class\n");
2750 		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2751 		return ret;
2752 	}
2753 
2754 	zram_debugfs_create();
2755 	zram_major = register_blkdev(0, "zram");
2756 	if (zram_major <= 0) {
2757 		pr_err("Unable to get major number\n");
2758 		class_unregister(&zram_control_class);
2759 		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2760 		return -EBUSY;
2761 	}
2762 
2763 	while (num_devices != 0) {
2764 		mutex_lock(&zram_index_mutex);
2765 		ret = zram_add();
2766 		mutex_unlock(&zram_index_mutex);
2767 		if (ret < 0)
2768 			goto out_error;
2769 		num_devices--;
2770 	}
2771 
2772 	return 0;
2773 
2774 out_error:
2775 	destroy_devices();
2776 	return ret;
2777 }
2778 
2779 static void __exit zram_exit(void)
2780 {
2781 	destroy_devices();
2782 }
2783 
2784 module_init(zram_init);
2785 module_exit(zram_exit);
2786 
2787 module_param(num_devices, uint, 0);
2788 MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
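/*
 * A usage sketch, based on the zram admin guide (assuming zram is built
 * as a module):
 *
 *	modprobe zram num_devices=4
 *
 * pre-creates /dev/zram0 through /dev/zram3.
 */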
2789 
2790 MODULE_LICENSE("Dual BSD/GPL");
2791 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2792 MODULE_DESCRIPTION("Compressed RAM Block Device");
2793