/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to a size equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;

static const struct block_device_operations zram_devops;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
			  struct bio *parent);

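/*
 * Each zram->table entry carries its own spinlock; these helpers wrap it.
 * The per-slot lock serializes access to the slot's handle, flags and
 * object size.
 */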
static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return spin_trylock(&zram->table[index].lock);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	spin_lock(&zram->table[index].lock);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	spin_unlock(&zram->table[index].lock);
}

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require the table entry's slot lock being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
			u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}

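/*
 * With a 4K PAGE_SIZE a bvec always covers a whole page here, so the
 * partial-IO read-modify-write paths are compiled out entirely.
 */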
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#define ZRAM_PARTIAL_IO		1
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
{
	prio &= ZRAM_COMP_PRIORITY_MASK;
	/*
	 * Clear the previous priority value first, in case we recompress
	 * a page that has already been recompressed.
	 */
	zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
				      ZRAM_COMP_PRIORITY_BIT1);
	zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
}

static inline u32 zram_get_priority(struct zram *zram, u32 index)
{
	u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;

	return prio & ZRAM_COMP_PRIORITY_MASK;
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
	zram->table[index].ac_time = ktime_get_boottime();
#endif
}

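/*
 * Racelessly raise the max_used_pages watermark with a cmpxchg loop; the
 * watermark only ever grows here (mem_used_max_store resets it).
 */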
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		if (cur_max >= pages)
			return;
	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
					  &cur_max, pages));
}

static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}

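/*
 * Returns true if the page consists of a single repeated word (e.g. a
 * zero-filled page) and stores that word in *element, so the page can be
 * recorded as ZRAM_SAME without allocating any backing memory.
 */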
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

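/*
 * Writing "0" resets the max_used_pages watermark to the pool's current
 * size; any other value is rejected.
 */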
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

/*
 * Mark all pages that are older than or equal to cutoff as IDLE.
 * Callers should hold the zram init lock in read mode.
 */
static void mark_idle(struct zram *zram, ktime_t cutoff)
{
	int is_idle = 1;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
		 * See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
			is_idle = !cutoff || ktime_after(cutoff,
							 zram->table[index].ac_time);
#endif
			if (is_idle)
				zram_set_flag(zram, index, ZRAM_IDLE);
		}
		zram_slot_unlock(zram, index);
	}
}

static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	ktime_t cutoff_time = 0;
	ssize_t rv = -EINVAL;

	if (!sysfs_streq(buf, "all")) {
		/*
		 * If it did not parse as "all", try to treat it as an integer
		 * age in seconds when memory tracking is enabled.
		 */
		u64 age_sec;

		if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
			cutoff_time = ktime_sub(ktime_get_boottime(),
					ns_to_ktime(age_sec * NSEC_PER_SEC));
		else
			goto out;
	}

	down_read(&zram->init_lock);
	if (!init_done(zram))
		goto out_unlock;

	/*
	 * A cutoff_time of 0 marks everything as idle, this is the
	 * "all" behavior.
	 */
	mark_idle(zram, cutoff_time);
	rv = len;

out_unlock:
	up_read(&zram->init_lock);
out:
	return rv;
}

#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}

static void reset_bdev(struct zram *zram)
{
	if (!zram->backing_dev)
		return;

	/* hope filp_close flushes all outstanding IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->bdev = NULL;
	zram->disk->fops = &zram_devops;
	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct file *file;
	struct zram *zram = dev_to_zram(dev);
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	file = zram->backing_dev;
	if (!file) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}

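/*
 * Attach a block device as writeback backing storage. This is only
 * allowed while the zram device is not initialized (no disksize set);
 * the bitmap tracks which backing-device blocks are in use.
 */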
static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	unsigned int bitmap_sz;
	unsigned long nr_pages, *bitmap = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strscpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	inode = backing_dev->f_mapping->host;

	/* Only block devices are supported at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	reset_bdev(zram);

	zram->bdev = I_BDEV(inode);
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	kvfree(bitmap);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}

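/*
 * Claim a free block on the backing device. Returns the block index, or
 * 0 when the device is full.
 */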
static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip bit 0, so a stored block index can't be confused with handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}

static void read_from_bdev_async(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_chain(bio, parent);
	submit_bio(bio);
}

#define PAGE_WB_SIG "page_index="

#define PAGE_WRITEBACK			0
#define HUGE_WRITEBACK			(1<<0)
#define IDLE_WRITEBACK			(1<<1)
#define INCOMPRESSIBLE_WRITEBACK	(1<<2)

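/*
 * Writeback modes accepted by writeback_store: "idle", "huge",
 * "huge_idle", "incompressible", or "page_index=<n>" for a single page.
 */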
static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index = 0;
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	ssize_t ret = len;
	int mode, err;
	unsigned long blk_idx = 0;

	if (sysfs_streq(buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (sysfs_streq(buf, "huge"))
		mode = HUGE_WRITEBACK;
	else if (sysfs_streq(buf, "huge_idle"))
		mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
	else if (sysfs_streq(buf, "incompressible"))
		mode = INCOMPRESSIBLE_WRITEBACK;
	else {
		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
			return -EINVAL;

		if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
				index >= nr_pages)
			return -EINVAL;

		nr_pages = 1;
		mode = PAGE_WRITEBACK;
	}

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (; nr_pages != 0; index++, nr_pages--) {
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode & IDLE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode & HUGE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		if (mode & INCOMPRESSIBLE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
			goto next;

		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty.
		 * IOW, zram_free_page never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed to close the race with hugepage writeback */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_read_page(zram, page, index, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio, zram->bdev, &bio_vec, 1,
			 REQ_OP_WRITE | REQ_SYNC);
		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		__bio_add_page(&bio, page, PAGE_SIZE, 0);

		/*
		 * XXX: A single page IO would be inefficient for write,
		 * but it is not bad as a starting point.
		 */
		err = submit_bio_wait(&bio);
		if (err) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			/*
			 * BIO errors are not fatal, we continue and simply
			 * attempt to writeback the remaining objects (pages).
			 * At the same time we need to signal user-space that
			 * some writes (at least one, but also could be all of
			 * them) were not successful and we do so by returning
			 * the most recent BIO error.
			 */
			ret = err;
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock, so the slot may have changed
		 * under us. If the slot was freed, zram_allocated catches
		 * it easily.
		 * A subtle case is the slot being freed/reallocated/marked as
		 * ZRAM_IDLE again. To close the race, idle_store doesn't
		 * mark ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB.
		 * Thus, we can close the race by checking the ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}

struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct page *page;
	int error;
};

static void zram_sync_read(struct work_struct *work)
{
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
	__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
	zw->error = submit_bio_wait(&bio);
}

/*
 * The block layer wants only one ->submit_bio to be active at a time, so
 * chaining IO to a parent IO in the same context would deadlock. To avoid
 * that, do the sync read from a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct page *page,
				unsigned long entry)
{
	struct zram_work work;

	work.page = page;
	work.zram = zram;
	work.entry = entry;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return work.error;
}

static int read_from_bdev(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (!parent) {
		if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
			return -EIO;
		return read_from_bdev_sync(zram, page, entry);
	}
	read_from_bdev_async(zram, page, entry, parent);
	return 0;
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif

#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	kbuf = kvmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
			zram_get_priority(zram, index) ? 'r' : '.',
			zram_test_flag(zram, index,
				       ZRAM_INCOMPRESSIBLE) ? 'n' : '.');

		if (count <= copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}

static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{
	/* Do not free statically defined compression algorithms */
	if (zram->comp_algs[prio] != default_compressor)
		kfree(zram->comp_algs[prio]);

	zram->comp_algs[prio] = alg;
}

static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
{
	ssize_t sz;

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->comp_algs[prio], buf);
	up_read(&zram->init_lock);

	return sz;
}

static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
	char *compressor;
	size_t sz;

	sz = strlen(buf);
	if (sz >= CRYPTO_MAX_ALG_NAME)
		return -E2BIG;

	compressor = kstrdup(buf, GFP_KERNEL);
	if (!compressor)
		return -ENOMEM;

	/* ignore trailing newline */
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor)) {
		kfree(compressor);
		return -EINVAL;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		kfree(compressor);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	comp_algorithm_set(zram, prio, compressor);
	up_write(&zram->init_lock);
	return 0;
}

static ssize_t comp_algorithm_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
}

static ssize_t comp_algorithm_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
	return ret ? ret : len;
}

#ifdef CONFIG_ZRAM_MULTI_COMP
static ssize_t recomp_algorithm_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t sz = 0;
	u32 prio;

	for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
		if (!zram->comp_algs[prio])
			continue;

		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
		sz += __comp_algorithm_show(zram, prio, buf + sz);
	}

	return sz;
}

static ssize_t recomp_algorithm_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	int prio = ZRAM_SECONDARY_COMP;
	char *args, *param, *val;
	char *alg = NULL;
	int ret;

	args = skip_spaces(buf);
	while (*args) {
		args = next_arg(args, &param, &val);

		if (!val || !*val)
			return -EINVAL;

		if (!strcmp(param, "algo")) {
			alg = val;
			continue;
		}

		if (!strcmp(param, "priority")) {
			ret = kstrtoint(val, 10, &prio);
			if (ret)
				return ret;
			continue;
		}
	}

	if (!alg)
		return -EINVAL;

	if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS)
		return -EINVAL;

	ret = __comp_algorithm_store(zram, prio, alg);
	return ret ? ret : len;
}
#endif

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu 0 %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			atomic_long_read(&pool_stats.pages_compacted),
			(u64)atomic64_read(&zram->stats.huge_pages),
			(u64)atomic64_read(&zram->stats.huge_pages_since));
	up_read(&zram->init_lock);

	return ret;
}

#ifdef CONFIG_ZRAM_WRITEBACK
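/* bd_stat counters are reported in units of 4K blocks, independent of PAGE_SIZE */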
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
		"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
#endif

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);

static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

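/*
 * Allocate the per-slot table and the zsmalloc pool for a device of the
 * given disksize, and initialize every slot's lock. huge_class_size (the
 * threshold above which objects are stored as full pages) is cached on
 * first use.
 */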
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages, index;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);

	for (index = 0; index < num_pages; index++)
		spin_lock_init(&zram->table[index].lock);
	return true;
}

/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table entry's slot lock while
 * the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
	zram->table[index].ac_time = 0;
#endif
	if (zram_test_flag(zram, index, ZRAM_IDLE))
		zram_clear_flag(zram, index, ZRAM_IDLE);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
		zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);

	zram_set_priority(zram, index, 0);

	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_clear_flag(zram, index, ZRAM_WB);
		free_block_bdev(zram, zram_get_element(zram, index));
		goto out;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		atomic64_dec(&zram->stats.same_pages);
		goto out;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
		     &zram->stats.compr_data_size);
out:
	atomic64_dec(&zram->stats.pages_stored);
	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
	WARN_ON_ONCE(zram->table[index].flags &
		~(1UL << ZRAM_UNDER_WB));
}

/*
 * Reads (decompresses if needed) a page from zspool (zsmalloc).
 * Corresponding ZRAM slot should be locked.
 */
static int zram_read_from_zspool(struct zram *zram, struct page *page,
				 u32 index)
{
	struct zcomp_strm *zstrm;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;
	u32 prio;
	int ret;

	handle = zram_get_handle(zram, index);
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_local_page(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_local(mem);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	if (size != PAGE_SIZE) {
		prio = zram_get_priority(zram, index);
		zstrm = zcomp_stream_get(zram->comps[prio]);
	}

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_local_page(page);
		copy_page(dst, src);
		kunmap_local(dst);
		ret = 0;
	} else {
		dst = kmap_local_page(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_local(dst);
		zcomp_stream_put(zram->comps[prio]);
	}
	zs_unmap_object(zram->mem_pool, handle);
	return ret;
}

static int zram_read_page(struct zram *zram, struct page *page, u32 index,
			  struct bio *parent)
{
	int ret;

	zram_slot_lock(zram, index);
	if (!zram_test_flag(zram, index, ZRAM_WB)) {
		/* Slot should be locked throughout the function call */
		ret = zram_read_from_zspool(zram, page, index);
		zram_slot_unlock(zram, index);
	} else {
		/*
		 * The slot should be unlocked before reading from the backing
		 * device.
		 */
		zram_slot_unlock(zram, index);

		ret = read_from_bdev(zram, page, zram_get_element(zram, index),
				     parent);
	}

	/* Should NEVER happen. Return bio error if it does. */
	if (WARN_ON(ret < 0))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

/*
 * Use a temporary buffer to decompress the page, as the decompressor
 * always expects a full page for the output.
 */
static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
				  u32 index, int offset)
{
	struct page *page = alloc_page(GFP_NOIO);
	int ret;

	if (!page)
		return -ENOMEM;
	ret = zram_read_page(zram, page, index, NULL);
	if (likely(!ret))
		memcpy_to_bvec(bvec, page_address(page) + offset);
	__free_page(page);
	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	if (is_partial_io(bvec))
		return zram_bvec_read_partial(zram, bvec, index, offset);
	return zram_read_page(zram, bvec->bv_page, index, bio);
}

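/*
 * Store one page: detect same-filled pages first, then compress with the
 * primary algorithm. Handle allocation takes a fast no-reclaim attempt
 * and, on failure, a slow direct-reclaim path that requires redoing the
 * compression (see the comment above the zs_malloc() calls below).
 */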
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = -ENOMEM;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;

	mem = kmap_local_page(page);
	if (page_same_filled(mem, &element)) {
		kunmap_local(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_local(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
	src = kmap_local_page(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_local(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (comp_len >= huge_class_size)
		comp_len = PAGE_SIZE;
	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (IS_ERR_VALUE(handle))
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (IS_ERR_VALUE(handle)) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (IS_ERR_VALUE(handle))
			return PTR_ERR((void *)handle);

		if (comp_len != PAGE_SIZE)
			goto compress_again;
		/*
		 * If the page is not compressible, we must re-acquire the
		 * per-cpu stream before executing the code below. The
		 * zcomp_stream_get() call disables cpu hotplug and grabs
		 * the zstrm buffer back, so that the dereference of the
		 * zstrm variable below is valid.
		 */
		zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_local_page(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_local(src);

	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
		atomic64_inc(&zram->stats.huge_pages_since);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}

/*
 * This is a partial IO. Read the full page before writing the changes.
 */
static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
				   u32 index, int offset, struct bio *bio)
{
	struct page *page = alloc_page(GFP_NOIO);
	int ret;

	if (!page)
		return -ENOMEM;

	ret = zram_read_page(zram, page, index, bio);
	if (!ret) {
		memcpy_from_bvec(page_address(page) + offset, bvec);
		ret = zram_write_page(zram, page, index);
	}
	__free_page(page);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
			   u32 index, int offset, struct bio *bio)
{
	if (is_partial_io(bvec))
		return zram_bvec_write_partial(zram, bvec, index, offset, bio);
	return zram_write_page(zram, bvec->bv_page, index);
}

#ifdef CONFIG_ZRAM_MULTI_COMP
/*
 * This function will decompress (unless it's ZRAM_HUGE) the page and then
 * attempt to compress it using the provided compression algorithm priority
 * (which is potentially more effective).
 *
 * Corresponding ZRAM slot should be locked.
 */
static int zram_recompress(struct zram *zram, u32 index, struct page *page,
			   u64 *num_recomp_pages, u32 threshold, u32 prio,
			   u32 prio_max)
{
	struct zcomp_strm *zstrm = NULL;
	unsigned long handle_old;
	unsigned long handle_new;
	unsigned int comp_len_old;
	unsigned int comp_len_new;
	unsigned int class_index_old;
	unsigned int class_index_new;
	u32 num_recomps = 0;
	void *src, *dst;
	int ret;

	handle_old = zram_get_handle(zram, index);
	if (!handle_old)
		return -EINVAL;

	comp_len_old = zram_get_obj_size(zram, index);
	/*
	 * Do not recompress objects that are already "small enough".
	 */
	if (comp_len_old < threshold)
		return 0;

	ret = zram_read_from_zspool(zram, page, index);
	if (ret)
		return ret;

	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
	/*
	 * Iterate the secondary comp algorithms list (in order of priority)
	 * and try to recompress the page.
	 */
	for (; prio < prio_max; prio++) {
		if (!zram->comps[prio])
			continue;

		/*
		 * Skip if the object is already re-compressed with a higher
		 * priority algorithm (or same algorithm).
		 */
		if (prio <= zram_get_priority(zram, index))
			continue;

		num_recomps++;
		zstrm = zcomp_stream_get(zram->comps[prio]);
		src = kmap_local_page(page);
		ret = zcomp_compress(zstrm, src, &comp_len_new);
		kunmap_local(src);

		if (ret) {
			zcomp_stream_put(zram->comps[prio]);
			return ret;
		}

		class_index_new = zs_lookup_class_index(zram->mem_pool,
							comp_len_new);

		/* Continue until we make progress */
		if (class_index_new >= class_index_old ||
		    (threshold && comp_len_new >= threshold)) {
			zcomp_stream_put(zram->comps[prio]);
			continue;
		}

		/* Recompression was successful so break out */
		break;
	}

	/*
	 * We did not try to recompress, e.g. when we have only one
	 * secondary algorithm and the page is already recompressed
	 * using that algorithm.
	 */
	if (!zstrm)
		return 0;

	/*
	 * Decrement the limit (if set) on pages we can recompress, even
	 * when the current recompression was unsuccessful or did not compress
	 * the page below the threshold, because we still spent resources
	 * on it.
	 */
	if (*num_recomp_pages)
		*num_recomp_pages -= 1;

	if (class_index_new >= class_index_old) {
		/*
		 * Secondary algorithms failed to re-compress the page
		 * in a way that would save memory, mark the object as
		 * incompressible so that we will not try to compress
		 * it again.
		 *
		 * We need to make sure that all secondary algorithms have
		 * failed, so we test if the number of recompressions matches
		 * the number of active secondary algorithms.
		 */
		if (num_recomps == zram->num_active_comps - 1)
			zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
		return 0;
	}

	/* Successful recompression but above threshold */
	if (threshold && comp_len_new >= threshold)
		return 0;

	/*
	 * No direct reclaim (slow path) for handle allocation and no
	 * re-compression attempt (unlike in zram_write_page()) since
	 * we already have stored that object in zsmalloc. If we cannot
	 * alloc memory for recompressed object then we bail out and
	 * simply keep the old (existing) object in zsmalloc.
	 */
	handle_new = zs_malloc(zram->mem_pool, comp_len_new,
			       __GFP_KSWAPD_RECLAIM |
			       __GFP_NOWARN |
			       __GFP_HIGHMEM |
			       __GFP_MOVABLE);
	if (IS_ERR_VALUE(handle_new)) {
		zcomp_stream_put(zram->comps[prio]);
		return PTR_ERR((void *)handle_new);
	}

	dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
	memcpy(dst, zstrm->buffer, comp_len_new);
	zcomp_stream_put(zram->comps[prio]);

	zs_unmap_object(zram->mem_pool, handle_new);

	zram_free_page(zram, index);
	zram_set_handle(zram, index, handle_new);
	zram_set_obj_size(zram, index, comp_len_new);
	zram_set_priority(zram, index, prio);

	atomic64_add(comp_len_new, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);

	return 0;
}

#define RECOMPRESS_IDLE		(1 << 0)
#define RECOMPRESS_HUGE		(1 << 1)

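/*
 * recompress_store accepts "type=idle|huge|huge_idle", "max_pages=<n>",
 * "threshold=<bytes>" and "algo=<name>" key=value arguments.
 */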
static ssize_t recompress_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	char *args, *param, *val, *algo = NULL;
	u64 num_recomp_pages = ULLONG_MAX;
	u32 mode = 0, threshold = 0;
	unsigned long index;
	struct page *page;
	ssize_t ret;

	args = skip_spaces(buf);
	while (*args) {
		args = next_arg(args, &param, &val);

		if (!val || !*val)
			return -EINVAL;

		if (!strcmp(param, "type")) {
			if (!strcmp(val, "idle"))
				mode = RECOMPRESS_IDLE;
			if (!strcmp(val, "huge"))
				mode = RECOMPRESS_HUGE;
			if (!strcmp(val, "huge_idle"))
				mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE;
			continue;
		}

		if (!strcmp(param, "max_pages")) {
			/*
			 * Limit the number of entries (pages) we attempt to
			 * recompress.
			 */
			ret = kstrtoull(val, 10, &num_recomp_pages);
			if (ret)
				return ret;
			continue;
		}

		if (!strcmp(param, "threshold")) {
			/*
			 * We will re-compress only idle objects equal to or
			 * greater in size than the watermark.
			 */
			ret = kstrtouint(val, 10, &threshold);
			if (ret)
				return ret;
			continue;
		}

		if (!strcmp(param, "algo")) {
			algo = val;
			continue;
		}
	}

	if (threshold >= huge_class_size)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (algo) {
		bool found = false;

		for (; prio < ZRAM_MAX_COMPS; prio++) {
			if (!zram->comp_algs[prio])
				continue;

			if (!strcmp(zram->comp_algs[prio], algo)) {
				prio_max = min(prio + 1, ZRAM_MAX_COMPS);
				found = true;
				break;
			}
		}

		if (!found) {
			ret = -EINVAL;
			goto release_init_lock;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	ret = len;
	for (index = 0; index < nr_pages; index++) {
		int err = 0;

		if (!num_recomp_pages)
			break;

		zram_slot_lock(zram, index);

		if (!zram_allocated(zram, index))
			goto next;

		if (mode & RECOMPRESS_IDLE &&
		    !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;

		if (mode & RECOMPRESS_HUGE &&
		    !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
		    zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
		    zram_test_flag(zram, index, ZRAM_SAME) ||
		    zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
			goto next;

		err = zram_recompress(zram, index, page, &num_recomp_pages,
				      threshold, prio, prio_max);
next:
		zram_slot_unlock(zram, index);
		if (err) {
			ret = err;
			break;
		}

		cond_resched();
	}

	__free_page(page);

release_init_lock:
	up_read(&zram->init_lock);
	return ret;
}
#endif
1842
static void zram_bio_discard(struct zram *zram, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
			SECTOR_SHIFT;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block. Although we could handle
	 * such a request by reading that physical block, decompressing it,
	 * partially zeroing it, re-compressing it and storing it back, this
	 * isn't reasonable because our intent with a discard request is to
	 * save memory. So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}

	bio_endio(bio);
}

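/*
 * Service each segment of a read bio from the zram store into the caller's
 * pages, marking every slot touched as accessed.
 */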
static void zram_bio_read(struct zram *zram, struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);
	struct bvec_iter iter = bio->bi_iter;

	do {
		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
				SECTOR_SHIFT;
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
			atomic64_inc(&zram->stats.failed_reads);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
		flush_dcache_page(bv.bv_page);

		zram_slot_lock(zram, index);
		zram_accessed(zram, index);
		zram_slot_unlock(zram, index);

		bio_advance_iter_single(bio, &iter, bv.bv_len);
	} while (iter.bi_size);

	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}

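/*
 * Store each segment of a write bio into the zram store, marking every slot
 * touched as accessed.
 */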
static void zram_bio_write(struct zram *zram, struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);
	struct bvec_iter iter = bio->bi_iter;

	do {
		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
				SECTOR_SHIFT;
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
			atomic64_inc(&zram->stats.failed_writes);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		zram_slot_lock(zram, index);
		zram_accessed(zram, index);
		zram_slot_unlock(zram, index);

		bio_advance_iter_single(bio, &iter, bv.bv_len);
	} while (iter.bi_size);

	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_submit_bio(struct bio *bio)
{
	struct zram *zram = bio->bi_bdev->bd_disk->private_data;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		zram_bio_read(zram, bio);
		break;
	case REQ_OP_WRITE:
		zram_bio_write(zram, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, bio);
		break;
	default:
		WARN_ON_ONCE(1);
		bio_endio(bio);
	}
}

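/*
 * Called when a swap slot backed by this device is freed. The slot lock is
 * only tried, never waited on: if it cannot be taken immediately, the free
 * is counted in miss_free instead of blocking the caller.
 */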
static void zram_slot_free_notify(struct block_device *bdev,
				  unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	atomic64_inc(&zram->stats.notify_free);
	if (!zram_slot_trylock(zram, index)) {
		atomic64_inc(&zram->stats.miss_free);
		return;
	}

	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
}

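/*
 * Tear down every per-priority compression backend that was created for
 * this device.
 */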
static void zram_destroy_comps(struct zram *zram)
{
	u32 prio;

	for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
		struct zcomp *comp = zram->comps[prio];

		zram->comps[prio] = NULL;
		if (!comp)
			continue;
		zcomp_destroy(comp);
		zram->num_active_comps--;
	}
}

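/*
 * Return the device to its uninitialized state: drop the capacity, free all
 * metadata and compression backends, clear the statistics and re-apply the
 * default compressor. A device that was never initialized is left alone.
 */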
static void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	set_capacity_and_notify(zram->disk, 0);
	part_stat_set_all(zram->disk->part0, 0);

	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(zram, zram->disksize);
	zram->disksize = 0;
	zram_destroy_comps(zram);
	memset(&zram->stats, 0, sizeof(zram->stats));
	reset_bdev(zram);

	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
	up_write(&zram->init_lock);
}

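/*
 * Set the (page-aligned) disk size and create the compression backends for
 * every configured priority; this can only be done once, until the device
 * is reset. Example usage (a sketch; the device name and size are
 * illustrative, and memparse() also accepts K/M suffixes):
 *
 *	echo 1G > /sys/block/zram0/disksize
 */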
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;
	u32 prio;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
		if (!zram->comp_algs[prio])
			continue;

		comp = zcomp_create(zram->comp_algs[prio]);
		if (IS_ERR(comp)) {
			pr_err("Cannot initialise %s compressing backend\n",
			       zram->comp_algs[prio]);
			err = PTR_ERR(comp);
			goto out_free_comps;
		}

		zram->comps[prio] = comp;
		zram->num_active_comps++;
	}
	zram->disksize = disksize;
	set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	return len;

out_free_comps:
	zram_destroy_comps(zram);
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}

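/*
 * Writing any non-zero value resets the device, provided it is not open or
 * already claimed. Example usage (a sketch; the device name is an
 * illustrative assumption):
 *
 *	echo 1 > /sys/block/zram0/reset
 */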
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct gendisk *disk;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	disk = zram->disk;

	mutex_lock(&disk->open_mutex);
	/* Do not reset an active or claimed device */
	if (disk_openers(disk) || zram->claim) {
		mutex_unlock(&disk->open_mutex);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&disk->open_mutex);

	/* Make sure all pending I/O has finished */
	sync_blockdev(disk->part0);
	zram_reset_device(zram);

	mutex_lock(&disk->open_mutex);
	zram->claim = false;
	mutex_unlock(&disk->open_mutex);

	return len;
}

static int zram_open(struct gendisk *disk, blk_mode_t mode)
{
	struct zram *zram = disk->private_data;

	WARN_ON(!mutex_is_locked(&disk->open_mutex));

	/* zram was claimed for reset, so the open request fails */
	if (zram->claim)
		return -EBUSY;
	return 0;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_idle.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
	&dev_attr_writeback.attr,
	&dev_attr_writeback_limit.attr,
	&dev_attr_writeback_limit_enable.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_bd_stat.attr,
#endif
	&dev_attr_debug_stat.attr,
#ifdef CONFIG_ZRAM_MULTI_COMP
	&dev_attr_recomp_algorithm.attr,
	&dev_attr_recompress.attr,
#endif
	NULL,
};

ATTRIBUTE_GROUPS(zram_disk);

/*
 * Allocate and initialize a new zram device. The function returns a
 * device_id >= 0 upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct queue_limits lim = {
		.logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE,
		/*
		 * To ensure that we always get PAGE_SIZE-aligned and
		 * n*PAGE_SIZE-sized I/O requests.
		 */
		.physical_block_size = PAGE_SIZE,
		.io_min = PAGE_SIZE,
		.io_opt = PAGE_SIZE,
		.max_hw_discard_sectors = UINT_MAX,
		/*
		 * zram_bio_discard() will clear all logical blocks if the
		 * logical block size is identical to the physical block size
		 * (PAGE_SIZE). But if it is different, we will skip discarding
		 * some parts of logical blocks in the part of the request
		 * range which isn't aligned to the physical block size. So we
		 * can't ensure that all discarded logical blocks are zeroed.
		 */
#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
		.max_write_zeroes_sectors = UINT_MAX,
#endif
		.features = BLK_FEAT_STABLE_WRITES |
			    BLK_FEAT_SYNCHRONOUS,
	};
	struct zram *zram;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
	spin_lock_init(&zram->wb_limit_lock);
#endif

	/* gendisk structure */
	zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(zram->disk)) {
		pr_err("Error allocating disk structure for device %d\n",
		       device_id);
		ret = PTR_ERR(zram->disk);
		goto out_free_idr;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->minors = 1;
	zram->disk->flags |= GENHD_FL_NO_PART;
	zram->disk->fops = &zram_devops;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity is set via sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
	if (ret)
		goto out_cleanup_disk;

	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);

	zram_debugfs_register(zram);
	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_cleanup_disk:
	put_disk(zram->disk);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

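/*
 * Remove the device unless it is currently open. If reset_store() has
 * already claimed it, del_gendisk() waits for that reset to finish;
 * otherwise the device is quiesced and reset here before its resources are
 * released.
 */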
static int zram_remove(struct zram *zram)
{
	bool claimed;

	mutex_lock(&zram->disk->open_mutex);
	if (disk_openers(zram->disk)) {
		mutex_unlock(&zram->disk->open_mutex);
		return -EBUSY;
	}

	claimed = zram->claim;
	if (!claimed)
		zram->claim = true;
	mutex_unlock(&zram->disk->open_mutex);

	zram_debugfs_unregister(zram);

	if (claimed) {
		/*
		 * If we were claimed by reset_store(), del_gendisk() will
		 * wait until reset_store() is done, so there is nothing
		 * to do here.
		 */
		;
	} else {
		/* Make sure all pending I/O has finished */
		sync_blockdev(zram->disk->part0);
		zram_reset_device(zram);
	}

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	del_gendisk(zram->disk);

	/* del_gendisk drains pending reset_store */
	WARN_ON_ONCE(claimed && zram->claim);

	/*
	 * disksize_store() may be called in between zram_reset_device()
	 * and del_gendisk(), so run the last reset to avoid leaking
	 * anything allocated with disksize_store().
	 */
	zram_reset_device(zram);

	put_disk(zram->disk);
	kfree(zram);
	return 0;
}

/* zram-control sysfs attributes */

/*
 * NOTE: hot_add is not the usual read-only sysfs attribute, in the sense
 * that reading from this file does alter the state of your system -- it
 * creates a new uninitialized zram device and returns that device's
 * device_id (or an error code if it fails to create a new device).
 */
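/*
 * Example usage (a sketch; the id printed depends on which ids are free):
 *
 *	cat /sys/class/zram-control/hot_add
 *	2	(a new /dev/zram2 now exists)
 */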
static ssize_t hot_add_show(const struct class *class,
			    const struct class_attribute *attr,
			    char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
/* This attribute must be set to 0400, so CLASS_ATTR_RO() cannot be used */
static struct class_attribute class_attr_hot_add =
	__ATTR(hot_add, 0400, hot_add_show, NULL);

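/*
 * Remove the device with the given id, provided it exists and is not busy.
 * Example usage (a sketch; the id is an illustrative assumption):
 *
 *	echo 2 > /sys/class/zram-control/hot_remove
 */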
static ssize_t hot_remove_store(const struct class *class,
				const struct class_attribute *attr,
				const char *buf,
				size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);

static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name = "zram-control",
	.class_groups = zram_control_class_groups,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
	WARN_ON_ONCE(zram_remove(ptr));
	return 0;
}

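/*
 * Tear everything down in the reverse order of creation: the control class,
 * every remaining device, the debugfs state, the block major number and the
 * CPU hotplug state.
 */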
static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	zram_debugfs_destroy();
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

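/*
 * Register the CPU hotplug state, the zram-control class and the block
 * major number, then pre-create num_devices devices; any failure unwinds
 * the earlier steps.
 */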
static int __init zram_init(void)
{
	struct zram_table_entry zram_te;
	int ret;

	BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_debugfs_create();
	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
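/* e.g. "modprobe zram num_devices=4" pre-creates /dev/zram0../dev/zram3 */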

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");