Lines Matching refs:zram (cross-references to the zram symbol in the Linux zram block driver, drivers/block/zram/zram_drv.c)
57 static void zram_free_page(struct zram *zram, size_t index);
58 static int zram_read_from_zspool(struct zram *zram, struct page *page,
61 static int zram_slot_trylock(struct zram *zram, u32 index) in zram_slot_trylock() argument
63 return spin_trylock(&zram->table[index].lock); in zram_slot_trylock()
66 static void zram_slot_lock(struct zram *zram, u32 index) in zram_slot_lock() argument
68 spin_lock(&zram->table[index].lock); in zram_slot_lock()
71 static void zram_slot_unlock(struct zram *zram, u32 index) in zram_slot_unlock() argument
73 spin_unlock(&zram->table[index].lock); in zram_slot_unlock()
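
The three wrappers above are the whole locking story for the slot table: each
table entry carries its own spinlock, so concurrent I/O to different pages
never contends on a device-wide lock. A minimal user-space sketch of the same
pattern with POSIX spinlocks (illustrative names, not the kernel API):

    #include <pthread.h>
    #include <stdbool.h>

    struct slot {
            pthread_spinlock_t lock;        /* one lock per table entry */
            unsigned long handle;
    };

    static bool slot_trylock(struct slot *table, unsigned int index)
    {
            return pthread_spin_trylock(&table[index].lock) == 0;
    }

    static void slot_lock(struct slot *table, unsigned int index)
    {
            pthread_spin_lock(&table[index].lock);
    }

    static void slot_unlock(struct slot *table, unsigned int index)
    {
            pthread_spin_unlock(&table[index].lock);
    }
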
76 static inline bool init_done(struct zram *zram) in init_done() argument
78 return zram->disksize; in init_done()
81 static inline struct zram *dev_to_zram(struct device *dev) in dev_to_zram()
83 return (struct zram *)dev_to_disk(dev)->private_data; in dev_to_zram()
86 static unsigned long zram_get_handle(struct zram *zram, u32 index) in zram_get_handle() argument
88 return zram->table[index].handle; in zram_get_handle()
91 static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) in zram_set_handle() argument
93 zram->table[index].handle = handle; in zram_set_handle()
97 static bool zram_test_flag(struct zram *zram, u32 index, in zram_test_flag() argument
100 return zram->table[index].flags & BIT(flag); in zram_test_flag()
103 static void zram_set_flag(struct zram *zram, u32 index, in zram_set_flag() argument
106 zram->table[index].flags |= BIT(flag); in zram_set_flag()
109 static void zram_clear_flag(struct zram *zram, u32 index, in zram_clear_flag() argument
112 zram->table[index].flags &= ~BIT(flag); in zram_clear_flag()
115 static size_t zram_get_obj_size(struct zram *zram, u32 index) in zram_get_obj_size() argument
117 return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); in zram_get_obj_size()
120 static void zram_set_obj_size(struct zram *zram, in zram_set_obj_size() argument
123 unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; in zram_set_obj_size()
125 zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; in zram_set_obj_size()
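
zram_get_obj_size() and zram_set_obj_size() expose the packing trick used
throughout this table: a single unsigned long per slot stores the compressed
object size in the low ZRAM_FLAG_SHIFT bits and the status flags in the bits
above. A standalone sketch of that layout; the field width and flag names are
assumptions for illustration:

    #include <stdbool.h>
    #include <stddef.h>

    #define FLAG_SHIFT 24                           /* assumed size-field width */
    #define SIZE_MASK  ((1UL << FLAG_SHIFT) - 1)

    enum slot_flag {                                /* flags live above the size */
            FLAG_SAME = FLAG_SHIFT,
            FLAG_WB,
            FLAG_HUGE,
            FLAG_IDLE,
    };

    static size_t get_obj_size(unsigned long flags)
    {
            return flags & SIZE_MASK;
    }

    static unsigned long set_obj_size(unsigned long flags, size_t size)
    {
            /* Keep the flag bits, replace only the size field. */
            return (flags & ~SIZE_MASK) | size;
    }

    static bool test_flag(unsigned long flags, enum slot_flag f)
    {
            return flags & (1UL << f);
    }
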
128 static inline bool zram_allocated(struct zram *zram, u32 index) in zram_allocated() argument
130 return zram_get_obj_size(zram, index) || in zram_allocated()
131 zram_test_flag(zram, index, ZRAM_SAME) || in zram_allocated()
132 zram_test_flag(zram, index, ZRAM_WB); in zram_allocated()
135 static inline void update_used_max(struct zram *zram, const unsigned long pages) in update_used_max() argument
137 unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages); in update_used_max()
142 } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages, in update_used_max()
146 static bool zram_can_store_page(struct zram *zram) in zram_can_store_page() argument
150 alloced_pages = zs_get_total_pages(zram->mem_pool); in zram_can_store_page()
151 update_used_max(zram, alloced_pages); in zram_can_store_page()
153 return !zram->limit_pages || alloced_pages <= zram->limit_pages; in zram_can_store_page()
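
update_used_max() keeps a high-water mark of pool pages without taking a lock:
read the current maximum, return if it already covers the new value, otherwise
retry a compare-and-exchange until either this value is installed or another
CPU publishes a larger one. The same loop in portable C11 atomics, as a sketch:

    #include <stdatomic.h>

    static void update_used_max(atomic_long *max_used, long pages)
    {
            long cur = atomic_load(max_used);

            do {
                    if (pages <= cur)
                            return;         /* a larger peak is already recorded */
                    /* On failure, cur is reloaded with the current value. */
            } while (!atomic_compare_exchange_weak(max_used, &cur, pages));
    }
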
169 static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio) in zram_set_priority() argument
176 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK << in zram_set_priority()
178 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1); in zram_set_priority()
181 static inline u32 zram_get_priority(struct zram *zram, u32 index) in zram_get_priority() argument
183 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1; in zram_get_priority()
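
zram_set_priority() and zram_get_priority() carve one more multi-bit field out
of the same flags word: the index of the compression stream that holds the
slot's data, bounded by ZRAM_COMP_PRIORITY_MASK starting at
ZRAM_COMP_PRIORITY_BIT1. Setting clears the old field before OR-ing in the new
value; reading is a shift and mask. A sketch with assumed bit positions:

    #define PRIO_SHIFT 29                   /* assumed low bit of the field */
    #define PRIO_MASK  0x3UL                /* two bits: up to four streams */

    static unsigned int get_priority(unsigned long flags)
    {
            return (flags >> PRIO_SHIFT) & PRIO_MASK;
    }

    static unsigned long set_priority(unsigned long flags, unsigned int prio)
    {
            flags &= ~(PRIO_MASK << PRIO_SHIFT);    /* clear the old field */
            return flags | ((unsigned long)prio << PRIO_SHIFT);
    }
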
188 static void zram_accessed(struct zram *zram, u32 index) in zram_accessed() argument
190 zram_clear_flag(zram, index, ZRAM_IDLE); in zram_accessed()
191 zram_clear_flag(zram, index, ZRAM_PP_SLOT); in zram_accessed()
193 zram->table[index].ac_time = ktime_get_boottime(); in zram_accessed()
228 static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps) in release_pp_slot() argument
232 zram_slot_lock(zram, pps->index); in release_pp_slot()
233 zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT); in release_pp_slot()
234 zram_slot_unlock(zram, pps->index); in release_pp_slot()
239 static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl) in release_pp_ctl() argument
253 release_pp_slot(zram, pps); in release_pp_ctl()
260 static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl, in place_pp_slot() argument
265 idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE; in place_pp_slot()
268 zram_set_flag(zram, pps->index, ZRAM_PP_SLOT); in place_pp_slot()
323 struct zram *zram = dev_to_zram(dev); in initstate_show() local
325 down_read(&zram->init_lock); in initstate_show()
326 val = init_done(zram); in initstate_show()
327 up_read(&zram->init_lock); in initstate_show()
335 struct zram *zram = dev_to_zram(dev); in disksize_show() local
337 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize); in disksize_show()
345 struct zram *zram = dev_to_zram(dev); in mem_limit_store() local
351 down_write(&zram->init_lock); in mem_limit_store()
352 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; in mem_limit_store()
353 up_write(&zram->init_lock); in mem_limit_store()
363 struct zram *zram = dev_to_zram(dev); in mem_used_max_store() local
369 down_read(&zram->init_lock); in mem_used_max_store()
370 if (init_done(zram)) { in mem_used_max_store()
371 atomic_long_set(&zram->stats.max_used_pages, in mem_used_max_store()
372 zs_get_total_pages(zram->mem_pool)); in mem_used_max_store()
374 up_read(&zram->init_lock); in mem_used_max_store()
383 static void mark_idle(struct zram *zram, ktime_t cutoff) in mark_idle() argument
386 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in mark_idle()
397 zram_slot_lock(zram, index); in mark_idle()
398 if (!zram_allocated(zram, index) || in mark_idle()
399 zram_test_flag(zram, index, ZRAM_WB) || in mark_idle()
400 zram_test_flag(zram, index, ZRAM_SAME)) { in mark_idle()
401 zram_slot_unlock(zram, index); in mark_idle()
407 ktime_after(cutoff, zram->table[index].ac_time); in mark_idle()
410 zram_set_flag(zram, index, ZRAM_IDLE); in mark_idle()
412 zram_clear_flag(zram, index, ZRAM_IDLE); in mark_idle()
413 zram_slot_unlock(zram, index); in mark_idle()
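
mark_idle() visits every slot under its lock and sets ZRAM_IDLE only on
entries that hold data, are neither same-filled nor written back, and were
last accessed before the cutoff; slots touched since the cutoff have the flag
cleared instead. A condensed single-threaded sketch of that filter, with
invented field names:

    #include <time.h>

    #define FLAG_IDLE (1UL << 27)           /* assumed bit position */

    struct slot {
            unsigned long flags;
            unsigned long handle;           /* zero when nothing is stored */
            time_t last_access;
    };

    static void mark_idle(struct slot *table, unsigned long nr_slots,
                          time_t cutoff)
    {
            for (unsigned long i = 0; i < nr_slots; i++) {
                    if (!table[i].handle)
                            continue;       /* nothing stored in this slot */
                    if (table[i].last_access < cutoff)
                            table[i].flags |= FLAG_IDLE;
                    else
                            table[i].flags &= ~FLAG_IDLE;
            }
    }
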
420 struct zram *zram = dev_to_zram(dev); in idle_store() local
438 down_read(&zram->init_lock); in idle_store()
439 if (!init_done(zram)) in idle_store()
446 mark_idle(zram, cutoff_time); in idle_store()
450 up_read(&zram->init_lock); in idle_store()
459 struct zram *zram = dev_to_zram(dev); in writeback_limit_enable_store() local
466 down_read(&zram->init_lock); in writeback_limit_enable_store()
467 spin_lock(&zram->wb_limit_lock); in writeback_limit_enable_store()
468 zram->wb_limit_enable = val; in writeback_limit_enable_store()
469 spin_unlock(&zram->wb_limit_lock); in writeback_limit_enable_store()
470 up_read(&zram->init_lock); in writeback_limit_enable_store()
480 struct zram *zram = dev_to_zram(dev); in writeback_limit_enable_show() local
482 down_read(&zram->init_lock); in writeback_limit_enable_show()
483 spin_lock(&zram->wb_limit_lock); in writeback_limit_enable_show()
484 val = zram->wb_limit_enable; in writeback_limit_enable_show()
485 spin_unlock(&zram->wb_limit_lock); in writeback_limit_enable_show()
486 up_read(&zram->init_lock); in writeback_limit_enable_show()
494 struct zram *zram = dev_to_zram(dev); in writeback_limit_store() local
501 down_read(&zram->init_lock); in writeback_limit_store()
502 spin_lock(&zram->wb_limit_lock); in writeback_limit_store()
503 zram->bd_wb_limit = val; in writeback_limit_store()
504 spin_unlock(&zram->wb_limit_lock); in writeback_limit_store()
505 up_read(&zram->init_lock); in writeback_limit_store()
515 struct zram *zram = dev_to_zram(dev); in writeback_limit_show() local
517 down_read(&zram->init_lock); in writeback_limit_show()
518 spin_lock(&zram->wb_limit_lock); in writeback_limit_show()
519 val = zram->bd_wb_limit; in writeback_limit_show()
520 spin_unlock(&zram->wb_limit_lock); in writeback_limit_show()
521 up_read(&zram->init_lock); in writeback_limit_show()
526 static void reset_bdev(struct zram *zram) in reset_bdev() argument
528 if (!zram->backing_dev) in reset_bdev()
532 filp_close(zram->backing_dev, NULL); in reset_bdev()
533 zram->backing_dev = NULL; in reset_bdev()
534 zram->bdev = NULL; in reset_bdev()
535 zram->disk->fops = &zram_devops; in reset_bdev()
536 kvfree(zram->bitmap); in reset_bdev()
537 zram->bitmap = NULL; in reset_bdev()
544 struct zram *zram = dev_to_zram(dev); in backing_dev_show() local
548 down_read(&zram->init_lock); in backing_dev_show()
549 file = zram->backing_dev; in backing_dev_show()
552 up_read(&zram->init_lock); in backing_dev_show()
566 up_read(&zram->init_lock); in backing_dev_show()
580 struct zram *zram = dev_to_zram(dev); in backing_dev_store() local
586 down_write(&zram->init_lock); in backing_dev_store()
587 if (init_done(zram)) { in backing_dev_store()
628 reset_bdev(zram); in backing_dev_store()
630 zram->bdev = I_BDEV(inode); in backing_dev_store()
631 zram->backing_dev = backing_dev; in backing_dev_store()
632 zram->bitmap = bitmap; in backing_dev_store()
633 zram->nr_pages = nr_pages; in backing_dev_store()
634 up_write(&zram->init_lock); in backing_dev_store()
646 up_write(&zram->init_lock); in backing_dev_store()
653 static unsigned long alloc_block_bdev(struct zram *zram) in alloc_block_bdev() argument
658 blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx); in alloc_block_bdev()
659 if (blk_idx == zram->nr_pages) in alloc_block_bdev()
662 if (test_and_set_bit(blk_idx, zram->bitmap)) in alloc_block_bdev()
665 atomic64_inc(&zram->stats.bd_count); in alloc_block_bdev()
669 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) in free_block_bdev() argument
673 was_set = test_and_clear_bit(blk_idx, zram->bitmap); in free_block_bdev()
675 atomic64_dec(&zram->stats.bd_count); in free_block_bdev()
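
alloc_block_bdev() and free_block_bdev() implement a one-bit-per-block
allocator over the backing device: find_next_zero_bit() locates a candidate
and test_and_set_bit() claims it atomically, looping because a concurrent
writer can win the race in between. Block 0 is never handed out, so a zero
return can signal exhaustion. A single-threaded sketch of the idea:

    #include <stdbool.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static bool test_and_set_bit(unsigned long *bitmap, unsigned long idx)
    {
            unsigned long mask = 1UL << (idx % BITS_PER_LONG);
            unsigned long old = bitmap[idx / BITS_PER_LONG];

            bitmap[idx / BITS_PER_LONG] = old | mask;
            return old & mask;              /* true if already claimed */
    }

    /* Returns a free block index, or 0 when the device is full. */
    static unsigned long alloc_block(unsigned long *bitmap,
                                     unsigned long nr_blocks)
    {
            for (unsigned long idx = 1; idx < nr_blocks; idx++) {
                    if (!test_and_set_bit(bitmap, idx))
                            return idx;
            }
            return 0;
    }

    static void free_block(unsigned long *bitmap, unsigned long idx)
    {
            bitmap[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
    }
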
678 static void read_from_bdev_async(struct zram *zram, struct page *page, in read_from_bdev_async() argument
683 bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO); in read_from_bdev_async()
697 static int scan_slots_for_writeback(struct zram *zram, u32 mode, in scan_slots_for_writeback() argument
712 zram_slot_lock(zram, index); in scan_slots_for_writeback()
713 if (!zram_allocated(zram, index)) in scan_slots_for_writeback()
716 if (zram_test_flag(zram, index, ZRAM_WB) || in scan_slots_for_writeback()
717 zram_test_flag(zram, index, ZRAM_SAME)) in scan_slots_for_writeback()
721 !zram_test_flag(zram, index, ZRAM_IDLE)) in scan_slots_for_writeback()
724 !zram_test_flag(zram, index, ZRAM_HUGE)) in scan_slots_for_writeback()
727 !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) in scan_slots_for_writeback()
731 place_pp_slot(zram, ctl, pps); in scan_slots_for_writeback()
734 zram_slot_unlock(zram, index); in scan_slots_for_writeback()
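
scan_slots_for_writeback() selects candidates with a chain of early-outs:
empty slots, slots already written back, and same-filled slots are skipped,
and the requested mode then narrows the remainder to idle, huge, or
incompressible pages. The filter condensed into one predicate; flag and mode
bits are assumed values:

    #include <stdbool.h>

    #define FLAG_SAME           (1UL << 24)
    #define FLAG_WB             (1UL << 25)
    #define FLAG_HUGE           (1UL << 26)
    #define FLAG_IDLE           (1UL << 27)
    #define FLAG_INCOMPRESSIBLE (1UL << 28)

    #define MODE_IDLE           0x1
    #define MODE_HUGE           0x2
    #define MODE_INCOMPRESSIBLE 0x4

    static bool writeback_candidate(unsigned long flags, bool allocated,
                                    int mode)
    {
            if (!allocated)
                    return false;
            if (flags & (FLAG_WB | FLAG_SAME))
                    return false;   /* already on disk / nothing to gain */
            if ((mode & MODE_IDLE) && !(flags & FLAG_IDLE))
                    return false;
            if ((mode & MODE_HUGE) && !(flags & FLAG_HUGE))
                    return false;
            if ((mode & MODE_INCOMPRESSIBLE) &&
                !(flags & FLAG_INCOMPRESSIBLE))
                    return false;
            return true;
    }
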
744 struct zram *zram = dev_to_zram(dev); in writeback_store() local
745 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in writeback_store()
776 down_read(&zram->init_lock); in writeback_store()
777 if (!init_done(zram)) { in writeback_store()
783 if (atomic_xchg(&zram->pp_in_progress, 1)) { in writeback_store()
784 up_read(&zram->init_lock); in writeback_store()
788 if (!zram->backing_dev) { in writeback_store()
805 scan_slots_for_writeback(zram, mode, nr_pages, index, ctl); in writeback_store()
808 spin_lock(&zram->wb_limit_lock); in writeback_store()
809 if (zram->wb_limit_enable && !zram->bd_wb_limit) { in writeback_store()
810 spin_unlock(&zram->wb_limit_lock); in writeback_store()
814 spin_unlock(&zram->wb_limit_lock); in writeback_store()
817 blk_idx = alloc_block_bdev(zram); in writeback_store()
825 zram_slot_lock(zram, index); in writeback_store()
832 if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) in writeback_store()
834 if (zram_read_from_zspool(zram, page, index)) in writeback_store()
836 zram_slot_unlock(zram, index); in writeback_store()
838 bio_init(&bio, zram->bdev, &bio_vec, 1, in writeback_store()
849 release_pp_slot(zram, pps); in writeback_store()
862 atomic64_inc(&zram->stats.bd_writes); in writeback_store()
863 zram_slot_lock(zram, index); in writeback_store()
872 if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) in writeback_store()
875 zram_free_page(zram, index); in writeback_store()
876 zram_set_flag(zram, index, ZRAM_WB); in writeback_store()
877 zram_set_handle(zram, index, blk_idx); in writeback_store()
879 atomic64_inc(&zram->stats.pages_stored); in writeback_store()
880 spin_lock(&zram->wb_limit_lock); in writeback_store()
881 if (zram->wb_limit_enable && zram->bd_wb_limit > 0) in writeback_store()
882 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12); in writeback_store()
883 spin_unlock(&zram->wb_limit_lock); in writeback_store()
885 zram_slot_unlock(zram, index); in writeback_store()
886 release_pp_slot(zram, pps); in writeback_store()
892 free_block_bdev(zram, blk_idx); in writeback_store()
895 release_pp_ctl(zram, ctl); in writeback_store()
896 atomic_set(&zram->pp_in_progress, 0); in writeback_store()
897 up_read(&zram->init_lock); in writeback_store()
904 struct zram *zram; member
916 bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ); in zram_sync_read()
927 static int read_from_bdev_sync(struct zram *zram, struct page *page, in read_from_bdev_sync() argument
933 work.zram = zram; in read_from_bdev_sync()
944 static int read_from_bdev(struct zram *zram, struct page *page, in read_from_bdev() argument
947 atomic64_inc(&zram->stats.bd_reads); in read_from_bdev()
951 return read_from_bdev_sync(zram, page, entry); in read_from_bdev()
953 read_from_bdev_async(zram, page, entry, parent); in read_from_bdev()
957 static inline void reset_bdev(struct zram *zram) {}; in reset_bdev() argument
958 static int read_from_bdev(struct zram *zram, struct page *page, in read_from_bdev() argument
964 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}; in free_block_bdev() argument
986 struct zram *zram = file->private_data; in read_block_state() local
987 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in read_block_state()
994 down_read(&zram->init_lock); in read_block_state()
995 if (!init_done(zram)) { in read_block_state()
996 up_read(&zram->init_lock); in read_block_state()
1004 zram_slot_lock(zram, index); in read_block_state()
1005 if (!zram_allocated(zram, index)) in read_block_state()
1008 ts = ktime_to_timespec64(zram->table[index].ac_time); in read_block_state()
1013 zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.', in read_block_state()
1014 zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', in read_block_state()
1015 zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.', in read_block_state()
1016 zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.', in read_block_state()
1017 zram_get_priority(zram, index) ? 'r' : '.', in read_block_state()
1018 zram_test_flag(zram, index, in read_block_state()
1022 zram_slot_unlock(zram, index); in read_block_state()
1028 zram_slot_unlock(zram, index); in read_block_state()
1032 up_read(&zram->init_lock); in read_block_state()
1046 static void zram_debugfs_register(struct zram *zram) in zram_debugfs_register() argument
1051 zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name, in zram_debugfs_register()
1053 debugfs_create_file("block_state", 0400, zram->debugfs_dir, in zram_debugfs_register()
1054 zram, &proc_zram_block_state_op); in zram_debugfs_register()
1057 static void zram_debugfs_unregister(struct zram *zram) in zram_debugfs_unregister() argument
1059 debugfs_remove_recursive(zram->debugfs_dir); in zram_debugfs_unregister()
1064 static void zram_debugfs_register(struct zram *zram) {}; in zram_debugfs_register() argument
1065 static void zram_debugfs_unregister(struct zram *zram) {}; in zram_debugfs_unregister() argument
1089 static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg) in comp_algorithm_set() argument
1092 if (zram->comp_algs[prio] != default_compressor) in comp_algorithm_set()
1093 kfree(zram->comp_algs[prio]); in comp_algorithm_set()
1095 zram->comp_algs[prio] = alg; in comp_algorithm_set()
1098 static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf) in __comp_algorithm_show() argument
1102 down_read(&zram->init_lock); in __comp_algorithm_show()
1103 sz = zcomp_available_show(zram->comp_algs[prio], buf); in __comp_algorithm_show()
1104 up_read(&zram->init_lock); in __comp_algorithm_show()
1109 static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf) in __comp_algorithm_store() argument
1131 down_write(&zram->init_lock); in __comp_algorithm_store()
1132 if (init_done(zram)) { in __comp_algorithm_store()
1133 up_write(&zram->init_lock); in __comp_algorithm_store()
1139 comp_algorithm_set(zram, prio, compressor); in __comp_algorithm_store()
1140 up_write(&zram->init_lock); in __comp_algorithm_store()
1144 static void comp_params_reset(struct zram *zram, u32 prio) in comp_params_reset() argument
1146 struct zcomp_params *params = &zram->params[prio]; in comp_params_reset()
1154 static int comp_params_store(struct zram *zram, u32 prio, s32 level, in comp_params_store() argument
1159 comp_params_reset(zram, prio); in comp_params_store()
1163 &zram->params[prio].dict, in comp_params_store()
1171 zram->params[prio].dict_sz = sz; in comp_params_store()
1172 zram->params[prio].level = level; in comp_params_store()
1183 struct zram *zram = dev_to_zram(dev); in algorithm_params_store() local
1224 if (!zram->comp_algs[p]) in algorithm_params_store()
1227 if (!strcmp(zram->comp_algs[p], algo)) { in algorithm_params_store()
1237 ret = comp_params_store(zram, prio, level, dict_path); in algorithm_params_store()
1245 struct zram *zram = dev_to_zram(dev); in comp_algorithm_show() local
1247 return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf); in comp_algorithm_show()
1255 struct zram *zram = dev_to_zram(dev); in comp_algorithm_store() local
1258 ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf); in comp_algorithm_store()
1267 struct zram *zram = dev_to_zram(dev); in recomp_algorithm_show() local
1272 if (!zram->comp_algs[prio]) in recomp_algorithm_show()
1276 sz += __comp_algorithm_show(zram, prio, buf + sz); in recomp_algorithm_show()
1287 struct zram *zram = dev_to_zram(dev); in recomp_algorithm_store() local
1319 ret = __comp_algorithm_store(zram, prio, alg); in recomp_algorithm_store()
1327 struct zram *zram = dev_to_zram(dev); in compact_store() local
1329 down_read(&zram->init_lock); in compact_store()
1330 if (!init_done(zram)) { in compact_store()
1331 up_read(&zram->init_lock); in compact_store()
1335 zs_compact(zram->mem_pool); in compact_store()
1336 up_read(&zram->init_lock); in compact_store()
1344 struct zram *zram = dev_to_zram(dev); in io_stat_show() local
1347 down_read(&zram->init_lock); in io_stat_show()
1350 (u64)atomic64_read(&zram->stats.failed_reads), in io_stat_show()
1351 (u64)atomic64_read(&zram->stats.failed_writes), in io_stat_show()
1352 (u64)atomic64_read(&zram->stats.notify_free)); in io_stat_show()
1353 up_read(&zram->init_lock); in io_stat_show()
1361 struct zram *zram = dev_to_zram(dev); in mm_stat_show() local
1369 down_read(&zram->init_lock); in mm_stat_show()
1370 if (init_done(zram)) { in mm_stat_show()
1371 mem_used = zs_get_total_pages(zram->mem_pool); in mm_stat_show()
1372 zs_pool_stats(zram->mem_pool, &pool_stats); in mm_stat_show()
1375 orig_size = atomic64_read(&zram->stats.pages_stored); in mm_stat_show()
1376 max_used = atomic_long_read(&zram->stats.max_used_pages); in mm_stat_show()
1381 (u64)atomic64_read(&zram->stats.compr_data_size), in mm_stat_show()
1383 zram->limit_pages << PAGE_SHIFT, in mm_stat_show()
1385 (u64)atomic64_read(&zram->stats.same_pages), in mm_stat_show()
1387 (u64)atomic64_read(&zram->stats.huge_pages), in mm_stat_show()
1388 (u64)atomic64_read(&zram->stats.huge_pages_since)); in mm_stat_show()
1389 up_read(&zram->init_lock); in mm_stat_show()
1399 struct zram *zram = dev_to_zram(dev); in bd_stat_show() local
1402 down_read(&zram->init_lock); in bd_stat_show()
1405 FOUR_K((u64)atomic64_read(&zram->stats.bd_count)), in bd_stat_show()
1406 FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)), in bd_stat_show()
1407 FOUR_K((u64)atomic64_read(&zram->stats.bd_writes))); in bd_stat_show()
1408 up_read(&zram->init_lock); in bd_stat_show()
1418 struct zram *zram = dev_to_zram(dev); in debug_stat_show() local
1421 down_read(&zram->init_lock); in debug_stat_show()
1425 (u64)atomic64_read(&zram->stats.writestall), in debug_stat_show()
1426 (u64)atomic64_read(&zram->stats.miss_free)); in debug_stat_show()
1427 up_read(&zram->init_lock); in debug_stat_show()
1439 static void zram_meta_free(struct zram *zram, u64 disksize) in zram_meta_free() argument
1444 if (!zram->table) in zram_meta_free()
1449 zram_free_page(zram, index); in zram_meta_free()
1451 zs_destroy_pool(zram->mem_pool); in zram_meta_free()
1452 vfree(zram->table); in zram_meta_free()
1453 zram->table = NULL; in zram_meta_free()
1456 static bool zram_meta_alloc(struct zram *zram, u64 disksize) in zram_meta_alloc() argument
1461 zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); in zram_meta_alloc()
1462 if (!zram->table) in zram_meta_alloc()
1465 zram->mem_pool = zs_create_pool(zram->disk->disk_name); in zram_meta_alloc()
1466 if (!zram->mem_pool) { in zram_meta_alloc()
1467 vfree(zram->table); in zram_meta_alloc()
1468 zram->table = NULL; in zram_meta_alloc()
1473 huge_class_size = zs_huge_class_size(zram->mem_pool); in zram_meta_alloc()
1476 spin_lock_init(&zram->table[index].lock); in zram_meta_alloc()
1485 static void zram_free_page(struct zram *zram, size_t index) in zram_free_page() argument
1490 zram->table[index].ac_time = 0; in zram_free_page()
1493 zram_clear_flag(zram, index, ZRAM_IDLE); in zram_free_page()
1494 zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE); in zram_free_page()
1495 zram_clear_flag(zram, index, ZRAM_PP_SLOT); in zram_free_page()
1496 zram_set_priority(zram, index, 0); in zram_free_page()
1498 if (zram_test_flag(zram, index, ZRAM_HUGE)) { in zram_free_page()
1499 zram_clear_flag(zram, index, ZRAM_HUGE); in zram_free_page()
1500 atomic64_dec(&zram->stats.huge_pages); in zram_free_page()
1503 if (zram_test_flag(zram, index, ZRAM_WB)) { in zram_free_page()
1504 zram_clear_flag(zram, index, ZRAM_WB); in zram_free_page()
1505 free_block_bdev(zram, zram_get_handle(zram, index)); in zram_free_page()
1513 if (zram_test_flag(zram, index, ZRAM_SAME)) { in zram_free_page()
1514 zram_clear_flag(zram, index, ZRAM_SAME); in zram_free_page()
1515 atomic64_dec(&zram->stats.same_pages); in zram_free_page()
1519 handle = zram_get_handle(zram, index); in zram_free_page()
1523 zs_free(zram->mem_pool, handle); in zram_free_page()
1525 atomic64_sub(zram_get_obj_size(zram, index), in zram_free_page()
1526 &zram->stats.compr_data_size); in zram_free_page()
1528 atomic64_dec(&zram->stats.pages_stored); in zram_free_page()
1529 zram_set_handle(zram, index, 0); in zram_free_page()
1530 zram_set_obj_size(zram, index, 0); in zram_free_page()
1533 static int read_same_filled_page(struct zram *zram, struct page *page, in read_same_filled_page() argument
1539 zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index)); in read_same_filled_page()
1544 static int read_incompressible_page(struct zram *zram, struct page *page, in read_incompressible_page() argument
1550 handle = zram_get_handle(zram, index); in read_incompressible_page()
1551 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); in read_incompressible_page()
1555 zs_unmap_object(zram->mem_pool, handle); in read_incompressible_page()
1560 static int read_compressed_page(struct zram *zram, struct page *page, u32 index) in read_compressed_page() argument
1568 handle = zram_get_handle(zram, index); in read_compressed_page()
1569 size = zram_get_obj_size(zram, index); in read_compressed_page()
1570 prio = zram_get_priority(zram, index); in read_compressed_page()
1572 zstrm = zcomp_stream_get(zram->comps[prio]); in read_compressed_page()
1573 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); in read_compressed_page()
1575 ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst); in read_compressed_page()
1577 zs_unmap_object(zram->mem_pool, handle); in read_compressed_page()
1578 zcomp_stream_put(zram->comps[prio]); in read_compressed_page()
1587 static int zram_read_from_zspool(struct zram *zram, struct page *page, in zram_read_from_zspool() argument
1590 if (zram_test_flag(zram, index, ZRAM_SAME) || in zram_read_from_zspool()
1591 !zram_get_handle(zram, index)) in zram_read_from_zspool()
1592 return read_same_filled_page(zram, page, index); in zram_read_from_zspool()
1594 if (!zram_test_flag(zram, index, ZRAM_HUGE)) in zram_read_from_zspool()
1595 return read_compressed_page(zram, page, index); in zram_read_from_zspool()
1597 return read_incompressible_page(zram, page, index); in zram_read_from_zspool()
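
zram_read_from_zspool() dispatches on the slot's state: same-filled pages (or
never-written slots) are rebuilt from the stored pattern, huge pages are
copied out verbatim, and everything else is decompressed by the stream whose
priority the slot records. That decision tree as a sketch, with assumed flag
bits:

    #define FLAG_SAME (1UL << 24)
    #define FLAG_HUGE (1UL << 26)

    enum page_kind {
            PAGE_SAME_FILLED,       /* refill the page with the pattern */
            PAGE_INCOMPRESSIBLE,    /* copy the stored page out as-is */
            PAGE_COMPRESSED,        /* decompress with the recorded prio */
    };

    static enum page_kind classify_slot(unsigned long flags,
                                        unsigned long handle)
    {
            if ((flags & FLAG_SAME) || !handle)
                    return PAGE_SAME_FILLED;
            if (flags & FLAG_HUGE)
                    return PAGE_INCOMPRESSIBLE;
            return PAGE_COMPRESSED;
    }
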
1600 static int zram_read_page(struct zram *zram, struct page *page, u32 index, in zram_read_page() argument
1605 zram_slot_lock(zram, index); in zram_read_page()
1606 if (!zram_test_flag(zram, index, ZRAM_WB)) { in zram_read_page()
1608 ret = zram_read_from_zspool(zram, page, index); in zram_read_page()
1609 zram_slot_unlock(zram, index); in zram_read_page()
1615 zram_slot_unlock(zram, index); in zram_read_page()
1617 ret = read_from_bdev(zram, page, zram_get_handle(zram, index), in zram_read_page()
1632 static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read_partial() argument
1640 ret = zram_read_page(zram, page, index, NULL); in zram_bvec_read_partial()
1647 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read() argument
1651 return zram_bvec_read_partial(zram, bvec, index, offset); in zram_bvec_read()
1652 return zram_read_page(zram, bvec->bv_page, index, bio); in zram_bvec_read()
1655 static int write_same_filled_page(struct zram *zram, unsigned long fill, in write_same_filled_page() argument
1658 zram_slot_lock(zram, index); in write_same_filled_page()
1659 zram_set_flag(zram, index, ZRAM_SAME); in write_same_filled_page()
1660 zram_set_handle(zram, index, fill); in write_same_filled_page()
1661 zram_slot_unlock(zram, index); in write_same_filled_page()
1663 atomic64_inc(&zram->stats.same_pages); in write_same_filled_page()
1664 atomic64_inc(&zram->stats.pages_stored); in write_same_filled_page()
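
write_same_filled_page() stores no object at all: the repeating machine word
is kept directly in the slot's handle field, and ZRAM_SAME marks it for the
read path. The driver detects such pages by scanning the page word by word
(page_same_filled() in the same file); roughly:

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    static bool page_same_filled(const void *mem, unsigned long *element)
    {
            const unsigned long *page = mem;
            size_t nr = PAGE_SIZE / sizeof(*page);

            *element = page[0];
            for (size_t i = 1; i < nr; i++) {
                    if (page[i] != *element)
                            return false;
            }
            return true;            /* whole page repeats one word */
    }
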
1669 static int write_incompressible_page(struct zram *zram, struct page *page, in write_incompressible_page() argument
1680 handle = zs_malloc(zram->mem_pool, PAGE_SIZE, in write_incompressible_page()
1685 if (!zram_can_store_page(zram)) { in write_incompressible_page()
1686 zs_free(zram->mem_pool, handle); in write_incompressible_page()
1690 dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); in write_incompressible_page()
1694 zs_unmap_object(zram->mem_pool, handle); in write_incompressible_page()
1696 zram_slot_lock(zram, index); in write_incompressible_page()
1697 zram_set_flag(zram, index, ZRAM_HUGE); in write_incompressible_page()
1698 zram_set_handle(zram, index, handle); in write_incompressible_page()
1699 zram_set_obj_size(zram, index, PAGE_SIZE); in write_incompressible_page()
1700 zram_slot_unlock(zram, index); in write_incompressible_page()
1702 atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size); in write_incompressible_page()
1703 atomic64_inc(&zram->stats.huge_pages); in write_incompressible_page()
1704 atomic64_inc(&zram->stats.huge_pages_since); in write_incompressible_page()
1705 atomic64_inc(&zram->stats.pages_stored); in write_incompressible_page()
1710 static int zram_write_page(struct zram *zram, struct page *page, u32 index) in zram_write_page() argument
1721 zram_slot_lock(zram, index); in zram_write_page()
1722 zram_free_page(zram, index); in zram_write_page()
1723 zram_slot_unlock(zram, index); in zram_write_page()
1729 return write_same_filled_page(zram, element, index); in zram_write_page()
1732 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); in zram_write_page()
1734 ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm, in zram_write_page()
1739 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); in zram_write_page()
1741 zs_free(zram->mem_pool, handle); in zram_write_page()
1746 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); in zram_write_page()
1747 return write_incompressible_page(zram, page, index); in zram_write_page()
1764 handle = zs_malloc(zram->mem_pool, comp_len, in zram_write_page()
1770 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); in zram_write_page()
1771 atomic64_inc(&zram->stats.writestall); in zram_write_page()
1772 handle = zs_malloc(zram->mem_pool, comp_len, in zram_write_page()
1781 if (!zram_can_store_page(zram)) { in zram_write_page()
1782 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); in zram_write_page()
1783 zs_free(zram->mem_pool, handle); in zram_write_page()
1787 dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); in zram_write_page()
1790 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); in zram_write_page()
1791 zs_unmap_object(zram->mem_pool, handle); in zram_write_page()
1793 zram_slot_lock(zram, index); in zram_write_page()
1794 zram_set_handle(zram, index, handle); in zram_write_page()
1795 zram_set_obj_size(zram, index, comp_len); in zram_write_page()
1796 zram_slot_unlock(zram, index); in zram_write_page()
1799 atomic64_inc(&zram->stats.pages_stored); in zram_write_page()
1800 atomic64_add(comp_len, &zram->stats.compr_data_size); in zram_write_page()
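
Around these lines zram_write_page() allocates the zsmalloc handle in two
stages: an opportunistic attempt made while the compression stream is still
held, then, if that fails, a blocking retry after the stream is released,
with stats.writestall recording the slow path. The shape of that pattern;
malloc() stands in for both allocator flavors here:

    #include <stdlib.h>

    static unsigned long writestall;        /* counts slow-path allocations */

    static void *try_alloc_fast(size_t len)
    {
            return malloc(len);             /* stand-in: must not sleep */
    }

    static void *try_alloc_slow(size_t len)
    {
            return malloc(len);             /* stand-in: may sleep/reclaim */
    }

    static void *alloc_handle(size_t len)
    {
            void *h = try_alloc_fast(len);

            if (!h) {
                    /* Caller drops its locks before this point. */
                    writestall++;
                    h = try_alloc_slow(len);
            }
            return h;
    }
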
1808 static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec, in zram_bvec_write_partial() argument
1817 ret = zram_read_page(zram, page, index, bio); in zram_bvec_write_partial()
1820 ret = zram_write_page(zram, page, index); in zram_bvec_write_partial()
1826 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, in zram_bvec_write() argument
1830 return zram_bvec_write_partial(zram, bvec, index, offset, bio); in zram_bvec_write()
1831 return zram_write_page(zram, bvec->bv_page, index); in zram_bvec_write()
1838 static int scan_slots_for_recompress(struct zram *zram, u32 mode, in scan_slots_for_recompress() argument
1841 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in scan_slots_for_recompress()
1853 zram_slot_lock(zram, index); in scan_slots_for_recompress()
1854 if (!zram_allocated(zram, index)) in scan_slots_for_recompress()
1858 !zram_test_flag(zram, index, ZRAM_IDLE)) in scan_slots_for_recompress()
1862 !zram_test_flag(zram, index, ZRAM_HUGE)) in scan_slots_for_recompress()
1865 if (zram_test_flag(zram, index, ZRAM_WB) || in scan_slots_for_recompress()
1866 zram_test_flag(zram, index, ZRAM_SAME) || in scan_slots_for_recompress()
1867 zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) in scan_slots_for_recompress()
1871 place_pp_slot(zram, ctl, pps); in scan_slots_for_recompress()
1874 zram_slot_unlock(zram, index); in scan_slots_for_recompress()
1888 static int recompress_slot(struct zram *zram, u32 index, struct page *page, in recompress_slot() argument
1903 handle_old = zram_get_handle(zram, index); in recompress_slot()
1907 comp_len_old = zram_get_obj_size(zram, index); in recompress_slot()
1914 ret = zram_read_from_zspool(zram, page, index); in recompress_slot()
1923 zram_clear_flag(zram, index, ZRAM_IDLE); in recompress_slot()
1925 class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old); in recompress_slot()
1931 if (!zram->comps[prio]) in recompress_slot()
1938 if (prio <= zram_get_priority(zram, index)) in recompress_slot()
1942 zstrm = zcomp_stream_get(zram->comps[prio]); in recompress_slot()
1944 ret = zcomp_compress(zram->comps[prio], zstrm, in recompress_slot()
1949 zcomp_stream_put(zram->comps[prio]); in recompress_slot()
1953 class_index_new = zs_lookup_class_index(zram->mem_pool, in recompress_slot()
1959 zcomp_stream_put(zram->comps[prio]); in recompress_slot()
1995 if (num_recomps == zram->num_active_comps - 1) in recompress_slot()
1996 zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE); in recompress_slot()
2011 handle_new = zs_malloc(zram->mem_pool, comp_len_new, in recompress_slot()
2017 zcomp_stream_put(zram->comps[prio]); in recompress_slot()
2021 dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO); in recompress_slot()
2023 zcomp_stream_put(zram->comps[prio]); in recompress_slot()
2025 zs_unmap_object(zram->mem_pool, handle_new); in recompress_slot()
2027 zram_free_page(zram, index); in recompress_slot()
2028 zram_set_handle(zram, index, handle_new); in recompress_slot()
2029 zram_set_obj_size(zram, index, comp_len_new); in recompress_slot()
2030 zram_set_priority(zram, index, prio); in recompress_slot()
2032 atomic64_add(comp_len_new, &zram->stats.compr_data_size); in recompress_slot()
2033 atomic64_inc(&zram->stats.pages_stored); in recompress_slot()
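
recompress_slot() re-reads the object and then tries only the streams whose
priority is strictly higher than the one already recorded for the slot; a
stream wins when its output lands in a smaller zsmalloc size class, and if
every active stream is tried without a win the slot is flagged
ZRAM_INCOMPRESSIBLE so later passes skip it. A simplified sketch that compares
raw lengths where the driver really compares size classes:

    #include <stddef.h>

    #define NUM_STREAMS 4           /* primary + up to three secondary */

    typedef size_t (*compress_fn)(const void *page, void *dst);

    static int recompress_pick(const void *page, void *dst, size_t old_len,
                               unsigned int old_prio,
                               compress_fn comp[NUM_STREAMS], size_t *new_len)
    {
            for (unsigned int prio = old_prio + 1; prio < NUM_STREAMS; prio++) {
                    if (!comp[prio])
                            continue;       /* stream not configured */
                    *new_len = comp[prio](page, dst);
                    if (*new_len < old_len)
                            return (int)prio;       /* smaller: move it */
            }
            return -1;              /* no winner: mark INCOMPRESSIBLE */
    }
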
2043 struct zram *zram = dev_to_zram(dev); in recompress_store() local
2112 down_read(&zram->init_lock); in recompress_store()
2113 if (!init_done(zram)) { in recompress_store()
2119 if (atomic_xchg(&zram->pp_in_progress, 1)) { in recompress_store()
2120 up_read(&zram->init_lock); in recompress_store()
2128 if (!zram->comp_algs[prio]) in recompress_store()
2131 if (!strcmp(zram->comp_algs[prio], algo)) { in recompress_store()
2156 scan_slots_for_recompress(zram, mode, ctl); in recompress_store()
2165 zram_slot_lock(zram, pps->index); in recompress_store()
2166 if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT)) in recompress_store()
2169 err = recompress_slot(zram, pps->index, page, in recompress_store()
2173 zram_slot_unlock(zram, pps->index); in recompress_store()
2174 release_pp_slot(zram, pps); in recompress_store()
2187 release_pp_ctl(zram, ctl); in recompress_store()
2188 atomic_set(&zram->pp_in_progress, 0); in recompress_store()
2189 up_read(&zram->init_lock); in recompress_store()
2194 static void zram_bio_discard(struct zram *zram, struct bio *bio) in zram_bio_discard() argument
2220 zram_slot_lock(zram, index); in zram_bio_discard()
2221 zram_free_page(zram, index); in zram_bio_discard()
2222 zram_slot_unlock(zram, index); in zram_bio_discard()
2223 atomic64_inc(&zram->stats.notify_free); in zram_bio_discard()
2231 static void zram_bio_read(struct zram *zram, struct bio *bio) in zram_bio_read() argument
2244 if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) { in zram_bio_read()
2245 atomic64_inc(&zram->stats.failed_reads); in zram_bio_read()
2251 zram_slot_lock(zram, index); in zram_bio_read()
2252 zram_accessed(zram, index); in zram_bio_read()
2253 zram_slot_unlock(zram, index); in zram_bio_read()
2262 static void zram_bio_write(struct zram *zram, struct bio *bio) in zram_bio_write() argument
2275 if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) { in zram_bio_write()
2276 atomic64_inc(&zram->stats.failed_writes); in zram_bio_write()
2281 zram_slot_lock(zram, index); in zram_bio_write()
2282 zram_accessed(zram, index); in zram_bio_write()
2283 zram_slot_unlock(zram, index); in zram_bio_write()
2297 struct zram *zram = bio->bi_bdev->bd_disk->private_data; in zram_submit_bio() local
2301 zram_bio_read(zram, bio); in zram_submit_bio()
2304 zram_bio_write(zram, bio); in zram_submit_bio()
2308 zram_bio_discard(zram, bio); in zram_submit_bio()
2319 struct zram *zram; in zram_slot_free_notify() local
2321 zram = bdev->bd_disk->private_data; in zram_slot_free_notify()
2323 atomic64_inc(&zram->stats.notify_free); in zram_slot_free_notify()
2324 if (!zram_slot_trylock(zram, index)) { in zram_slot_free_notify()
2325 atomic64_inc(&zram->stats.miss_free); in zram_slot_free_notify()
2329 zram_free_page(zram, index); in zram_slot_free_notify()
2330 zram_slot_unlock(zram, index); in zram_slot_free_notify()
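
zram_slot_free_notify() runs in swap's slot-free path, where waiting on the
slot lock is not allowed, so it only trylocks: a busy slot is skipped and
counted as miss_free, and the stale entry is reclaimed when the slot is next
overwritten. The pattern, self-contained:

    #include <pthread.h>

    struct slot {
            pthread_spinlock_t lock;
            unsigned long handle;
    };

    static void slot_free_notify(struct slot *table, unsigned int index,
                                 unsigned long *miss_free)
    {
            if (pthread_spin_trylock(&table[index].lock) != 0) {
                    (*miss_free)++; /* busy: skip now, reclaim on next write */
                    return;
            }
            table[index].handle = 0;        /* stand-in for zram_free_page() */
            pthread_spin_unlock(&table[index].lock);
    }
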
2333 static void zram_comp_params_reset(struct zram *zram) in zram_comp_params_reset() argument
2338 comp_params_reset(zram, prio); in zram_comp_params_reset()
2342 static void zram_destroy_comps(struct zram *zram) in zram_destroy_comps() argument
2347 struct zcomp *comp = zram->comps[prio]; in zram_destroy_comps()
2349 zram->comps[prio] = NULL; in zram_destroy_comps()
2353 zram->num_active_comps--; in zram_destroy_comps()
2358 if (zram->comp_algs[prio] != default_compressor) in zram_destroy_comps()
2359 kfree(zram->comp_algs[prio]); in zram_destroy_comps()
2360 zram->comp_algs[prio] = NULL; in zram_destroy_comps()
2363 zram_comp_params_reset(zram); in zram_destroy_comps()
2366 static void zram_reset_device(struct zram *zram) in zram_reset_device() argument
2368 down_write(&zram->init_lock); in zram_reset_device()
2370 zram->limit_pages = 0; in zram_reset_device()
2372 set_capacity_and_notify(zram->disk, 0); in zram_reset_device()
2373 part_stat_set_all(zram->disk->part0, 0); in zram_reset_device()
2376 zram_meta_free(zram, zram->disksize); in zram_reset_device()
2377 zram->disksize = 0; in zram_reset_device()
2378 zram_destroy_comps(zram); in zram_reset_device()
2379 memset(&zram->stats, 0, sizeof(zram->stats)); in zram_reset_device()
2380 atomic_set(&zram->pp_in_progress, 0); in zram_reset_device()
2381 reset_bdev(zram); in zram_reset_device()
2383 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); in zram_reset_device()
2384 up_write(&zram->init_lock); in zram_reset_device()
2392 struct zram *zram = dev_to_zram(dev); in disksize_store() local
2400 down_write(&zram->init_lock); in disksize_store()
2401 if (init_done(zram)) { in disksize_store()
2408 if (!zram_meta_alloc(zram, disksize)) { in disksize_store()
2414 if (!zram->comp_algs[prio]) in disksize_store()
2417 comp = zcomp_create(zram->comp_algs[prio], in disksize_store()
2418 &zram->params[prio]); in disksize_store()
2421 zram->comp_algs[prio]); in disksize_store()
2426 zram->comps[prio] = comp; in disksize_store()
2427 zram->num_active_comps++; in disksize_store()
2429 zram->disksize = disksize; in disksize_store()
2430 set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT); in disksize_store()
2431 up_write(&zram->init_lock); in disksize_store()
2436 zram_destroy_comps(zram); in disksize_store()
2437 zram_meta_free(zram, disksize); in disksize_store()
2439 up_write(&zram->init_lock); in disksize_store()
2448 struct zram *zram; in reset_store() local
2458 zram = dev_to_zram(dev); in reset_store()
2459 disk = zram->disk; in reset_store()
2463 if (disk_openers(disk) || zram->claim) { in reset_store()
2469 zram->claim = true; in reset_store()
2474 zram_reset_device(zram); in reset_store()
2477 zram->claim = false; in reset_store()
2485 struct zram *zram = disk->private_data; in zram_open() local
2490 if (zram->claim) in zram_open()
2585 struct zram *zram; in zram_add() local
2588 zram = kzalloc(sizeof(struct zram), GFP_KERNEL); in zram_add()
2589 if (!zram) in zram_add()
2592 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL); in zram_add()
2597 init_rwsem(&zram->init_lock); in zram_add()
2599 spin_lock_init(&zram->wb_limit_lock); in zram_add()
2603 zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE); in zram_add()
2604 if (IS_ERR(zram->disk)) { in zram_add()
2607 ret = PTR_ERR(zram->disk); in zram_add()
2611 zram->disk->major = zram_major; in zram_add()
2612 zram->disk->first_minor = device_id; in zram_add()
2613 zram->disk->minors = 1; in zram_add()
2614 zram->disk->flags |= GENHD_FL_NO_PART; in zram_add()
2615 zram->disk->fops = &zram_devops; in zram_add()
2616 zram->disk->private_data = zram; in zram_add()
2617 snprintf(zram->disk->disk_name, 16, "zram%d", device_id); in zram_add()
2618 atomic_set(&zram->pp_in_progress, 0); in zram_add()
2619 zram_comp_params_reset(zram); in zram_add()
2620 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); in zram_add()
2623 set_capacity(zram->disk, 0); in zram_add()
2624 ret = device_add_disk(NULL, zram->disk, zram_disk_groups); in zram_add()
2628 zram_debugfs_register(zram); in zram_add()
2629 pr_info("Added device: %s\n", zram->disk->disk_name); in zram_add()
2633 put_disk(zram->disk); in zram_add()
2637 kfree(zram); in zram_add()
2641 static int zram_remove(struct zram *zram) in zram_remove() argument
2645 mutex_lock(&zram->disk->open_mutex); in zram_remove()
2646 if (disk_openers(zram->disk)) { in zram_remove()
2647 mutex_unlock(&zram->disk->open_mutex); in zram_remove()
2651 claimed = zram->claim; in zram_remove()
2653 zram->claim = true; in zram_remove()
2654 mutex_unlock(&zram->disk->open_mutex); in zram_remove()
2656 zram_debugfs_unregister(zram); in zram_remove()
2666 sync_blockdev(zram->disk->part0); in zram_remove()
2667 zram_reset_device(zram); in zram_remove()
2670 pr_info("Removed device: %s\n", zram->disk->disk_name); in zram_remove()
2672 del_gendisk(zram->disk); in zram_remove()
2675 WARN_ON_ONCE(claimed && zram->claim); in zram_remove()
2682 zram_reset_device(zram); in zram_remove()
2684 put_disk(zram->disk); in zram_remove()
2685 kfree(zram); in zram_remove()
2720 struct zram *zram; in hot_remove_store() local
2732 zram = idr_find(&zram_index_idr, dev_id); in hot_remove_store()
2733 if (zram) { in hot_remove_store()
2734 ret = zram_remove(zram); in hot_remove_store()