Lines Matching +full:atomic +full:-threshold +full:-us (zram driver, drivers/block/zram/zram_drv.c)
10 * Released under the terms of 3-clause BSD License
27 #include <linux/backing-dev.h>
63 #define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
69 lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock", in zram_slot_lock_init()
82 * 4) Use TRY lock variant when in atomic context
83 * - must check return value and handle locking failures
87 unsigned long *lock = &zram->table[index].flags; in zram_slot_trylock()
100 unsigned long *lock = &zram->table[index].flags; in zram_slot_lock()
109 unsigned long *lock = &zram->table[index].flags; in zram_slot_unlock()
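The three helpers above wrap a per-slot bit-spinlock kept in table[index].flags; the rule quoted at lines 82-83 requires atomic-context callers to use the TRY variant. A minimal sketch of that calling convention, modeled on the slot-free notification path that appears near line 2684 (the function name here is illustrative, not upstream's):

```c
/*
 * Sketch only: atomic context, so the slot lock may only be tried.
 * On contention we record a miss and bail out instead of spinning.
 */
static void example_slot_free_notify(struct zram *zram, u32 index)
{
	if (!zram_slot_trylock(zram, index)) {
		atomic64_inc(&zram->stats.miss_free);
		return;
	}
	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
}
```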
117 return zram->disksize; in init_done()
122 return (struct zram *)dev_to_disk(dev)->private_data; in dev_to_zram()
127 return zram->table[index].handle; in zram_get_handle()
132 zram->table[index].handle = handle; in zram_set_handle()
138 return zram->table[index].flags & BIT(flag); in zram_test_flag()
144 zram->table[index].flags |= BIT(flag); in zram_set_flag()
150 zram->table[index].flags &= ~BIT(flag); in zram_clear_flag()
155 return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); in zram_get_obj_size()
161 unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; in zram_set_obj_size()
163 zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; in zram_set_obj_size()
175 unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages); in update_used_max()
180 } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages, in update_used_max()
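Lines 175-180 are the lock-free running-maximum idiom: retry until either the stored maximum is already large enough or the compare-and-exchange succeeds. A sketch of the whole loop as it presumably reads in context:

```c
static inline void update_used_max(struct zram *zram, const unsigned long pages)
{
	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		if (cur_max >= pages)
			return;
		/*
		 * On failure, atomic_long_try_cmpxchg() refreshes cur_max
		 * with the current value, so the loop simply retries.
		 */
	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
					  &cur_max, pages));
}
```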
188 alloced_pages = zs_get_total_pages(zram->mem_pool); in zram_can_store_page()
191 return !zram->limit_pages || alloced_pages <= zram->limit_pages; in zram_can_store_page()
197 return bvec->bv_len != PAGE_SIZE; in is_partial_io()
214 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK << in zram_set_priority()
216 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1); in zram_set_priority()
221 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1; in zram_get_priority()
231 zram->table[index].ac_time = ktime_get_boottime(); in zram_accessed()
242 * A post-processing bucket is, essentially, a size class; it defines
243 * the range (in bytes) of pp-slot sizes in a particular bucket.
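In code terms, a slot's bucket is just its compressed object size divided by the bucket width, as line 311 below does. A sketch with hypothetical constants (upstream's actual values may differ):

```c
/* Hypothetical illustration of the size-class mapping. */
#define PP_BUCKET_SIZE_RANGE	64	/* assumed bucket width in bytes */

static inline int pp_bucket_for(size_t obj_size)
{
	/* 0..63 -> bucket 0, 64..127 -> bucket 1, and so on. */
	return obj_size / PP_BUCKET_SIZE_RANGE;
}
```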
262 INIT_LIST_HEAD(&ctl->pp_buckets[idx]); in init_pp_ctl()
268 list_del_init(&pps->entry); in release_pp_slot()
270 zram_slot_lock(zram, pps->index); in release_pp_slot()
271 zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT); in release_pp_slot()
272 zram_slot_unlock(zram, pps->index); in release_pp_slot()
285 while (!list_empty(&ctl->pp_buckets[idx])) { in release_pp_ctl()
288 pps = list_first_entry(&ctl->pp_buckets[idx], in release_pp_ctl()
308 INIT_LIST_HEAD(&pps->entry); in place_pp_slot()
309 pps->index = index; in place_pp_slot()
311 bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE; in place_pp_slot()
312 list_add(&pps->entry, &ctl->pp_buckets[bid]); in place_pp_slot()
314 zram_set_flag(zram, pps->index, ZRAM_PP_SLOT); in place_pp_slot()
321 s32 idx = NUM_PP_BUCKETS - 1; in select_pp_slot()
323 /* The higher the bucket id, the better the slot is for post-processing */ in select_pp_slot()
325 pps = list_first_entry_or_null(&ctl->pp_buckets[idx], in select_pp_slot()
331 idx--; in select_pp_slot()
348 unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; in page_same_filled()
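Line 348 is the setup for same-filled page detection: treat the page as an array of unsigned longs, check the last word first (pages often differ at the end), then scan the rest. A reconstruction of what the full helper plausibly looks like:

```c
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page = ptr;
	unsigned long val = page[0];
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	/* Cheap early reject: compare the first and last words. */
	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;	/* remember the fill pattern for the caller */
	return true;
}
```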
372 down_read(&zram->init_lock); in initstate_show()
374 up_read(&zram->init_lock); in initstate_show()
384 return sysfs_emit(buf, "%llu\n", zram->disksize); in disksize_show()
396 return -EINVAL; in mem_limit_store()
398 down_write(&zram->init_lock); in mem_limit_store()
399 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; in mem_limit_store()
400 up_write(&zram->init_lock); in mem_limit_store()
414 return -EINVAL; in mem_used_max_store()
416 down_read(&zram->init_lock); in mem_used_max_store()
418 atomic_long_set(&zram->stats.max_used_pages, in mem_used_max_store()
419 zs_get_total_pages(zram->mem_pool)); in mem_used_max_store()
421 up_read(&zram->init_lock); in mem_used_max_store()
433 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in mark_idle()
439 * post-processing (recompress, writeback) happens to the in mark_idle()
454 ktime_after(cutoff, zram->table[index].ac_time); in mark_idle()
469 ssize_t rv = -EINVAL; in idle_store()
485 down_read(&zram->init_lock); in idle_store()
497 up_read(&zram->init_lock); in idle_store()
531 ssize_t ret = -EINVAL; in writeback_limit_enable_store()
536 down_write(&zram->init_lock); in writeback_limit_enable_store()
537 zram->wb_limit_enable = val; in writeback_limit_enable_store()
538 up_write(&zram->init_lock); in writeback_limit_enable_store()
551 down_read(&zram->init_lock); in writeback_limit_enable_show()
552 val = zram->wb_limit_enable; in writeback_limit_enable_show()
553 up_read(&zram->init_lock); in writeback_limit_enable_show()
564 ssize_t ret = -EINVAL; in writeback_limit_store()
571 * a value that is not page-size aligned, it will cause value in writeback_limit_store()
573 * bd_wb_limit is set to 3, a single write-back operation will in writeback_limit_store()
574 * cause bd_wb_limit to become -1. Even more terrifying is that in writeback_limit_store()
579 down_write(&zram->init_lock); in writeback_limit_store()
580 zram->bd_wb_limit = val; in writeback_limit_store()
581 up_write(&zram->init_lock); in writeback_limit_store()
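The truncated comment above is about units: bd_wb_limit is accounted in 4K chunks, and each written page costs 1UL << (PAGE_SHIFT - 12) chunks (see zram_account_writeback_submit() further down). A worked example of the hazard, assuming an unsigned 64-bit counter (this is a sketch, not upstream code):

```c
static void wb_limit_wrap_example(void)
{
	/* One written page costs 1UL << (PAGE_SHIFT - 12) 4K-units:   */
	/*   4K pages: 1 unit; 16K pages: 4 units; 64K pages: 16 units */
	u64 bd_wb_limit = 3;	/* not a multiple of 4: bad on 16K pages */

	bd_wb_limit -= 1UL << (14 - 12);	/* 16K-page cost: 4 */
	/*
	 * bd_wb_limit is now 0xffffffffffffffff rather than -1, so the
	 * budget becomes effectively unlimited; storing page-size-aligned
	 * values avoids ever hitting this subtraction mid-page.
	 */
}
```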
593 down_read(&zram->init_lock); in writeback_limit_show()
594 val = zram->bd_wb_limit; in writeback_limit_show()
595 up_read(&zram->init_lock); in writeback_limit_show()
608 return -EINVAL; in writeback_batch_size_store()
611 return -EINVAL; in writeback_batch_size_store()
613 down_write(&zram->init_lock); in writeback_batch_size_store()
614 zram->wb_batch_size = val; in writeback_batch_size_store()
615 up_write(&zram->init_lock); in writeback_batch_size_store()
627 down_read(&zram->init_lock); in writeback_batch_size_show()
628 val = zram->wb_batch_size; in writeback_batch_size_show()
629 up_read(&zram->init_lock); in writeback_batch_size_show()
636 if (!zram->backing_dev) in reset_bdev()
640 filp_close(zram->backing_dev, NULL); in reset_bdev()
641 zram->backing_dev = NULL; in reset_bdev()
642 zram->bdev = NULL; in reset_bdev()
643 zram->disk->fops = &zram_devops; in reset_bdev()
644 kvfree(zram->bitmap); in reset_bdev()
645 zram->bitmap = NULL; in reset_bdev()
656 down_read(&zram->init_lock); in backing_dev_show()
657 file = zram->backing_dev; in backing_dev_show()
660 up_read(&zram->init_lock); in backing_dev_show()
664 p = file_path(file, buf, PAGE_SIZE - 1); in backing_dev_show()
674 up_read(&zram->init_lock); in backing_dev_show()
692 return -ENOMEM; in backing_dev_store()
694 down_write(&zram->init_lock); in backing_dev_store()
697 err = -EBUSY; in backing_dev_store()
704 if (sz > 0 && file_name[sz - 1] == '\n') in backing_dev_store()
705 file_name[sz - 1] = 0x00; in backing_dev_store()
714 inode = backing_dev->f_mapping->host; in backing_dev_store()
717 if (!S_ISBLK(inode->i_mode)) { in backing_dev_store()
718 err = -ENOTBLK; in backing_dev_store()
725 err = -EINVAL; in backing_dev_store()
732 err = -ENOMEM; in backing_dev_store()
738 zram->bdev = I_BDEV(inode); in backing_dev_store()
739 zram->backing_dev = backing_dev; in backing_dev_store()
740 zram->bitmap = bitmap; in backing_dev_store()
741 zram->nr_pages = nr_pages; in backing_dev_store()
742 up_write(&zram->init_lock); in backing_dev_store()
754 up_write(&zram->init_lock); in backing_dev_store()
765 blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, 0); in zram_reserve_bdev_block()
766 if (blk_idx == zram->nr_pages) in zram_reserve_bdev_block()
769 set_bit(blk_idx, zram->bitmap); in zram_reserve_bdev_block()
770 atomic64_inc(&zram->stats.bd_count); in zram_reserve_bdev_block()
778 was_set = test_and_clear_bit(blk_idx, zram->bitmap); in zram_release_bdev_block()
780 atomic64_dec(&zram->stats.bd_count); in zram_release_bdev_block()
788 bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO); in read_from_bdev_async()
789 bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); in read_from_bdev_async()
797 __free_page(req->page); in release_wb_req()
807 WARN_ON(atomic_read(&wb_ctl->num_inflight)); in release_wb_ctl()
808 WARN_ON(!list_empty(&wb_ctl->done_reqs)); in release_wb_ctl()
810 while (!list_empty(&wb_ctl->idle_reqs)) { in release_wb_ctl()
813 req = list_first_entry(&wb_ctl->idle_reqs, in release_wb_ctl()
815 list_del(&req->entry); in release_wb_ctl()
831 INIT_LIST_HEAD(&wb_ctl->idle_reqs); in init_wb_ctl()
832 INIT_LIST_HEAD(&wb_ctl->done_reqs); in init_wb_ctl()
833 atomic_set(&wb_ctl->num_inflight, 0); in init_wb_ctl()
834 init_waitqueue_head(&wb_ctl->done_wait); in init_wb_ctl()
835 spin_lock_init(&wb_ctl->done_lock); in init_wb_ctl()
837 for (i = 0; i < zram->wb_batch_size; i++) { in init_wb_ctl()
851 req->page = alloc_page(GFP_KERNEL | __GFP_NOWARN); in init_wb_ctl()
852 if (!req->page) { in init_wb_ctl()
857 list_add(&req->entry, &wb_ctl->idle_reqs); in init_wb_ctl()
861 if (list_empty(&wb_ctl->idle_reqs)) in init_wb_ctl()
873 lockdep_assert_held_read(&zram->init_lock); in zram_account_writeback_rollback()
875 if (zram->wb_limit_enable) in zram_account_writeback_rollback()
876 zram->bd_wb_limit += 1UL << (PAGE_SHIFT - 12); in zram_account_writeback_rollback()
881 lockdep_assert_held_read(&zram->init_lock); in zram_account_writeback_submit()
883 if (zram->wb_limit_enable && zram->bd_wb_limit > 0) in zram_account_writeback_submit()
884 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12); in zram_account_writeback_submit()
889 u32 index = req->pps->index; in zram_writeback_complete()
892 err = blk_status_to_errno(req->bio.bi_status); in zram_writeback_complete()
899 zram_release_bdev_block(zram, req->blk_idx); in zram_writeback_complete()
903 atomic64_inc(&zram->stats.bd_writes); in zram_writeback_complete()
906 * We release slot lock during writeback, so the slot can change under us: in zram_writeback_complete()
908 * slot loses ZRAM_PP_SLOT flag. No concurrent post-processing can in zram_writeback_complete()
909 * set ZRAM_PP_SLOT on such slots until current post-processing in zram_writeback_complete()
913 zram_release_bdev_block(zram, req->blk_idx); in zram_writeback_complete()
919 zram_set_handle(zram, index, req->blk_idx); in zram_writeback_complete()
920 atomic64_inc(&zram->stats.pages_stored); in zram_writeback_complete()
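The comment at lines 906-909 is the key invariant: the slot lock is dropped while the bio is in flight, so completion must re-check ZRAM_PP_SLOT under the lock before repointing the slot at the backing device. A condensed sketch of that pattern (a reconstruction, not the verbatim upstream body):

```c
static void complete_one_writeback(struct zram *zram, u32 index,
				   unsigned long blk_idx)
{
	zram_slot_lock(zram, index);
	if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
		/* The slot changed under us: give the bdev block back. */
		zram_slot_unlock(zram, index);
		zram_release_bdev_block(zram, blk_idx);
		return;
	}

	zram_free_page(zram, index);
	zram_set_flag(zram, index, ZRAM_WB);
	zram_set_handle(zram, index, blk_idx);
	zram_slot_unlock(zram, index);
	atomic64_inc(&zram->stats.pages_stored);
}
```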
930 struct zram_wb_ctl *wb_ctl = bio->bi_private; in zram_writeback_endio()
933 spin_lock_irqsave(&wb_ctl->done_lock, flags); in zram_writeback_endio()
934 list_add(&req->entry, &wb_ctl->done_reqs); in zram_writeback_endio()
935 spin_unlock_irqrestore(&wb_ctl->done_lock, flags); in zram_writeback_endio()
937 wake_up(&wb_ctl->done_wait); in zram_writeback_endio()
946 * so that we don't over-submit. in zram_submit_wb_request()
949 atomic_inc(&wb_ctl->num_inflight); in zram_submit_wb_request()
950 req->bio.bi_private = wb_ctl; in zram_submit_wb_request()
951 submit_bio(&req->bio); in zram_submit_wb_request()
961 while (atomic_read(&wb_ctl->num_inflight) > 0) { in zram_complete_done_reqs()
962 spin_lock_irqsave(&wb_ctl->done_lock, flags); in zram_complete_done_reqs()
963 req = list_first_entry_or_null(&wb_ctl->done_reqs, in zram_complete_done_reqs()
966 list_del(&req->entry); in zram_complete_done_reqs()
967 spin_unlock_irqrestore(&wb_ctl->done_lock, flags); in zram_complete_done_reqs()
969 /* ->num_inflight > 0 doesn't mean we have done requests */ in zram_complete_done_reqs()
977 atomic_dec(&wb_ctl->num_inflight); in zram_complete_done_reqs()
978 release_pp_slot(zram, req->pps); in zram_complete_done_reqs()
979 req->pps = NULL; in zram_complete_done_reqs()
981 list_add(&req->entry, &wb_ctl->idle_reqs); in zram_complete_done_reqs()
991 req = list_first_entry_or_null(&wb_ctl->idle_reqs, in zram_select_idle_req()
994 list_del(&req->entry); in zram_select_idle_req()
1009 if (zram->wb_limit_enable && !zram->bd_wb_limit) { in zram_writeback_slots()
1010 ret = -EIO; in zram_writeback_slots()
1019 wait_event(wb_ctl->done_wait, in zram_writeback_slots()
1020 !list_empty(&wb_ctl->done_reqs)); in zram_writeback_slots()
1026 * At the same time we need to signal user-space that in zram_writeback_slots()
1038 ret = -ENOSPC; in zram_writeback_slots()
1043 index = pps->index; in zram_writeback_slots()
1049 * post-process them. in zram_writeback_slots()
1053 if (zram_read_from_zspool(zram, req->page, index)) in zram_writeback_slots()
1058 * From now on pp-slot is owned by the req, remove it from in zram_writeback_slots()
1061 list_del_init(&pps->entry); in zram_writeback_slots()
1063 req->blk_idx = blk_idx; in zram_writeback_slots()
1064 req->pps = pps; in zram_writeback_slots()
1065 bio_init(&req->bio, zram->bdev, &req->bio_vec, 1, REQ_OP_WRITE); in zram_writeback_slots()
1066 req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9); in zram_writeback_slots()
1067 req->bio.bi_end_io = zram_writeback_endio; in zram_writeback_slots()
1068 __bio_add_page(&req->bio, req->page, PAGE_SIZE, 0); in zram_writeback_slots()
1088 while (atomic_read(&wb_ctl->num_inflight) > 0) { in zram_writeback_slots()
1089 wait_event(wb_ctl->done_wait, !list_empty(&wb_ctl->done_reqs)); in zram_writeback_slots()
1112 return -ERANGE; in parse_page_index()
1123 delim = strchr(val, '-'); in parse_page_indexes()
1125 return -EINVAL; in parse_page_indexes()
1132 return -ERANGE; in parse_page_indexes()
1138 return -ERANGE; in parse_page_indexes()
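Lines 1112-1138 parse single indexes and "lo-hi" ranges from the writeback attribute. A sketch of the range case (helper and variable names here are illustrative):

```c
/* Parse "lo-hi" in place; both ends must address existing pages. */
static int parse_range(char *val, unsigned long nr_pages,
		       unsigned long *lo, unsigned long *hi)
{
	char *delim = strchr(val, '-');

	if (!delim)
		return -EINVAL;
	*delim = '\0';
	if (kstrtoul(val, 10, lo) || kstrtoul(delim + 1, 10, hi))
		return -EINVAL;
	if (*lo > *hi || *hi >= nr_pages)
		return -ERANGE;
	return 0;
}
```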
1157 return -EINVAL; in parse_mode()
1204 u64 nr_pages = zram->disksize >> PAGE_SHIFT; in writeback_store()
1212 down_read(&zram->init_lock); in writeback_store()
1214 up_read(&zram->init_lock); in writeback_store()
1215 return -EINVAL; in writeback_store()
1218 /* Do not permit concurrent post-processing actions. */ in writeback_store()
1219 if (atomic_xchg(&zram->pp_in_progress, 1)) { in writeback_store()
1220 up_read(&zram->init_lock); in writeback_store()
1221 return -EAGAIN; in writeback_store()
1224 if (!zram->backing_dev) { in writeback_store()
1225 ret = -ENODEV; in writeback_store()
1231 ret = -ENOMEM; in writeback_store()
1237 ret = -ENOMEM; in writeback_store()
1310 atomic_set(&zram->pp_in_progress, 0); in writeback_store()
1311 up_read(&zram->init_lock); in writeback_store()
1330 bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ); in zram_sync_read()
1331 bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9); in zram_sync_read()
1332 __bio_add_page(&bio, zw->page, PAGE_SIZE, 0); in zram_sync_read()
1333 zw->error = submit_bio_wait(&bio); in zram_sync_read()
1337 * The block layer wants one ->submit_bio to be active at a time, so if we use
1361 atomic64_inc(&zram->stats.bd_reads); in read_from_bdev()
1364 return -EIO; in read_from_bdev()
1375 return -EIO; in read_from_bdev()
1402 struct zram *zram = file->private_data; in read_block_state()
1403 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in read_block_state()
1408 return -ENOMEM; in read_block_state()
1410 down_read(&zram->init_lock); in read_block_state()
1412 up_read(&zram->init_lock); in read_block_state()
1414 return -EINVAL; in read_block_state()
1424 ts = ktime_to_timespec64(zram->table[index].ac_time); in read_block_state()
1442 count -= copied; in read_block_state()
1448 up_read(&zram->init_lock); in read_block_state()
1450 written = -EFAULT; in read_block_state()
1467 zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name, in zram_debugfs_register()
1469 debugfs_create_file("block_state", 0400, zram->debugfs_dir, in zram_debugfs_register()
1475 debugfs_remove_recursive(zram->debugfs_dir); in zram_debugfs_unregister()
1487 if (zram->comp_algs[prio] != default_compressor) in comp_algorithm_set()
1488 kfree(zram->comp_algs[prio]); in comp_algorithm_set()
1490 zram->comp_algs[prio] = alg; in comp_algorithm_set()
1500 return -E2BIG; in __comp_algorithm_store()
1504 return -ENOMEM; in __comp_algorithm_store()
1507 if (sz > 0 && compressor[sz - 1] == '\n') in __comp_algorithm_store()
1508 compressor[sz - 1] = 0x00; in __comp_algorithm_store()
1512 return -EINVAL; in __comp_algorithm_store()
1515 down_write(&zram->init_lock); in __comp_algorithm_store()
1517 up_write(&zram->init_lock); in __comp_algorithm_store()
1520 return -EBUSY; in __comp_algorithm_store()
1524 up_write(&zram->init_lock); in __comp_algorithm_store()
1530 struct zcomp_params *params = &zram->params[prio]; in comp_params_reset()
1532 vfree(params->dict); in comp_params_reset()
1533 params->level = ZCOMP_PARAM_NOT_SET; in comp_params_reset()
1534 params->deflate.winbits = ZCOMP_PARAM_NOT_SET; in comp_params_reset()
1535 params->dict_sz = 0; in comp_params_reset()
1536 params->dict = NULL; in comp_params_reset()
1549 &zram->params[prio].dict, in comp_params_store()
1554 return -EINVAL; in comp_params_store()
1557 zram->params[prio].dict_sz = sz; in comp_params_store()
1558 zram->params[prio].level = level; in comp_params_store()
1559 zram->params[prio].deflate.winbits = deflate_params->winbits; in comp_params_store()
1581 return -EINVAL; in algorithm_params_store()
1619 prio = -EINVAL; in algorithm_params_store()
1621 if (!zram->comp_algs[p]) in algorithm_params_store()
1624 if (!strcmp(zram->comp_algs[p], algo)) { in algorithm_params_store()
1632 return -EINVAL; in algorithm_params_store()
1645 down_read(&zram->init_lock); in comp_algorithm_show()
1646 sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0); in comp_algorithm_show()
1647 up_read(&zram->init_lock); in comp_algorithm_show()
1672 down_read(&zram->init_lock); in recomp_algorithm_show()
1674 if (!zram->comp_algs[prio]) in recomp_algorithm_show()
1678 sz += zcomp_available_show(zram->comp_algs[prio], buf, sz); in recomp_algorithm_show()
1680 up_read(&zram->init_lock); in recomp_algorithm_show()
1700 return -EINVAL; in recomp_algorithm_store()
1716 return -EINVAL; in recomp_algorithm_store()
1719 return -EINVAL; in recomp_algorithm_store()
1731 down_read(&zram->init_lock); in compact_store()
1733 up_read(&zram->init_lock); in compact_store()
1734 return -EINVAL; in compact_store()
1737 zs_compact(zram->mem_pool); in compact_store()
1738 up_read(&zram->init_lock); in compact_store()
1749 down_read(&zram->init_lock); in io_stat_show()
1752 (u64)atomic64_read(&zram->stats.failed_reads), in io_stat_show()
1753 (u64)atomic64_read(&zram->stats.failed_writes), in io_stat_show()
1754 (u64)atomic64_read(&zram->stats.notify_free)); in io_stat_show()
1755 up_read(&zram->init_lock); in io_stat_show()
1771 down_read(&zram->init_lock); in mm_stat_show()
1773 mem_used = zs_get_total_pages(zram->mem_pool); in mm_stat_show()
1774 zs_pool_stats(zram->mem_pool, &pool_stats); in mm_stat_show()
1777 orig_size = atomic64_read(&zram->stats.pages_stored); in mm_stat_show()
1778 max_used = atomic_long_read(&zram->stats.max_used_pages); in mm_stat_show()
1783 (u64)atomic64_read(&zram->stats.compr_data_size), in mm_stat_show()
1785 zram->limit_pages << PAGE_SHIFT, in mm_stat_show()
1787 (u64)atomic64_read(&zram->stats.same_pages), in mm_stat_show()
1789 (u64)atomic64_read(&zram->stats.huge_pages), in mm_stat_show()
1790 (u64)atomic64_read(&zram->stats.huge_pages_since)); in mm_stat_show()
1791 up_read(&zram->init_lock); in mm_stat_show()
1797 #define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
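FOUR_K() is the reader-side counterpart of the writeback accounting seen earlier: bd_* counters are kept in native pages but reported in 4K units so the numbers are comparable across page sizes. For example:

```c
/*
 * FOUR_K(n) == n * (1 << (PAGE_SHIFT - 12)):
 *   4K pages:  FOUR_K(10) == 10
 *   16K pages: FOUR_K(10) == 40
 *   64K pages: FOUR_K(10) == 160
 */
```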
1804 down_read(&zram->init_lock); in bd_stat_show()
1807 FOUR_K((u64)atomic64_read(&zram->stats.bd_count)), in bd_stat_show()
1808 FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)), in bd_stat_show()
1809 FOUR_K((u64)atomic64_read(&zram->stats.bd_writes))); in bd_stat_show()
1810 up_read(&zram->init_lock); in bd_stat_show()
1823 down_read(&zram->init_lock); in debug_stat_show()
1827 (u64)atomic64_read(&zram->stats.miss_free)); in debug_stat_show()
1828 up_read(&zram->init_lock); in debug_stat_show()
1845 if (!zram->table) in zram_meta_free()
1852 zs_destroy_pool(zram->mem_pool); in zram_meta_free()
1853 vfree(zram->table); in zram_meta_free()
1854 zram->table = NULL; in zram_meta_free()
1862 zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); in zram_meta_alloc()
1863 if (!zram->table) in zram_meta_alloc()
1866 zram->mem_pool = zs_create_pool(zram->disk->disk_name); in zram_meta_alloc()
1867 if (!zram->mem_pool) { in zram_meta_alloc()
1868 vfree(zram->table); in zram_meta_alloc()
1869 zram->table = NULL; in zram_meta_alloc()
1874 huge_class_size = zs_huge_class_size(zram->mem_pool); in zram_meta_alloc()
1887 zram->table[index].ac_time = 0; in zram_free_page()
1897 atomic64_dec(&zram->stats.huge_pages); in zram_free_page()
1912 atomic64_dec(&zram->stats.same_pages); in zram_free_page()
1920 zs_free(zram->mem_pool, handle); in zram_free_page()
1923 &zram->stats.compr_data_size); in zram_free_page()
1925 atomic64_dec(&zram->stats.pages_stored); in zram_free_page()
1948 src = zs_obj_read_begin(zram->mem_pool, handle, NULL); in read_incompressible_page()
1952 zs_obj_read_end(zram->mem_pool, handle, src); in read_incompressible_page()
1969 zstrm = zcomp_stream_get(zram->comps[prio]); in read_compressed_page()
1970 src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy); in read_compressed_page()
1972 ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst); in read_compressed_page()
1974 zs_obj_read_end(zram->mem_pool, handle, src); in read_compressed_page()
2036 return -ENOMEM; in zram_bvec_read_partial()
2049 return zram_read_page(zram, bvec->bv_page, index, bio); in zram_bvec_read()
2061 atomic64_inc(&zram->stats.same_pages); in write_same_filled_page()
2062 atomic64_inc(&zram->stats.pages_stored); in write_same_filled_page()
2078 handle = zs_malloc(zram->mem_pool, PAGE_SIZE, in write_incompressible_page()
2085 zs_free(zram->mem_pool, handle); in write_incompressible_page()
2086 return -ENOMEM; in write_incompressible_page()
2090 zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE); in write_incompressible_page()
2100 atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size); in write_incompressible_page()
2101 atomic64_inc(&zram->stats.huge_pages); in write_incompressible_page()
2102 atomic64_inc(&zram->stats.huge_pages_since); in write_incompressible_page()
2103 atomic64_inc(&zram->stats.pages_stored); in write_incompressible_page()
2124 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); in zram_write_page()
2126 ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm, in zram_write_page()
2141 handle = zs_malloc(zram->mem_pool, comp_len, in zram_write_page()
2151 zs_free(zram->mem_pool, handle); in zram_write_page()
2152 return -ENOMEM; in zram_write_page()
2155 zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len); in zram_write_page()
2165 atomic64_inc(&zram->stats.pages_stored); in zram_write_page()
2166 atomic64_add(comp_len, &zram->stats.compr_data_size); in zram_write_page()
2181 return -ENOMEM; in zram_bvec_write_partial()
2197 return zram_write_page(zram, bvec->bv_page, index); in zram_bvec_write()
2207 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in scan_slots_for_recompress()
2252 u64 *num_recomp_pages, u32 threshold, u32 prio, in recompress_slot() argument
2267 return -EINVAL; in recompress_slot()
2273 if (comp_len_old < threshold) in recompress_slot()
2281 * We touched this entry so mark it as non-IDLE. This makes sure that in recompress_slot()
2283 * for a different post-processing type (e.g. writeback). in recompress_slot()
2287 class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old); in recompress_slot()
2303 if (!zram->comps[prio]) in recompress_slot()
2306 zstrm = zcomp_stream_get(zram->comps[prio]); in recompress_slot()
2308 ret = zcomp_compress(zram->comps[prio], zstrm, in recompress_slot()
2318 class_index_new = zs_lookup_class_index(zram->mem_pool, in recompress_slot()
2323 (threshold && comp_len_new >= threshold)) { in recompress_slot()
2336 * the page below the threshold, because we still spent resources in recompress_slot()
2340 *num_recomp_pages -= 1; in recompress_slot()
2348 * Secondary algorithms failed to re-compress the page in recompress_slot()
2351 * Mark the object incompressible if the max-priority in recompress_slot()
2352 * algorithm couldn't re-compress it. in recompress_slot()
2354 if (prio < zram->num_active_comps) in recompress_slot()
2361 * We are holding per-CPU stream mutex and entry lock so better in recompress_slot()
2366 * the original compressed data. But that would require us to modify in recompress_slot()
2370 handle_new = zs_malloc(zram->mem_pool, comp_len_new, in recompress_slot()
2378 zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new); in recompress_slot()
2386 atomic64_add(comp_len_new, &zram->stats.compr_data_size); in recompress_slot()
2387 atomic64_inc(&zram->stats.pages_stored); in recompress_slot()
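Scattered across the fragments above are the two acceptance tests for a recompressed object: it must land in a smaller zsmalloc size class (lines 2287 and 2318), and, when a threshold is given, the new length must fall below it (line 2323). Condensed into one hypothetical predicate:

```c
/* Paraphrase of the accept/reject checks, not the verbatim code. */
static bool recompression_helps(u32 threshold, unsigned int old_class,
				unsigned int new_class, unsigned int new_len)
{
	/* Must land in a smaller zsmalloc size class to save memory... */
	if (new_class >= old_class)
		return false;
	/* ...and, if a threshold was given, end up below it. */
	if (threshold && new_len >= threshold)
		return false;
	return true;
}
```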
2401 u32 mode = 0, threshold = 0; in recompress_store() local
2407 prio_max = zram->num_active_comps; in recompress_store()
2414 return -EINVAL; in recompress_store()
2437 if (!strcmp(param, "threshold")) { in recompress_store()
2439 * We will re-compress only idle objects equal or in recompress_store()
2442 ret = kstrtouint(val, 10, &threshold); in recompress_store()
2466 if (threshold >= huge_class_size) in recompress_store()
2467 return -EINVAL; in recompress_store()
2469 down_read(&zram->init_lock); in recompress_store()
2471 ret = -EINVAL; in recompress_store()
2475 /* Do not permit concurrent post-processing actions. */ in recompress_store()
2476 if (atomic_xchg(&zram->pp_in_progress, 1)) { in recompress_store()
2477 up_read(&zram->init_lock); in recompress_store()
2478 return -EAGAIN; in recompress_store()
2485 if (!zram->comp_algs[prio]) in recompress_store()
2488 if (!strcmp(zram->comp_algs[prio], algo)) { in recompress_store()
2496 ret = -EINVAL; in recompress_store()
2501 prio_max = min(prio_max, (u32)zram->num_active_comps); in recompress_store()
2503 ret = -EINVAL; in recompress_store()
2509 ret = -ENOMEM; in recompress_store()
2515 ret = -ENOMEM; in recompress_store()
2528 zram_slot_lock(zram, pps->index); in recompress_store()
2529 if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT)) in recompress_store()
2532 err = recompress_slot(zram, pps->index, page, in recompress_store()
2533 &num_recomp_pages, threshold, in recompress_store()
2536 zram_slot_unlock(zram, pps->index); in recompress_store()
2551 atomic_set(&zram->pp_in_progress, 0); in recompress_store()
2552 up_read(&zram->init_lock); in recompress_store()
2559 size_t n = bio->bi_iter.bi_size; in zram_bio_discard()
2560 u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; in zram_bio_discard()
2561 u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) << in zram_bio_discard()
2570 * and re-compressing and then re-storing it, this isn't reasonable in zram_bio_discard()
2575 if (n <= (PAGE_SIZE - offset)) in zram_bio_discard()
2578 n -= (PAGE_SIZE - offset); in zram_bio_discard()
2586 atomic64_inc(&zram->stats.notify_free); in zram_bio_discard()
2588 n -= PAGE_SIZE; in zram_bio_discard()
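Put together, the discard fragments above trim the unaligned head of the request and then free whole pages; partial pages are skipped because decompress-modify-recompress would defeat the purpose of discard. A condensed reconstruction (argument setup and endio trimmed):

```c
static void discard_sketch(struct zram *zram, size_t n, u32 index, u32 offset)
{
	if (offset) {
		/* A partial first page cannot be discarded; skip it. */
		if (n <= (PAGE_SIZE - offset))
			return;
		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
```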
2597 struct bvec_iter iter = bio->bi_iter; in zram_bio_read()
2601 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << in zram_bio_read()
2605 bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); in zram_bio_read()
2608 atomic64_inc(&zram->stats.failed_reads); in zram_bio_read()
2609 bio->bi_status = BLK_STS_IOERR; in zram_bio_read()
2628 struct bvec_iter iter = bio->bi_iter; in zram_bio_write()
2632 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << in zram_bio_write()
2636 bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); in zram_bio_write()
2639 atomic64_inc(&zram->stats.failed_writes); in zram_bio_write()
2640 bio->bi_status = BLK_STS_IOERR; in zram_bio_write()
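Both I/O loops above use the same sector-to-slot mapping: the sector picks the page index, the remainder becomes a byte offset, and each bio_vec is clamped so no segment crosses a page boundary. A sketch of the read side (bio_advance_iter_single() is the real block-layer helper; accounting and slot-access hooks trimmed):

```c
static void bio_read_sketch(struct zram *zram, struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;

	do {
		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
				SECTOR_SHIFT;
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		/* Clamp so no segment spans two zram slots (pages). */
		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
			atomic64_inc(&zram->stats.failed_reads);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
		bio_advance_iter_single(bio, &iter, bv.bv_len);
	} while (iter.bi_size);

	bio_endio(bio);
}
```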
2660 struct zram *zram = bio->bi_bdev->bd_disk->private_data; in zram_submit_bio()
2684 zram = bdev->bd_disk->private_data; in zram_slot_free_notify()
2686 atomic64_inc(&zram->stats.notify_free); in zram_slot_free_notify()
2688 atomic64_inc(&zram->stats.miss_free); in zram_slot_free_notify()
2710 struct zcomp *comp = zram->comps[prio]; in zram_destroy_comps()
2712 zram->comps[prio] = NULL; in zram_destroy_comps()
2716 zram->num_active_comps--; in zram_destroy_comps()
2721 if (zram->comp_algs[prio] != default_compressor) in zram_destroy_comps()
2722 kfree(zram->comp_algs[prio]); in zram_destroy_comps()
2723 zram->comp_algs[prio] = NULL; in zram_destroy_comps()
2731 down_write(&zram->init_lock); in zram_reset_device()
2733 zram->limit_pages = 0; in zram_reset_device()
2735 set_capacity_and_notify(zram->disk, 0); in zram_reset_device()
2736 part_stat_set_all(zram->disk->part0, 0); in zram_reset_device()
2739 zram_meta_free(zram, zram->disksize); in zram_reset_device()
2740 zram->disksize = 0; in zram_reset_device()
2742 memset(&zram->stats, 0, sizeof(zram->stats)); in zram_reset_device()
2743 atomic_set(&zram->pp_in_progress, 0); in zram_reset_device()
2747 up_write(&zram->init_lock); in zram_reset_device()
2761 return -EINVAL; in disksize_store()
2763 down_write(&zram->init_lock); in disksize_store()
2766 err = -EBUSY; in disksize_store()
2772 err = -ENOMEM; in disksize_store()
2777 if (!zram->comp_algs[prio]) in disksize_store()
2780 comp = zcomp_create(zram->comp_algs[prio], in disksize_store()
2781 &zram->params[prio]); in disksize_store()
2784 zram->comp_algs[prio]); in disksize_store()
2789 zram->comps[prio] = comp; in disksize_store()
2790 zram->num_active_comps++; in disksize_store()
2792 zram->disksize = disksize; in disksize_store()
2793 set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT); in disksize_store()
2794 up_write(&zram->init_lock); in disksize_store()
2802 up_write(&zram->init_lock); in disksize_store()
2819 return -EINVAL; in reset_store()
2822 disk = zram->disk; in reset_store()
2824 mutex_lock(&disk->open_mutex); in reset_store()
2826 if (disk_openers(disk) || zram->claim) { in reset_store()
2827 mutex_unlock(&disk->open_mutex); in reset_store()
2828 return -EBUSY; in reset_store()
2831 /* From now on, no one can open /dev/zram[0-9] */ in reset_store()
2832 zram->claim = true; in reset_store()
2833 mutex_unlock(&disk->open_mutex); in reset_store()
2836 sync_blockdev(disk->part0); in reset_store()
2839 mutex_lock(&disk->open_mutex); in reset_store()
2840 zram->claim = false; in reset_store()
2841 mutex_unlock(&disk->open_mutex); in reset_store()
2848 struct zram *zram = disk->private_data; in zram_open()
2850 WARN_ON(!mutex_is_locked(&disk->open_mutex)); in zram_open()
2853 if (zram->claim) in zram_open()
2854 return -EBUSY; in zram_open()
2953 return -ENOMEM; in zram_add()
2960 init_rwsem(&zram->init_lock); in zram_add()
2962 zram->wb_batch_size = 32; in zram_add()
2966 zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE); in zram_add()
2967 if (IS_ERR(zram->disk)) { in zram_add()
2970 ret = PTR_ERR(zram->disk); in zram_add()
2974 zram->disk->major = zram_major; in zram_add()
2975 zram->disk->first_minor = device_id; in zram_add()
2976 zram->disk->minors = 1; in zram_add()
2977 zram->disk->flags |= GENHD_FL_NO_PART; in zram_add()
2978 zram->disk->fops = &zram_devops; in zram_add()
2979 zram->disk->private_data = zram; in zram_add()
2980 snprintf(zram->disk->disk_name, 16, "zram%d", device_id); in zram_add()
2981 atomic_set(&zram->pp_in_progress, 0); in zram_add()
2986 set_capacity(zram->disk, 0); in zram_add()
2987 ret = device_add_disk(NULL, zram->disk, zram_disk_groups); in zram_add()
2992 pr_info("Added device: %s\n", zram->disk->disk_name); in zram_add()
2996 put_disk(zram->disk); in zram_add()
3008 mutex_lock(&zram->disk->open_mutex); in zram_remove()
3009 if (disk_openers(zram->disk)) { in zram_remove()
3010 mutex_unlock(&zram->disk->open_mutex); in zram_remove()
3011 return -EBUSY; in zram_remove()
3014 claimed = zram->claim; in zram_remove()
3016 zram->claim = true; in zram_remove()
3017 mutex_unlock(&zram->disk->open_mutex); in zram_remove()
3029 sync_blockdev(zram->disk->part0); in zram_remove()
3033 pr_info("Removed device: %s\n", zram->disk->disk_name); in zram_remove()
3035 del_gendisk(zram->disk); in zram_remove()
3038 WARN_ON_ONCE(claimed && zram->claim); in zram_remove()
3047 put_disk(zram->disk); in zram_remove()
3052 /* zram-control sysfs attributes */
3055 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in the
3056 * sense that reading from this file does alter the state of your system -- it
3057 * creates a new un-initialized zram device and returns this device's
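For completeness, a minimal userspace sketch of what that means in practice: reading the attribute both creates a device and yields its id (the path is the standard zram-control location; error handling trimmed):

```c
#include <stdio.h>

int main(void)
{
	int id = -1;
	FILE *f = fopen("/sys/class/zram-control/hot_add", "r");

	if (!f || fscanf(f, "%d", &id) != 1)
		return 1;
	fclose(f);
	printf("created /dev/zram%d\n", id);	/* e.g. /dev/zram1 */
	return 0;
}
```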
3086 /* dev_id is gendisk->first_minor, which is `int' */ in hot_remove_store()
3091 return -EINVAL; in hot_remove_store()
3101 ret = -ENODEV; in hot_remove_store()
3117 .name = "zram-control",
3151 pr_err("Unable to register zram-control class\n"); in zram_init()
3162 return -EBUSY; in zram_init()
3171 num_devices--; in zram_init()
3190 MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");