zram_drv.c: diff from 5561347aa598b6b12fb6069788ccec9b5e5ebec1 (old) to 84b33bf7888975d28c0e57011b75c445279c60ec (new)
1/*
2 * Compressed RAM block device
3 *
4 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
5 * 2012, 2013 Minchan Kim
6 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the licence that better fits your requirements.

--- 141 unchanged lines hidden ---

150}
151#else
152static inline bool is_partial_io(struct bio_vec *bvec)
153{
154 return false;
155}
156#endif
157
158static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
159{
160 prio &= ZRAM_COMP_PRIORITY_MASK;
161	/*
162	 * Clear the previous priority value first, in case we are
163	 * recompressing an already recompressed page.
164	 */
165 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
166 ZRAM_COMP_PRIORITY_BIT1);
167 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
168}
169
170static inline u32 zram_get_priority(struct zram *zram, u32 index)
171{
172 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
173
174 return prio & ZRAM_COMP_PRIORITY_MASK;
175}
176
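The two helpers above pack each slot's compression priority into spare bits of zram->table[index].flags. Below is a minimal userspace sketch of that round trip; the concrete values of ZRAM_COMP_PRIORITY_BIT1 and ZRAM_COMP_PRIORITY_MASK are assumptions for illustration (in the driver they come from the zram_pageflags layout):

#include <assert.h>
#include <stdio.h>

#define ZRAM_COMP_PRIORITY_BIT1	28	/* assumed bit position */
#define ZRAM_COMP_PRIORITY_MASK	0x3UL	/* two bits: up to four algorithms */

static unsigned long set_priority(unsigned long flags, unsigned int prio)
{
	prio &= ZRAM_COMP_PRIORITY_MASK;
	/* clear the old field before OR-ing in the new priority */
	flags &= ~(ZRAM_COMP_PRIORITY_MASK << ZRAM_COMP_PRIORITY_BIT1);
	return flags | ((unsigned long)prio << ZRAM_COMP_PRIORITY_BIT1);
}

static unsigned int get_priority(unsigned long flags)
{
	return (flags >> ZRAM_COMP_PRIORITY_BIT1) & ZRAM_COMP_PRIORITY_MASK;
}

int main(void)
{
	unsigned long flags = 0;

	flags = set_priority(flags, 2);	/* recompressed with algorithm #2 */
	assert(get_priority(flags) == 2);
	flags = set_priority(flags, 1);	/* old field must not leak through */
	assert(get_priority(flags) == 1);
	printf("priority round trip OK\n");
	return 0;
}
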
177/*
178 * Check if request is within bounds and aligned on zram logical blocks.
179 */
180static inline bool valid_io_request(struct zram *zram,
181 sector_t start, unsigned int size)
182{
183 u64 end, bound;
184

--- 1133 unchanged lines hidden ---

1318 if (zram_test_flag(zram, index, ZRAM_IDLE))
1319 zram_clear_flag(zram, index, ZRAM_IDLE);
1320
1321 if (zram_test_flag(zram, index, ZRAM_HUGE)) {
1322 zram_clear_flag(zram, index, ZRAM_HUGE);
1323 atomic64_dec(&zram->stats.huge_pages);
1324 }
1325
1326 if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1327 zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1328
1329 zram_set_priority(zram, index, 0);
1330
1331 if (zram_test_flag(zram, index, ZRAM_WB)) {
1332 zram_clear_flag(zram, index, ZRAM_WB);
1333 free_block_bdev(zram, zram_get_element(zram, index));
1334 goto out;
1335 }
1336
1337 /*
1338	 * No memory is allocated for same-element-filled pages.

--- 44 unchanged lines hidden ---

1383 */
1384static int zram_read_from_zspool(struct zram *zram, struct page *page,
1385 u32 index)
1386{
1387 struct zcomp_strm *zstrm;
1388 unsigned long handle;
1389 unsigned int size;
1390 void *src, *dst;
1391 u32 prio;
1392 int ret;
1393
1394 handle = zram_get_handle(zram, index);
1395 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1396 unsigned long value;
1397 void *mem;
1398
1399 value = handle ? zram_get_element(zram, index) : 0;
1400 mem = kmap_atomic(page);
1401 zram_fill_page(mem, PAGE_SIZE, value);
1402 kunmap_atomic(mem);
1403 return 0;
1404 }
1405
1406 size = zram_get_obj_size(zram, index);
1407
1383 if (size != PAGE_SIZE)
1384 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1408 if (size != PAGE_SIZE) {
1409 prio = zram_get_priority(zram, index);
1410 zstrm = zcomp_stream_get(zram->comps[prio]);
1411 }
1412
1413 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1414 if (size == PAGE_SIZE) {
1415 dst = kmap_atomic(page);
1416 memcpy(dst, src, PAGE_SIZE);
1417 kunmap_atomic(dst);
1418 ret = 0;
1419 } else {
1420 dst = kmap_atomic(page);
1421 ret = zcomp_decompress(zstrm, src, size, dst);
1422 kunmap_atomic(dst);
1396 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1423 zcomp_stream_put(zram->comps[prio]);
1424 }
1425 zs_unmap_object(zram->mem_pool, handle);
1426 return ret;
1427}
1428
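Two details of zram_read_from_zspool() above are worth spelling out. First, slots with no handle or with ZRAM_SAME set carry no zsmalloc object at all: the page content is a single repeating unsigned long (0 when there is no handle), regenerated by zram_fill_page(). Second, with multiple compressors a slot must be decompressed by the same backend that compressed it, which is why the stream is now fetched from zram->comps[prio] using the slot's stored priority rather than always from ZRAM_PRIMARY_COMP. A plausible sketch of the fill helper (the kernel's version is equivalent to a word-wide memset):

#include <stddef.h>

/* Sketch of zram_fill_page(): rebuild a page whose content is one
 * repeating machine word; len is PAGE_SIZE and is word-aligned. */
static void fill_page(void *mem, size_t len, unsigned long value)
{
	unsigned long *p = mem;
	size_t i;

	for (i = 0; i < len / sizeof(*p); i++)
		p[i] = value;
}
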
1429static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
1430 struct bio *bio, bool partial_io)
1431{

--- 214 unchanged lines hidden ---

1646
1647 ret = __zram_bvec_write(zram, &vec, index, bio);
1648out:
1649 if (is_partial_io(bvec))
1650 __free_page(page);
1651 return ret;
1652}
1653
1654#ifdef CONFIG_ZRAM_MULTI_COMP
1655/*
1656 * This function will decompress the page (unless it is ZRAM_HUGE) and
1657 * then attempt to recompress it with the compression algorithm of the
1658 * provided priority, which is potentially more effective.
1659 *
1660 * The corresponding ZRAM slot must be locked.
1661 */
1662static int zram_recompress(struct zram *zram, u32 index, struct page *page,
1663 u32 threshold, u32 prio, u32 prio_max)
1664{
1665 struct zcomp_strm *zstrm = NULL;
1666 unsigned long handle_old;
1667 unsigned long handle_new;
1668 unsigned int comp_len_old;
1669 unsigned int comp_len_new;
1670 void *src, *dst;
1671 int ret;
1672
1673 handle_old = zram_get_handle(zram, index);
1674 if (!handle_old)
1675 return -EINVAL;
1676
1677 comp_len_old = zram_get_obj_size(zram, index);
1678 /*
1679 * Do not recompress objects that are already "small enough".
1680 */
1681 if (comp_len_old < threshold)
1682 return 0;
1683
1684 ret = zram_read_from_zspool(zram, page, index);
1685 if (ret)
1686 return ret;
1687
1688 /*
1689 * Iterate the secondary comp algorithms list (in order of priority)
1690 * and try to recompress the page.
1691 */
1692 for (; prio < prio_max; prio++) {
1693 if (!zram->comps[prio])
1694 continue;
1695
1696 /*
1697 * Skip if the object is already re-compressed with a higher
1698 * priority algorithm (or same algorithm).
1699 */
1700 if (prio <= zram_get_priority(zram, index))
1701 continue;
1702
1703 zstrm = zcomp_stream_get(zram->comps[prio]);
1704 src = kmap_atomic(page);
1705 ret = zcomp_compress(zstrm, src, &comp_len_new);
1706 kunmap_atomic(src);
1707
1708 if (ret) {
1709 zcomp_stream_put(zram->comps[prio]);
1710 return ret;
1711 }
1712
1713 /* Continue until we make progress */
1714 if (comp_len_new >= huge_class_size ||
1715 comp_len_new >= comp_len_old ||
1716 (threshold && comp_len_new >= threshold)) {
1717 zcomp_stream_put(zram->comps[prio]);
1718 continue;
1719 }
1720
1721 /* Recompression was successful so break out */
1722 break;
1723 }
1724
1725 /*
1726 * We did not try to recompress, e.g. when we have only one
1727 * secondary algorithm and the page is already recompressed
1728	 * using that algorithm.
1729 */
1730 if (!zstrm)
1731 return 0;
1732
1733 /*
1734	 * All secondary algorithms failed to re-compress the page in a way
1735	 * that would save memory, so mark the object as incompressible and
1736	 * do not try to compress it again.
1737 */
1738 if (comp_len_new >= huge_class_size || comp_len_new >= comp_len_old) {
1739 zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1740 return 0;
1741 }
1742
1743 /* Successful recompression but above threshold */
1744 if (threshold && comp_len_new >= threshold)
1745 return 0;
1746
1747 /*
1748 * No direct reclaim (slow path) for handle allocation and no
1749 * re-compression attempt (unlike in __zram_bvec_write()) since
1750 * we already have stored that object in zsmalloc. If we cannot
1751	 * allocate memory for the recompressed object, we bail out and
1752 * simply keep the old (existing) object in zsmalloc.
1753 */
1754 handle_new = zs_malloc(zram->mem_pool, comp_len_new,
1755 __GFP_KSWAPD_RECLAIM |
1756 __GFP_NOWARN |
1757 __GFP_HIGHMEM |
1758 __GFP_MOVABLE);
1759 if (IS_ERR_VALUE(handle_new)) {
1760 zcomp_stream_put(zram->comps[prio]);
1761 return PTR_ERR((void *)handle_new);
1762 }
1763
1764 dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
1765 memcpy(dst, zstrm->buffer, comp_len_new);
1766 zcomp_stream_put(zram->comps[prio]);
1767
1768 zs_unmap_object(zram->mem_pool, handle_new);
1769
1770 zram_free_page(zram, index);
1771 zram_set_handle(zram, index, handle_new);
1772 zram_set_obj_size(zram, index, comp_len_new);
1773 zram_set_priority(zram, index, prio);
1774
1775 atomic64_add(comp_len_new, &zram->stats.compr_data_size);
1776 atomic64_inc(&zram->stats.pages_stored);
1777
1778 return 0;
1779}
1780
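The acceptance test inside the loop above is easy to misread because it is written as a rejection ("continue until we make progress"). Restated as a standalone predicate (a sketch, not driver code; huge_class_size is the zsmalloc boundary at or above which zram stores the page uncompressed and flags it ZRAM_HUGE):

#include <stdbool.h>

/* A recompressed copy is kept only if it is no longer huge, is strictly
 * smaller than the old copy, and (when a threshold is set) falls below
 * that threshold. */
static bool recompress_ok(unsigned int comp_len_new,
			  unsigned int comp_len_old,
			  unsigned int threshold,
			  unsigned int huge_class_size)
{
	if (comp_len_new >= huge_class_size)
		return false;	/* would still be stored as a full page */
	if (comp_len_new >= comp_len_old)
		return false;	/* no memory saved */
	if (threshold && comp_len_new >= threshold)
		return false;	/* saved memory, but not enough */
	return true;
}

Note that only the first two failure modes mark the slot ZRAM_INCOMPRESSIBLE after the loop; a copy that saves memory but merely misses the threshold is dropped without setting the flag, so it may still be recompressed later with a different threshold.
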
1781#define RECOMPRESS_IDLE (1 << 0)
1782#define RECOMPRESS_HUGE (1 << 1)
1783
1784static ssize_t recompress_store(struct device *dev,
1785 struct device_attribute *attr,
1786 const char *buf, size_t len)
1787{
1788 struct zram *zram = dev_to_zram(dev);
1789 u32 mode = 0, threshold = 0, prio = ZRAM_SECONDARY_COMP;
1790 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
1791 char *args, *param, *val;
1792 unsigned long index;
1793 struct page *page;
1794 ssize_t ret;
1795
1796 args = skip_spaces(buf);
1797 while (*args) {
1798 args = next_arg(args, &param, &val);
1799
1800 if (!*val)
1801 return -EINVAL;
1802
1803 if (!strcmp(param, "type")) {
1804 if (!strcmp(val, "idle"))
1805 mode = RECOMPRESS_IDLE;
1806 if (!strcmp(val, "huge"))
1807 mode = RECOMPRESS_HUGE;
1808 if (!strcmp(val, "huge_idle"))
1809 mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE;
1810 continue;
1811 }
1812
1813 if (!strcmp(param, "threshold")) {
1814 /*
1815			 * We will re-compress only objects equal to or
1816			 * greater in size than the watermark.
1817 */
1818 ret = kstrtouint(val, 10, &threshold);
1819 if (ret)
1820 return ret;
1821 continue;
1822 }
1823 }
1824
1825 if (threshold >= PAGE_SIZE)
1826 return -EINVAL;
1827
1828 down_read(&zram->init_lock);
1829 if (!init_done(zram)) {
1830 ret = -EINVAL;
1831 goto release_init_lock;
1832 }
1833
1834 page = alloc_page(GFP_KERNEL);
1835 if (!page) {
1836 ret = -ENOMEM;
1837 goto release_init_lock;
1838 }
1839
1840 ret = len;
1841 for (index = 0; index < nr_pages; index++) {
1842 int err = 0;
1843
1844 zram_slot_lock(zram, index);
1845
1846 if (!zram_allocated(zram, index))
1847 goto next;
1848
1849 if (mode & RECOMPRESS_IDLE &&
1850 !zram_test_flag(zram, index, ZRAM_IDLE))
1851 goto next;
1852
1853 if (mode & RECOMPRESS_HUGE &&
1854 !zram_test_flag(zram, index, ZRAM_HUGE))
1855 goto next;
1856
1857 if (zram_test_flag(zram, index, ZRAM_WB) ||
1858 zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
1859 zram_test_flag(zram, index, ZRAM_SAME) ||
1860 zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1861 goto next;
1862
1863 err = zram_recompress(zram, index, page, threshold,
1864 prio, ZRAM_MAX_COMPS);
1865next:
1866 zram_slot_unlock(zram, index);
1867 if (err) {
1868 ret = err;
1869 break;
1870 }
1871
1872 cond_resched();
1873 }
1874
1875 __free_page(page);
1876
1877release_init_lock:
1878 up_read(&zram->init_lock);
1879 return ret;
1880}
1881#endif
1882
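Since recompress_store() is exposed as the write-only recompress attribute (wired up below), the whole feature is driven by writing space-separated param=value tokens to sysfs. A minimal userspace sketch, assuming a configured zram0 device with secondary algorithms already selected via recomp_algorithm; the token names come from the parser above, the values are illustrative:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/zram0/recompress", "w");

	if (!f) {
		perror("recompress");
		return 1;
	}
	/* Recompress huge idle pages, skipping objects whose current
	 * compressed size is under 500 bytes (threshold must be less
	 * than PAGE_SIZE). */
	fprintf(f, "type=huge_idle threshold=500");
	return fclose(f) ? 1 : 0;
}

The equivalent shell one-liner would be echo "type=huge_idle threshold=500" > /sys/block/zram0/recompress.
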
1883/*
1884 * zram_bio_discard - handler on discard request
1885 * @index: physical block index in PAGE_SIZE units
1886 * @offset: byte offset within physical block
1887 */
1888static void zram_bio_discard(struct zram *zram, u32 index,
1889 int offset, struct bio *bio)
1890{
1891 size_t n = bio->bi_iter.bi_size;

--- 362 unchanged lines hidden ---

2254#ifdef CONFIG_ZRAM_WRITEBACK
2255static DEVICE_ATTR_RW(backing_dev);
2256static DEVICE_ATTR_WO(writeback);
2257static DEVICE_ATTR_RW(writeback_limit);
2258static DEVICE_ATTR_RW(writeback_limit_enable);
2259#endif
2260#ifdef CONFIG_ZRAM_MULTI_COMP
2261static DEVICE_ATTR_RW(recomp_algorithm);
2262static DEVICE_ATTR_WO(recompress);
2263#endif
2264
2265static struct attribute *zram_disk_attrs[] = {
2266 &dev_attr_disksize.attr,
2267 &dev_attr_initstate.attr,
2268 &dev_attr_reset.attr,
2269 &dev_attr_compact.attr,
2270 &dev_attr_mem_limit.attr,

--- 10 unchanged lines hidden ---

2281 &dev_attr_io_stat.attr,
2282 &dev_attr_mm_stat.attr,
2283#ifdef CONFIG_ZRAM_WRITEBACK
2284 &dev_attr_bd_stat.attr,
2285#endif
2286 &dev_attr_debug_stat.attr,
2287#ifdef CONFIG_ZRAM_MULTI_COMP
2288 &dev_attr_recomp_algorithm.attr,
2289 &dev_attr_recompress.attr,
2290#endif
2291 NULL,
2292};
2293
2294ATTRIBUTE_GROUPS(zram_disk);
2295
2296/*
2297 * Allocate and initialize a new zram device. The function returns

--- 277 unchanged lines hidden ---