1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Squashfs - a compressed read only filesystem for Linux
14 * compressed fragment block (tail-end packed block). The compressed size
15 * of each datablock is stored in a block list contained within the
19 * larger), the code implements an index cache that caches the mapping from
20 * block index to datablock location on disk.
22 * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
23 * retaining a simple and space-efficient block list on disk. The cache
26 * The index cache is designed to be memory efficient, and by default uses
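The 1.75 TiB figure follows from the cache geometry. Below is a minimal userspace sketch of the arithmetic, not kernel code, assuming the values these macros have in squashfs_fs.h: 8 slots (SQUASHFS_META_SLOTS), 128 indexable positions per slot (SQUASHFS_META_ENTRIES + 1), 2048 block-list entries per 8 KiB metadata block (SQUASHFS_META_INDEXES), a skip factor capped at SQUASHFS_CACHED_BLKS - 1 = 7, and the default 128 KiB block size.

#include <stdio.h>

int main(void)
{
	const unsigned long long block_size   = 128ULL << 10;	/* default datablock size */
	const unsigned long long meta_indexes = 8192 / 4;	/* block-list entries per 8 KiB metadata block */
	const unsigned long long positions    = 127 + 1;	/* SQUASHFS_META_ENTRIES + 1 per slot */
	const unsigned long long max_skip     = 8 - 1;		/* SQUASHFS_CACHED_BLKS - 1 */
	const unsigned long long slots        = 8;		/* SQUASHFS_META_SLOTS */

	/* datablocks indexable from one slot at the maximum skip factor */
	unsigned long long blocks_per_slot = positions * max_skip * meta_indexes;

	printf("one slot spans   %llu GiB\n", blocks_per_slot * block_size >> 30);		/* 224 */
	printf("eight slots span %llu GiB\n", slots * blocks_per_slot * block_size >> 30);	/* 1792 = 1.75 TiB */

	/* memory cost: eight slots of roughly 128 16-byte entries is about 16 KiB */
	printf("cache footprint  ~%llu KiB\n", slots * positions * 16 >> 10);
	return 0;
}

For comparison, the raw block list of a 1.75 TiB file holds about 14.7 million 4-byte length entries, roughly 56 MiB, which is why only a sparse index over it is kept in memory.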
45 * Locate cache slot in range [offset, index] for specified inode. If
52 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
55 mutex_lock(&msblk->meta_index_mutex);
59 if (msblk->meta_index == NULL)
63 if (msblk->meta_index[i].inode_number == inode->i_ino &&
64 msblk->meta_index[i].offset >= offset &&
65 msblk->meta_index[i].offset <= index &&
66 msblk->meta_index[i].locked == 0) {
68 msblk->meta_index[i].offset);
69 meta = &msblk->meta_index[i];
70 offset = meta->offset;
75 meta->locked = 1;
78 mutex_unlock(&msblk->meta_index_mutex);
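The lookup is a linear scan over a small fixed array of slots, done under meta_index_mutex. Here is a hedged userspace model of the selection rule; the struct and field names are simplified stand-ins for the kernel's meta_index slots, and SLOTS = 8 mirrors the assumed SQUASHFS_META_SLOTS value.

#include <stddef.h>
#include <stdio.h>

#define SLOTS 8

struct slot {
	unsigned long inode;	/* owning inode number, 0 = unused */
	int offset;		/* first cache index held by the slot */
	int entries;		/* valid entries in the slot */
	int locked;		/* slot currently pinned by a reader */
};

/*
 * Model of the rule above: pick an unlocked slot belonging to this inode
 * whose starting offset lies in [offset, index], i.e. a slot that can be
 * reused (and possibly grown) on the way to `index`, then pin it.
 */
static struct slot *locate(struct slot *s, unsigned long inode, int offset, int index)
{
	for (int i = 0; i < SLOTS; i++)
		if (s[i].inode == inode && s[i].offset >= offset &&
		    s[i].offset <= index && !s[i].locked) {
			s[i].locked = 1;	/* the kernel does this under the mutex */
			return &s[i];
		}
	return NULL;
}

int main(void)
{
	struct slot cache[SLOTS] = { { .inode = 42, .offset = 1, .entries = 64 } };

	printf("%s\n", locate(cache, 42, 1, 100) ? "hit" : "miss");	/* hit */
	printf("%s\n", locate(cache, 42, 1, 100) ? "hit" : "miss");	/* miss: slot now locked */
	return 0;
}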
85 * Find and initialise an empty cache slot for index offset.
90 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
94 mutex_lock(&msblk->meta_index_mutex);
98 if (msblk->meta_index == NULL) {
100 * First time cache index has been used, allocate and
101 * initialise. The cache index could be allocated at
105 msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
106 sizeof(*(msblk->meta_index)), GFP_KERNEL);
107 if (msblk->meta_index == NULL) {
112 msblk->meta_index[i].inode_number = 0;
113 msblk->meta_index[i].locked = 0;
115 msblk->next_meta_index = 0;
119 msblk->meta_index[msblk->next_meta_index].locked; i--)
120 msblk->next_meta_index = (msblk->next_meta_index + 1) %
129 msblk->next_meta_index,
130 &msblk->meta_index[msblk->next_meta_index]);
132 meta = &msblk->meta_index[msblk->next_meta_index];
133 msblk->next_meta_index = (msblk->next_meta_index + 1) %
136 meta->inode_number = inode->i_ino;
137 meta->offset = offset;
138 meta->skip = skip;
139 meta->entries = 0;
140 meta->locked = 1;
143 mutex_unlock(&msblk->meta_index_mutex);
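Replacement is round-robin over the unlocked slots, so one inode cannot permanently monopolise the cache and pinned slots are skipped. A small sketch of that policy using the same simplified stand-in types (again illustrative, not the kernel's code):

#include <stdio.h>
#include <stddef.h>

#define SLOTS 8

struct slot {
	unsigned long inode;
	int offset, skip, entries, locked;
};

static int next_slot;	/* models msblk->next_meta_index */

/*
 * Advance next_slot past locked slots (at most SLOTS probes), then claim and
 * reinitialise the victim for the new <inode, offset, skip>.  Returns NULL
 * when every slot is pinned, mirroring empty_meta_index() giving up.
 */
static struct slot *claim(struct slot *s, unsigned long inode, int offset, int skip)
{
	int tries;

	for (tries = SLOTS; tries && s[next_slot].locked; tries--)
		next_slot = (next_slot + 1) % SLOTS;
	if (tries == 0)
		return NULL;

	struct slot *victim = &s[next_slot];
	next_slot = (next_slot + 1) % SLOTS;

	victim->inode = inode;
	victim->offset = offset;
	victim->skip = skip;
	victim->entries = 0;	/* grown later as the block list is read */
	victim->locked = 1;
	return victim;
}

int main(void)
{
	struct slot cache[SLOTS] = { { 0 } };

	claim(cache, 42, 1, 7);				/* takes slot 0 and pins it */
	claim(cache, 43, 1, 1);				/* round-robin moves on to slot 1 */
	printf("next victim: slot %d\n", next_slot);	/* 2 */
	return 0;
}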
150 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
151 mutex_lock(&msblk->meta_index_mutex);
152 meta->locked = 0;
153 mutex_unlock(&msblk->meta_index_mutex);
158 * Read the next n blocks from the block list, starting from
159 * metadata block <start_block, offset>.
165 long long block = 0;
170 return -ENOMEM;
179 ERROR("read_indexes: reading block [%llx:%x]\n",
185 int size = squashfs_block_size(blist[i]);
186 if (size < 0) {
187 err = size;
190 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
192 n -= blocks;
196 return block;
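read_indexes() walks raw block-list entries and accumulates their on-disk sizes, so its return value is how many bytes of datablocks the walked entries span. A hedged userspace sketch of that accumulation, assuming the on-disk convention from squashfs_fs.h in which bit 24 of an entry flags an uncompressed block and the remaining bits hold the stored length:

#include <stdint.h>
#include <stdio.h>

#define COMPRESSED_BIT	(1u << 24)	/* set = block stored uncompressed (assumed) */

/*
 * Sum the on-disk length of `n` block-list entries, i.e. the byte distance
 * from the start of the first datablock to the end of the last one.  The
 * kernel additionally validates each entry (squashfs_block_size()) and reads
 * the entries from compressed metadata blocks rather than a plain array.
 */
static long long span(const uint32_t *blist, int n)
{
	long long bytes = 0;

	for (int i = 0; i < n; i++)
		bytes += blist[i] & ~COMPRESSED_BIT;	/* strip the flag, keep the length */
	return bytes;
}

int main(void)
{
	/* three datablocks: two compressed, the third stored raw at 131072 bytes */
	uint32_t blist[] = { 100000, 90000, 131072 | COMPRESSED_BIT };

	/* datablock 3 therefore starts this many bytes after datablock 0 */
	printf("%lld\n", span(blist, 3));	/* 321072 */
	return 0;
}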
205 * Each cache index slot has SQUASHFS_META_ENTRIES, each of which
206 * can cache one index -> datablock/blocklist-block mapping. We wish
210 * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
211 * the number of metadata blocks that need to be read fits into the cache.
219 return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
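With the assumed values SQUASHFS_META_ENTRIES + 1 = 128, SQUASHFS_META_INDEXES = 2048 and SQUASHFS_CACHED_BLKS = 8, the divisor above is 262144 datablocks, i.e. 32 GiB of file data at the default 128 KiB block size. A worked example of the resulting skip factors (a sketch, not the kernel function itself):

#include <stdio.h>

/* mirrors calculate_skip(): the stride grows by one metadata block's worth of
 * entries per 32 GiB of file (at 128 KiB blocks), and is capped at 7 */
static unsigned long long skip(unsigned long long blocks)
{
	unsigned long long s = blocks / ((127 + 1) * 2048) + 1;
	unsigned long long cap = 8 - 1;		/* SQUASHFS_CACHED_BLKS - 1 */

	return s < cap ? s : cap;
}

int main(void)
{
	unsigned long long gib = 1ULL << 30, block = 128 << 10;

	printf("%llu\n", skip(1   * gib / block));	/* 1: anything up to 32 GiB */
	printf("%llu\n", skip(100 * gib / block));	/* 4: 100 GiB file */
	printf("%llu\n", skip(500 * gib / block));	/* 7: capped, file needs several slots */
	return 0;
}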
224 * Search and grow the index cache for the specified inode, returning the
225 * on-disk locations of the datablock and block list metadata block
226 * <index_block, index_offset> for index (scaled to nearest cache index).
231 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
232 int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
236 u64 cur_index_block = squashfs_i(inode)->block_list_start;
237 int cur_offset = squashfs_i(inode)->offset;
238 u64 cur_data_block = squashfs_i(inode)->start;
242 * Scale index to cache index (cache slot entry)
254 offset = index < meta->offset + meta->entries ? index :
255 meta->offset + meta->entries - 1;
256 meta_entry = &meta->meta_entry[offset - meta->offset];
257 cur_index_block = meta_entry->index_block +
258 msblk->inode_table;
259 cur_offset = meta_entry->offset;
260 cur_data_block = meta_entry->data_block;
261 TRACE("get_meta_index: offset %d, meta->offset %d, "
262 "meta->entries %d\n", offset, meta->offset,
263 meta->entries);
270 * If necessary grow cache slot by reading block list. Cache
274 for (i = meta->offset + meta->entries; i <= index &&
275 i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
277 long long res = read_indexes(inode->i_sb, blocks,
281 if (meta->entries == 0)
286 meta->inode_number = 0;
292 meta_entry = &meta->meta_entry[i - meta->offset];
293 meta_entry->index_block = cur_index_block -
294 msblk->inode_table;
295 meta_entry->offset = cur_offset;
296 meta_entry->data_block = cur_data_block;
297 meta->entries++;
301 TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
302 meta->offset, meta->entries);
313 * Scale cache index (cache slot entry) to index
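Tying the scaling together: the wanted datablock index is divided down to a cache-slot entry, fill_meta_index() returns the datablock index that entry actually reaches, and read_blocklist() walks only the remaining block-list entries. A sketch of that arithmetic, assuming SQUASHFS_META_INDEXES = 2048:

#include <stdio.h>

#define META_INDEXES	2048	/* block-list entries per metadata block (assumed) */

int main(void)
{
	int skip = 2;		/* from calculate_skip() for this file */
	int index = 1000000;	/* datablock the reader wants */

	/* scale index to cache index: each cached entry covers skip * META_INDEXES blocks */
	int cache_index = index / (META_INDEXES * skip);

	/* scale back: the datablock index the cached entry really describes */
	int reached = cache_index * META_INDEXES * skip;

	/* read_blocklist() must still walk this many block-list entries past the entry */
	printf("cache entry %d reaches block %d, %d entries left to read\n",
	       cache_index, reached, index - reached);	/* 244, 999424, 576 */
	return 0;
}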
324 * Get the on-disk location and compressed size of the datablock
327 static int read_blocklist(struct inode *inode, int index, u64 *block)
332 __le32 size;
333 int res = fill_meta_index(inode, index, &start, &offset, block);
336 " 0x%x, block 0x%llx\n", res, index, start, offset,
337 *block);
345 * meta_index cache works at a higher granularity). Read any
346 * extra block indexes needed.
349 blks = read_indexes(inode->i_sb, index - res, &start, &offset);
352 *block += blks;
356 * Read length of block specified by index.
358 res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
359 sizeof(size));
362 return squashfs_block_size(size);
374 memset(pageaddr + copied, 0, PAGE_SIZE - copied);
382 /* Copy data into page cache */
387 struct address_space *mapping = folio->mapping;
388 struct inode *inode = mapping->host;
389 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
390 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
391 int start_index = folio->index & ~mask, end_index = start_index | mask;
395 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
396 * grab the pages from the page cache, except for the page that we've
400 bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
407 push_folio = (i == folio->index) ? folio :
421 if (i != folio->index)
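A decompressed datablock covers several pages, and the copy-to-page-cache loop above works out which page indexes share a block with the page that triggered the read. A worked example of the mask arithmetic, assuming 4 KiB pages and the default 128 KiB block size:

#include <stdio.h>

int main(void)
{
	int block_log = 17, page_shift = 12;		/* 128 KiB blocks, 4 KiB pages */
	int mask = (1 << (block_log - page_shift)) - 1;	/* 31: pages per block minus one */
	long folio_index = 70;				/* page that triggered the read */

	long start_index = folio_index & ~mask;		/* first page of this datablock */
	long end_index = start_index | mask;		/* last page of this datablock */

	/* pages 64..95 are all filled from the same decompressed 128 KiB block */
	printf("pages %ld..%ld\n", start_index, end_index);
	return 0;
}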
426 /* Read datablock stored packed inside a fragment (tail-end packed block) */
429 struct inode *inode = folio->mapping->host;
430 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
431 squashfs_i(inode)->fragment_block,
432 squashfs_i(inode)->fragment_size);
433 int res = buffer->error;
436 ERROR("Unable to read page, block %llx, size %x\n",
437 squashfs_i(inode)->fragment_block,
438 squashfs_i(inode)->fragment_size);
441 squashfs_i(inode)->fragment_offset);
455 struct inode *inode = folio->mapping->host;
456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
457 int index = folio->index >> (msblk->block_log - PAGE_SHIFT);
458 int file_end = i_size_read(inode) >> msblk->block_log;
460 (i_size_read(inode) & (msblk->block_size - 1)) :
461 msblk->block_size;
464 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
465 folio->index, squashfs_i(inode)->start);
467 if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
471 if (index < file_end || squashfs_i(inode)->fragment_block ==
473 u64 block = 0;
475 res = read_blocklist(inode, index, &block);
482 res = squashfs_readpage_block(folio, block, res, expected);
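The same shift maps a page back to its datablock, and `expected` is that block's decompressed length: a full block everywhere except the final, partial block. A worked example with 4 KiB pages, 128 KiB blocks and a hypothetical 300000-byte file, assuming its tail is stored as a datablock rather than a tail-end fragment (the fragment_block check above makes exactly that distinction); the same numbers are reused in the readahead sketch at the end.

#include <stdio.h>

int main(void)
{
	long long i_size = 300000;		/* hypothetical file size */
	int block_log = 17, page_shift = 12;	/* 128 KiB blocks, 4 KiB pages */
	long long block_size = 1LL << block_log;
	long folio_index = 70;			/* page being read */

	int index = folio_index >> (block_log - page_shift);	/* datablock 2 */
	long long file_end = i_size >> block_log;		/* last datablock: 2 */

	/* the last block holds only the remainder of the file, earlier ones a full block */
	long long expected = index == file_end ?
		(i_size & (block_size - 1)) : block_size;

	printf("block %d, expected %lld bytes\n", index, expected);	/* block 2, 37856 */
	return 0;
}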
499 struct inode *inode = page[0]->mapping->host;
500 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
501 squashfs_i(inode)->fragment_block,
502 squashfs_i(inode)->fragment_size);
503 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
510 if (buffer->error)
522 int avail = min_t(int, expected - offset, PAGE_SIZE);
526 squashfs_i(inode)->fragment_offset, avail);
542 memzero_page(last_page, bytes, PAGE_SIZE - bytes);
568 struct inode *inode = ractl->mapping->host;
569 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
570 size_t mask = (1UL << msblk->block_log) - 1;
571 unsigned short shift = msblk->block_log - PAGE_SHIFT;
573 size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
578 loff_t file_end = i_size_read(inode) >> msblk->block_log;
589 u64 block = 0;
593 expected = start >> msblk->block_log == file_end ?
594 (i_size_read(inode) & (msblk->block_size - 1)) :
595 msblk->block_size;
597 max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;
606 if (start >> msblk->block_log == file_end &&
607 squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
615 bsize = read_blocklist(inode, start >> msblk->block_log, &block);
624 res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
633 if (start >> msblk->block_log == file_end && bytes && last_page)
635 PAGE_SIZE - bytes);
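For the final block, readahead only batches as many pages as the expected decompressed length needs, and any unused tail of the last page is zero-filled. Continuing the 300000-byte example (a sketch, assuming the read returned the expected length):

#include <stdio.h>

int main(void)
{
	long long expected = 37856;	/* decompressed length of the file's last block */
	int page_size = 4096, page_shift = 12;

	/* pages worth batching for this block: round the expected length up */
	int max_pages = (expected + page_size - 1) >> page_shift;

	/* bytes of real data landing in the final page; the rest must be zeroed */
	int bytes = expected % page_size;

	printf("batch %d pages, zero last %d bytes of the final page\n",
	       max_pages, bytes ? page_size - bytes : 0);	/* 10 pages, 3104 bytes */
	return 0;
}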