// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

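/*
 * Metadata buffer helpers: erofs_unmap_metabuf() drops the local kmap (if
 * one was taken) and erofs_put_metabuf() additionally releases the page
 * reference, resetting the buffer for reuse.
 */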
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits to stay compatible with the
 * anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

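/*
 * erofs_init_metabuf() points the buffer at the metadata source: the
 * anonymous fscache inode in fscache mode, or the block device inode
 * otherwise.  erofs_read_metabuf() combines that with erofs_bread() to map
 * one metadata block in a single call.
 *
 * A minimal usage sketch, mirroring how erofs_map_blocks() below consumes it
 * (the blkaddr value here is only illustrative):
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *kaddr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);
 *
 *	if (IS_ERR(kaddr))
 *		return PTR_ERR(kaddr);
 *	... parse on-disk metadata at kaddr ...
 *	erofs_put_metabuf(&buf);
 */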
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}

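/*
 * Map a logical extent for flat (uncompressed, non-chunked) inodes: plain
 * data maps linearly from vi->raw_blkaddr, while a tail-packed inline part
 * lives right after the on-disk inode and must not cross a block boundary.
 */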
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

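/*
 * Translate a logical offset (map->m_la) into an on-disk extent.  Accesses
 * beyond i_size stay unmapped; chunk-based inodes look up the per-chunk
 * block map or chunk index from metadata, and all other layouts fall back
 * to erofs_map_blocks_flatmode().
 */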
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

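/*
 * Resolve the target device of a mapped extent on multi-device images: a
 * non-zero m_deviceid selects an extra device directly (or, for flat
 * multi-device layouts, merely offsets m_pa on the primary device), while a
 * zero m_deviceid is looked up in the per-device ranges and m_pa is rebased
 * onto the matching device.
 */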
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

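/*
 * iomap_begin: convert an (offset, length) request into an iomap extent by
 * chaining erofs_map_blocks() and erofs_map_dev().  Unmapped ranges become
 * holes, tail-packed inline data is exposed as IOMAP_INLINE backed by a
 * metabuf that is released later in erofs_iomap_end(), and everything else
 * becomes a plain mapped extent (DAX-adjusted when requested).
 */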
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
				erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

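/*
 * iomap_end: for IOMAP_INLINE extents, rebuild the erofs_buf from the kmapped
 * pointer stashed in iomap->private and release the metabuf taken in
 * erofs_iomap_begin().
 */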
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

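/*
 * FIEMAP uses the compressed-data report ops for compressed inodes (only
 * when CONFIG_EROFS_FS_ZIP is enabled) and the regular iomap ops otherwise.
 */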
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking needs to be
 * held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

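/*
 * Read path for uncompressed files: DAX inodes go through dax_iomap_rw(),
 * O_DIRECT reads go through iomap_dio_rw() once the position, count and user
 * buffer are aligned to the logical block size, and everything else falls
 * back to buffered filemap_read().
 */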
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

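/*
 * DAX mmap support: faults are served directly through dax_iomap_fault();
 * shared writable mappings are rejected since the filesystem is read-only,
 * and non-DAX inodes keep using generic_file_readonly_mmap().
 */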
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= filemap_splice_read,
};