xref: /linux/fs/erofs/data.c (revision ed90ed56e4b1311797302c2e6107f5049ba4586d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

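/* unmap the mapped metadata page (if any) and clear the mapping address */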
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (!buf->base)
		return;
	kunmap_local(buf->base);
	buf->base = NULL;
}

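/* unmap and release the cached metadata page, resetting the buffer */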
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	folio_put(page_folio(buf->page));
	buf->page = NULL;
}

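/*
 * Read metadata at @offset, reusing the cached page when it still covers
 * the target index; the page is kmap'ed unless EROFS_NO_KMAP is requested.
 * Returns a pointer into the mapped page, NULL for EROFS_NO_KMAP, or an
 * ERR_PTR() on I/O failure.
 */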
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
		  enum erofs_kmap_type type)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct folio *folio = NULL;

	if (buf->page) {
		folio = page_folio(buf->page);
		if (folio_file_page(folio, index) != buf->page)
			erofs_unmap_metabuf(buf);
	}
	if (!folio || !folio_contains(folio, index)) {
		erofs_put_metabuf(buf);
		folio = read_mapping_folio(buf->mapping, index, buf->file);
		if (IS_ERR(folio))
			return folio;
	}
	buf->page = folio_file_page(folio, index);
	if (!buf->base && type == EROFS_KMAP)
		buf->base = kmap_local_page(buf->page);
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

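/*
 * Bind the buffer to the metadata source: the backing file (file-backed
 * mode), the fscache pseudo inode, or the block device mapping.
 */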
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	buf->file = NULL;
	if (erofs_is_fileio_mode(sbi)) {
		buf->file = sbi->dif0.file;	/* some filesystems like FUSE need it */
		buf->mapping = buf->file->f_mapping;
	} else if (erofs_is_fscache_mode(sb))
		buf->mapping = sbi->dif0.fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
}

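/* one-shot helper: initialize the buffer and read metadata at @offset */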
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, offset, type);
}

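/*
 * Map logical offsets of flat (non-chunked) inodes: plain blocks map
 * linearly from vi->raw_blkaddr, while an inline tail (if any) follows the
 * on-disk inode and must not cross a metadata block boundary.
 */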
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
	erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking;

	map->m_flags = EROFS_MAP_MAPPED;	/* no hole in flat inodes */
	if (map->m_la < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - map->m_la;
	} else {
		DBG_BUGON(!tailendpacking);
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, map->m_la);
		map->m_plen = inode->i_size - map->m_la;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	}
	return 0;
}

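/*
 * Translate the logical extent (m_la, m_llen) into the physical extent
 * (m_pa, m_plen); out-of-bound accesses are left unmapped and chunk-based
 * inodes are resolved through their block maps or chunk indexes.
 */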
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = map->m_llen;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr;

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr;
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

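/* set up the target device; only block-device-backed files expose a bdev */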
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
		struct super_block *sb, struct erofs_device_info *dif)
{
	map->m_sb = sb;
	map->m_dif = dif;
	map->m_bdev = NULL;
	if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
		map->m_bdev = file_bdev(dif->file);
}

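/*
 * Resolve (m_deviceid, m_pa) to the backing device: a non-zero device ID
 * selects an extra device from the IDR tree, while flatdev images address
 * all devices within a single contiguous block address space.
 */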
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	erofs_off_t startoff, length;
	int id;

	erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
	map->m_bdev = sb->s_bdev;	/* use s_bdev for the primary device */
	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		erofs_fill_from_devinfo(map, sb, dif);
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			if (!dif->mapped_blkaddr)
				continue;

			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);
			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				erofs_fill_from_devinfo(map, sb, dif);
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

/*
 * bit 30: I/O error occurred on this folio
 * bit 0 - 29: remaining parts to complete this folio
 */
#define EROFS_ONLINEFOLIO_EIO			(1 << 30)

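/* attach an in-flight part counter (initialized to 1) to the locked folio */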
void erofs_onlinefolio_init(struct folio *folio)
{
	union {
		atomic_t o;
		void *v;
	} u = { .o = ATOMIC_INIT(1) };

	folio->private = u.v;	/* valid only if file-backed folio is locked */
}

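/* account one more in-flight part for this folio */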
void erofs_onlinefolio_split(struct folio *folio)
{
	atomic_inc((atomic_t *)&folio->private);
}

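/*
 * Complete one part of the folio: the last completer clears ->private and
 * ends the read, marking the folio uptodate only if no part reported EIO.
 */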
void erofs_onlinefolio_end(struct folio *folio, int err)
{
	int orig, v;

	do {
		orig = atomic_read((atomic_t *)&folio->private);
		v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0);
	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

	if (v & ~EROFS_ONLINEFOLIO_EIO)
		return;
	folio->private = 0;
	folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO));
}

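/*
 * Convert the requested range into an iomap extent, reporting holes,
 * inline (metadata-packed) data and plain mapped extents accordingly.
 */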
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_dif->dax_dev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr;
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dif->dax_part_off;
	}
	return 0;
}

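/*
 * For inline extents, put the metabuf grabbed by erofs_iomap_begin();
 * its mapped address was stashed in iomap->private.
 */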
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

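/*
 * FIEMAP reporting: compressed inodes need z_erofs_iomap_report_ops and
 * thus CONFIG_EROFS_FS_ZIP; uncompressed ones use the plain iomap ops.
 */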
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

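/* read path: DAX, direct I/O (block-backed only) or buffered page cache */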
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a read-only filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev)
		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

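/*
 * DAX mmap: shared maybe-writable mappings are rejected on this read-only
 * filesystem, and VM_HUGEPAGE hints that PMD-sized mappings may be used.
 */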
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

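/*
 * SEEK_HOLE/SEEK_DATA go through iomap (using the compressed report ops
 * where applicable); other whence values use generic_file_llseek().
 */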
static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	const struct iomap_ops *ops = &erofs_iomap_ops;

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
#ifdef CONFIG_EROFS_FS_ZIP
		ops = &z_erofs_iomap_report_ops;
#else
		return generic_file_llseek(file, offset, whence);
#endif

	if (whence == SEEK_HOLE)
		offset = iomap_seek_hole(inode, offset, ops);
	else if (whence == SEEK_DATA)
		offset = iomap_seek_data(inode, offset, ops);
	else
		return generic_file_llseek(file, offset, whence);

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

const struct file_operations erofs_file_fops = {
	.llseek		= erofs_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};
478