xref: /linux/fs/erofs/data.c (revision 3b428e1cfcc4c5f063bb8b367beb71ee06470d4b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

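/*
 * Unmap the current kmap of a cached metadata buffer without dropping
 * the page reference, so the buffer can be remapped or released later.
 */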
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (!buf->base)
		return;
	kunmap_local(buf->base);
	buf->base = NULL;
}

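/*
 * Release a cached metadata buffer: unmap it first, then drop the folio
 * reference taken by erofs_bread().
 */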
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	folio_put(page_folio(buf->page));
	buf->page = NULL;
}

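/*
 * Read the metadata folio covering @offset from buf->mapping, reusing the
 * currently cached page when it already covers the target index.  Returns
 * a kmapped pointer at the in-page offset if @need_kmap is set, NULL
 * otherwise, or an ERR_PTR() on read failure.
 */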
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap)
{
	pgoff_t index = (buf->off + offset) >> PAGE_SHIFT;
	struct folio *folio = NULL;

	if (buf->page) {
		folio = page_folio(buf->page);
		if (folio_file_page(folio, index) != buf->page)
			erofs_unmap_metabuf(buf);
	}
	if (!folio || !folio_contains(folio, index)) {
		erofs_put_metabuf(buf);
		folio = read_mapping_folio(buf->mapping, index, buf->file);
		if (IS_ERR(folio))
			return folio;
	}
	buf->page = folio_file_page(folio, index);
	if (!need_kmap)
		return NULL;
	if (!buf->base)
		buf->base = kmap_local_page(buf->page);
	return buf->base + (offset & ~PAGE_MASK);
}

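/*
 * Bind a metadata buffer to the address_space that backs this filesystem:
 * the backing file's mapping for file-backed mounts, the fscache inode's
 * mapping in fscache mode, or the raw block device mapping otherwise.
 * dif0.fsoff is recorded so subsequent reads are offset into the backing
 * store.
 */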
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	buf->file = NULL;
	buf->off = sbi->dif0.fsoff;
	if (erofs_is_fileio_mode(sbi)) {
		buf->file = sbi->dif0.file;	/* some fs like FUSE needs it */
		buf->mapping = buf->file->f_mapping;
	} else if (erofs_is_fscache_mode(sb))
		buf->mapping = sbi->dif0.fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
}

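/*
 * Typical usage, as in erofs_map_blocks() below:
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, pos, true);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	erofs_put_metabuf(&buf);
 */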
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, bool need_kmap)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, offset, need_kmap);
}

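/*
 * Translate the logical extent starting at map->m_la into a physical
 * extent.  Flat layouts map the whole file directly (plus an optional
 * tail-packed inline block, flagged EROFS_MAP_META); chunk-based layouts
 * look up the on-disk block map or chunk indexes instead.
 */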
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct super_block *sb = inode->i_sb;
	unsigned int unit, blksz = sb->s_blocksize;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	erofs_blk_t startblk, addrmask;
	bool tailpacking;
	erofs_off_t pos;
	u64 chunknr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	map->m_flags = 0;
	if (map->m_la >= inode->i_size)
		goto out;

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		tailpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
		if (!tailpacking && vi->startblk == EROFS_NULL_ADDR)
			goto out;
		pos = erofs_pos(sb, erofs_iblks(inode) - tailpacking);

		map->m_flags = EROFS_MAP_MAPPED;
		if (map->m_la < pos) {
			map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
			map->m_llen = pos - map->m_la;
		} else {
			map->m_pa = erofs_iloc(inode) + vi->inode_isize +
				vi->xattr_isize + erofs_blkoff(sb, map->m_la);
			map->m_llen = inode->i_size - map->m_la;
			map->m_flags |= EROFS_MAP_META;
		}
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	idx = erofs_read_metabuf(&buf, sb, pos, true);
	if (IS_ERR(idx)) {
		err = PTR_ERR(idx);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, blksz));
	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) {
		addrmask = (vi->chunkformat & EROFS_CHUNK_FORMAT_48BIT) ?
			BIT_ULL(48) - 1 : BIT_ULL(32) - 1;
		startblk = (((u64)le16_to_cpu(idx->startblk_hi) << 32) |
			    le32_to_cpu(idx->startblk_lo)) & addrmask;
		if ((startblk ^ EROFS_NULL_ADDR) & addrmask) {
			map->m_deviceid = le16_to_cpu(idx->device_id) &
				EROFS_SB(sb)->device_id_mask;
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	} else {
		startblk = le32_to_cpu(*(__le32 *)idx);
		if (startblk != (u32)EROFS_NULL_ADDR) {
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	}
	erofs_put_metabuf(&buf);
out:
	if (!err) {
		map->m_plen = map->m_llen;
		/* inline data should be located in the same meta block */
		if ((map->m_flags & EROFS_MAP_META) &&
		    erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
	}
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

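/*
 * Fill a device mapping from a device slot; only file-backed slots that
 * actually refer to a block device expose a bdev here.
 */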
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
		struct super_block *sb, struct erofs_device_info *dif)
{
	map->m_sb = sb;
	map->m_dif = dif;
	map->m_bdev = NULL;
	if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
		map->m_bdev = file_bdev(dif->file);
}

int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	erofs_off_t startoff;
	int id;

	erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
	map->m_bdev = sb->s_bdev;	/* use s_bdev for the primary device */
	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->uniaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		erofs_fill_from_devinfo(map, sb, dif);
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			if (!dif->uniaddr)
				continue;

			startoff = erofs_pos(sb, dif->uniaddr);
			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
				map->m_pa -= startoff;
				erofs_fill_from_devinfo(map, sb, dif);
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

/*
 * bit 30: I/O error occurred on this folio;
 * bit 29: CPU has dirty data in D-cache (needs aliasing handling);
 * bit 0 - 28: remaining parts to complete this folio.
 */
#define EROFS_ONLINEFOLIO_EIO		30
#define EROFS_ONLINEFOLIO_DIRTY		29

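/*
 * An "online" folio is read in parts: folio->private holds the number of
 * outstanding parts (biased by 1 for the submitter) plus the sticky EIO
 * and DIRTY bits above; the read is completed once the count drops back
 * to zero.
 */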
void erofs_onlinefolio_init(struct folio *folio)
{
	union {
		atomic_t o;
		void *v;
	} u = { .o = ATOMIC_INIT(1) };

	folio->private = u.v;	/* valid only if file-backed folio is locked */
}

void erofs_onlinefolio_split(struct folio *folio)
{
	atomic_inc((atomic_t *)&folio->private);
}

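/*
 * Drop one part reference; the last dropper flushes the D-cache if any
 * part was dirtied and ends the folio read with the accumulated status.
 */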
void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
{
	int orig, v;

	do {
		orig = atomic_read((atomic_t *)&folio->private);
		DBG_BUGON(orig <= 0);
		v = dirty << EROFS_ONLINEFOLIO_DIRTY;
		v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO);
	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

	if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1))
		return;
	folio->private = 0;
	if (v & BIT(EROFS_ONLINEFOLIO_DIRTY))
		flush_dcache_folio(folio);
	folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO)));
}

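/*
 * Build an iomap extent from erofs_map_blocks()/erofs_map_dev() results.
 * Tail-packed inline data is exposed as IOMAP_INLINE; the backing metabuf
 * is kept mapped and released later in erofs_iomap_end().
 */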
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_dif->dax_dev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, true);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr;
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_dif->fsoff + mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dif->dax_part_off;
	}
	return 0;
}

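/*
 * iomap->private carries the kmapped base of the inline metabuf set up in
 * erofs_iomap_begin(), so the matching page reference can be dropped here.
 */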
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

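/*
 * FIEMAP also goes through iomap; compressed inodes are reported via
 * z_erofs_iomap_report_ops and rejected if CONFIG_EROFS_FS_ZIP is off.
 */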
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode lock needs to be
 * held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	trace_erofs_read_folio(folio, true);

	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
					readahead_count(rac), true);

	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

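/*
 * Reads are served through DAX, iomap direct I/O, or the plain page cache
 * depending on the inode and iocb flags.
 */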
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev)
		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

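/*
 * DAX mappings must stay read-only: shared mappings that could become
 * writable are rejected before installing the DAX vm_ops.
 */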
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

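/*
 * SEEK_HOLE/SEEK_DATA are implemented on top of iomap extent reporting;
 * everything else falls back to generic_file_llseek().
 */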
static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	const struct iomap_ops *ops = &erofs_iomap_ops;

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
#ifdef CONFIG_EROFS_FS_ZIP
		ops = &z_erofs_iomap_report_ops;
#else
		return generic_file_llseek(file, offset, whence);
#endif

	if (whence == SEEK_HOLE)
		offset = iomap_seek_hole(inode, offset, ops);
	else if (whence == SEEK_DATA)
		offset = iomap_seek_data(inode, offset, ops);
	else
		return generic_file_llseek(file, offset, whence);

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

const struct file_operations erofs_file_fops = {
	.llseek		= erofs_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};