// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

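/*
 * Metadata buffer helpers: an erofs_buf caches a single page of on-disk
 * metadata along with its mapping state.  EROFS_KMAP means the page is
 * currently mapped with kmap_local_page(); EROFS_NO_KMAP means only the
 * page reference is held.  Callers pair erofs_bread()/erofs_read_metabuf()
 * with erofs_put_metabuf() once they are done with the data.
 */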
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	folio_put(page_folio(buf->page));
	buf->page = NULL;
}

void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
		  enum erofs_kmap_type type)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct folio *folio = NULL;

	if (buf->page) {
		folio = page_folio(buf->page);
		if (folio_file_page(folio, index) != buf->page)
			erofs_unmap_metabuf(buf);
	}
	if (!folio || !folio_contains(folio, index)) {
		erofs_put_metabuf(buf);
		folio = read_mapping_folio(buf->mapping, index, NULL);
		if (IS_ERR(folio))
			return folio;
	}
	buf->page = folio_file_page(folio, index);

	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(buf->page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, offset, type);
}
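
/*
 * A minimal usage sketch (kept under "#if 0", not compiled): how a caller
 * might read one piece of metadata and release the buffer afterwards.
 * The helper name and the superblock read below are purely illustrative.
 */
#if 0
static int example_read_sb(struct super_block *sb)
{
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;

	dsb = erofs_read_metabuf(&buf, sb, EROFS_SUPER_OFFSET, EROFS_KMAP);
	if (IS_ERR(dsb))
		return PTR_ERR(dsb);
	/* ... inspect the on-disk superblock fields here ... */
	erofs_put_metabuf(&buf);	/* kunmap and drop the page reference */
	return 0;
}
#endif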

static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
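
/*
 * Worked example for the flat-mode math above (assuming 4KiB blocks): a
 * FLAT_INLINE inode with raw_blkaddr == 100 and two full raw blocks has
 * nblocks == 3 and lastblk == 2.  An access at m_la == 0x1000 lies below
 * erofs_pos(sb, lastblk) and maps to m_pa == 100 * 4096 + 0x1000; any
 * m_la in the last (partial) block is redirected to the inline tail that
 * follows the inode and its xattrs, flagged with EROFS_MAP_META.
 */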

int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr;

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr;
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}
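
/*
 * Callers typically chain erofs_map_blocks() (logical offset to physical
 * offset within the image) with erofs_map_dev() below (physical offset to
 * a concrete block device, daxdev or fscache), as erofs_iomap_begin() does.
 */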

int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_file ? file_bdev(dif->bdev_file) : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_file ?
					      file_bdev(dif->bdev_file) : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}
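
/*
 * Note on the lookup above: a nonzero m_deviceid selects one extra device
 * from the IDR directly; in flatdev mode all devices form a single flat
 * address space, so only mapped_blkaddr is applied.  Without a device id,
 * m_pa is matched against each device's [startoff, startoff + length)
 * window instead.
 */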

static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr;
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}
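
/*
 * For IOMAP_INLINE extents, the kmapped metabuf must stay alive until
 * iomap is done with the inline data: erofs_iomap_begin() stashes the
 * mapped address in iomap->private, and erofs_iomap_end() reconstructs
 * the erofs_buf from it (via kmap_to_page()) to unmap and release it.
 */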

static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}
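
/*
 * Example for the direct I/O alignment check above (assuming a 512-byte
 * logical block size): a 4KiB read at file offset 512 from a 512-byte
 * aligned buffer is accepted, while a read at offset 100, or with an
 * unaligned buffer or length, fails with -EINVAL before any I/O is issued.
 */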

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};