// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/unaligned.h>
#include <trace/events/erofs.h>

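/*
 * A cursor used while walking the on-disk lcluster indexes: it caches the
 * fields of the most recently loaded lcluster (type, cluster offset, deltas
 * and the physical block address) so that callers can resolve one extent
 * step by step.
 */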
struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	unsigned long lcn;
	/* compression extent information gathered */
	u8  type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref, in_mbox;
};

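/*
 * Load the lcn-th lcluster index of an inode using the full (non-compact)
 * on-disk format, where each lcluster is described by a fixed-size
 * struct z_erofs_lcluster_index record following the map header.
 */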
static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
				      unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_START(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize) +
			lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise;

	di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
	if (IS_ERR(di))
		return PTR_ERR(di);
	m->lcn = lcn;
	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);

	advise = le16_to_cpu(di->di_advise);
	m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << vi->z_lclusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
	} else {
		m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
	}
	return 0;
}

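/*
 * Decode one entry of a compacted index pack.  Entries are bit-packed back
 * to back, each `encodebits` wide: the low `lobits` bits hold clusterofs
 * (HEAD) or a delta/CBLKCNT (NONHEAD), and the next 2 bits hold the
 * lcluster type.  For instance, in the 2B-compacted layout (vcnt == 16,
 * encodebits == 14, lobits == 12), entry i starts at bit 14 * i; a 32-bit
 * little-endian read always covers a whole entry since lobits + 2 <= 16.
 */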
static unsigned int decode_compactedbits(unsigned int lobits,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & ((1 << lobits) - 1);

	*type = (v >> lobits) & 3;
	return lo;
}

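/*
 * Compute the lookahead distance (delta[1]) for the i-th entry of a pack by
 * scanning forward and counting the consecutive NONHEAD lclusters that
 * follow it.
 */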
static int get_compacted_la_distance(unsigned int lobits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lobits, in, encodebits * i, &type);

		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

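/*
 * Load one lcluster from the compact on-disk index format.  Entries are
 * grouped into fixed-size packs: either 2 entries amortized to 4 bytes
 * each, or 16 entries amortized to 2 bytes each, with a run of 4B entries
 * first so that the 2B area starts 32-byte aligned.  Each pack ends with a
 * __le32 base block address from which the physical address of each HEAD
 * lcluster in the pack is derived.
 */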
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
					 unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ebase = Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize);
	const unsigned int lclusterbits = vi->z_lclusterbits;
	const unsigned int totalidx = erofs_iblks(inode);
	unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
	unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
	bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	erofs_off_t pos;
	u8 *in, type;
	int i;

	if (lcn >= totalidx || lclusterbits > 14)
		return -EINVAL;

	m->lcn = lcn;
	/* initial 4B entries needed for 32-byte (compacted_2b) alignment */
	compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
	compacted_2b = 0;
	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);

	pos = ebase;
	amortizedshift = 2;	/* compact_4b */
	if (lcn >= compacted_4b_initial) {
		pos += compacted_4b_initial * 4;
		lcn -= compacted_4b_initial;
		if (lcn < compacted_2b) {
			amortizedshift = 1;
		} else {
			pos += compacted_2b * 2;
			lcn -= compacted_2b;
		}
	}
	pos += lcn * (1 << amortizedshift);

	/* figure out the lcluster count in this pack */
	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	in = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
	if (IS_ERR(in))
		return PTR_ERR(in);

	/* unlike round_up(..), this always advances past the current pack */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	bytes = pos & ((vcnt << amortizedshift) - 1);
	in -= bytes;
	i = bytes >> amortizedshift;

	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lobits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo stores
		 * delta[1] rather than delta[0], so derive delta[0] from
		 * the previous lcluster instead.
		 */
		lo = decode_compactedbits(lobits, in,
					  encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

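/*
 * Load the lcn-th lcluster index, dispatching on the inode's datalayout
 * (compact vs. full), and sanity-check the decoded type and clusterofs.
 */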
static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
					   unsigned int lcn, bool lookahead)
{
	struct erofs_inode *vi = EROFS_I(m->inode);
	int err;

	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
		err = z_erofs_load_compact_lcluster(m, lcn, lookahead);
	} else {
		DBG_BUGON(vi->datalayout != EROFS_INODE_COMPRESSED_FULL);
		err = z_erofs_load_full_lcluster(m, lcn);
	}
	if (err)
		return err;

	if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
		erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu",
			  m->type, lcn, EROFS_I(m->inode)->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	} else if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD &&
		   m->clusterofs >= (1 << vi->z_lclusterbits)) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	return 0;
}

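/*
 * Walk backwards from the current lcluster in delta[0]-sized hops until the
 * HEAD lcluster that starts the extent is found, recording its type and the
 * logical offset (m_la) of the extent.
 */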
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_lclusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		if (!lookback_distance)
			break;

		err = z_erofs_load_lcluster_from_disk(m, lcn, false);
		if (err)
			return err;
		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			lookback_distance = m->delta[0];
			continue;
		}
		m->headtype = m->type;
		m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
		return 0;
	}
	erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
		  lookback_distance, m->lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

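/*
 * Determine the compressed (on-disk) length of the current extent.  Unless
 * the big pcluster feature applies, a pcluster is exactly one block; with
 * big pclusters, the block count comes from the CBLKCNT recorded in the
 * first NONHEAD lcluster following the HEAD.
 */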
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct inode *inode = m->inode;
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
	unsigned long lcn = m->lcn + 1;
	int err;

	DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
	DBG_BUGON(m->type != m->headtype);

	if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	      m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
	    (lcn << vi->z_lclusterbits) >= inode->i_size)
		m->compressedblks = 1;

	if (m->compressedblks)
		goto out;

	err = z_erofs_load_lcluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without valid compressedblks, it at least mustn't be CBLKCNT;
	 * otherwise an internal implementation error has been detected.
	 *
	 * The following code can handle that case properly anyway, but
	 * let's DBG_BUGON in debugging mode only for developers to notice.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD && m->delta[0] != 1) {
		erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * If the 1st NONHEAD lcluster is actually PLAIN or HEAD type rather
	 * than CBLKCNT, it's a one-block-sized pcluster.
	 */
	if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD || !m->compressedblks)
		m->compressedblks = 1;
out:
	m->map->m_plen = erofs_pos(sb, m->compressedblks);
	return 0;
}

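/*
 * Determine the decompressed (logical) length of the current extent by
 * walking forward through NONHEAD lclusters (using their delta[1]
 * lookahead) until the next HEAD lcluster or EOF is reached.
 */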
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_lclusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	while (1) {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_lcluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* work around invalid d1 generated by pre-1.0 mkfs */
			if (unlikely(!m->delta[1])) {
				m->delta[1] = 1;
				DBG_BUGON(1);
			}
		} else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
			if (lcn != headlcn)
				break;	/* ends at the next HEAD lcluster */
			m->delta[1] = 1;
		}
		lcn += m->delta[1];
	}
	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

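/*
 * Map a logical offset to a compressed extent for inodes using the
 * lcluster-index ("fo") format: load the lcluster that covers the offset,
 * look back to its HEAD lcluster if needed, then fill in the physical
 * address and both the compressed and (optionally) decompressed lengths.
 * Special cases include ztailpacking inline data and fragment pclusters
 * packed into the packed inode.
 */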
static int z_erofs_map_blocks_fo(struct inode *inode,
				 struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	bool ztailpacking = vi->z_idata_size;
	unsigned int lclusterbits = vi->z_lclusterbits;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
		.in_mbox = erofs_inode_in_metabox(inode),
	};
	unsigned int endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;
	int err;

	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	if (fragment && !(flags & EROFS_GET_BLOCKS_FINDTAIL) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = inode->i_size;
		map->m_flags = EROFS_MAP_FRAGMENT;
		return 0;
	}
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
		vi->z_fragmentoff = m.nextpackoff;
	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && endoff >= m.clusterofs) {
		m.headtype = m.type;
		map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
		/*
		 * For ztailpacking files, in order to inline data more
		 * effectively, special EOF lclusters are now supported
		 * which can have three parts at most.
		 */
		if (ztailpacking && end > inode->i_size)
			end = inode->i_size;
	} else {
		if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			end = (m.lcn << lclusterbits) | m.clusterofs;
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
			m.delta[0] = 1;
		}
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_fragmentoff;
		map->m_plen = vi->z_idata_size;
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "ztailpacking inline data across blocks @ nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags = EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(sb, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
			map->m_algorithmformat = Z_EROFS_COMPRESSION_INTERLACED;
		else
			map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	} else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
		map->m_algorithmformat = vi->z_algorithmtype[1];
	} else {
		map->m_algorithmformat = vi->z_algorithmtype[0];
	}

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
	      map->m_llen >= i_blocksize(inode))) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	return err;
}

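/*
 * Map a logical offset for inodes using the extent-record on-disk format:
 * records too small to carry pstart_hi are walked linearly (one record per
 * lcluster), while larger records are binary-searched by logical start.
 */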
static int z_erofs_map_blocks_ext(struct inode *inode,
				  struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool interlaced = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
	unsigned int recsz = z_erofs_extent_recsize(vi->z_advise);
	erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
				   vi->inode_isize + vi->xattr_isize), recsz);
	unsigned int bmask = sb->s_blocksize - 1;
	bool in_mbox = erofs_inode_in_metabox(inode);
	erofs_off_t lend = inode->i_size;
	erofs_off_t l, r, mid, pa, la, lstart;
	struct z_erofs_extent *ext;
	unsigned int fmt;
	bool last;

	map->m_flags = 0;
	if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) {
		if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) {
			ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);
			pa = le64_to_cpu(*(__le64 *)ext);
			pos += sizeof(__le64);
			lstart = 0;
		} else {
			lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
			pos += (lstart >> vi->z_lclusterbits) * recsz;
			pa = EROFS_NULL_ADDR;
		}

		for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
			ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);
			map->m_plen = le32_to_cpu(ext->plen);
			if (pa != EROFS_NULL_ADDR) {
				map->m_pa = pa;
				pa += map->m_plen & Z_EROFS_EXTENT_PLEN_MASK;
			} else {
				map->m_pa = le32_to_cpu(ext->pstart_lo);
			}
			pos += recsz;
		}
		last = (lstart >= round_up(lend, 1 << vi->z_lclusterbits));
		lend = min(lstart, lend);
		lstart -= 1 << vi->z_lclusterbits;
	} else {
		lstart = lend;
		for (l = 0, r = vi->z_extents; l < r; ) {
			mid = l + (r - l) / 2;
			ext = erofs_read_metabuf(&map->buf, sb,
						 pos + mid * recsz, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);

			la = le32_to_cpu(ext->lstart_lo);
			pa = le32_to_cpu(ext->pstart_lo) |
				(u64)le32_to_cpu(ext->pstart_hi) << 32;
			if (recsz > offsetof(struct z_erofs_extent, lstart_hi))
				la |= (u64)le32_to_cpu(ext->lstart_hi) << 32;

			if (la > map->m_la) {
				r = mid;
				if (la > lend) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				lend = la;
			} else {
				l = mid + 1;
				if (map->m_la == la)
					r = min(l + 1, r);
				lstart = la;
				map->m_plen = le32_to_cpu(ext->plen);
				map->m_pa = pa;
			}
		}
		last = (l >= vi->z_extents);
	}

	if (lstart < lend) {
		map->m_la = lstart;
		if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
			map->m_flags = EROFS_MAP_FRAGMENT;
			vi->z_fragmentoff = map->m_plen;
			if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
				vi->z_fragmentoff |= map->m_pa << 32;
		} else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
			map->m_flags |= EROFS_MAP_MAPPED |
				EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
			fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
			if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
				map->m_flags |= EROFS_MAP_PARTIAL_REF;
			map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
			if (fmt)
				map->m_algorithmformat = fmt - 1;
			else if (interlaced && !((map->m_pa | map->m_plen) & bmask))
				map->m_algorithmformat =
					Z_EROFS_COMPRESSION_INTERLACED;
			else
				map->m_algorithmformat =
					Z_EROFS_COMPRESSION_SHIFTED;
		}
	}
	map->m_llen = lend - map->m_la;
	return 0;
}

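/*
 * Lazily parse the per-inode z_erofs map header on first access.  The
 * EROFS_I_BL_Z_BIT bit lock serializes initializers, and the paired
 * smp_mb()s ensure that once EROFS_I_Z_INITED_BIT is observed set, all
 * header-derived fields are visible as well.
 */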
static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct z_erofs_map_header *h;
	erofs_off_t pos;
	int err = 0;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	h = erofs_read_metabuf(&map->buf, sb, pos, erofs_inode_in_metabox(inode));
	if (IS_ERR(h)) {
		err = PTR_ERR(h);
		goto out_unlock;
	}

	/*
	 * If the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode; the remaining bits keep z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_lclusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 15);
	if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
	    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS)) {
		vi->z_extents = le32_to_cpu(h->h_extents_lo) |
			((u64)le16_to_cpu(h->h_extents_hi) << 32);
		goto done;
	}

	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
	else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
		vi->z_idata_size = le16_to_cpu(h->h_idata_size);

	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_unlock;
	}
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (vi->z_idata_size ||
	    (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
		struct erofs_map_blocks tm = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		err = z_erofs_map_blocks_fo(inode, &tm,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&tm.buf);
		if (err < 0)
			goto out_unlock;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

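/*
 * Validate a just-built mapping for encoded extents: reject unsupported or
 * inconsistent algorithms, oversized pclusters, and physical ranges that
 * would exceed the 48-bit block address limit.
 */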
static int z_erofs_map_sanity_check(struct inode *inode,
				    struct erofs_map_blocks *map)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	u64 pend;

	if (!(map->m_flags & EROFS_MAP_ENCODED))
		return 0;
	if (unlikely(map->m_algorithmformat >= Z_EROFS_COMPRESSION_RUNTIME_MAX)) {
		erofs_err(inode->i_sb, "unknown algorithm %d @ pos %llu for nid %llu, please upgrade kernel",
			  map->m_algorithmformat, map->m_la, EROFS_I(inode)->nid);
		return -EOPNOTSUPP;
	}
	if (unlikely(map->m_algorithmformat < Z_EROFS_COMPRESSION_MAX &&
		     !(sbi->available_compr_algs & (1 << map->m_algorithmformat)))) {
		erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
			  map->m_algorithmformat, EROFS_I(inode)->nid);
		return -EFSCORRUPTED;
	}
	if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
		     map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
		return -EOPNOTSUPP;
	/* Filesystems beyond 48-bit physical block addresses are invalid */
	if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) ||
		     (pend >> sbi->blkszbits) >= BIT_ULL(48)))
		return -EFSCORRUPTED;
	return 0;
}

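/*
 * The entry point for mapping logical offsets of compressed inodes: handle
 * post-EOF requests directly, otherwise initialize the map header lazily
 * and dispatch to the extent-record or lcluster-index mapper.
 */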
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	if (map->m_la >= inode->i_size) {	/* post-EOF unmapped extent */
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
	} else {
		err = z_erofs_fill_inode(inode, map);
		if (!err) {
			if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
			    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS))
				err = z_erofs_map_blocks_ext(inode, map, flags);
			else
				err = z_erofs_map_blocks_fo(inode, map, flags);
		}
		if (!err)
			err = z_erofs_map_sanity_check(inode, map);
		if (err)
			map->m_llen = 0;
	}
	trace_erofs_map_blocks_exit(inode, map, flags, err);
	return err;
}

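/*
 * iomap_begin callback used for reporting (e.g. FIEMAP): translate an
 * erofs_map_blocks result into an iomap extent, mapping fragments to
 * IOMAP_NULL_ADDR and unmapped ranges to holes.
 */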
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				loff_t length, unsigned int flags,
				struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe extents past
		 * EOF, but we have to handle them as below; otherwise,
		 * iomap itself will get into an endless loop past EOF.
		 *
		 * Calculate the effective offset by subtracting the extent
		 * start (map.m_la) from the requested offset, and add it
		 * to the length.  (NB: offset >= map.m_la always holds.)
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};