// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/unaligned.h>
#include <trace/events/erofs.h>

struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref;
};

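/*
 * Read one "full" (non-compact) lcluster index for logical cluster @lcn into
 * the map recorder.  Each on-disk index is a fixed-size
 * struct z_erofs_lcluster_index stored in an array at an aligned offset past
 * the on-disk inode (see Z_EROFS_FULL_INDEX_ALIGN), so the entry for @lcn can
 * be addressed directly.
 */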
static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
				      unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize) +
			lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise;

	di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, EROFS_KMAP);
	if (IS_ERR(di))
		return PTR_ERR(di);
	m->lcn = lcn;
	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);

	advise = le16_to_cpu(di->di_advise);
	m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
	} else {
		m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
	}
	return 0;
}

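/*
 * Decode one compacted lcluster field starting at bit offset @pos of the
 * bitstream @in: the low @lobits bits hold the "lo" value (clusterofs or a
 * delta, depending on the lcluster type) and the following 2 bits hold the
 * lcluster type itself.
 */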
static unsigned int decode_compactedbits(unsigned int lobits,
					  u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & ((1 << lobits) - 1);

	*type = (v >> lobits) & 3;
	return lo;
}

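/*
 * Walk forward from entry @i within one compacted pack and count how many
 * consecutive NONHEAD lclusters follow, i.e. the lookahead distance to the
 * next HEAD lcluster.  If the whole pack ends without meeting a HEAD
 * lcluster, the last entry's lo field (unless it is a CBLKCNT marker)
 * stores its own delta[1], so the remaining distance beyond the pack is
 * added on top.
 */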
static int get_compacted_la_distance(unsigned int lobits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lobits, in, encodebits * i, &type);

		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

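/*
 * Decode the compacted lcluster index entry at byte offset @pos of the index
 * area.  As decoded here, indexes are grouped into fixed-size packs of vcnt
 * lclusters (vcnt << amortizedshift bytes each); the bit-packed per-lcluster
 * fields fill the front of the pack and the trailing __le32 holds the first
 * pcluster block address of the pack, e.g. for the 2-byte amortized format
 * (vcnt == 16):
 *
 *	|<- 16 bit-packed entries, 14 bits each ->|<- __le32 blkaddr ->|
 *	|<--------------------- 32-byte pack ------------------------>|
 */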
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
	bool big_pcluster;
	u8 *in, type;
	int i;

	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos, EROFS_KMAP);
	if (IS_ERR(in))
		return PTR_ERR(in);

	/* note: this doesn't equal round_up(..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	bytes = pos & ((vcnt << amortizedshift) - 1);
	in -= bytes;
	i = bytes >> amortizedshift;

	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lobits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * from the previous lcluster indirectly.
		 */
		lo = decode_compactedbits(lobits, in,
					  encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

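/*
 * Locate the compacted index pack that holds logical cluster @lcn.  The
 * index area consists of up to three consecutive regions: a few initial
 * 4-byte amortized entries to reach 32-byte alignment, then the bulk of
 * 2-byte amortized entries (if COMPACTED_2B is advertised, in multiples of
 * 16 lclusters), and finally trailing 4-byte amortized entries.
 */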
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
					 unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	unsigned int totalidx = erofs_iblks(inode);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;

	if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to the 32-byte (compacted_2b) boundary */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
					   unsigned int lcn, bool lookahead)
{
	switch (EROFS_I(m->inode)->datalayout) {
	case EROFS_INODE_COMPRESSED_FULL:
		return z_erofs_load_full_lcluster(m, lcn);
	case EROFS_INODE_COMPRESSED_COMPACT:
		return z_erofs_load_compact_lcluster(m, lcn, lookahead);
	default:
		return -EINVAL;
	}
}

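/*
 * Walk backwards from the current lcluster by delta[0] hops until the HEAD
 * (or PLAIN) lcluster that starts this extent is found, recording its type
 * and the logical start address (m_la) of the extent.
 */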
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		err = z_erofs_load_lcluster_from_disk(m, lcn, false);
		if (err)
			return err;

		switch (m->type) {
		case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
			lookback_distance = m->delta[0];
			if (!lookback_distance)
				goto err_bogus;
			continue;
		case Z_EROFS_LCLUSTER_TYPE_PLAIN:
		case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		case Z_EROFS_LCLUSTER_TYPE_HEAD2:
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		default:
			erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
	}
err_bogus:
	erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
		  lookback_distance, m->lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

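/*
 * Determine the on-disk (compressed) length of the pcluster, m_plen.  It is
 * one lcluster unless the big pcluster feature applies to this head type, in
 * which case the block count comes from a CBLKCNT marker that is either
 * already recorded or stored in the following NONHEAD lcluster.
 */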
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1ULL << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_lcluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without valid compressedblks, it means at least it mustn't be
	 * CBLKCNT; otherwise an internal implementation error has been
	 * detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
		break;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bogus_cblkcnt;
		if (m->compressedblks)
			break;
		fallthrough;
	default:
		erofs_err(sb, "cannot find CBLKCNT @ lcn %lu of nid %llu", lcn,
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = erofs_pos(sb, m->compressedblks);
	return 0;
err_bogus_cblkcnt:
	erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

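/*
 * Extend the mapped extent to its full decompressed length (m_llen) by
 * skipping forward over NONHEAD lclusters (using delta[1] hops) until the
 * next HEAD lcluster or the end of the file is reached.
 */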
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	while (1) {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_lcluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* work around invalid d1 generated by pre-1.0 mkfs */
			if (unlikely(!m->delta[1])) {
				m->delta[1] = 1;
				DBG_BUGON(1);
			}
		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
			if (lcn != headlcn)
				break;	/* ends at the next HEAD lcluster */
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	}
	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

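/*
 * Core mapping routine: translate the logical offset in @map->m_la into a
 * physical extent.  It loads the lcluster covering the offset, looks back to
 * the extent's HEAD lcluster if necessary, then fills in m_la, m_llen, m_pa,
 * m_plen, the algorithm format and the mapping flags (including the
 * inline/ztailpacking and fragment special cases).
 */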
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff, afmt;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(inode->i_sb, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
			Z_EROFS_COMPRESSION_INTERLACED :
			Z_EROFS_COMPRESSION_SHIFTED;
	} else {
		afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
			erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
				  afmt, vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
	}
	map->m_algorithmformat = afmt;

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
	     map->m_llen >= i_blocksize(inode))) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	return err;
}

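/*
 * Parse the per-inode z_erofs_map_header on first use and cache the result in
 * the in-memory inode (algorithm types, logical cluster bits, advise flags,
 * inline/fragment metadata).  Initialization is serialized on
 * EROFS_I_BL_Z_BIT; the EROFS_I_Z_INITED_BIT flag plus memory barriers let
 * later lock-free readers observe fully initialized fields.
 */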
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
	if (IS_ERR(h)) {
		err = PTR_ERR(h);
		goto out_unlock;
	}

	/*
	 * If the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode.  The remaining bits keep
	 * z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto out_put_metabuf;
	}

	vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);

		if (!map.m_plen ||
		    erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);
		if (err < 0)
			goto out_put_metabuf;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
	erofs_put_metabuf(&buf);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

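/*
 * Entry point for mapping a compressed inode: handle post-EOF requests and
 * whole-file fragments directly, otherwise defer to z_erofs_do_map_blocks()
 * and sanity-check the resulting pcluster sizes.
 */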
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	if (map->m_la >= inode->i_size) {	/* post-EOF unmapped extent */
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
	} else {
		err = z_erofs_fill_inode_lazy(inode);
		if (!err) {
			if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
			    !vi->z_tailextent_headlcn) {
				map->m_la = 0;
				map->m_llen = inode->i_size;
				map->m_flags = EROFS_MAP_MAPPED |
					EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
			} else {
				err = z_erofs_do_map_blocks(inode, map, flags);
			}
		}
		if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
		    unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
			     map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
			err = -EOPNOTSUPP;
		if (err)
			map->m_llen = 0;
	}
	trace_erofs_map_blocks_exit(inode, map, flags, err);
	return err;
}

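/*
 * iomap_begin() callback used for reporting interfaces such as FIEMAP: it
 * maps one logical extent via z_erofs_map_blocks_iter() and converts the
 * result into an iomap description (mapped extents are reported with their
 * on-disk address, fragments and holes with IOMAP_NULL_ADDR).
 */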
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe post-EOF
		 * extents, but we need to handle them as below; otherwise,
		 * iomap itself will get into an endless loop past EOF.
		 *
		 * Calculate the effective offset by subtracting the extent
		 * start (map.m_la) from the requested offset, and add it to
		 * the length.  (NB: offset >= map.m_la always holds.)
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};