// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags);

int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    !erofs_sb_has_ztailpacking(sbi) &&
	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}
	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}

static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
		  !erofs_sb_has_ztailpacking(EROFS_SB(sb)) &&
		  vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, 8);
	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos),
				   EROFS_KMAP_ATOMIC);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out_unlock;
	}

	h = kaddr + erofs_blkoff(pos);
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	/* the metabuf is put right below, so read the inline size here */
	vi->z_idata_size = le16_to_cpu(h->h_idata_size);
unmap_done:
	erofs_put_metabuf(&buf);
	if (err)
		goto out_unlock;

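	/*
	 * For tail-packing inline pclusters, run an early FINDTAIL mapping
	 * pass: it records the inline data offset (z_idataoff) and the tail
	 * extent head lcluster, and lets the inline pcluster size be
	 * sanity-checked before the inode is marked as initialized.
	 */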
	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);

		if (!map.m_plen ||
		    erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_unlock;
	}
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
};

static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;

	m->kaddr = erofs_read_metabuf(&m->map->buf, sb, eblk,
				      EROFS_KMAP_ATOMIC);
	if (IS_ERR(m->kaddr))
		return PTR_ERR(m->kaddr);
	return 0;
}

static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] &
				~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}

static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* the last lcluster (vcnt - 1) is NONHEAD: its lo stores delta[1] unless it's a CBLKCNT */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

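/*
 * Decode one lcluster from a compacted index pack.  Each pack covers vcnt
 * lclusters: the trailing __le32 holds the base blkaddr of the pack, and
 * the remaining bits are divided evenly (encodebits apiece) into
 * per-lcluster fields carrying the 2-bit cluster type plus either
 * clusterofs (HEAD) or delta/CBLKCNT (NONHEAD).
 */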
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/* unlike round_up(..), this always advances even if pos is aligned */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo stores
		 * delta[1] rather than delta[0], so derive delta[0] from
		 * the previous lcluster instead.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

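/*
 * Find the pack containing @lcn.  On disk, the compacted index area is
 * laid out as a few compacted_4b_initial units (to reach 32-byte
 * alignment), then the compacted_2b packs if COMPACTED_2B is advised,
 * followed by compacted_4b packs for the remaining lclusters.
 */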
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* number of compacted_4b lclusters to reach 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}

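/*
 * Walk backwards from a NONHEAD lcluster, following the delta[0] distance
 * each step, until the HEAD lcluster that starts this extent is reached;
 * record its type (m->headtype) and the logical start address (m_la).
 */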
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		/* load extent head logical cluster if needed */
		err = z_erofs_load_cluster_from_disk(m, lcn, false);
		if (err)
			return err;

		switch (m->type) {
		case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
			if (!m->delta[0]) {
				erofs_err(m->inode->i_sb,
					  "invalid lookback distance 0 @ nid %llu",
					  vi->nid);
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			lookback_distance = m->delta[0];
			continue;
		case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		default:
			erofs_err(m->inode->i_sb,
				  "unknown type %u @ lcn %lu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
	}

	erofs_err(m->inode->i_sb, "bogus lookback distance @ nid %llu",
		  vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1ULL << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without valid compressedblks, it at least must not be a CBLKCNT
	 * lcluster; otherwise an internal implementation error has been
	 * detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		fallthrough;
	default:
		erofs_err(m->inode->i_sb,
			  "cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
	return 0;
err_bonus_cblkcnt:
	erofs_err(m->inode->i_sb,
		  "bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

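/*
 * Walk forwards with the lookahead distance (delta[1]) until the next HEAD
 * lcluster or EOF is reached, in order to derive the decompressed length
 * (m_llen) of the whole extent.
 */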
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

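/*
 * The core mapping routine: resolve the extent covering map->m_la (or the
 * tail extent for FINDTAIL requests) into the logical range (m_la, m_llen),
 * the physical range (m_pa, m_plen) and the algorithm format.
 */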
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL)
		vi->z_tailextent_headlcn = m.lcn;
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else {
		map->m_pa = blknr_to_addr(m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
		map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2)
		map->m_algorithmformat = vi->z_algorithmtype[1];
	else
		map->m_algorithmformat = vi->z_algorithmtype[0];

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
	     map->m_llen >= EROFS_BLKSIZ)) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}
unmap_out:
	erofs_unmap_metabuf(&m.map->buf);

	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	return err;
}

int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	int err = 0;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	err = z_erofs_do_map_blocks(inode, map, flags);
out:
	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}

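/*
 * iomap report path (used for extent reporting such as FIEMAP only); note
 * that iomap->addr below refers to the on-disk (compressed) location
 * rather than decompressed plain data.
 */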
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe extents past
		 * EOF, yet we need to handle it as below; otherwise, iomap
		 * itself will get into an endless loop on post-EOF access.
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + map.m_la - offset;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};