// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits to be compatible with the
 * anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}

/*
 * Map a flat-mode inode: its data blocks are contiguous starting from
 * raw_blkaddr, with an optional tail-packed part inlined right after the
 * inode metadata (xattrs) in the metadata block.
 */
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

/* Convert the logical extent at map->m_la into an on-disk physical extent */
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

/* Select the backing device of a mapped extent for multi-device images */
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_handle->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_handle->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

/* Translate a file range into an iomap extent (hole, inline or mapped) */
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
				erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

/* Release the metabuf pinned in erofs_iomap_begin() for inline extents */
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode locking
 * needs to be held at the moment.
355 */ 356 static int erofs_read_folio(struct file *file, struct folio *folio) 357 { 358 return iomap_read_folio(folio, &erofs_iomap_ops); 359 } 360 361 static void erofs_readahead(struct readahead_control *rac) 362 { 363 return iomap_readahead(rac, &erofs_iomap_ops); 364 } 365 366 static sector_t erofs_bmap(struct address_space *mapping, sector_t block) 367 { 368 return iomap_bmap(mapping, block, &erofs_iomap_ops); 369 } 370 371 static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 372 { 373 struct inode *inode = file_inode(iocb->ki_filp); 374 375 /* no need taking (shared) inode lock since it's a ro filesystem */ 376 if (!iov_iter_count(to)) 377 return 0; 378 379 #ifdef CONFIG_FS_DAX 380 if (IS_DAX(inode)) 381 return dax_iomap_rw(iocb, to, &erofs_iomap_ops); 382 #endif 383 if (iocb->ki_flags & IOCB_DIRECT) { 384 struct block_device *bdev = inode->i_sb->s_bdev; 385 unsigned int blksize_mask; 386 387 if (bdev) 388 blksize_mask = bdev_logical_block_size(bdev) - 1; 389 else 390 blksize_mask = i_blocksize(inode) - 1; 391 392 if ((iocb->ki_pos | iov_iter_count(to) | 393 iov_iter_alignment(to)) & blksize_mask) 394 return -EINVAL; 395 396 return iomap_dio_rw(iocb, to, &erofs_iomap_ops, 397 NULL, 0, NULL, 0); 398 } 399 return filemap_read(iocb, to, 0); 400 } 401 402 /* for uncompressed (aligned) files and raw access for other files */ 403 const struct address_space_operations erofs_raw_access_aops = { 404 .read_folio = erofs_read_folio, 405 .readahead = erofs_readahead, 406 .bmap = erofs_bmap, 407 .direct_IO = noop_direct_IO, 408 .release_folio = iomap_release_folio, 409 .invalidate_folio = iomap_invalidate_folio, 410 }; 411 412 #ifdef CONFIG_FS_DAX 413 static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf, 414 unsigned int order) 415 { 416 return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops); 417 } 418 419 static vm_fault_t erofs_dax_fault(struct vm_fault *vmf) 420 { 421 return erofs_dax_huge_fault(vmf, 0); 422 } 423 424 static const struct vm_operations_struct erofs_dax_vm_ops = { 425 .fault = erofs_dax_fault, 426 .huge_fault = erofs_dax_huge_fault, 427 }; 428 429 static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma) 430 { 431 if (!IS_DAX(file_inode(file))) 432 return generic_file_readonly_mmap(file, vma); 433 434 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 435 return -EINVAL; 436 437 vma->vm_ops = &erofs_dax_vm_ops; 438 vm_flags_set(vma, VM_HUGEPAGE); 439 return 0; 440 } 441 #else 442 #define erofs_file_mmap generic_file_readonly_mmap 443 #endif 444 445 const struct file_operations erofs_file_fops = { 446 .llseek = generic_file_llseek, 447 .read_iter = erofs_file_read_iter, 448 .mmap = erofs_file_mmap, 449 .splice_read = filemap_splice_read, 450 }; 451