/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

/*
 * Return true if @inode is allowed to keep its data inline inside the
 * inode block: the INLINE_DATA mount option must be set, the inode must
 * not be an atomic file, must be a regular file or symlink, its size
 * must fit in the inline area, and encrypted regular files are excluded.
 */
bool f2fs_may_inline_data(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	/* atomic-write files go through the normal data block path */
	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	/* encrypted payload cannot live in the clear-text inode block */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

	return true;
}

/*
 * Return true if @inode may store its directory entries inline:
 * requires the INLINE_DENTRY mount option and a directory inode.
 */
bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

/*
 * Fill data page @page (must be index 0) from the inline area of the
 * inode page @ipage.  No-op if @page is already uptodate.  The tail of
 * the page beyond MAX_INLINE_DATA is zeroed.
 */
void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	/* inline data only ever backs page index 0 */
	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

/*
 * Zero the inline data area of @ipage from byte offset @from to the end.
 * Returns false (nothing done) when @from is at or beyond the inline
 * area, true after zeroing.  Waits for node-page writeback before the
 * in-place modification.
 */
bool truncate_inline_inode(struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return false;

	addr = inline_data_addr(ipage);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(addr + from, 0, MAX_INLINE_DATA - from);

	return true;
}

/*
 * Read path for an inline-data inode: populate locked @page from the
 * inline area and unlock it.  Returns -EAGAIN if the inline flag was
 * cleared in the meantime (caller presumably retries the regular block
 * read path), 0 on success, or the get_node_page() error.  Pages with
 * a non-zero index are simply zero-filled.
 */
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

/*
 * Convert an inline-data inode to use a regular data block: reserve
 * block 0, copy the inline payload into @page, write the page out
 * synchronously, then clear the inline area and inline flag.  Consumes
 * @dn (f2fs_put_dnode) on success and on the no-data fast path; returns
 * a negative errno if block reservation fails (dn still held by caller's
 * reserve path semantics — see f2fs_reserve_block()).
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	/* conversion only makes sense for page index 0 */
	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	/* no inline payload: just drop the inline state */
	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

/*
 * Public entry point for inline-data conversion: grab data page 0 and
 * the inode page under f2fs_lock_op(), then delegate to
 * f2fs_convert_inline_page() if the inode still has inline data.
 * Returns 0 on success (or if nothing to convert), negative errno
 * otherwise.
 */
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	/* may have been converted by a racing caller already */
	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

/*
 * Write-back path for inline data: copy the contents of data page 0
 * (@page) into the inline area of the inode page.  Returns -EAGAIN if
 * the inode no longer has inline data, so the caller can fall back to
 * the regular write path.
 */
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	/* only page index 0 can carry inline data */
	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

/*
 * Roll-forward recovery of inline data from the recovered node page
 * @npage.  Returns true when inline data was recovered into the inode,
 * false when the caller must recover regular data blocks instead.
 */
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		/* [o, x] case: drop stale inline data before block recovery */
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_inode(ipage, 0);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		/* [x, o] case: drop data blocks, then recover inline data */
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}

/*
 * Look up @fname in the inline dentry area of directory @dir.  On a hit,
 * *@res_page is set to the (unlocked, still referenced) inode page and
 * the matching entry is returned; the caller is responsible for putting
 * the page.  Returns NULL on miss or if the inode page can't be read.
 */
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
			struct f2fs_filename *fname, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	f2fs_hash_t namehash;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	namehash = f2fs_dentry_hash(&name);

	inline_dentry = inline_data_addr(ipage);

	/* NOTE(review): last arg 2 presumably selects the inline dentry
	 * layout — confirm against make_dentry_ptr()'s definition. */
	make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
	de = find_target_dentry(fname, namehash, NULL, &d);
	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here for figuring out where the bug has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

/*
 * Return the ".." entry of inline directory @dir (dentry slot 1).
 * *@p is set to the (unlocked, referenced) inode page; the caller must
 * put it.  Returns NULL if the inode page can't be read.
 */
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];	/* slot 1 holds ".." */
	*p = ipage;
	unlock_page(ipage);
	return de;
}

/*
 * Initialize the inline dentry area of a freshly created directory
 * @inode with the "." and ".." entries, and bump i_size to cover the
 * whole inline area.  @ipage is the (caller-held) inode page.
 */
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
	/*
	 * we do not need to zero out remainder part of dentry and filename
	 * field, since we have used bitmap for marking the usage status of
	 * them, besides, we can also ignore copying/zeroing reserved space
	 * of dentry block, because they haven't been used so far.
	 */
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	/* the directory now spans a full block */
	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Add a directory entry @name -> @ino into the inline dentry area of
 * @dir.  If there is no room, the directory is converted to a regular
 * dentry block and -EAGAIN is returned so the caller retries via the
 * normal path.  @inode, when non-NULL, is the new inode whose metadata
 * is initialized under its i_sem.
 */
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		/* no room: convert, then ask the caller to retry */
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE);

	name_hash = f2fs_dentry_hash(name);
	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

/*
 * Delete @dentry from the inline dentry area: clear its bitmap slots on
 * the inode page @page, update timestamps, and drop the link count of
 * @inode when given.  Consumes the caller's reference on @page.
 */
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	/* a long name occupies several consecutive slots */
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

/*
 * Return true if inline directory @dir contains nothing besides "."
 * and ".." (bitmap scan starts at slot 2 to skip them).
 */
bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

/*
 * readdir() over an inline directory: emit entries via @ctx.  ctx->pos
 * is set to NR_INLINE_DENTRY once the whole inline area was consumed,
 * so a repeated call returns immediately.
 */
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct f2fs_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}