/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_imap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_mac.h"
#include "xfs_acl.h"


kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
kmem_zone_t *xfs_chashlist_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);


#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	int			disk,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_rec_t		*ep;
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_t		rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned((__uint64_t*)&ep->l0);
		rec.l1 = get_unaligned((__uint64_t*)&ep->l1);
		if (disk)
			xfs_bmbt_disk_get_all(&rec, &irec);
		else
			xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, disk, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
STATIC int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset)
{
	int		di_ok;
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	xfs_dinode_t	*dip;

	/*
	 * Call the space management code to find the location of the
	 * inode on disk.
	 */
	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
	if (error != 0) {
		cmn_err(CE_WARN,
	"xfs_inotobp: xfs_imap()  returned an "
	"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}

	/*
	 * If the inode number maps to a block outside the bounds of the
	 * file system then return NULL rather than calling read_buf
	 * and panicking when we get an error from the driver.
	 */
	if ((imap.im_blkno + imap.im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		cmn_err(CE_WARN,
	"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
	"of the file system %s.  Returning EINVAL.",
			(unsigned long long)imap.im_blkno,
			imap.im_len, mp->m_fsname);
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);

	if (error) {
		cmn_err(CE_WARN,
	"xfs_inotobp: xfs_trans_read_buf() returned an "
	"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
	di_ok =
		INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
		XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
			XFS_RANDOM_ITOBP_INOTOBP))) {
		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
		xfs_trans_brelse(tp, bp);
		cmn_err(CE_WARN,
	"xfs_inotobp: XFS_TEST_ERROR()  returned an "
	"error on %s.  Returning EFSCORRUPTED.", mp->m_fsname);
		return XFS_ERROR(EFSCORRUPTED);
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}


/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dipp parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * If the inode is new and has not yet been initialized, use xfs_imap()
 * to determine the size and location of the buffer to read from disk.
 * If the inode has already been mapped to its buffer and read in once,
 * then use the mapping information stored in the inode rather than
 * calling xfs_imap().  This allows us to avoid the overhead of looking
 * at the inode btree for small block file systems (see xfs_dilocate()).
 * We can tell whether the inode has been mapped in before by comparing
 * its disk block address to 0.  Only uninitialized inodes will have
 * 0 for the disk block address.
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	int		i;
	int		ni;

	if (ip->i_blkno == (xfs_daddr_t)0) {
		/*
		 * Call the space management code to find the location of the
		 * inode on disk.
		 */
		imap.im_blkno = bno;
		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
					XFS_IMAP_LOOKUP | imap_flags)))
			return error;

		/*
		 * If the inode number maps to a block outside the bounds
		 * of the file system then return NULL rather than calling
		 * read_buf and panicking when we get an error from the
		 * driver.
		 */
		if ((imap.im_blkno + imap.im_len) >
		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
					"(imap.im_blkno (0x%llx) "
					"+ imap.im_len (0x%llx)) > "
					" XFS_FSB_TO_BB(mp, "
					"mp->m_sb.sb_dblocks) (0x%llx)",
					(unsigned long long) imap.im_blkno,
					(unsigned long long) imap.im_len,
					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
#endif /* DEBUG */
			return XFS_ERROR(EINVAL);
		}

		/*
		 * Fill in the fields in the inode that will be used to
		 * map the inode to its buffer from now on.
		 */
		ip->i_blkno = imap.im_blkno;
		ip->i_len = imap.im_len;
		ip->i_boffset = imap.im_boffset;
	} else {
		/*
		 * We've already mapped the inode once, so just use the
		 * mapping that we saved the first time.
		 */
		imap.im_blkno = ip->i_blkno;
		imap.im_len = ip->i_len;
		imap.im_boffset = ip->i_boffset;
	}
	ASSERT(bno == 0 || bno == imap.im_blkno);

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
	if (error) {
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
				"xfs_trans_read_buf() returned error %d, "
				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
				error, (unsigned long long) imap.im_blkno,
				(unsigned long long) imap.im_len);
#endif /* DEBUG */
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 * No validation is done here in userspace (xfs_repair).
	 */
#if !defined(__KERNEL__)
	ni = 0;
#elif defined(DEBUG)
	ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 :
		(BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog);
#else	/* usual case */
	ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
				 XFS_RANDOM_ITOBP_INOTOBP))) {
#ifdef DEBUG
			if (!(imap_flags & XFS_IMAP_BULKSTAT))
				cmn_err(CE_ALERT,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
					XFS_BUFTARG_NAME(mp->m_ddev_targp),
					(unsigned long long)imap.im_blkno, i,
					INT_GET(dip->di_core.di_magic, ARCH_CONVERT));
#endif
			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
					     mp, dip);
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	return 0;
}
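
/*
 * Illustrative only (not part of the original source): a minimal
 * sketch of the usual xfs_itobp() calling pattern.  The buffer comes
 * back from xfs_trans_read_buf(), so it must be released with
 * xfs_trans_brelse(), which degrades to a plain brelse() when tp is
 * NULL:
 *
 *	xfs_dinode_t	*dip;
 *	xfs_buf_t	*bp;
 *	int		error;
 *
 *	error = xfs_itobp(mp, tp, ip, &dip, &bp, (xfs_daddr_t)0, 0);
 *	if (error)
 *		return error;
 *	... read or modify the on-disk inode through dip ...
 *	xfs_trans_brelse(tp, bp);
 */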

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means setting if_rdev to the proper value.  For files,
 * directories, and symlinks this means to bring in the in-line data
 * or extent pointers.  For a file in B-tree format, only the root is
 * immediately brought in-core.  The rest will be in-lined in if_extents
 * when it is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(
	    INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
		INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
	    INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
			    + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
			(unsigned long long)
			INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			(int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);
		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}
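
/*
 * A worked example of the fork geometry checked above (numbers are
 * illustrative, not mandated by the code): di_forkoff is measured in
 * 8-byte units from the start of the inode literal area, so a
 * di_forkoff of 15 places the attribute fork 15 * 8 = 120 bytes in;
 * bytes before that offset belong to the data fork, bytes after it
 * to the attribute fork.  The if_ext_max values set above are simply
 * each fork's byte count divided by sizeof(xfs_bmbt_rec_t), the
 * 16-byte packed on-disk extent record.
 */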

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in if_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu "
			"(bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}
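
/*
 * Worked example for the sizing logic above (illustrative numbers):
 * with size == 37 and an inline buffer smaller than 37 bytes, the
 * allocation is rounded up to roundup(37, 4) == 40, giving
 * if_bytes == 37 and if_real_bytes == 40.  A fork that does fit in
 * if_inline_data leaves if_real_bytes at 0, signalling that no
 * separate buffer was allocated and nothing extra needs freeing
 * later.
 */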

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*ep, *dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
								ARCH_CONVERT);
			ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
								ARCH_CONVERT);
		}
		xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex,
			whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
		ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

/*
 * xfs_xlate_dinode_core - translate an xfs_inode_core_t between ondisk
 * and native format
 *
 * buf  = on-disk representation
 * dip  = native representation
 * dir  = direction - +ve -> disk to native
 *                  - -ve -> native to disk
 */
void
xfs_xlate_dinode_core(
	xfs_caddr_t		buf,
	xfs_dinode_core_t	*dip,
	int			dir)
{
	xfs_dinode_core_t	*buf_core = (xfs_dinode_core_t *)buf;
	xfs_dinode_core_t	*mem_core = (xfs_dinode_core_t *)dip;
	xfs_arch_t		arch = ARCH_CONVERT;

	ASSERT(dir);

	INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch);
	INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch);
	INT_XLATE(buf_core->di_version, mem_core->di_version, dir, arch);
	INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch);
	INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch);
	INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch);
	INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch);
	INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch);
	INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);

	if (dir > 0) {
		memcpy(mem_core->di_pad, buf_core->di_pad,
			sizeof(buf_core->di_pad));
	} else {
		memcpy(buf_core->di_pad, mem_core->di_pad,
			sizeof(buf_core->di_pad));
	}

	INT_XLATE(buf_core->di_flushiter, mem_core->di_flushiter, dir, arch);

	INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec,
			dir, arch);
	INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec,
			dir, arch);
	INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec,
			dir, arch);
	INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec,
			dir, arch);
	INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec,
			dir, arch);
	INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec,
			dir, arch);
	INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch);
	INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch);
	INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch);
	INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch);
	INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch);
	INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch);
	INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch);
	INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch);
	INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch);
	INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch);
	INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch);
}
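
/*
 * Sketch of the direction convention documented above (illustrative;
 * dbuf is assumed to point at the di_core area of an on-disk inode):
 *
 *	xfs_xlate_dinode_core(dbuf, &ip->i_d, 1);	disk -> native
 *	xfs_xlate_dinode_core(dbuf, &ip->i_d, -1);	native -> disk
 *
 * Only the sign of dir matters, and dir == 0 trips the ASSERT at the
 * top of the routine.
 */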

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_dinode_core_t	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
		(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_core_t	*dic)
{
	return _xfs_dic2xflags(INT_GET(dic->di_flags, ARCH_CONVERT)) |
		(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Given a mount structure and an inode number, return a pointer
 * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
 * already has them (it will not if the inode has no links).
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	int		error;

	ASSERT(xfs_inode_zone != NULL);

	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
	ip->i_ino = ino;
	ip->i_mount = mp;

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 * If the inode number refers to a block outside the file system
	 * then xfs_itobp() will return an error.  In this case we should
	 * return that error as well.  Set i_blkno to 0 so that xfs_itobp()
	 * will know that this is a new incore inode.
	 */
	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0);
	if (error) {
		kmem_zone_free(xfs_inode_zone, ip);
		return error;
	}

	/*
	 * Initialize inode's trace buffers.
	 * Do this before xfs_iformat in case it adds entries.
	 */
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
#endif

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
		kmem_zone_free(xfs_inode_zone, ip);
		xfs_trans_brelse(tp, bp);
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
				"dip->di_core.di_magic (0x%x) != "
				"XFS_DINODE_MAGIC (0x%x)",
				INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
				XFS_DINODE_MAGIC);
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_core.di_mode) {
		xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
		     &(ip->i_d), 1);
		error = xfs_iformat(ip, dip);
		if (error)  {
			kmem_zone_free(xfs_inode_zone, ip);
			xfs_trans_brelse(tp, bp);
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
					"xfs_iformat() returned error %d",
					error);
#endif /* DEBUG */
			return error;
		}
	} else {
		ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT);
		ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT);
		ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT);
		ip->i_d.di_flushiter = INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	INIT_LIST_HEAD(&ip->i_reclaim);

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		ip->i_d.di_projid = 0;
	}

	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	XFS_BUF_SET_REF(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
	xfs_trans_brelse(tp, bp);
	*ipp = ip;
	return 0;
}
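
/*
 * A minimal calling sketch for xfs_iread() (illustrative; in practice
 * it is reached through the xfs_iget() path).  tp may be NULL, and
 * bno is only a disk-block hint, with 0 meaning "none, ask xfs_imap()":
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iread(mp, NULL, ino, &ip, (xfs_daddr_t)0);
 *	if (error)
 *		return error;
 *	... ip is a newly allocated, unhashed in-core inode ...
 */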

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;
	size_t		size;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	size = nextents * sizeof(xfs_bmbt_rec_t);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_lastex = NULLEXTNUM;
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip));
	return 0;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation.  Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	cred_t		*cr,
	xfs_prid_t	prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp;
	uint		flags;
	int		error;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip->i_ino, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error != 0) {
		return error;
	}
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_trans_iget(tp->t_mountp, tp, ino,
				IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error != 0) {
		return error;
	}
	ASSERT(ip != NULL);

	vp = XFS_ITOV(ip);
	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid(cr);
	ip->i_d.di_gid = current_fsgid(cr);
	ip->i_d.di_projid = prid;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_version = XFS_DINODE_VERSION_2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
		xfs_bump_ino_vers2(tp, ip);

	if (XFS_INHERIT_GID(pip, vp->v_vfsp)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);
	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
					di_flags |= XFS_DIFLAG_REALTIME;
					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	bhv_vfs_init_vnode(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);

	*ipp = ip;
	return 0;
}
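
/*
 * A condensed sketch of the two-pass protocol described in the
 * comment above xfs_ialloc() (illustrative; reservation sizes and
 * error handling are omitted -- see the real retry loop in
 * xfs_dir_ialloc()):
 *
 *	error = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid,
 *			   okalloc, &ialloc_context, &call_again, &ip);
 *	if (!error && call_again) {
 *		xfs_trans_bhold(tp, ialloc_context);
 *		... commit tp, reserve a fresh transaction, rejoin the
 *		    held AGI freelist buffer ...
 *		error = xfs_ialloc(tp, dp, mode, nlink, rdev, credp,
 *				   prid, okalloc, &ialloc_context,
 *				   &call_again, &ip);
 *	}
 */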

/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */

/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_d.di_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}
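
/*
 * Worked example (illustrative numbers): with 4KB blocks and
 * di_size == 10000, size_last_block is XFS_B_TO_FSB(mp, 10000) == 3,
 * so last_byte starts out as 3 * 4096 == 12288 and then grows by one
 * write-I/O window (1 << m_writeio_log) to cover anything buffered
 * beyond the EOF.  If either step overflows to a negative
 * xfs_fsize_t, the result is clamped to XFS_MAXIOFFSET(mp).
 */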

#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
	int		tag,
	xfs_inode_t	*ip,
	int		flag,
	xfs_fsize_t	new_size,
	xfs_off_t	toss_start,
	xfs_off_t	toss_finish)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void*)((long)tag),
		     (void*)ip,
		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
		     (void*)((long)flag),
		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(new_size & 0xffffffff),
		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_start & 0xffffffff),
		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_finish & 0xffffffff),
		     (void*)(unsigned long)current_cpu(),
		     (void*)(unsigned long)current_pid(),
		     (void*)NULL,
		     (void*)NULL,
		     (void*)NULL);
}
#else
#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif

/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate.  This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock.  Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
void
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	bhv_vnode_t	*vp;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;
	vp = XFS_ITOV(ip);

	vn_iowait(vp);  /* wait for the completion of any pending DIOs */

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size.  We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_pages or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * out there.
		 */
		return;
	}
	last_byte = xfs_file_last_byte(ip);
	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
			 last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			bhv_vop_toss_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
		} else {
			bhv_vop_flushinval_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(vp) == 0);
	}
#endif
}

/*
 * Shrink the file to the given new_size.  The new
 * size must be smaller than the current size.
 * This will free up the underlying blocks
 * in the removed range after a call to xfs_itruncate_start()
 * or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made
 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
 * This routine may commit the given transaction and
 * start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.
 * Some transaction will be returned to the caller to be
 * committed.  The incoming transaction must already include
 * the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On
 * return the inode will be "held" within the returned transaction.
 * This routine does NOT require any disk space to be reserved
 * for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
 * and it indicates the fork which is to be truncated.  For the
 * attribute fork we only support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first
 * transaction we perform might have to be synchronous.  For the attr fork,
 * it needs to be so if the unlink of the inode is not yet known to be
 * permanent in the log.  This keeps us from freeing and reusing the
 * blocks of the attribute fork before the unlink of the inode becomes
 * permanent.
 *
 * For the data fork, we normally have to run synchronously if we're
 * being called out of the inactive path or we're being called
 * out of the create path where we're truncating an existing file.
 * Either way, the truncate needs to be sync so blocks don't reappear
 * in the file with altered data in case of a crash.  wsync filesystems
 * can run the first case async because anything that shrinks the inode
 * has to run sync so by the time we're called here from inactive, the
 * inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're
 * in a wsync filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.
 * It gets too hard for us to guess here which path we're being called
 * out of just based on inode state.
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len=0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);


	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read and
	 * follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			ip->i_d.di_size = new_size;
			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		XFS_BMAP_INIT(&free_list, &first_block);
		error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
				    first_unmap_block, unmap_len,
				    XFS_BMAPI_AFLAG(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    NULL, &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(tp, &free_list, first_block,
					&committed);
		ntp = *tp;
		if (error) {
			/*
			 * If the bmap finish call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 *
			 * Aborting from this point might lose some
			 * blocks in the file system, but oh well.
			 */
			xfs_bmap_cancel(&free_list);
			if (committed) {
				/*
				 * If the passed in transaction committed
				 * in xfs_bmap_finish(), then we want to
				 * add the inode to this one before returning.
				 * This keeps things simple for the higher
				 * level code, because it always knows that
				 * the inode is locked and held in the
				 * transaction that returns to it whether
				 * errors occur or not.  We don't mark the
				 * inode dirty so that this transaction can
				 * be easily aborted if possible.
				 */
				xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
				xfs_trans_ihold(ntp, ip);
			}
			return error;
		}

		if (committed) {
			/*
			 * The first xact was committed,
			 * so add the inode to the new one.
			 * Mark it dirty so it will be logged
			 * and moved forward in the log as
			 * part of every commit.
			 */
			xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
			xfs_trans_ihold(ntp, ip);
			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
		}
		ntp = xfs_trans_dup(ntp);
		(void) xfs_trans_commit(*tp, 0, NULL);
		*tp = ntp;
		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_ITRUNCATE_LOG_COUNT);
		/*
		 * Add the inode being truncated to the next chained
		 * transaction.
		 */
		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
		xfs_trans_ihold(ntp, ip);
		if (error)
			return (error);
	}
	/*
	 * Only update the size in the case of the data fork, but
	 * always re-log the inode so that our permanent transaction
	 * can keep on rolling it forward in the log.
	 */
	if (fork == XFS_DATA_FORK) {
		xfs_isize_check(mp, ip, new_size);
		ip->i_d.di_size = new_size;
	}
	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_delayed_blks == 0));
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_d.di_nextents == 0));
	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
	return 0;
}
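
/*
 * The usual shrink sequence, per the locking rules spelled out above
 * the two routines (a sketch only -- reservation sizes and error
 * paths are omitted):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
 *	... allocate tp, reserve XFS_ITRUNCATE_LOG_RES(mp) ...
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 *	xfs_trans_ihold(tp, ip);
 *	error = xfs_itruncate_finish(&tp, ip, new_size, XFS_DATA_FORK, sync);
 *	... commit or cancel *tp, then unlock ip ...
 */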
1710 */ 1711 xfs_bmap_cancel(&free_list); 1712 if (committed) { 1713 /* 1714 * If the passed in transaction committed 1715 * in xfs_bmap_finish(), then we want to 1716 * add the inode to this one before returning. 1717 * This keeps things simple for the higher 1718 * level code, because it always knows that 1719 * the inode is locked and held in the 1720 * transaction that returns to it whether 1721 * errors occur or not. We don't mark the 1722 * inode dirty so that this transaction can 1723 * be easily aborted if possible. 1724 */ 1725 xfs_trans_ijoin(ntp, ip, 1726 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1727 xfs_trans_ihold(ntp, ip); 1728 } 1729 return error; 1730 } 1731 1732 if (committed) { 1733 /* 1734 * The first xact was committed, 1735 * so add the inode to the new one. 1736 * Mark it dirty so it will be logged 1737 * and moved forward in the log as 1738 * part of every commit. 1739 */ 1740 xfs_trans_ijoin(ntp, ip, 1741 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1742 xfs_trans_ihold(ntp, ip); 1743 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1744 } 1745 ntp = xfs_trans_dup(ntp); 1746 (void) xfs_trans_commit(*tp, 0, NULL); 1747 *tp = ntp; 1748 error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 1749 XFS_TRANS_PERM_LOG_RES, 1750 XFS_ITRUNCATE_LOG_COUNT); 1751 /* 1752 * Add the inode being truncated to the next chained 1753 * transaction. 1754 */ 1755 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1756 xfs_trans_ihold(ntp, ip); 1757 if (error) 1758 return (error); 1759 } 1760 /* 1761 * Only update the size in the case of the data fork, but 1762 * always re-log the inode so that our permanent transaction 1763 * can keep on rolling it forward in the log. 1764 */ 1765 if (fork == XFS_DATA_FORK) { 1766 xfs_isize_check(mp, ip, new_size); 1767 ip->i_d.di_size = new_size; 1768 } 1769 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1770 ASSERT((new_size != 0) || 1771 (fork == XFS_ATTR_FORK) || 1772 (ip->i_delayed_blks == 0)); 1773 ASSERT((new_size != 0) || 1774 (fork == XFS_ATTR_FORK) || 1775 (ip->i_d.di_nextents == 0)); 1776 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); 1777 return 0; 1778 } 1779 1780 1781 /* 1782 * xfs_igrow_start 1783 * 1784 * Do the first part of growing a file: zero any data in the last 1785 * block that is beyond the old EOF. We need to do this before 1786 * the inode is joined to the transaction to modify the i_size. 1787 * That way we can drop the inode lock and call into the buffer 1788 * cache to get the buffer mapping the EOF. 1789 */ 1790 int 1791 xfs_igrow_start( 1792 xfs_inode_t *ip, 1793 xfs_fsize_t new_size, 1794 cred_t *credp) 1795 { 1796 int error; 1797 1798 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1799 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1800 ASSERT(new_size > ip->i_d.di_size); 1801 1802 /* 1803 * Zero any pages that may have been created by 1804 * xfs_write_file() beyond the end of the file 1805 * and any blocks between the old and new file sizes. 1806 */ 1807 error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, 1808 ip->i_d.di_size, new_size); 1809 return error; 1810 } 1811 1812 /* 1813 * xfs_igrow_finish 1814 * 1815 * This routine is called to extend the size of a file. 1816 * The inode must have both the iolock and the ilock locked 1817 * for update and it must be a part of the current transaction. 1818 * The xfs_igrow_start() function must have been called previously. 1819 * If the change_flag is not zero, the inode change timestamp will 1820 * be updated. 
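 *
 * A rough usage sketch (illustrative only; real callers also handle
 * transaction allocation, reservation, locking drops, and error
 * unwinding, which are omitted here):
 *
 *	error = xfs_igrow_start(ip, new_size, credp);
 *	... allocate and reserve a transaction, join the inode ...
 *	xfs_igrow_finish(tp, ip, new_size, 1);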
1821 */ 1822 void 1823 xfs_igrow_finish( 1824 xfs_trans_t *tp, 1825 xfs_inode_t *ip, 1826 xfs_fsize_t new_size, 1827 int change_flag) 1828 { 1829 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1830 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1831 ASSERT(ip->i_transp == tp); 1832 ASSERT(new_size > ip->i_d.di_size); 1833 1834 /* 1835 * Update the file size. Update the inode change timestamp 1836 * if change_flag set. 1837 */ 1838 ip->i_d.di_size = new_size; 1839 if (change_flag) 1840 xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 1841 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1842 1843 } 1844 1845 1846 /* 1847 * This is called when the inode's link count goes to 0. 1848 * We place the on-disk inode on a list in the AGI. It 1849 * will be pulled from this list when the inode is freed. 1850 */ 1851 int 1852 xfs_iunlink( 1853 xfs_trans_t *tp, 1854 xfs_inode_t *ip) 1855 { 1856 xfs_mount_t *mp; 1857 xfs_agi_t *agi; 1858 xfs_dinode_t *dip; 1859 xfs_buf_t *agibp; 1860 xfs_buf_t *ibp; 1861 xfs_agnumber_t agno; 1862 xfs_daddr_t agdaddr; 1863 xfs_agino_t agino; 1864 short bucket_index; 1865 int offset; 1866 int error; 1867 int agi_ok; 1868 1869 ASSERT(ip->i_d.di_nlink == 0); 1870 ASSERT(ip->i_d.di_mode != 0); 1871 ASSERT(ip->i_transp == tp); 1872 1873 mp = tp->t_mountp; 1874 1875 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1876 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); 1877 1878 /* 1879 * Get the agi buffer first. It ensures lock ordering 1880 * on the list. 1881 */ 1882 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, 1883 XFS_FSS_TO_BB(mp, 1), 0, &agibp); 1884 if (error) { 1885 return error; 1886 } 1887 /* 1888 * Validate the magic number of the agi block. 1889 */ 1890 agi = XFS_BUF_TO_AGI(agibp); 1891 agi_ok = 1892 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && 1893 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); 1894 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, 1895 XFS_RANDOM_IUNLINK))) { 1896 XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); 1897 xfs_trans_brelse(tp, agibp); 1898 return XFS_ERROR(EFSCORRUPTED); 1899 } 1900 /* 1901 * Get the index into the agi hash table for the 1902 * list this inode will go on. 1903 */ 1904 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1905 ASSERT(agino != 0); 1906 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1907 ASSERT(agi->agi_unlinked[bucket_index]); 1908 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); 1909 1910 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { 1911 /* 1912 * There is already another inode in the bucket we need 1913 * to add ourselves to. Add us at the front of the list. 1914 * Here we put the head pointer into our next pointer, 1915 * and then we fall through to point the head at us. 1916 */ 1917 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 1918 if (error) { 1919 return error; 1920 } 1921 ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO); 1922 ASSERT(dip->di_next_unlinked); 1923 /* both on-disk, don't endian flip twice */ 1924 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 1925 offset = ip->i_boffset + 1926 offsetof(xfs_dinode_t, di_next_unlinked); 1927 xfs_trans_inode_buf(tp, ibp); 1928 xfs_trans_log_buf(tp, ibp, offset, 1929 (offset + sizeof(xfs_agino_t) - 1)); 1930 xfs_inobp_check(mp, ibp); 1931 } 1932 1933 /* 1934 * Point the bucket head pointer at the inode being inserted. 
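 *
 * Conceptually this is a push onto the front of a singly linked
 * list whose links live on disk. A sketch (not the actual code,
 * which must also log the modified buffer regions):
 *
 *	dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
 *	agi->agi_unlinked[bucket_index] = agino;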
1935 */ 1936 ASSERT(agino != 0); 1937 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 1938 offset = offsetof(xfs_agi_t, agi_unlinked) + 1939 (sizeof(xfs_agino_t) * bucket_index); 1940 xfs_trans_log_buf(tp, agibp, offset, 1941 (offset + sizeof(xfs_agino_t) - 1)); 1942 return 0; 1943 } 1944 1945 /* 1946 * Pull the on-disk inode from the AGI unlinked list. 1947 */ 1948 STATIC int 1949 xfs_iunlink_remove( 1950 xfs_trans_t *tp, 1951 xfs_inode_t *ip) 1952 { 1953 xfs_ino_t next_ino; 1954 xfs_mount_t *mp; 1955 xfs_agi_t *agi; 1956 xfs_dinode_t *dip; 1957 xfs_buf_t *agibp; 1958 xfs_buf_t *ibp; 1959 xfs_agnumber_t agno; 1960 xfs_daddr_t agdaddr; 1961 xfs_agino_t agino; 1962 xfs_agino_t next_agino; 1963 xfs_buf_t *last_ibp; 1964 xfs_dinode_t *last_dip; 1965 short bucket_index; 1966 int offset, last_offset; 1967 int error; 1968 int agi_ok; 1969 1970 /* 1971 * First pull the on-disk inode from the AGI unlinked list. 1972 */ 1973 mp = tp->t_mountp; 1974 1975 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1976 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); 1977 1978 /* 1979 * Get the agi buffer first. It ensures lock ordering 1980 * on the list. 1981 */ 1982 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, 1983 XFS_FSS_TO_BB(mp, 1), 0, &agibp); 1984 if (error) { 1985 cmn_err(CE_WARN, 1986 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.", 1987 error, mp->m_fsname); 1988 return error; 1989 } 1990 /* 1991 * Validate the magic number of the agi block. 1992 */ 1993 agi = XFS_BUF_TO_AGI(agibp); 1994 agi_ok = 1995 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && 1996 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); 1997 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, 1998 XFS_RANDOM_IUNLINK_REMOVE))) { 1999 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, 2000 mp, agi); 2001 xfs_trans_brelse(tp, agibp); 2002 cmn_err(CE_WARN, 2003 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.", 2004 mp->m_fsname); 2005 return XFS_ERROR(EFSCORRUPTED); 2006 } 2007 /* 2008 * Get the index into the agi hash table for the 2009 * list this inode will go on. 2010 */ 2011 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 2012 ASSERT(agino != 0); 2013 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 2014 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); 2015 ASSERT(agi->agi_unlinked[bucket_index]); 2016 2017 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 2018 /* 2019 * We're at the head of the list. Get the inode's 2020 * on-disk buffer to see if there is anyone after us 2021 * on the list. Only modify our next pointer if it 2022 * is not already NULLAGINO. This saves us the overhead 2023 * of dealing with the buffer when there is no need to 2024 * change it. 2025 */ 2026 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 2027 if (error) { 2028 cmn_err(CE_WARN, 2029 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. 
Returning error.", 2030 error, mp->m_fsname); 2031 return error; 2032 } 2033 next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT); 2034 ASSERT(next_agino != 0); 2035 if (next_agino != NULLAGINO) { 2036 INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO); 2037 offset = ip->i_boffset + 2038 offsetof(xfs_dinode_t, di_next_unlinked); 2039 xfs_trans_inode_buf(tp, ibp); 2040 xfs_trans_log_buf(tp, ibp, offset, 2041 (offset + sizeof(xfs_agino_t) - 1)); 2042 xfs_inobp_check(mp, ibp); 2043 } else { 2044 xfs_trans_brelse(tp, ibp); 2045 } 2046 /* 2047 * Point the bucket head pointer at the next inode. 2048 */ 2049 ASSERT(next_agino != 0); 2050 ASSERT(next_agino != agino); 2051 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 2052 offset = offsetof(xfs_agi_t, agi_unlinked) + 2053 (sizeof(xfs_agino_t) * bucket_index); 2054 xfs_trans_log_buf(tp, agibp, offset, 2055 (offset + sizeof(xfs_agino_t) - 1)); 2056 } else { 2057 /* 2058 * We need to search the list for the inode being freed. 2059 */ 2060 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 2061 last_ibp = NULL; 2062 while (next_agino != agino) { 2063 /* 2064 * If the last inode wasn't the one pointing to 2065 * us, then release its buffer since we're not 2066 * going to do anything with it. 2067 */ 2068 if (last_ibp != NULL) { 2069 xfs_trans_brelse(tp, last_ibp); 2070 } 2071 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 2072 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 2073 &last_ibp, &last_offset); 2074 if (error) { 2075 cmn_err(CE_WARN, 2076 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", 2077 error, mp->m_fsname); 2078 return error; 2079 } 2080 next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT); 2081 ASSERT(next_agino != NULLAGINO); 2082 ASSERT(next_agino != 0); 2083 } 2084 /* 2085 * Now last_ibp points to the buffer previous to us on 2086 * the unlinked list. Pull us from the list. 2087 */ 2088 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 2089 if (error) { 2090 cmn_err(CE_WARN, 2091 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 2092 error, mp->m_fsname); 2093 return error; 2094 } 2095 next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT); 2096 ASSERT(next_agino != 0); 2097 ASSERT(next_agino != agino); 2098 if (next_agino != NULLAGINO) { 2099 INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO); 2100 offset = ip->i_boffset + 2101 offsetof(xfs_dinode_t, di_next_unlinked); 2102 xfs_trans_inode_buf(tp, ibp); 2103 xfs_trans_log_buf(tp, ibp, offset, 2104 (offset + sizeof(xfs_agino_t) - 1)); 2105 xfs_inobp_check(mp, ibp); 2106 } else { 2107 xfs_trans_brelse(tp, ibp); 2108 } 2109 /* 2110 * Point the previous inode on the list to the next inode. 
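 *
 * In list terms this is the usual singly linked list removal,
 * i.e. last->next = cur->next, sketched as (the code below
 * additionally logs the changed range of the buffer holding the
 * previous inode):
 *
 *	last_dip->di_next_unlinked = next_agino;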
2111 	 */
2112 	INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino);
2113 	ASSERT(next_agino != 0);
2114 	offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2115 	xfs_trans_inode_buf(tp, last_ibp);
2116 	xfs_trans_log_buf(tp, last_ibp, offset,
2117 			  (offset + sizeof(xfs_agino_t) - 1));
2118 	xfs_inobp_check(mp, last_ibp);
2119 	}
2120 	return 0;
2121 }
2122 
2123 static __inline__ int xfs_inode_clean(xfs_inode_t *ip)
2124 {
2125 	return (((ip->i_itemp == NULL) ||
2126 		!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2127 		(ip->i_update_core == 0));
2128 }
2129 
2130 STATIC void
2131 xfs_ifree_cluster(
2132 	xfs_inode_t	*free_ip,
2133 	xfs_trans_t	*tp,
2134 	xfs_ino_t	inum)
2135 {
2136 	xfs_mount_t	*mp = free_ip->i_mount;
2137 	int		blks_per_cluster;
2138 	int		nbufs;
2139 	int		ninodes;
2140 	int		i, j, found, pre_flushed;
2141 	xfs_daddr_t	blkno;
2142 	xfs_buf_t	*bp;
2143 	xfs_ihash_t	*ih;
2144 	xfs_inode_t	*ip, **ip_found;
2145 	xfs_inode_log_item_t *iip;
2146 	xfs_log_item_t	*lip;
2147 	SPLDECL(s);
2148 
2149 	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2150 		blks_per_cluster = 1;
2151 		ninodes = mp->m_sb.sb_inopblock;
2152 		nbufs = XFS_IALLOC_BLOCKS(mp);
2153 	} else {
2154 		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2155 					mp->m_sb.sb_blocksize;
2156 		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2157 		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
2158 	}
2159 
2160 	ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2161 
2162 	for (j = 0; j < nbufs; j++, inum += ninodes) {
2163 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2164 					 XFS_INO_TO_AGBNO(mp, inum));
2165 
2166 
2167 		/*
2168 		 * Look for each inode in memory and attempt to lock it;
2169 		 * we can be racing with flushing and tail pushing here.
2170 		 * Any inode we get the locks on is added to an array of
2171 		 * inode items to process later.
2172 		 *
2173 		 * When we go to get the buffer lock below, we could beat
2174 		 * a flush or tail pushing thread to it, in which
2175 		 * case they will go looking for the inode buffer
2176 		 * and fail, so we need some other form of interlock
2177 		 * here.
2178 		 */
2179 		found = 0;
2180 		for (i = 0; i < ninodes; i++) {
2181 			ih = XFS_IHASH(mp, inum + i);
2182 			read_lock(&ih->ih_lock);
2183 			for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
2184 				if (ip->i_ino == inum + i)
2185 					break;
2186 			}
2187 
2188 			/* Inode not in memory or we found it already,
2189 			 * nothing to do
2190 			 */
2191 			if (!ip || (ip->i_flags & XFS_ISTALE)) {
2192 				read_unlock(&ih->ih_lock);
2193 				continue;
2194 			}
2195 
2196 			if (xfs_inode_clean(ip)) {
2197 				read_unlock(&ih->ih_lock);
2198 				continue;
2199 			}
2200 
2201 			/* If we can get the locks then add it to the
2202 			 * list, otherwise by the time we get the bp lock
2203 			 * below it will already be attached to the
2204 			 * inode buffer.
2205 			 */
2206 
2207 			/* This inode will already be locked - by us; let's
2208 			 * keep it that way.
2209 */ 2210 2211 if (ip == free_ip) { 2212 if (xfs_iflock_nowait(ip)) { 2213 ip->i_flags |= XFS_ISTALE; 2214 2215 if (xfs_inode_clean(ip)) { 2216 xfs_ifunlock(ip); 2217 } else { 2218 ip_found[found++] = ip; 2219 } 2220 } 2221 read_unlock(&ih->ih_lock); 2222 continue; 2223 } 2224 2225 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2226 if (xfs_iflock_nowait(ip)) { 2227 ip->i_flags |= XFS_ISTALE; 2228 2229 if (xfs_inode_clean(ip)) { 2230 xfs_ifunlock(ip); 2231 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2232 } else { 2233 ip_found[found++] = ip; 2234 } 2235 } else { 2236 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2237 } 2238 } 2239 2240 read_unlock(&ih->ih_lock); 2241 } 2242 2243 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2244 mp->m_bsize * blks_per_cluster, 2245 XFS_BUF_LOCK); 2246 2247 pre_flushed = 0; 2248 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 2249 while (lip) { 2250 if (lip->li_type == XFS_LI_INODE) { 2251 iip = (xfs_inode_log_item_t *)lip; 2252 ASSERT(iip->ili_logged == 1); 2253 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; 2254 AIL_LOCK(mp,s); 2255 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2256 AIL_UNLOCK(mp, s); 2257 iip->ili_inode->i_flags |= XFS_ISTALE; 2258 pre_flushed++; 2259 } 2260 lip = lip->li_bio_list; 2261 } 2262 2263 for (i = 0; i < found; i++) { 2264 ip = ip_found[i]; 2265 iip = ip->i_itemp; 2266 2267 if (!iip) { 2268 ip->i_update_core = 0; 2269 xfs_ifunlock(ip); 2270 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2271 continue; 2272 } 2273 2274 iip->ili_last_fields = iip->ili_format.ilf_fields; 2275 iip->ili_format.ilf_fields = 0; 2276 iip->ili_logged = 1; 2277 AIL_LOCK(mp,s); 2278 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2279 AIL_UNLOCK(mp, s); 2280 2281 xfs_buf_attach_iodone(bp, 2282 (void(*)(xfs_buf_t*,xfs_log_item_t*)) 2283 xfs_istale_done, (xfs_log_item_t *)iip); 2284 if (ip != free_ip) { 2285 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2286 } 2287 } 2288 2289 if (found || pre_flushed) 2290 xfs_trans_stale_inode_buf(tp, bp); 2291 xfs_trans_binval(tp, bp); 2292 } 2293 2294 kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *)); 2295 } 2296 2297 /* 2298 * This is called to return an inode to the inode free list. 2299 * The inode should already be truncated to 0 length and have 2300 * no pages associated with it. This routine also assumes that 2301 * the inode is already a part of the transaction. 2302 * 2303 * The on-disk copy of the inode will have been added to the list 2304 * of unlinked inodes in the AGI. We need to remove the inode from 2305 * that list atomically with respect to freeing it here. 2306 */ 2307 int 2308 xfs_ifree( 2309 xfs_trans_t *tp, 2310 xfs_inode_t *ip, 2311 xfs_bmap_free_t *flist) 2312 { 2313 int error; 2314 int delete; 2315 xfs_ino_t first_ino; 2316 2317 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 2318 ASSERT(ip->i_transp == tp); 2319 ASSERT(ip->i_d.di_nlink == 0); 2320 ASSERT(ip->i_d.di_nextents == 0); 2321 ASSERT(ip->i_d.di_anextents == 0); 2322 ASSERT((ip->i_d.di_size == 0) || 2323 ((ip->i_d.di_mode & S_IFMT) != S_IFREG)); 2324 ASSERT(ip->i_d.di_nblocks == 0); 2325 2326 /* 2327 * Pull the on-disk inode from the AGI unlinked list. 
2328 	 */
2329 	error = xfs_iunlink_remove(tp, ip);
2330 	if (error != 0) {
2331 		return error;
2332 	}
2333 
2334 	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2335 	if (error != 0) {
2336 		return error;
2337 	}
2338 	ip->i_d.di_mode = 0;		/* mark incore inode as free */
2339 	ip->i_d.di_flags = 0;
2340 	ip->i_d.di_dmevmask = 0;
2341 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2342 	ip->i_df.if_ext_max =
2343 		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2344 	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2345 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2346 	/*
2347 	 * Bump the generation count so no one will be confused
2348 	 * by reincarnations of this inode.
2349 	 */
2350 	ip->i_d.di_gen++;
2351 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2352 
2353 	if (delete) {
2354 		xfs_ifree_cluster(ip, tp, first_ino);
2355 	}
2356 
2357 	return 0;
2358 }
2359 
2360 /*
2361 * Reallocate the space for if_broot based on the number of records
2362 * being added or deleted as indicated in rec_diff. Move the records
2363 * and pointers in if_broot to fit the new size. When shrinking this
2364 * will eliminate holes between the records and pointers created by
2365 * the caller. When growing this will create holes to be filled in
2366 * by the caller.
2367 *
2368 * The caller must not request to add more records than would fit in
2369 * the on-disk inode root. If the if_broot is currently NULL, then
2370 * if we are adding records, one will be allocated. The caller must
2371 * also not request that the number of records go below zero, although
2372 * it can go to zero.
2373 *
2374 * ip -- the inode whose if_broot area is changing
2375 * rec_diff -- the change in the number of records, positive or negative,
2376 * requested for the if_broot array.
2377 */
2378 void
2379 xfs_iroot_realloc(
2380 	xfs_inode_t	*ip,
2381 	int		rec_diff,
2382 	int		whichfork)
2383 {
2384 	int		cur_max;
2385 	xfs_ifork_t	*ifp;
2386 	xfs_bmbt_block_t *new_broot;
2387 	int		new_max;
2388 	size_t		new_size;
2389 	char		*np;
2390 	char		*op;
2391 
2392 	/*
2393 	 * Handle the degenerate case quietly.
2394 	 */
2395 	if (rec_diff == 0) {
2396 		return;
2397 	}
2398 
2399 	ifp = XFS_IFORK_PTR(ip, whichfork);
2400 	if (rec_diff > 0) {
2401 		/*
2402 		 * If there wasn't any memory allocated before, just
2403 		 * allocate it now and get out.
2404 		 */
2405 		if (ifp->if_broot_bytes == 0) {
2406 			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2407 			ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2408 								KM_SLEEP);
2409 			ifp->if_broot_bytes = (int)new_size;
2410 			return;
2411 		}
2412 
2413 		/*
2414 		 * If there is already an existing if_broot, then we need
2415 		 * to realloc() it and shift the pointers to their new
2416 		 * location. The records don't change location because
2417 		 * they are kept butted up against the btree block header.
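 *
 * The if_broot layout this relies on is, schematically:
 *
 *	+--------------+-------------------+----------------------+
 *	| block header | records (fixed)   | pointers (relocated) |
 *	+--------------+-------------------+----------------------+
 *
 * so growing or shrinking the buffer only requires moving the
 * pointer array to its new offset, as the memmove() below does.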
2418 */ 2419 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); 2420 new_max = cur_max + rec_diff; 2421 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2422 ifp->if_broot = (xfs_bmbt_block_t *) 2423 kmem_realloc(ifp->if_broot, 2424 new_size, 2425 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ 2426 KM_SLEEP); 2427 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2428 ifp->if_broot_bytes); 2429 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2430 (int)new_size); 2431 ifp->if_broot_bytes = (int)new_size; 2432 ASSERT(ifp->if_broot_bytes <= 2433 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2434 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); 2435 return; 2436 } 2437 2438 /* 2439 * rec_diff is less than 0. In this case, we are shrinking the 2440 * if_broot buffer. It must already exist. If we go to zero 2441 * records, just get rid of the root and clear the status bit. 2442 */ 2443 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); 2444 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); 2445 new_max = cur_max + rec_diff; 2446 ASSERT(new_max >= 0); 2447 if (new_max > 0) 2448 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2449 else 2450 new_size = 0; 2451 if (new_size > 0) { 2452 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP); 2453 /* 2454 * First copy over the btree block header. 2455 */ 2456 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t)); 2457 } else { 2458 new_broot = NULL; 2459 ifp->if_flags &= ~XFS_IFBROOT; 2460 } 2461 2462 /* 2463 * Only copy the records and pointers if there are any. 2464 */ 2465 if (new_max > 0) { 2466 /* 2467 * First copy the records. 2468 */ 2469 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1, 2470 ifp->if_broot_bytes); 2471 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1, 2472 (int)new_size); 2473 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); 2474 2475 /* 2476 * Then copy the pointers. 2477 */ 2478 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2479 ifp->if_broot_bytes); 2480 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1, 2481 (int)new_size); 2482 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 2483 } 2484 kmem_free(ifp->if_broot, ifp->if_broot_bytes); 2485 ifp->if_broot = new_broot; 2486 ifp->if_broot_bytes = (int)new_size; 2487 ASSERT(ifp->if_broot_bytes <= 2488 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2489 return; 2490 } 2491 2492 2493 /* 2494 * This is called when the amount of space needed for if_data 2495 * is increased or decreased. The change in size is indicated by 2496 * the number of bytes that need to be added or deleted in the 2497 * byte_diff parameter. 2498 * 2499 * If the amount of space needed has decreased below the size of the 2500 * inline buffer, then switch to using the inline buffer. Otherwise, 2501 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer 2502 * to what is needed. 2503 * 2504 * ip -- the inode whose if_data area is changing 2505 * byte_diff -- the change in the number of bytes, positive or negative, 2506 * requested for the if_data array. 
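 *
 * The resulting storage choice can be summarized as (a sketch of
 * the cases handled below):
 *
 *	new_size == 0            -> no buffer at all
 *	new_size <= inline size  -> use if_u2.if_inline_data
 *	otherwise                -> kmem_alloc()/kmem_realloc(),
 *	                            rounded up to 4 bytes for logging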
2507 */
2508 void
2509 xfs_idata_realloc(
2510 	xfs_inode_t	*ip,
2511 	int		byte_diff,
2512 	int		whichfork)
2513 {
2514 	xfs_ifork_t	*ifp;
2515 	int		new_size;
2516 	int		real_size;
2517 
2518 	if (byte_diff == 0) {
2519 		return;
2520 	}
2521 
2522 	ifp = XFS_IFORK_PTR(ip, whichfork);
2523 	new_size = (int)ifp->if_bytes + byte_diff;
2524 	ASSERT(new_size >= 0);
2525 
2526 	if (new_size == 0) {
2527 		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2528 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2529 		}
2530 		ifp->if_u1.if_data = NULL;
2531 		real_size = 0;
2532 	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2533 		/*
2534 		 * If the valid extents/data can fit in if_inline_ext/data,
2535 		 * copy them from the malloc'd vector and free it.
2536 		 */
2537 		if (ifp->if_u1.if_data == NULL) {
2538 			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2539 		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2540 			ASSERT(ifp->if_real_bytes != 0);
2541 			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2542 				new_size);
2543 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2544 			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2545 		}
2546 		real_size = 0;
2547 	} else {
2548 		/*
2549 		 * Stuck with malloc/realloc.
2550 		 * For inline data, the underlying buffer must be
2551 		 * a multiple of 4 bytes in size so that it can be
2552 		 * logged and stay on word boundaries. We enforce
2553 		 * that here.
2554 		 */
2555 		real_size = roundup(new_size, 4);
2556 		if (ifp->if_u1.if_data == NULL) {
2557 			ASSERT(ifp->if_real_bytes == 0);
2558 			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2559 		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2560 			/*
2561 			 * Only do the realloc if the underlying size
2562 			 * is really changing.
2563 			 */
2564 			if (ifp->if_real_bytes != real_size) {
2565 				ifp->if_u1.if_data =
2566 					kmem_realloc(ifp->if_u1.if_data,
2567 							real_size,
2568 							ifp->if_real_bytes,
2569 							KM_SLEEP);
2570 			}
2571 		} else {
2572 			ASSERT(ifp->if_real_bytes == 0);
2573 			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2574 			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2575 				ifp->if_bytes);
2576 		}
2577 	}
2578 	ifp->if_real_bytes = real_size;
2579 	ifp->if_bytes = new_size;
2580 	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2581 }
2582 
2583 
2584 
2585 
2586 /*
2587 * Map inode to disk block and offset.
2588 *
2589 * mp -- the mount point structure for the current file system
2590 * tp -- the current transaction
2591 * ino -- the inode number of the inode to be located
2592 * imap -- this structure is filled in with the information necessary
2593 * to retrieve the given inode from disk
2594 * flags -- flags to pass to xfs_dilocate indicating whether or not
2595 * lookups in the inode btree are permitted
2596 */
2597 int
2598 xfs_imap(
2599 	xfs_mount_t	*mp,
2600 	xfs_trans_t	*tp,
2601 	xfs_ino_t	ino,
2602 	xfs_imap_t	*imap,
2603 	uint		flags)
2604 {
2605 	xfs_fsblock_t	fsbno;
2606 	int		len;
2607 	int		off;
2608 	int		error;
2609 
2610 	fsbno = imap->im_blkno ?
2611 		XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2612 	error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2613 	if (error != 0) {
2614 		return error;
2615 	}
2616 	imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2617 	imap->im_len = XFS_FSB_TO_BB(mp, len);
2618 	imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2619 	imap->im_ioffset = (ushort)off;
2620 	imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2621 	return 0;
2622 }
2623 
2624 void
2625 xfs_idestroy_fork(
2626 	xfs_inode_t	*ip,
2627 	int		whichfork)
2628 {
2629 	xfs_ifork_t	*ifp;
2630 
2631 	ifp = XFS_IFORK_PTR(ip, whichfork);
2632 	if (ifp->if_broot != NULL) {
2633 		kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2634 		ifp->if_broot = NULL;
2635 	}
2636 
2637 	/*
2638 	 * If the format is local, then we can't have an extents
2639 	 * array so just look for an inline data array. If we're
2640 	 * not local then we may or may not have an extents list,
2641 	 * so check and free it up if we do.
2642 	 */
2643 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2644 		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2645 		    (ifp->if_u1.if_data != NULL)) {
2646 			ASSERT(ifp->if_real_bytes != 0);
2647 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2648 			ifp->if_u1.if_data = NULL;
2649 			ifp->if_real_bytes = 0;
2650 		}
2651 	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2652 		   ((ifp->if_flags & XFS_IFEXTIREC) ||
2653 		    ((ifp->if_u1.if_extents != NULL) &&
2654 		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2655 		ASSERT(ifp->if_real_bytes != 0);
2656 		xfs_iext_destroy(ifp);
2657 	}
2658 	ASSERT(ifp->if_u1.if_extents == NULL ||
2659 	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2660 	ASSERT(ifp->if_real_bytes == 0);
2661 	if (whichfork == XFS_ATTR_FORK) {
2662 		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2663 		ip->i_afp = NULL;
2664 	}
2665 }
2666 
2667 /*
2668 * This is called to free all the memory associated with an inode.
2669 * It must free the inode itself and any buffers allocated for
2670 * if_extents/if_data and if_broot. It must also free the lock
2671 * associated with the inode.
2672 */
2673 void
2674 xfs_idestroy(
2675 	xfs_inode_t	*ip)
2676 {
2677 
2678 	switch (ip->i_d.di_mode & S_IFMT) {
2679 	case S_IFREG:
2680 	case S_IFDIR:
2681 	case S_IFLNK:
2682 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
2683 		break;
2684 	}
2685 	if (ip->i_afp)
2686 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2687 	mrfree(&ip->i_lock);
2688 	mrfree(&ip->i_iolock);
2689 	freesema(&ip->i_flock);
2690 #ifdef XFS_BMAP_TRACE
2691 	ktrace_free(ip->i_xtrace);
2692 #endif
2693 #ifdef XFS_BMBT_TRACE
2694 	ktrace_free(ip->i_btrace);
2695 #endif
2696 #ifdef XFS_RW_TRACE
2697 	ktrace_free(ip->i_rwtrace);
2698 #endif
2699 #ifdef XFS_ILOCK_TRACE
2700 	ktrace_free(ip->i_lock_trace);
2701 #endif
2702 #ifdef XFS_DIR2_TRACE
2703 	ktrace_free(ip->i_dir_trace);
2704 #endif
2705 	if (ip->i_itemp) {
2706 		/* XXXdpd should be able to assert this but shutdown
2707 		 * is leaving the AIL behind. */
2708 		ASSERT(((ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL) == 0) ||
2709 		       XFS_FORCED_SHUTDOWN(ip->i_mount));
2710 		xfs_inode_item_destroy(ip);
2711 	}
2712 	kmem_zone_free(xfs_inode_zone, ip);
2713 }
2714 
2715 
2716 /*
2717 * Increment the pin count of the given inode.
2718 * The pin count is kept in an atomic counter (see xfs_iunpin()).
2719 */
2720 void
2721 xfs_ipin(
2722 	xfs_inode_t	*ip)
2723 {
2724 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2725 
2726 	atomic_inc(&ip->i_pincount);
2727 }
2728 
2729 /*
2730 * Decrement the pin count of the given inode, and wake up
2731 * anyone in xfs_iunpin_wait() if the count goes to 0. The
2732 * inode must have been previously pinned with a call to xfs_ipin().
2733 */
2734 void
2735 xfs_iunpin(
2736 	xfs_inode_t	*ip)
2737 {
2738 	ASSERT(atomic_read(&ip->i_pincount) > 0);
2739 
2740 	if (atomic_dec_and_test(&ip->i_pincount)) {
2741 		/*
2742 		 * If the inode is currently being reclaimed, the
2743 		 * linux inode _and_ the xfs vnode may have been
2744 		 * freed so we cannot reference either of them safely.
2745 		 * Hence we should not try to do anything to them
2746 		 * if the xfs inode is currently in the reclaim
2747 		 * path.
2748 		 *
2749 		 * However, we still need to issue the unpin wakeup
2750 		 * call as the inode reclaim may be blocked waiting for
2751 		 * the inode to become unpinned.
2752 		 */
2753 		if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
2754 			bhv_vnode_t	*vp = XFS_ITOV_NULL(ip);
2755 
2756 			/* make sync come back and flush this inode */
2757 			if (vp) {
2758 				struct inode	*inode = vn_to_inode(vp);
2759 
2760 				if (!(inode->i_state &
2761 						(I_NEW|I_FREEING|I_CLEAR)))
2762 					mark_inode_dirty_sync(inode);
2763 			}
2764 		}
2765 		wake_up(&ip->i_ipin_wait);
2766 	}
2767 }
2768 
2769 /*
2770 * This is called to wait for the given inode to be unpinned.
2771 * It will sleep until this happens. The caller must have the
2772 * inode locked in at least shared mode so that the inode cannot
2773 * be subsequently pinned once someone is waiting for it to be
2774 * unpinned.
2775 */
2776 STATIC void
2777 xfs_iunpin_wait(
2778 	xfs_inode_t	*ip)
2779 {
2780 	xfs_inode_log_item_t	*iip;
2781 	xfs_lsn_t	lsn;
2782 
2783 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2784 
2785 	if (atomic_read(&ip->i_pincount) == 0) {
2786 		return;
2787 	}
2788 
2789 	iip = ip->i_itemp;
2790 	if (iip && iip->ili_last_lsn) {
2791 		lsn = iip->ili_last_lsn;
2792 	} else {
2793 		lsn = (xfs_lsn_t)0;
2794 	}
2795 
2796 	/*
2797 	 * Give the log a push so we don't wait here too long.
2798 	 */
2799 	xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
2800 
2801 	wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2802 }
2803 
2804 
2805 /*
2806 * xfs_iextents_copy()
2807 *
2808 * This is called to copy the REAL extents (as opposed to the delayed
2809 * allocation extents) from the inode into the given buffer. It
2810 * returns the number of bytes copied into the buffer.
2811 *
2812 * If there are no delayed allocation extents, then we can just
2813 * memcpy() the extents into the buffer. Otherwise, we need to
2814 * examine each extent in turn and skip those which are delayed.
2815 */
2816 int
2817 xfs_iextents_copy(
2818 	xfs_inode_t		*ip,
2819 	xfs_bmbt_rec_t		*buffer,
2820 	int			whichfork)
2821 {
2822 	int			copied;
2823 	xfs_bmbt_rec_t		*dest_ep;
2824 	xfs_bmbt_rec_t		*ep;
2825 #ifdef XFS_BMAP_TRACE
2826 	static char		fname[] = "xfs_iextents_copy";
2827 #endif
2828 	int			i;
2829 	xfs_ifork_t		*ifp;
2830 	int			nrecs;
2831 	xfs_fsblock_t		start_block;
2832 
2833 	ifp = XFS_IFORK_PTR(ip, whichfork);
2834 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2835 	ASSERT(ifp->if_bytes > 0);
2836 
2837 	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2838 	xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork);
2839 	ASSERT(nrecs > 0);
2840 
2841 	/*
2842 	 * There are some delayed allocation extents in the
2843 	 * inode, so copy the extents one at a time and skip
2844 	 * the delayed ones.
There must be at least one 2845 * non-delayed extent. 2846 */ 2847 dest_ep = buffer; 2848 copied = 0; 2849 for (i = 0; i < nrecs; i++) { 2850 ep = xfs_iext_get_ext(ifp, i); 2851 start_block = xfs_bmbt_get_startblock(ep); 2852 if (ISNULLSTARTBLOCK(start_block)) { 2853 /* 2854 * It's a delayed allocation extent, so skip it. 2855 */ 2856 continue; 2857 } 2858 2859 /* Translate to on disk format */ 2860 put_unaligned(INT_GET(ep->l0, ARCH_CONVERT), 2861 (__uint64_t*)&dest_ep->l0); 2862 put_unaligned(INT_GET(ep->l1, ARCH_CONVERT), 2863 (__uint64_t*)&dest_ep->l1); 2864 dest_ep++; 2865 copied++; 2866 } 2867 ASSERT(copied != 0); 2868 xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip)); 2869 2870 return (copied * (uint)sizeof(xfs_bmbt_rec_t)); 2871 } 2872 2873 /* 2874 * Each of the following cases stores data into the same region 2875 * of the on-disk inode, so only one of them can be valid at 2876 * any given time. While it is possible to have conflicting formats 2877 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is 2878 * in EXTENTS format, this can only happen when the fork has 2879 * changed formats after being modified but before being flushed. 2880 * In these cases, the format always takes precedence, because the 2881 * format indicates the current state of the fork. 2882 */ 2883 /*ARGSUSED*/ 2884 STATIC int 2885 xfs_iflush_fork( 2886 xfs_inode_t *ip, 2887 xfs_dinode_t *dip, 2888 xfs_inode_log_item_t *iip, 2889 int whichfork, 2890 xfs_buf_t *bp) 2891 { 2892 char *cp; 2893 xfs_ifork_t *ifp; 2894 xfs_mount_t *mp; 2895 #ifdef XFS_TRANS_DEBUG 2896 int first; 2897 #endif 2898 static const short brootflag[2] = 2899 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2900 static const short dataflag[2] = 2901 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2902 static const short extflag[2] = 2903 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2904 2905 if (iip == NULL) 2906 return 0; 2907 ifp = XFS_IFORK_PTR(ip, whichfork); 2908 /* 2909 * This can happen if we gave up in iformat in an error path, 2910 * for the attribute fork. 
2911 */ 2912 if (ifp == NULL) { 2913 ASSERT(whichfork == XFS_ATTR_FORK); 2914 return 0; 2915 } 2916 cp = XFS_DFORK_PTR(dip, whichfork); 2917 mp = ip->i_mount; 2918 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2919 case XFS_DINODE_FMT_LOCAL: 2920 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && 2921 (ifp->if_bytes > 0)) { 2922 ASSERT(ifp->if_u1.if_data != NULL); 2923 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2924 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 2925 } 2926 break; 2927 2928 case XFS_DINODE_FMT_EXTENTS: 2929 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2930 !(iip->ili_format.ilf_fields & extflag[whichfork])); 2931 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || 2932 (ifp->if_bytes == 0)); 2933 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || 2934 (ifp->if_bytes > 0)); 2935 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 2936 (ifp->if_bytes > 0)) { 2937 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2938 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2939 whichfork); 2940 } 2941 break; 2942 2943 case XFS_DINODE_FMT_BTREE: 2944 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && 2945 (ifp->if_broot_bytes > 0)) { 2946 ASSERT(ifp->if_broot != NULL); 2947 ASSERT(ifp->if_broot_bytes <= 2948 (XFS_IFORK_SIZE(ip, whichfork) + 2949 XFS_BROOT_SIZE_ADJ)); 2950 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes, 2951 (xfs_bmdr_block_t *)cp, 2952 XFS_DFORK_SIZE(dip, mp, whichfork)); 2953 } 2954 break; 2955 2956 case XFS_DINODE_FMT_DEV: 2957 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { 2958 ASSERT(whichfork == XFS_DATA_FORK); 2959 INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev); 2960 } 2961 break; 2962 2963 case XFS_DINODE_FMT_UUID: 2964 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { 2965 ASSERT(whichfork == XFS_DATA_FORK); 2966 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid, 2967 sizeof(uuid_t)); 2968 } 2969 break; 2970 2971 default: 2972 ASSERT(0); 2973 break; 2974 } 2975 2976 return 0; 2977 } 2978 2979 /* 2980 * xfs_iflush() will write a modified inode's changes out to the 2981 * inode's on disk home. The caller must have the inode lock held 2982 * in at least shared mode and the inode flush semaphore must be 2983 * held as well. The inode lock will still be held upon return from 2984 * the call and the caller is free to unlock it. 2985 * The inode flush lock will be unlocked when the inode reaches the disk. 2986 * The flags indicate how the inode's buffer should be written out. 2987 */ 2988 int 2989 xfs_iflush( 2990 xfs_inode_t *ip, 2991 uint flags) 2992 { 2993 xfs_inode_log_item_t *iip; 2994 xfs_buf_t *bp; 2995 xfs_dinode_t *dip; 2996 xfs_mount_t *mp; 2997 int error; 2998 /* REFERENCED */ 2999 xfs_chash_t *ch; 3000 xfs_inode_t *iq; 3001 int clcount; /* count of inodes clustered */ 3002 int bufwasdelwri; 3003 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; 3004 SPLDECL(s); 3005 3006 XFS_STATS_INC(xs_iflush_count); 3007 3008 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 3009 ASSERT(issemalocked(&(ip->i_flock))); 3010 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3011 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3012 3013 iip = ip->i_itemp; 3014 mp = ip->i_mount; 3015 3016 /* 3017 * If the inode isn't dirty, then just release the inode 3018 * flush lock and do nothing. 3019 */ 3020 if ((ip->i_update_core == 0) && 3021 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { 3022 ASSERT((iip != NULL) ? 
3023 		!(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3024 		xfs_ifunlock(ip);
3025 		return 0;
3026 	}
3027 
3028 	/*
3029 	 * We can't flush the inode until it is unpinned, so
3030 	 * wait for it. We know no one new can pin it, because
3031 	 * we are holding the inode lock shared and you need
3032 	 * to hold it exclusively to pin the inode.
3033 	 */
3034 	xfs_iunpin_wait(ip);
3035 
3036 	/*
3037 	 * This may have been unpinned because the filesystem is shutting
3038 	 * down forcibly. If that's the case we must not write this inode
3039 	 * to disk, because the log record didn't make it to disk!
3040 	 */
3041 	if (XFS_FORCED_SHUTDOWN(mp)) {
3042 		ip->i_update_core = 0;
3043 		if (iip)
3044 			iip->ili_format.ilf_fields = 0;
3045 		xfs_ifunlock(ip);
3046 		return XFS_ERROR(EIO);
3047 	}
3048 
3049 	/*
3050 	 * Get the buffer containing the on-disk inode.
3051 	 */
3052 	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3053 	if (error) {
3054 		xfs_ifunlock(ip);
3055 		return error;
3056 	}
3057 
3058 	/*
3059 	 * Decide how the buffer will be flushed out. This is done before
3060 	 * the call to xfs_iflush_int because this field is zeroed by it.
3061 	 */
3062 	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3063 		/*
3064 		 * Flush out the inode buffer according to the directions
3065 		 * of the caller. In the cases where the caller has given
3066 		 * us a choice, choose the non-delwri case. This is because
3067 		 * the inode is in the AIL and we need to get it out soon.
3068 		 */
3069 		switch (flags) {
3070 		case XFS_IFLUSH_SYNC:
3071 		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3072 			flags = 0;
3073 			break;
3074 		case XFS_IFLUSH_ASYNC:
3075 		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3076 			flags = INT_ASYNC;
3077 			break;
3078 		case XFS_IFLUSH_DELWRI:
3079 			flags = INT_DELWRI;
3080 			break;
3081 		default:
3082 			ASSERT(0);
3083 			flags = 0;
3084 			break;
3085 		}
3086 	} else {
3087 		switch (flags) {
3088 		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3089 		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3090 		case XFS_IFLUSH_DELWRI:
3091 			flags = INT_DELWRI;
3092 			break;
3093 		case XFS_IFLUSH_ASYNC:
3094 			flags = INT_ASYNC;
3095 			break;
3096 		case XFS_IFLUSH_SYNC:
3097 			flags = 0;
3098 			break;
3099 		default:
3100 			ASSERT(0);
3101 			flags = 0;
3102 			break;
3103 		}
3104 	}
3105 
3106 	/*
3107 	 * First flush out the inode that xfs_iflush was called with.
3108 	 */
3109 	error = xfs_iflush_int(ip, bp);
3110 	if (error) {
3111 		goto corrupt_out;
3112 	}
3113 
3114 	/*
3115 	 * inode clustering:
3116 	 * see if other inodes can be gathered into this write
3117 	 */
3118 
3119 	ip->i_chash->chl_buf = bp;
3120 
3121 	ch = XFS_CHASH(mp, ip->i_blkno);
3122 	s = mutex_spinlock(&ch->ch_lock);
3123 
3124 	clcount = 0;
3125 	for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) {
3126 		/*
3127 		 * Do an unprotected check to see if the inode is dirty and
3128 		 * is a candidate for flushing. These checks will be repeated
3129 		 * later after the appropriate locks are acquired.
3130 		 */
3131 		iip = iq->i_itemp;
3132 		if ((iq->i_update_core == 0) &&
3133 		    ((iip == NULL) ||
3134 		     !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3135 		    xfs_ipincount(iq) == 0) {
3136 			continue;
3137 		}
3138 
3139 		/*
3140 		 * Try to get locks. If any are unavailable,
3141 		 * then this inode cannot be flushed and is skipped.
3142 		 */
3143 
3144 		/* get inode locks (just i_lock) */
3145 		if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3146 			/* get inode flush lock */
3147 			if (xfs_iflock_nowait(iq)) {
3148 				/* check if pinned */
3149 				if (xfs_ipincount(iq) == 0) {
3150 					/* arriving here means that
3151 					 * this inode can be flushed.
3152 * first re-check that it's 3153 * dirty 3154 */ 3155 iip = iq->i_itemp; 3156 if ((iq->i_update_core != 0)|| 3157 ((iip != NULL) && 3158 (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { 3159 clcount++; 3160 error = xfs_iflush_int(iq, bp); 3161 if (error) { 3162 xfs_iunlock(iq, 3163 XFS_ILOCK_SHARED); 3164 goto cluster_corrupt_out; 3165 } 3166 } else { 3167 xfs_ifunlock(iq); 3168 } 3169 } else { 3170 xfs_ifunlock(iq); 3171 } 3172 } 3173 xfs_iunlock(iq, XFS_ILOCK_SHARED); 3174 } 3175 } 3176 mutex_spinunlock(&ch->ch_lock, s); 3177 3178 if (clcount) { 3179 XFS_STATS_INC(xs_icluster_flushcnt); 3180 XFS_STATS_ADD(xs_icluster_flushinode, clcount); 3181 } 3182 3183 /* 3184 * If the buffer is pinned then push on the log so we won't 3185 * get stuck waiting in the write for too long. 3186 */ 3187 if (XFS_BUF_ISPINNED(bp)){ 3188 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 3189 } 3190 3191 if (flags & INT_DELWRI) { 3192 xfs_bdwrite(mp, bp); 3193 } else if (flags & INT_ASYNC) { 3194 xfs_bawrite(mp, bp); 3195 } else { 3196 error = xfs_bwrite(mp, bp); 3197 } 3198 return error; 3199 3200 corrupt_out: 3201 xfs_buf_relse(bp); 3202 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3203 xfs_iflush_abort(ip); 3204 /* 3205 * Unlocks the flush lock 3206 */ 3207 return XFS_ERROR(EFSCORRUPTED); 3208 3209 cluster_corrupt_out: 3210 /* Corruption detected in the clustering loop. Invalidate the 3211 * inode buffer and shut down the filesystem. 3212 */ 3213 mutex_spinunlock(&ch->ch_lock, s); 3214 3215 /* 3216 * Clean up the buffer. If it was B_DELWRI, just release it -- 3217 * brelse can handle it with no problems. If not, shut down the 3218 * filesystem before releasing the buffer. 3219 */ 3220 if ((bufwasdelwri= XFS_BUF_ISDELAYWRITE(bp))) { 3221 xfs_buf_relse(bp); 3222 } 3223 3224 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3225 3226 if(!bufwasdelwri) { 3227 /* 3228 * Just like incore_relse: if we have b_iodone functions, 3229 * mark the buffer as an error and call them. Otherwise 3230 * mark it as stale and brelse. 3231 */ 3232 if (XFS_BUF_IODONE_FUNC(bp)) { 3233 XFS_BUF_CLR_BDSTRAT_FUNC(bp); 3234 XFS_BUF_UNDONE(bp); 3235 XFS_BUF_STALE(bp); 3236 XFS_BUF_SHUT(bp); 3237 XFS_BUF_ERROR(bp,EIO); 3238 xfs_biodone(bp); 3239 } else { 3240 XFS_BUF_STALE(bp); 3241 xfs_buf_relse(bp); 3242 } 3243 } 3244 3245 xfs_iflush_abort(iq); 3246 /* 3247 * Unlocks the flush lock 3248 */ 3249 return XFS_ERROR(EFSCORRUPTED); 3250 } 3251 3252 3253 STATIC int 3254 xfs_iflush_int( 3255 xfs_inode_t *ip, 3256 xfs_buf_t *bp) 3257 { 3258 xfs_inode_log_item_t *iip; 3259 xfs_dinode_t *dip; 3260 xfs_mount_t *mp; 3261 #ifdef XFS_TRANS_DEBUG 3262 int first; 3263 #endif 3264 SPLDECL(s); 3265 3266 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 3267 ASSERT(issemalocked(&(ip->i_flock))); 3268 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3269 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3270 3271 iip = ip->i_itemp; 3272 mp = ip->i_mount; 3273 3274 3275 /* 3276 * If the inode isn't dirty, then just release the inode 3277 * flush lock and do nothing. 3278 */ 3279 if ((ip->i_update_core == 0) && 3280 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { 3281 xfs_ifunlock(ip); 3282 return 0; 3283 } 3284 3285 /* set *dip = inode's place in the buffer */ 3286 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset); 3287 3288 /* 3289 * Clear i_update_core before copying out the data. 3290 * This is for coordination with our timestamp updates 3291 * that don't hold the inode lock. 
They will always 3292 * update the timestamps BEFORE setting i_update_core, 3293 * so if we clear i_update_core after they set it we 3294 * are guaranteed to see their updates to the timestamps. 3295 * I believe that this depends on strongly ordered memory 3296 * semantics, but we have that. We use the SYNCHRONIZE 3297 * macro to make sure that the compiler does not reorder 3298 * the i_update_core access below the data copy below. 3299 */ 3300 ip->i_update_core = 0; 3301 SYNCHRONIZE(); 3302 3303 /* 3304 * Make sure to get the latest atime from the Linux inode. 3305 */ 3306 xfs_synchronize_atime(ip); 3307 3308 if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC, 3309 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 3310 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3311 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", 3312 ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip); 3313 goto corrupt_out; 3314 } 3315 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 3316 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 3317 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3318 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 3319 ip->i_ino, ip, ip->i_d.di_magic); 3320 goto corrupt_out; 3321 } 3322 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 3323 if (XFS_TEST_ERROR( 3324 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3325 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 3326 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 3327 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3328 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", 3329 ip->i_ino, ip); 3330 goto corrupt_out; 3331 } 3332 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 3333 if (XFS_TEST_ERROR( 3334 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3335 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 3336 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 3337 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 3338 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3339 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", 3340 ip->i_ino, ip); 3341 goto corrupt_out; 3342 } 3343 } 3344 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 3345 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 3346 XFS_RANDOM_IFLUSH_5)) { 3347 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3348 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", 3349 ip->i_ino, 3350 ip->i_d.di_nextents + ip->i_d.di_anextents, 3351 ip->i_d.di_nblocks, 3352 ip); 3353 goto corrupt_out; 3354 } 3355 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 3356 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 3357 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3358 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 3359 ip->i_ino, ip->i_d.di_forkoff, ip); 3360 goto corrupt_out; 3361 } 3362 /* 3363 * bump the flush iteration count, used to detect flushes which 3364 * postdate a log record during recovery. 3365 */ 3366 3367 ip->i_d.di_flushiter++; 3368 3369 /* 3370 * Copy the dirty parts of the inode into the on-disk 3371 * inode. We always copy out the core of the inode, 3372 * because if the inode is dirty at all the core must 3373 * be. 
3374 */ 3375 xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), -1); 3376 3377 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 3378 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3379 ip->i_d.di_flushiter = 0; 3380 3381 /* 3382 * If this is really an old format inode and the superblock version 3383 * has not been updated to support only new format inodes, then 3384 * convert back to the old inode format. If the superblock version 3385 * has been updated, then make the conversion permanent. 3386 */ 3387 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || 3388 XFS_SB_VERSION_HASNLINK(&mp->m_sb)); 3389 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { 3390 if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { 3391 /* 3392 * Convert it back. 3393 */ 3394 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 3395 INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink); 3396 } else { 3397 /* 3398 * The superblock version has already been bumped, 3399 * so just make the conversion to the new inode 3400 * format permanent. 3401 */ 3402 ip->i_d.di_version = XFS_DINODE_VERSION_2; 3403 INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2); 3404 ip->i_d.di_onlink = 0; 3405 dip->di_core.di_onlink = 0; 3406 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 3407 memset(&(dip->di_core.di_pad[0]), 0, 3408 sizeof(dip->di_core.di_pad)); 3409 ASSERT(ip->i_d.di_projid == 0); 3410 } 3411 } 3412 3413 if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) { 3414 goto corrupt_out; 3415 } 3416 3417 if (XFS_IFORK_Q(ip)) { 3418 /* 3419 * The only error from xfs_iflush_fork is on the data fork. 3420 */ 3421 (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 3422 } 3423 xfs_inobp_check(mp, bp); 3424 3425 /* 3426 * We've recorded everything logged in the inode, so we'd 3427 * like to clear the ilf_fields bits so we don't log and 3428 * flush things unnecessarily. However, we can't stop 3429 * logging all this information until the data we've copied 3430 * into the disk buffer is written to disk. If we did we might 3431 * overwrite the copy of the inode in the log with all the 3432 * data after re-logging only part of it, and in the face of 3433 * a crash we wouldn't have all the data we need to recover. 3434 * 3435 * What we do is move the bits to the ili_last_fields field. 3436 * When logging the inode, these bits are moved back to the 3437 * ilf_fields field. In the xfs_iflush_done() routine we 3438 * clear ili_last_fields, since we know that the information 3439 * those bits represent is permanently on disk. As long as 3440 * the flush completes before the inode is logged again, then 3441 * both ilf_fields and ili_last_fields will be cleared. 3442 * 3443 * We can play with the ilf_fields bits here, because the inode 3444 * lock must be held exclusively in order to set bits there 3445 * and the flush lock protects the ili_last_fields bits. 3446 * Set ili_logged so the flush done 3447 * routine can tell whether or not to look in the AIL. 3448 * Also, store the current LSN of the inode so that we can tell 3449 * whether the item has moved in the AIL from xfs_iflush_done(). 3450 * In order to read the lsn we need the AIL lock, because 3451 * it is a 64 bit value that cannot be read atomically. 
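 *
 * In outline, the hand-off performed below is:
 *
 *	iip->ili_last_fields = iip->ili_format.ilf_fields;
 *	iip->ili_format.ilf_fields = 0;
 *	iip->ili_logged = 1;
 *	iip->ili_flush_lsn = iip->ili_item.li_lsn;	(under AIL_LOCK)
 *
 * with xfs_iflush_done() clearing ili_last_fields once the buffer
 * write completes.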
3452 */ 3453 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3454 iip->ili_last_fields = iip->ili_format.ilf_fields; 3455 iip->ili_format.ilf_fields = 0; 3456 iip->ili_logged = 1; 3457 3458 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ 3459 AIL_LOCK(mp,s); 3460 iip->ili_flush_lsn = iip->ili_item.li_lsn; 3461 AIL_UNLOCK(mp, s); 3462 3463 /* 3464 * Attach the function xfs_iflush_done to the inode's 3465 * buffer. This will remove the inode from the AIL 3466 * and unlock the inode's flush lock when the inode is 3467 * completely written to disk. 3468 */ 3469 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) 3470 xfs_iflush_done, (xfs_log_item_t *)iip); 3471 3472 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 3473 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); 3474 } else { 3475 /* 3476 * We're flushing an inode which is not in the AIL and has 3477 * not been logged but has i_update_core set. For this 3478 * case we can use a B_DELWRI flush and immediately drop 3479 * the inode flush lock because we can avoid the whole 3480 * AIL state thing. It's OK to drop the flush lock now, 3481 * because we've already locked the buffer and to do anything 3482 * you really need both. 3483 */ 3484 if (iip != NULL) { 3485 ASSERT(iip->ili_logged == 0); 3486 ASSERT(iip->ili_last_fields == 0); 3487 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 3488 } 3489 xfs_ifunlock(ip); 3490 } 3491 3492 return 0; 3493 3494 corrupt_out: 3495 return XFS_ERROR(EFSCORRUPTED); 3496 } 3497 3498 3499 /* 3500 * Flush all inactive inodes in mp. 3501 */ 3502 void 3503 xfs_iflush_all( 3504 xfs_mount_t *mp) 3505 { 3506 xfs_inode_t *ip; 3507 bhv_vnode_t *vp; 3508 3509 again: 3510 XFS_MOUNT_ILOCK(mp); 3511 ip = mp->m_inodes; 3512 if (ip == NULL) 3513 goto out; 3514 3515 do { 3516 /* Make sure we skip markers inserted by sync */ 3517 if (ip->i_mount == NULL) { 3518 ip = ip->i_mnext; 3519 continue; 3520 } 3521 3522 vp = XFS_ITOV_NULL(ip); 3523 if (!vp) { 3524 XFS_MOUNT_IUNLOCK(mp); 3525 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); 3526 goto again; 3527 } 3528 3529 ASSERT(vn_count(vp) == 0); 3530 3531 ip = ip->i_mnext; 3532 } while (ip != mp->m_inodes); 3533 out: 3534 XFS_MOUNT_IUNLOCK(mp); 3535 } 3536 3537 /* 3538 * xfs_iaccess: check accessibility of inode for mode. 3539 */ 3540 int 3541 xfs_iaccess( 3542 xfs_inode_t *ip, 3543 mode_t mode, 3544 cred_t *cr) 3545 { 3546 int error; 3547 mode_t orgmode = mode; 3548 struct inode *inode = vn_to_inode(XFS_ITOV(ip)); 3549 3550 if (mode & S_IWUSR) { 3551 umode_t imode = inode->i_mode; 3552 3553 if (IS_RDONLY(inode) && 3554 (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode))) 3555 return XFS_ERROR(EROFS); 3556 3557 if (IS_IMMUTABLE(inode)) 3558 return XFS_ERROR(EACCES); 3559 } 3560 3561 /* 3562 * If there's an Access Control List it's used instead of 3563 * the mode bits. 3564 */ 3565 if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1) 3566 return error ? XFS_ERROR(error) : 0; 3567 3568 if (current_fsuid(cr) != ip->i_d.di_uid) { 3569 mode >>= 3; 3570 if (!in_group_p((gid_t)ip->i_d.di_gid)) 3571 mode >>= 3; 3572 } 3573 3574 /* 3575 * If the DACs are ok we don't need any capability check. 3576 */ 3577 if ((ip->i_d.di_mode & mode) == mode) 3578 return 0; 3579 /* 3580 * Read/write DACs are always overridable. 3581 * Executable DACs are overridable if at least one exec bit is set. 
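 *
 * A worked example (illustrative): a request for S_IRUSR (0400)
 * by a caller who is neither the owner nor in the file's group
 * is shifted right twice (mode >>= 3, twice) down to 0004, which
 * a file with mode 0754 grants (0754 & 0004 == 0004); a write
 * request shifted to 0002 is refused by the mode bits and falls
 * through to the capability checks below.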
3582 */ 3583 if (!(orgmode & S_IXUSR) || 3584 (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode)) 3585 if (capable_cred(cr, CAP_DAC_OVERRIDE)) 3586 return 0; 3587 3588 if ((orgmode == S_IRUSR) || 3589 (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) { 3590 if (capable_cred(cr, CAP_DAC_READ_SEARCH)) 3591 return 0; 3592 #ifdef NOISE 3593 cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode); 3594 #endif /* NOISE */ 3595 return XFS_ERROR(EACCES); 3596 } 3597 return XFS_ERROR(EACCES); 3598 } 3599 3600 /* 3601 * xfs_iroundup: round up argument to next power of two 3602 */ 3603 uint 3604 xfs_iroundup( 3605 uint v) 3606 { 3607 int i; 3608 uint m; 3609 3610 if ((v & (v - 1)) == 0) 3611 return v; 3612 ASSERT((v & 0x80000000) == 0); 3613 if ((v & (v + 1)) == 0) 3614 return v + 1; 3615 for (i = 0, m = 1; i < 31; i++, m <<= 1) { 3616 if (v & m) 3617 continue; 3618 v |= m; 3619 if ((v & (v + 1)) == 0) 3620 return v + 1; 3621 } 3622 ASSERT(0); 3623 return( 0 ); 3624 } 3625 3626 #ifdef XFS_ILOCK_TRACE 3627 ktrace_t *xfs_ilock_trace_buf; 3628 3629 void 3630 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) 3631 { 3632 ktrace_enter(ip->i_lock_trace, 3633 (void *)ip, 3634 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */ 3635 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */ 3636 (void *)ra, /* caller of ilock */ 3637 (void *)(unsigned long)current_cpu(), 3638 (void *)(unsigned long)current_pid(), 3639 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); 3640 } 3641 #endif 3642 3643 /* 3644 * Return a pointer to the extent record at file index idx. 3645 */ 3646 xfs_bmbt_rec_t * 3647 xfs_iext_get_ext( 3648 xfs_ifork_t *ifp, /* inode fork pointer */ 3649 xfs_extnum_t idx) /* index of target extent */ 3650 { 3651 ASSERT(idx >= 0); 3652 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 3653 return ifp->if_u1.if_ext_irec->er_extbuf; 3654 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3655 xfs_ext_irec_t *erp; /* irec pointer */ 3656 int erp_idx = 0; /* irec index */ 3657 xfs_extnum_t page_idx = idx; /* ext index in target list */ 3658 3659 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3660 return &erp->er_extbuf[page_idx]; 3661 } else if (ifp->if_bytes) { 3662 return &ifp->if_u1.if_extents[idx]; 3663 } else { 3664 return NULL; 3665 } 3666 } 3667 3668 /* 3669 * Insert new item(s) into the extent records for incore inode 3670 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 3671 */ 3672 void 3673 xfs_iext_insert( 3674 xfs_ifork_t *ifp, /* inode fork pointer */ 3675 xfs_extnum_t idx, /* starting index of new items */ 3676 xfs_extnum_t count, /* number of inserted items */ 3677 xfs_bmbt_irec_t *new) /* items to insert */ 3678 { 3679 xfs_bmbt_rec_t *ep; /* extent record pointer */ 3680 xfs_extnum_t i; /* extent record index */ 3681 3682 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3683 xfs_iext_add(ifp, idx, count); 3684 for (i = idx; i < idx + count; i++, new++) { 3685 ep = xfs_iext_get_ext(ifp, i); 3686 xfs_bmbt_set_all(ep, new); 3687 } 3688 } 3689 3690 /* 3691 * This is called when the amount of space required for incore file 3692 * extents needs to be increased. The ext_diff parameter stores the 3693 * number of new extents being added and the idx parameter contains 3694 * the extent index where the new extents will be added. If the new 3695 * extents are being appended, then we just need to (re)allocate and 3696 * initialize the space. 

#ifdef XFS_ILOCK_TRACE
ktrace_t	*xfs_ilock_trace_buf;

void
xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
{
	ktrace_enter(ip->i_lock_trace,
		     (void *)ip,
		     (void *)(unsigned long)lock,	/* 1 = LOCK, 3=UNLOCK, etc */
		     (void *)(unsigned long)lockflags,	/* XFS_ILOCK_EXCL etc */
		     (void *)ra,			/* caller of ilock */
		     (void *)(unsigned long)current_cpu(),
		     (void *)(unsigned long)current_pid(),
		     NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
}
#endif

/*
 * Return a pointer to the extent record at file index idx.
 */
xfs_bmbt_rec_t *
xfs_iext_get_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx)		/* index of target extent */
{
	ASSERT(idx >= 0);
	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
		return ifp->if_u1.if_ext_irec->er_extbuf;
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_ext_irec_t	*erp;		/* irec pointer */
		int		erp_idx = 0;	/* irec index */
		xfs_extnum_t	page_idx = idx;	/* ext index in target list */

		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
		return &erp->er_extbuf[page_idx];
	} else if (ifp->if_bytes) {
		return &ifp->if_u1.if_extents[idx];
	} else {
		return NULL;
	}
}

/*
 * Insert new item(s) into the extent records for incore inode
 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
 */
void
xfs_iext_insert(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* starting index of new items */
	xfs_extnum_t	count,		/* number of inserted items */
	xfs_bmbt_irec_t	*new)		/* items to insert */
{
	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
	xfs_extnum_t	i;		/* extent record index */

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_add(ifp, idx, count);
	for (i = idx; i < idx + count; i++, new++) {
		ep = xfs_iext_get_ext(ifp, i);
		xfs_bmbt_set_all(ep, new);
	}
}

/*
 * This is called when the amount of space required for incore file
 * extents needs to be increased.  The ext_diff parameter stores the
 * number of new extents being added and the idx parameter contains
 * the extent index where the new extents will be added.  If the new
 * extents are being appended, then we just need to (re)allocate and
 * initialize the space.  Otherwise, if the new extents are being
 * inserted into the middle of the existing entries, a bit more work
 * is required to make room for the new extents to be inserted.  The
 * caller is responsible for filling in the new extent entries upon
 * return.
 */
void
xfs_iext_add(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin adding exts */
	int		ext_diff)	/* number of extents to add */
{
	int		byte_diff;	/* new bytes being added */
	int		new_size;	/* size of extents after adding */
	xfs_extnum_t	nextents;	/* number of extents in file */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT((idx >= 0) && (idx <= nextents));
	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
	new_size = ifp->if_bytes + byte_diff;
	/*
	 * If the new number of extents (nextents + ext_diff)
	 * fits inside the inode, then continue to use the inline
	 * extent buffer.
	 */
	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
		if (idx < nextents) {
			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
				&ifp->if_u2.if_inline_ext[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
		}
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		ifp->if_real_bytes = 0;
		ifp->if_lastex = nextents + ext_diff;
	}
	/*
	 * Otherwise use a linear (direct) extent list.
	 * If the extents are currently inside the inode,
	 * xfs_iext_realloc_direct will switch us from
	 * inline to direct extent allocation mode.
	 */
	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, new_size);
		if (idx < nextents) {
			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
				&ifp->if_u1.if_extents[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
		}
	}
	/* Indirection array */
	else {
		xfs_ext_irec_t	*erp;
		int		erp_idx = 0;
		int		page_idx = idx;

		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
		if (ifp->if_flags & XFS_IFEXTIREC) {
			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
		} else {
			xfs_iext_irec_init(ifp);
			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
			erp = ifp->if_u1.if_ext_irec;
		}
		/* Extents fit in target extent page */
		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
			if (page_idx < erp->er_extcount) {
				memmove(&erp->er_extbuf[page_idx + ext_diff],
					&erp->er_extbuf[page_idx],
					(erp->er_extcount - page_idx) *
					sizeof(xfs_bmbt_rec_t));
				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
			}
			erp->er_extcount += ext_diff;
			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		}
		/* Insert a new extent page */
		else if (erp) {
			xfs_iext_add_indirect_multi(ifp,
				erp_idx, page_idx, ext_diff);
		}
		/*
		 * If extent(s) are being appended to the last page in
		 * the indirection array and the new extent(s) don't fit
		 * in the page, then erp is NULL and erp_idx is set to
		 * the next index needed in the indirection array.
		 */
		else {
			int	count = ext_diff;

			while (count) {
				erp = xfs_iext_irec_new(ifp, erp_idx);
				/*
				 * A new page holds at most
				 * XFS_LINEAR_EXTS records; don't claim
				 * more than that for this page.
				 */
				erp->er_extcount = MIN(count,
						(int)XFS_LINEAR_EXTS);
				count -= erp->er_extcount;
				if (count) {
					erp_idx++;
				}
			}
		}
	}
	ifp->if_bytes = new_size;
}
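
/*
 * Illustrative sketch (compiled out): which representation xfs_iext_add()
 * leaves the fork in, as a function of the final extent count.  The
 * concrete thresholds assume XFS_INLINE_EXTS == 2 and, with 4k extent
 * pages and 16-byte records, XFS_LINEAR_EXTS == 256; both values are
 * assumptions for illustration only.
 */
#if 0
static const char *
demo_iext_repr(int nextents)
{
	if (nextents <= 2)		/* XFS_INLINE_EXTS */
		return "inline";	/* if_u2.if_inline_ext */
	else if (nextents <= 256)	/* XFS_LINEAR_EXTS */
		return "direct";	/* if_u1.if_extents */
	else
		return "indirect";	/* if_u1.if_ext_irec */
}
#endif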

/*
 * This is called when incore extents are being added to the indirection
 * array and the new extents do not fit in the target extent list.  The
 * erp_idx parameter contains the irec index for the target extent list
 * in the indirection array, and the idx parameter contains the extent
 * index within the list.  The number of extents being added is stored
 * in the count parameter.
 *
 *    |-------|   |-------|
 *    |       |   |       |    idx - number of extents before idx
 *    |  idx  |   | count |
 *    |       |   |       |    count - number of extents being inserted at idx
 *    |-------|   |-------|
 *    | count |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_add_indirect_multi(
	xfs_ifork_t	*ifp,			/* inode fork pointer */
	int		erp_idx,		/* target extent irec index */
	xfs_extnum_t	idx,			/* index within target list */
	int		count)			/* new extents being added */
{
	int		byte_diff;		/* new bytes being added */
	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
	xfs_extnum_t	ext_diff;		/* number of extents to add */
	xfs_extnum_t	ext_cnt;		/* new extents still needed */
	xfs_extnum_t	nex2;			/* extents after idx + count */
	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
	int		nlists;			/* number of irec's (lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	nex2 = erp->er_extcount - idx;
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/*
	 * Save second part of target extent list
	 * (all extents past the insertion point, idx)
	 */
	if (nex2) {
		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
		erp->er_extcount -= nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
		memset(&erp->er_extbuf[idx], 0, byte_diff);
	}

	/*
	 * Add the new extents to the end of the target
	 * list, then allocate new irec record(s) and
	 * extent buffer(s) as needed to store the rest
	 * of the new extents.
	 */
	ext_cnt = count;
	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
	if (ext_diff) {
		erp->er_extcount += ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}
	while (ext_cnt) {
		erp_idx++;
		erp = xfs_iext_irec_new(ifp, erp_idx);
		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
		erp->er_extcount = ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}

	/* Add nex2 extents back to indirection array */
	if (nex2) {
		xfs_extnum_t	ext_avail;
		int		i;

		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		i = 0;
		/*
		 * If nex2 extents fit in the current page, append
		 * nex2_ep after the new extents.
		 */
		if (nex2 <= ext_avail) {
			i = erp->er_extcount;
		}
		/*
		 * Otherwise, check if space is available in the
		 * next page.
		 */
		else if ((erp_idx < nlists - 1) &&
			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
			erp_idx++;
			erp++;
			/* Create a hole for nex2 extents */
			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
		}
		/*
		 * Final choice, create a new extent page for
		 * nex2 extents.
		 */
		else {
			erp_idx++;
			erp = xfs_iext_irec_new(ifp, erp_idx);
		}
		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
		kmem_free(nex2_ep, byte_diff);
		erp->er_extcount += nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
	}
}
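
/*
 * Worked example (compiled out) of the split pictured above.  Assuming
 * XFS_LINEAR_EXTS == 256 (an assumption for illustration): insert
 * count == 300 records at idx == 100 of a full page.
 */
#if 0
static void
demo_add_indirect_multi(void)
{
	int extcount = 256;		/* target page is full */
	int idx = 100, count = 300;
	int nex2 = extcount - idx;	/* 156 trailing records saved aside */

	extcount = idx;			/* page truncated at idx (100) */
	extcount += 156;		/* MIN(300, 256 - 100) new records */
	count -= 156;			/* 144 spill into a fresh page */
	/* fresh page: 144 records, 256 - 144 == 112 free slots */
	/* 112 < nex2 (156), so the saved records get a page of their own */
}
#endif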

/*
 * This is called when the amount of space required for incore file
 * extents needs to be decreased.  The ext_diff parameter stores the
 * number of extents to be removed and the idx parameter contains
 * the extent index where the extents will be removed from.
 *
 * If the amount of space needed has decreased below the linear
 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
 * extent array.  Otherwise, use kmem_realloc() to adjust the
 * size to what is needed.
 */
void
xfs_iext_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	ASSERT(ext_diff > 0);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_iext_remove_indirect(ifp, idx, ext_diff);
	} else if (ifp->if_real_bytes) {
		xfs_iext_remove_direct(ifp, idx, ext_diff);
	} else {
		xfs_iext_remove_inline(ifp, idx, ext_diff);
	}
	ifp->if_bytes = new_size;
}

/*
 * This removes ext_diff extents from the inline buffer, beginning
 * at extent index idx.
 */
void
xfs_iext_remove_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	int		nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	ASSERT(idx < XFS_INLINE_EXTS);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(((nextents - ext_diff) > 0) &&
		(nextents - ext_diff) < XFS_INLINE_EXTS);

	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u2.if_inline_ext[idx],
			&ifp->if_u2.if_inline_ext[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			sizeof(xfs_bmbt_rec_t));
		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
			0, ext_diff * sizeof(xfs_bmbt_rec_t));
	} else {
		memset(&ifp->if_u2.if_inline_ext[idx], 0,
			ext_diff * sizeof(xfs_bmbt_rec_t));
	}
}
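
/*
 * Illustrative sketch (compiled out) of the shift-and-zero pattern used
 * by the removal helpers above and below: later records slide down over
 * the removed ones and the vacated tail is cleared.  Plain ints stand in
 * for xfs_bmbt_rec_t purely for demonstration.
 */
#if 0
#include <string.h>

static void
demo_remove(int *recs, int nextents, int idx, int ext_diff)
{
	if (idx + ext_diff < nextents)
		memmove(&recs[idx], &recs[idx + ext_diff],
			(nextents - (idx + ext_diff)) * sizeof(int));
	memset(&recs[nextents - ext_diff], 0, ext_diff * sizeof(int));
	/* {1,2,3,4,5}, idx == 1, ext_diff == 2  ->  {1,4,5,0,0} */
}
#endif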

/*
 * This removes ext_diff extents from a linear (direct) extent list,
 * beginning at extent index idx.  If the extents are being removed
 * from the end of the list (ie. truncate) then we just need to re-
 * allocate the list to remove the extra space.  Otherwise, if the
 * extents are being removed from the middle of the existing extent
 * entries, then we first need to move the extent records beginning
 * at idx + ext_diff up in the list to overwrite the records being
 * removed, then remove the extra space via kmem_realloc.
 */
void
xfs_iext_remove_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	new_size = ifp->if_bytes -
		(ext_diff * sizeof(xfs_bmbt_rec_t));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
		return;
	}
	/* Move extents up in the list (if needed) */
	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u1.if_extents[idx],
			&ifp->if_u1.if_extents[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			sizeof(xfs_bmbt_rec_t));
	}
	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
		0, ext_diff * sizeof(xfs_bmbt_rec_t));
	/*
	 * Reallocate the direct extent list.  If the extents
	 * will fit inside the inode then xfs_iext_realloc_direct
	 * will switch from direct to inline extent allocation
	 * mode for us.
	 */
	xfs_iext_realloc_direct(ifp, new_size);
	ifp->if_bytes = new_size;
}

/*
 * This is called when incore extents are being removed from the
 * indirection array and the extents being removed span multiple extent
 * buffers.  The idx parameter contains the file extent index where we
 * want to begin removing extents, and the count parameter contains
 * how many extents need to be removed.
 *
 *    |-------|   |-------|
 *    | nex1  |   |       |    nex1 - number of extents before idx
 *    |-------|   | count |
 *    |       |   |       |    count - number of extents being removed at idx
 *    | count |   |-------|
 *    |       |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_remove_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing extents */
	int		count)		/* number of extents to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		erp_idx = 0;	/* indirection array index */
	xfs_extnum_t	ext_cnt;	/* extents left to remove */
	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
	xfs_extnum_t	nex1;		/* number of extents before idx */
	xfs_extnum_t	nex2;		/* extents after idx + count */
	int		nlists;		/* entries in indirection array */
	int		page_idx = idx;	/* index in target extent list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
	ASSERT(erp != NULL);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nex1 = page_idx;
	ext_cnt = count;
	while (ext_cnt) {
		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
		/*
		 * Check for deletion of entire list;
		 * xfs_iext_irec_remove() updates extent offsets.
		 */
		if (ext_diff == erp->er_extcount) {
			xfs_iext_irec_remove(ifp, erp_idx);
			ext_cnt -= ext_diff;
			nex1 = 0;
			if (ext_cnt) {
				ASSERT(erp_idx < ifp->if_real_bytes /
					XFS_IEXT_BUFSZ);
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
				nex1 = 0;
				continue;
			} else {
				break;
			}
		}
		/* Move extents up (if needed) */
		if (nex2) {
			memmove(&erp->er_extbuf[nex1],
				&erp->er_extbuf[nex1 + ext_diff],
				nex2 * sizeof(xfs_bmbt_rec_t));
		}
		/* Zero out rest of page */
		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
		/* Update remaining counters */
		erp->er_extcount -= ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
		ext_cnt -= ext_diff;
		nex1 = 0;
		erp_idx++;
		erp++;
	}
	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
	xfs_iext_irec_compact(ifp);
}

/*
 * Create, destroy, or resize a linear (direct) block of extents.
 */
void
xfs_iext_realloc_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extents */
{
	int		rnew_size;	/* real new size of extents */

	rnew_size = new_size;

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
		 (new_size != ifp->if_real_bytes)));

	/* Free extent records */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	}
	/* Resize direct extent list and zero any new bytes */
	else if (ifp->if_real_bytes) {
		/* Check if extents will fit inside the inode */
		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
			xfs_iext_direct_to_inline(ifp, new_size /
				(uint)sizeof(xfs_bmbt_rec_t));
			ifp->if_bytes = new_size;
			return;
		}
		if ((new_size & (new_size - 1)) != 0) {
			rnew_size = xfs_iroundup(new_size);
		}
		if (rnew_size != ifp->if_real_bytes) {
			ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
				kmem_realloc(ifp->if_u1.if_extents,
						rnew_size,
						ifp->if_real_bytes,
						KM_SLEEP);
		}
		if (rnew_size > ifp->if_real_bytes) {
			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
				(uint)sizeof(xfs_bmbt_rec_t)], 0,
				rnew_size - ifp->if_real_bytes);
		}
	}
	/*
	 * Switch from the inline extent buffer to a direct
	 * extent list.  Be sure to include the inline extent
	 * bytes in new_size.
	 */
	else {
		new_size += ifp->if_bytes;
		if ((new_size & (new_size - 1)) != 0) {
			rnew_size = xfs_iroundup(new_size);
		}
		xfs_iext_inline_to_direct(ifp, rnew_size);
	}
	ifp->if_real_bytes = rnew_size;
	ifp->if_bytes = new_size;
}
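
/*
 * Illustrative sketch (compiled out): the allocation sizes
 * xfs_iext_realloc_direct() ends up with.  The 16-byte record size is
 * an assumption for illustration; the power-of-two rounding, which
 * amortizes the cost of repeated small additions, is the point.
 */
#if 0
#include <assert.h>

static unsigned int
demo_direct_alloc_size(unsigned int nextents)
{
	unsigned int size = nextents * 16;	/* 16-byte records assumed */

	if (size & (size - 1))			/* not a power of two? */
		size = xfs_iroundup(size);	/* round up allocation */
	return size;
}

static void
demo_realloc_direct(void)
{
	assert(demo_direct_alloc_size(5) == 128);	/* 80 -> 128 */
	assert(demo_direct_alloc_size(8) == 128);	/* exact fit */
	assert(demo_direct_alloc_size(9) == 256);	/* 144 -> 256 */
}
#endif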

/*
 * Switch from linear (direct) extent records to inline buffer.
 */
void
xfs_iext_direct_to_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	nextents)	/* number of extents in file */
{
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(nextents <= XFS_INLINE_EXTS);
	/*
	 * The inline buffer was zeroed when we switched
	 * from inline to direct extent allocation mode,
	 * so we don't need to clear it here.
	 */
	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
		nextents * sizeof(xfs_bmbt_rec_t));
	kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	ifp->if_real_bytes = 0;
}

/*
 * Switch from inline buffer to linear (direct) extent records.
 * new_size should already be rounded up to the next power of 2
 * by the caller (when appropriate), so use new_size as it is.
 * However, since new_size may be rounded up, we can't update
 * if_bytes here.  It is the caller's responsibility to update
 * if_bytes upon return.
 */
void
xfs_iext_inline_to_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extent list, in bytes */
{
	ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
		kmem_alloc(new_size, KM_SLEEP);
	memset(ifp->if_u1.if_extents, 0, new_size);
	if (ifp->if_bytes) {
		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
			ifp->if_bytes);
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_real_bytes = new_size;
}

/*
 * Resize an extent indirection array to new_size bytes.
 */
void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_SLEEP);
	}
}

/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
void
xfs_iext_indirect_to_direct(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	xfs_iext_irec_compact_full(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}
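
/*
 * Summary sketch of the conversion helpers above: the incore extent
 * list moves between three representations as it grows and shrinks.
 * The function names are the ones defined in this file.
 *
 *   inline   --xfs_iext_inline_to_direct-->    direct
 *   direct   --xfs_iext_direct_to_inline-->    inline
 *   direct   --xfs_iext_irec_init-->           indirect
 *   indirect --xfs_iext_indirect_to_direct-->  direct
 */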

/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}

/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_t	*base;		/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_t	*ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
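
/*
 * Illustrative sketch (compiled out): the binary search above, run on a
 * tiny direct list.  The extents are made up for the example, and the
 * sketch is simplified: on a miss the real helper returns the first
 * extent past bno rather than failing.
 */
#if 0
#include <assert.h>

struct demo_ext { unsigned long startoff, blockcount; };

static int
demo_bno_to_idx(struct demo_ext *base, int nextents, unsigned long bno)
{
	int low = 0, high = nextents - 1, idx;

	while (low <= high) {
		idx = (low + high) >> 1;
		if (bno < base[idx].startoff)
			high = idx - 1;
		else if (bno >= base[idx].startoff + base[idx].blockcount)
			low = idx + 1;
		else
			return idx;		/* bno inside this extent */
	}
	return -1;				/* hole: no covering extent */
}

static void
demo_bno_search(void)
{
	struct demo_ext ext[] = { {0, 4}, {10, 2}, {20, 8} };

	assert(demo_bno_to_idx(ext, 3, 2) == 0);	/* inside [0,4)   */
	assert(demo_bno_to_idx(ext, 3, 11) == 1);	/* inside [10,12) */
	assert(demo_bno_to_idx(ext, 3, 5) == -1);	/* falls in a hole */
}
#endif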

/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno.  Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}

/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp.  Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0 && page_idx <=
		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		    realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}
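
/*
 * Illustrative sketch (compiled out): how a file-relative extent index
 * decomposes into an (irec index, page index) pair.  A linear scan is
 * used here for clarity where the real helper binary-searches; the
 * er_extoff/er_extcount values are made up for the example.
 */
#if 0
#include <assert.h>

struct demo_irec { int er_extoff, er_extcount; };

static int
demo_idx_to_page(struct demo_irec *irec, int nlists, int idx, int *page_idxp)
{
	int i;

	for (i = 0; i < nlists; i++) {
		if (idx < irec[i].er_extoff + irec[i].er_extcount) {
			*page_idxp = idx - irec[i].er_extoff;
			return i;		/* irec index */
		}
	}
	return -1;
}

static void
demo_idx_decompose(void)
{
	struct demo_irec irec[] = { {0, 256}, {256, 256}, {512, 100} };
	int page_idx;

	assert(demo_idx_to_page(irec, 3, 300, &page_idx) == 1);
	assert(page_idx == 44);			/* 300 - 256 */
}
#endif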

/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = (xfs_ext_irec_t *)
		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);

	if (nextents == 0) {
		ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
			kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;

	return;
}

/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *)
		kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return &erp[erp_idx];
}
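
/*
 * Invariant sketch (compiled out): after xfs_iext_irec_new() and the
 * er_extoff updates elsewhere in this file, each page's er_extoff
 * equals the sum of the er_extcount values of all earlier pages.
 */
#if 0
#include <assert.h>

struct demo_irec2 { int er_extoff, er_extcount; };

static void
demo_check_extoffs(struct demo_irec2 *irec, int nlists)
{
	int i, off = 0;

	for (i = 0; i < nlists; i++) {
		assert(irec[i].er_extoff == off);
		off += irec[i].er_extcount;
	}
}
#endif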

/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array.  A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec,
			sizeof(xfs_ext_irec_t));
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}

/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array.  Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible.  The
 * compaction policy is as follows:
 *
 * Full Compaction: Extents fit into a single page (or inline buffer)
 * Full Compaction: Extents occupy less than one eighth of allocated space
 * Partial Compaction: Extents occupy more than one eighth and less than
 *                     half of allocated space
 * No Compaction: Extents occupy at least half of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
		xfs_iext_irec_compact_full(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
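
/*
 * Worked example of the policy above.  Assume (for illustration only)
 * XFS_LINEAR_EXTS == 256 and nlists == 16 pages, i.e. 4096 allocated
 * record slots:
 *
 *   nextents ==  200: fits one page (<= 256)  -> back to direct list
 *   nextents ==  300: 300 < 4096/8 (512)      -> full compaction
 *   nextents == 1000: 512 <= 1000 < 2048      -> page compaction
 *   nextents == 3000: 3000 >= 4096/2 (2048)   -> no compaction
 */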

/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memmove(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}

/*
 * Fully compact the extent records managed by the indirection array.
 */
void
xfs_iext_irec_compact_full(
	xfs_ifork_t	*ifp)			/* inode fork pointer */
{
	xfs_bmbt_rec_t	*ep, *ep_next;		/* extent record pointers */
	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
	int		erp_idx = 0;		/* extent irec index */
	int		ext_avail;		/* empty entries in ex list */
	int		ext_diff;		/* number of exts to add */
	int		nlists;			/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = ifp->if_u1.if_ext_irec;
	while (erp_idx < nlists - 1) {
		/*
		 * Compute the pointers inside the loop so we never
		 * dereference one entry past the end of the array
		 * when erp is already the last page.
		 */
		ep = &erp->er_extbuf[erp->er_extcount];
		erp_next = erp + 1;
		ep_next = erp_next->er_extbuf;
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		ext_diff = MIN(ext_avail, erp_next->er_extcount);
		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
		erp->er_extcount += ext_diff;
		erp_next->er_extcount -= ext_diff;
		/* Remove next page */
		if (erp_next->er_extcount == 0) {
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.  The buffer is always
			 * XFS_IEXT_BUFSZ bytes, regardless of how many
			 * records are left in it.
			 */
			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			erp = &ifp->if_u1.if_ext_irec[erp_idx];
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		/* Update next page */
		} else {
			/* Move rest of page up to become next new page */
			memmove(erp_next->er_extbuf, ep_next,
				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
			ep_next = erp_next->er_extbuf;
			memset(&ep_next[erp_next->er_extcount], 0,
				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
				sizeof(xfs_bmbt_rec_t));
		}
		if (erp->er_extcount == XFS_LINEAR_EXTS) {
			erp_idx++;
			if (erp_idx < nlists)
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
			else
				break;
		}
	}
}

/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists.  erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}