/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;
	int			ni;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			xfs_warn(mp,
				"%s: xfs_trans_read_buf() returned error %d.",
				__func__, error);
		} else {
			ASSERT(buf_flags & XBF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (iget_flags & XFS_IGET_UNTRUSTED) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
					     mp, dip);
#ifdef DEBUG
			xfs_emerg(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_magic));
			ASSERT(0);
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	*bpp = bp;
	*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}
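
/*
 * Example (illustrative sketch, not part of this file): a typical caller
 * first resolves the inode number to a disk location with xfs_imap() and
 * then uses xfs_imap_to_bp() to get at the on-disk inode, releasing the
 * buffer with xfs_trans_brelse() when done, much as xfs_iread() below does:
 *
 *	struct xfs_imap		imap;
 *	struct xfs_dinode	*dip;
 *	struct xfs_buf		*bp;
 *	int			error;
 *
 *	error = xfs_imap(mp, tp, ino, &imap, 0);
 *	if (error)
 *		return error;
 *	error = xfs_imap_to_bp(mp, tp, &imap, &dip, &bp, 0, 0);
 *	if (error)
 *		return error;
 *	... examine dip ...
 *	xfs_trans_brelse(tp, bp);
 */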

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error = 0;
	xfs_fsize_t		di_size;

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
		     !ip->i_mount->m_rtdev_targp)) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, has realtime flag set.",
			ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);

	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);

		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
			xfs_warn(ip->i_mount,
				"corrupt inode %Lu (bad attr fork size %Ld).",
				(unsigned long long) ip->i_ino,
				(long long) size);
			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
					     XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}

		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in if_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}
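
/*
 * Worked example (illustrative, values assumed): for a local fork of
 * 50 bytes that is too large for if_inline_data and so spills to a
 * heap allocation, the allocation is rounded up to a 4-byte multiple
 * so the fork data can be logged on word boundaries:
 *
 *	size      = 50;			if_bytes, the valid data
 *	real_size = roundup(50, 4);	52 bytes actually allocated
 *
 * Had size fit within sizeof(ifp->if_u2.if_inline_data), no heap
 * buffer would be allocated and if_real_bytes would remain 0.
 */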

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork) ||
		     XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) ||
		     XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_t		*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}

void
xfs_dinode_to_disk(
	xfs_dinode_t		*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
#ifdef DEBUG
		xfs_alert(mp,
			"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
			__func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
#endif /* DEBUG */
		error = XFS_ERROR(EINVAL);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat(ip, dip);
		if (error)  {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}
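
/*
 * Example (illustrative sketch, not part of this file): code that walks
 * the extent list of a btree-format fork is expected to populate the
 * in-core list first; callers in xfs_bmap.c follow roughly this pattern:
 *
 *	xfs_ifork_t	*ifp = XFS_IFORK_PTR(ip, whichfork);
 *
 *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *		error = xfs_iread_extents(tp, ip, whichfork);
 *		if (error)
 *			return error;
 *	}
 *	... iterate with xfs_iext_get_ext(ifp, i) ...
 */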

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation.  Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.  In this
 * case, therefore, we will return with ialloc_context set and a NULL
 * inode.  The caller should then commit the current transaction, start
 * a new transaction, and call xfs_ialloc() again to actually get the
 * inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}
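
/*
 * Illustrative sketch of the two-phase protocol described above (the
 * real caller of record is xfs_dir_ialloc() in xfs_utils.c); not part
 * of this file.  On the first pass, a non-NULL ialloc_context means
 * xfs_dialloc() had to allocate a new inode chunk, so the caller must
 * commit, re-reserve, and retry:
 *
 *	xfs_buf_t	*ialloc_context = NULL;
 *	xfs_inode_t	*ip = NULL;
 *
 *	error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *			   &ialloc_context, &ip);
 *	if (!error && ialloc_context != NULL) {
 *		xfs_trans_bhold(tp, ialloc_context);
 *		... commit tp, start and reserve a new transaction ...
 *		xfs_trans_bjoin(tp, ialloc_context);
 *		error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
 *				   okalloc, &ialloc_context, &ip);
 *	}
 */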

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}

/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			 offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}

/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				 offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			 (sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
	"%s: xfs_imap returned error %d.",
					 __func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
	"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list.  Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				 offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}
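
/*
 * For reference (illustration added here, not taken from the on-disk
 * format documentation): each AGI hash bucket heads a singly linked
 * list of unlinked inodes, chained through the on-disk
 * di_next_unlinked fields and terminated by NULLAGINO:
 *
 *	agi_unlinked[bucket] -> ino A -> ino B -> ... -> NULLAGINO
 *
 * xfs_iunlink() pushes at the head of the list; xfs_iunlink_remove()
 * may therefore have to walk the chain, as above, to find the
 * predecessor of the inode being pulled off.
 */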

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	xfs_inode_t	*free_ip,
	xfs_trans_t	*tp,
	xfs_ino_t	inum)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			nbufs;
	int			ninodes;
	int			i, j;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
					mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
	}

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster, 0);

		if (!bp)
			return ENOMEM;
		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale.  These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them.  By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		lip = bp->b_fspriv;
		while (lip) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
			lip = lip->li_bio_list;
		}


		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion.  This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < ninodes; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup. We need to check under the
			 * i_flags_lock for a valid inode here. Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale. If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				rcu_read_unlock();
				delay(1);
				goto retry;
			}
			rcu_read_unlock();

			xfs_iflock(ip);
			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_fields;
			iip->ili_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
						  &iip->ili_item);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}

	xfs_perag_put(pag);
	return 0;
}

/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI. We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist)
{
	int		error;
	int		delete;
	xfs_ino_t	first_ino;
	xfs_dinode_t	*dip;
	xfs_buf_t	*ibp;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error != 0) {
		return error;
	}

	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
	if (error != 0) {
		return error;
	}
	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	ip->i_d.di_gen++;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp,
			       0, 0);
	if (error)
		return error;

	/*
	 * Clear the on-disk di_mode.  This is to prevent xfs_bulkstat
	 * from picking up this inode when it is reclaimed (its incore state
	 * initialized but not flushed to disk yet).  The in-core di_mode is
	 * already cleared and a corresponding transaction logged.
	 * The hack here just synchronizes the in-core to on-disk
	 * di_mode value in advance before the actual inode sync to disk.
	 * This is OK because the inode is already unlinked and would never
	 * change its di_mode again for this inode generation.
	 * This is a temporary hack that would require a proper fix
	 * in the future.
	 */
	dip->di_mode = 0;

	if (delete) {
		error = xfs_ifree_cluster(ip, tp, first_ino);
	}

	return error;
}

/*
 * Reallocate the space for if_broot based on the number of records
 * being added or deleted as indicated in rec_diff.  Move the records
 * and pointers in if_broot to fit the new size.  When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If the if_broot is currently NULL, then
 * one will be allocated if we are adding records.  The caller must
 * also not request that the number of records go below zero, although
 * it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * rec_diff -- the change in the number of records, positive or negative,
 *	 requested for the if_broot array.
 */
void
xfs_iroot_realloc(
	xfs_inode_t		*ip,
	int			rec_diff,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			cur_max;
	xfs_ifork_t		*ifp;
	struct xfs_btree_block	*new_broot;
	int			new_max;
	size_t			new_size;
	char			*np;
	char			*op;

	/*
	 * Handle the degenerate case quietly.
	 */
	if (rec_diff == 0) {
		return;
	}

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (rec_diff > 0) {
		/*
		 * If there wasn't any memory allocated before, just
		 * allocate it now and get out.
		 */
		if (ifp->if_broot_bytes == 0) {
			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
			ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
			ifp->if_broot_bytes = (int)new_size;
			return;
		}

		/*
		 * If there is already an existing if_broot, then we need
		 * to realloc() it and shift the pointers to their new
		 * location.  The records don't change location because
		 * they are kept butted up against the btree block header.
		 */
		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
		new_max = cur_max + rec_diff;
		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
				KM_SLEEP | KM_NOFS);
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     (int)new_size);
		ifp->if_broot_bytes = (int)new_size;
		ASSERT(ifp->if_broot_bytes <=
			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
		return;
	}

	/*
	 * rec_diff is less than 0.  In this case, we are shrinking the
	 * if_broot buffer.  It must already exist.  If we go to zero
	 * records, just get rid of the root and clear the status bit.
	 */
	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
	new_max = cur_max + rec_diff;
	ASSERT(new_max >= 0);
	if (new_max > 0)
		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
	else
		new_size = 0;
	if (new_size > 0) {
		new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
		/*
		 * First copy over the btree block header.
		 */
		memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
	} else {
		new_broot = NULL;
		ifp->if_flags &= ~XFS_IFBROOT;
	}

	/*
	 * Only copy the records and pointers if there are any.
	 */
	if (new_max > 0) {
		/*
		 * First copy the records.
		 */
		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

		/*
		 * Then copy the pointers.
		 */
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
						     (int)new_size);
		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
	}
	kmem_free(ifp->if_broot);
	ifp->if_broot = new_broot;
	ifp->if_broot_bytes = (int)new_size;
	ASSERT(ifp->if_broot_bytes <=
		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
	return;
}


/*
 * This is called when the amount of space needed for if_data
 * is increased or decreased.  The change in size is indicated by
 * the number of bytes that need to be added or deleted in the
 * byte_diff parameter.
 *
 * If the amount of space needed has decreased below the size of the
 * inline buffer, then switch to using the inline buffer.  Otherwise,
 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
 * to what is needed.
 *
 * ip -- the inode whose if_data area is changing
 * byte_diff -- the change in the number of bytes, positive or negative,
 *	 requested for the if_data array.
 */
void
xfs_idata_realloc(
	xfs_inode_t	*ip,
	int		byte_diff,
	int		whichfork)
{
	xfs_ifork_t	*ifp;
	int		new_size;
	int		real_size;

	if (byte_diff == 0) {
		return;
	}

	ifp = XFS_IFORK_PTR(ip, whichfork);
	new_size = (int)ifp->if_bytes + byte_diff;
	ASSERT(new_size >= 0);

	if (new_size == 0) {
		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			kmem_free(ifp->if_u1.if_data);
		}
		ifp->if_u1.if_data = NULL;
		real_size = 0;
	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
		/*
		 * If the valid extents/data can fit in if_inline_ext/data,
		 * copy them from the malloc'd vector and free it.
		 */
		if (ifp->if_u1.if_data == NULL) {
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			ASSERT(ifp->if_real_bytes != 0);
			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
				new_size);
			kmem_free(ifp->if_u1.if_data);
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		}
		real_size = 0;
	} else {
		/*
		 * Stuck with malloc/realloc.
		 * For inline data, the underlying buffer must be
		 * a multiple of 4 bytes in size so that it can be
		 * logged and stay on word boundaries.  We enforce
		 * that here.
1898 */ 1899 real_size = roundup(new_size, 4); 1900 if (ifp->if_u1.if_data == NULL) { 1901 ASSERT(ifp->if_real_bytes == 0); 1902 ifp->if_u1.if_data = kmem_alloc(real_size, 1903 KM_SLEEP | KM_NOFS); 1904 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 1905 /* 1906 * Only do the realloc if the underlying size 1907 * is really changing. 1908 */ 1909 if (ifp->if_real_bytes != real_size) { 1910 ifp->if_u1.if_data = 1911 kmem_realloc(ifp->if_u1.if_data, 1912 real_size, 1913 ifp->if_real_bytes, 1914 KM_SLEEP | KM_NOFS); 1915 } 1916 } else { 1917 ASSERT(ifp->if_real_bytes == 0); 1918 ifp->if_u1.if_data = kmem_alloc(real_size, 1919 KM_SLEEP | KM_NOFS); 1920 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 1921 ifp->if_bytes); 1922 } 1923 } 1924 ifp->if_real_bytes = real_size; 1925 ifp->if_bytes = new_size; 1926 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 1927 } 1928 1929 void 1930 xfs_idestroy_fork( 1931 xfs_inode_t *ip, 1932 int whichfork) 1933 { 1934 xfs_ifork_t *ifp; 1935 1936 ifp = XFS_IFORK_PTR(ip, whichfork); 1937 if (ifp->if_broot != NULL) { 1938 kmem_free(ifp->if_broot); 1939 ifp->if_broot = NULL; 1940 } 1941 1942 /* 1943 * If the format is local, then we can't have an extents 1944 * array so just look for an inline data array. If we're 1945 * not local then we may or may not have an extents list, 1946 * so check and free it up if we do. 1947 */ 1948 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 1949 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 1950 (ifp->if_u1.if_data != NULL)) { 1951 ASSERT(ifp->if_real_bytes != 0); 1952 kmem_free(ifp->if_u1.if_data); 1953 ifp->if_u1.if_data = NULL; 1954 ifp->if_real_bytes = 0; 1955 } 1956 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 1957 ((ifp->if_flags & XFS_IFEXTIREC) || 1958 ((ifp->if_u1.if_extents != NULL) && 1959 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { 1960 ASSERT(ifp->if_real_bytes != 0); 1961 xfs_iext_destroy(ifp); 1962 } 1963 ASSERT(ifp->if_u1.if_extents == NULL || 1964 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 1965 ASSERT(ifp->if_real_bytes == 0); 1966 if (whichfork == XFS_ATTR_FORK) { 1967 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 1968 ip->i_afp = NULL; 1969 } 1970 } 1971 1972 /* 1973 * This is called to unpin an inode. The caller must have the inode locked 1974 * in at least shared mode so that the buffer cannot be subsequently pinned 1975 * once someone is waiting for it to be unpinned. 
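 *
 * A minimal caller-side sketch (for illustration only; the locking shown is
 * the rule stated above, not code copied from this file):
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	xfs_iunpin_wait(ip);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);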
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}

/*
 * xfs_iextents_copy()
 *
 * This is called to copy the REAL extents (as opposed to the delayed
 * allocation extents) from the inode into the given buffer.  It
 * returns the number of bytes copied into the buffer.
 *
 * The extent records are examined one at a time: real extents are
 * copied to the destination and delayed allocation extents are
 * skipped, so the copy is correct whether or not any delayed
 * allocation extents are present.
 */
int
xfs_iextents_copy(
	xfs_inode_t		*ip,
	xfs_bmbt_rec_t		*dp,
	int			whichfork)
{
	int			copied;
	int			i;
	xfs_ifork_t		*ifp;
	int			nrecs;
	xfs_fsblock_t		start_block;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(ifp->if_bytes > 0);

	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
	ASSERT(nrecs > 0);

	/*
	 * Copy the extents one at a time, skipping any delayed
	 * allocation extents.  There must be at least one real
	 * (non-delayed) extent.
	 */
	copied = 0;
	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		start_block = xfs_bmbt_get_startblock(ep);
		if (isnullstartblock(start_block)) {
			/*
			 * It's a delayed allocation extent, so skip it.
			 */
			continue;
		}

		/* Translate to on disk format */
		put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
		put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
		dp++;
		copied++;
	}
	ASSERT(copied != 0);
	xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));

	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
}

/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time.  While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
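 *
 * For example (an illustration, not a case taken from this file): a fork
 * logged while in LOCAL format and then converted to EXTENTS format before
 * the flush may still have XFS_ILOG_DDATA set, but xfs_iflush_fork() below
 * switches on the fork format and therefore only honours XFS_ILOG_DEXT for
 * an EXTENTS-format fork.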
2084 */ 2085 /*ARGSUSED*/ 2086 STATIC void 2087 xfs_iflush_fork( 2088 xfs_inode_t *ip, 2089 xfs_dinode_t *dip, 2090 xfs_inode_log_item_t *iip, 2091 int whichfork, 2092 xfs_buf_t *bp) 2093 { 2094 char *cp; 2095 xfs_ifork_t *ifp; 2096 xfs_mount_t *mp; 2097 #ifdef XFS_TRANS_DEBUG 2098 int first; 2099 #endif 2100 static const short brootflag[2] = 2101 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2102 static const short dataflag[2] = 2103 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2104 static const short extflag[2] = 2105 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2106 2107 if (!iip) 2108 return; 2109 ifp = XFS_IFORK_PTR(ip, whichfork); 2110 /* 2111 * This can happen if we gave up in iformat in an error path, 2112 * for the attribute fork. 2113 */ 2114 if (!ifp) { 2115 ASSERT(whichfork == XFS_ATTR_FORK); 2116 return; 2117 } 2118 cp = XFS_DFORK_PTR(dip, whichfork); 2119 mp = ip->i_mount; 2120 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2121 case XFS_DINODE_FMT_LOCAL: 2122 if ((iip->ili_fields & dataflag[whichfork]) && 2123 (ifp->if_bytes > 0)) { 2124 ASSERT(ifp->if_u1.if_data != NULL); 2125 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2126 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 2127 } 2128 break; 2129 2130 case XFS_DINODE_FMT_EXTENTS: 2131 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2132 !(iip->ili_fields & extflag[whichfork])); 2133 if ((iip->ili_fields & extflag[whichfork]) && 2134 (ifp->if_bytes > 0)) { 2135 ASSERT(xfs_iext_get_ext(ifp, 0)); 2136 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2137 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2138 whichfork); 2139 } 2140 break; 2141 2142 case XFS_DINODE_FMT_BTREE: 2143 if ((iip->ili_fields & brootflag[whichfork]) && 2144 (ifp->if_broot_bytes > 0)) { 2145 ASSERT(ifp->if_broot != NULL); 2146 ASSERT(ifp->if_broot_bytes <= 2147 (XFS_IFORK_SIZE(ip, whichfork) + 2148 XFS_BROOT_SIZE_ADJ)); 2149 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, 2150 (xfs_bmdr_block_t *)cp, 2151 XFS_DFORK_SIZE(dip, mp, whichfork)); 2152 } 2153 break; 2154 2155 case XFS_DINODE_FMT_DEV: 2156 if (iip->ili_fields & XFS_ILOG_DEV) { 2157 ASSERT(whichfork == XFS_DATA_FORK); 2158 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); 2159 } 2160 break; 2161 2162 case XFS_DINODE_FMT_UUID: 2163 if (iip->ili_fields & XFS_ILOG_UUID) { 2164 ASSERT(whichfork == XFS_DATA_FORK); 2165 memcpy(XFS_DFORK_DPTR(dip), 2166 &ip->i_df.if_u2.if_uuid, 2167 sizeof(uuid_t)); 2168 } 2169 break; 2170 2171 default: 2172 ASSERT(0); 2173 break; 2174 } 2175 } 2176 2177 STATIC int 2178 xfs_iflush_cluster( 2179 xfs_inode_t *ip, 2180 xfs_buf_t *bp) 2181 { 2182 xfs_mount_t *mp = ip->i_mount; 2183 struct xfs_perag *pag; 2184 unsigned long first_index, mask; 2185 unsigned long inodes_per_cluster; 2186 int ilist_size; 2187 xfs_inode_t **ilist; 2188 xfs_inode_t *iq; 2189 int nr_found; 2190 int clcount = 0; 2191 int bufwasdelwri; 2192 int i; 2193 2194 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2195 2196 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; 2197 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); 2198 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); 2199 if (!ilist) 2200 goto out_put; 2201 2202 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); 2203 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 2204 rcu_read_lock(); 2205 /* really need a gang lookup range call here */ 2206 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, 2207 first_index, inodes_per_cluster); 2208 if (nr_found == 0) 2209 goto out_free; 2210 
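	/*
	 * Walk the inodes the gang lookup returned and attempt to flush
	 * each dirty inode belonging to this cluster into the buffer that
	 * already holds the inode we were called with.
	 */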
	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * Because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here.  Skip it if it is not valid or the wrong inode.
		 * Note that the check must be made on the inode we found
		 * (iq), not on the inode being flushed (ip).
		 */
		spin_lock(&iq->i_flags_lock);
		if (!iq->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&iq->i_flags_lock);
			continue;
		}
		spin_unlock(&iq->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */

		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * Arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;
			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(xs_icluster_flushcnt);
		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was delwri, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, EIO);
			xfs_buf_ioend(bp, 0);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock of iq, the inode that failed to flush.
	 */
	xfs_iflush_abort(iq, false);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
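 *
 * A sketch of the expected calling pattern (illustrative; inode reclaim
 * does something very similar):
 *
 *	error = xfs_iflush(ip, &bp);
 *	if (!error) {
 *		error = xfs_bwrite(bp);
 *		xfs_buf_relse(bp);
 *	}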
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes.  We have to check this after ensuring the inode is unpinned
	 * so that it is safe to reclaim the stale inode after the flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly.  If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;
	}

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
2405 */ 2406 if (xfs_buf_ispinned(bp)) 2407 xfs_log_force(mp, 0); 2408 2409 /* 2410 * inode clustering: 2411 * see if other inodes can be gathered into this write 2412 */ 2413 error = xfs_iflush_cluster(ip, bp); 2414 if (error) 2415 goto cluster_corrupt_out; 2416 2417 *bpp = bp; 2418 return 0; 2419 2420 corrupt_out: 2421 xfs_buf_relse(bp); 2422 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 2423 cluster_corrupt_out: 2424 error = XFS_ERROR(EFSCORRUPTED); 2425 abort_out: 2426 /* 2427 * Unlocks the flush lock 2428 */ 2429 xfs_iflush_abort(ip, false); 2430 return error; 2431 } 2432 2433 2434 STATIC int 2435 xfs_iflush_int( 2436 xfs_inode_t *ip, 2437 xfs_buf_t *bp) 2438 { 2439 xfs_inode_log_item_t *iip; 2440 xfs_dinode_t *dip; 2441 xfs_mount_t *mp; 2442 #ifdef XFS_TRANS_DEBUG 2443 int first; 2444 #endif 2445 2446 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2447 ASSERT(xfs_isiflocked(ip)); 2448 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 2449 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 2450 2451 iip = ip->i_itemp; 2452 mp = ip->i_mount; 2453 2454 /* set *dip = inode's place in the buffer */ 2455 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 2456 2457 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), 2458 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 2459 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2460 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p", 2461 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 2462 goto corrupt_out; 2463 } 2464 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 2465 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 2466 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2467 "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 2468 __func__, ip->i_ino, ip, ip->i_d.di_magic); 2469 goto corrupt_out; 2470 } 2471 if (S_ISREG(ip->i_d.di_mode)) { 2472 if (XFS_TEST_ERROR( 2473 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2474 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 2475 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 2476 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2477 "%s: Bad regular inode %Lu, ptr 0x%p", 2478 __func__, ip->i_ino, ip); 2479 goto corrupt_out; 2480 } 2481 } else if (S_ISDIR(ip->i_d.di_mode)) { 2482 if (XFS_TEST_ERROR( 2483 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2484 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 2485 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 2486 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 2487 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2488 "%s: Bad directory inode %Lu, ptr 0x%p", 2489 __func__, ip->i_ino, ip); 2490 goto corrupt_out; 2491 } 2492 } 2493 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 2494 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 2495 XFS_RANDOM_IFLUSH_5)) { 2496 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2497 "%s: detected corrupt incore inode %Lu, " 2498 "total extents = %d, nblocks = %Ld, ptr 0x%p", 2499 __func__, ip->i_ino, 2500 ip->i_d.di_nextents + ip->i_d.di_anextents, 2501 ip->i_d.di_nblocks, ip); 2502 goto corrupt_out; 2503 } 2504 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 2505 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 2506 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2507 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 2508 __func__, ip->i_ino, ip->i_d.di_forkoff, ip); 2509 goto corrupt_out; 2510 } 2511 /* 2512 * bump the flush iteration count, used to detect flushes which 2513 * postdate a log record during recovery. 
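 *
 * (Log recovery compares the flush iteration counter in the on-disk inode
 * with the one in the inode log record and skips replaying records that
 * are older; the DI_MAX_FLUSH wrap is handled below.)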
2514 */ 2515 2516 ip->i_d.di_flushiter++; 2517 2518 /* 2519 * Copy the dirty parts of the inode into the on-disk 2520 * inode. We always copy out the core of the inode, 2521 * because if the inode is dirty at all the core must 2522 * be. 2523 */ 2524 xfs_dinode_to_disk(dip, &ip->i_d); 2525 2526 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 2527 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 2528 ip->i_d.di_flushiter = 0; 2529 2530 /* 2531 * If this is really an old format inode and the superblock version 2532 * has not been updated to support only new format inodes, then 2533 * convert back to the old inode format. If the superblock version 2534 * has been updated, then make the conversion permanent. 2535 */ 2536 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); 2537 if (ip->i_d.di_version == 1) { 2538 if (!xfs_sb_version_hasnlink(&mp->m_sb)) { 2539 /* 2540 * Convert it back. 2541 */ 2542 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 2543 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink); 2544 } else { 2545 /* 2546 * The superblock version has already been bumped, 2547 * so just make the conversion to the new inode 2548 * format permanent. 2549 */ 2550 ip->i_d.di_version = 2; 2551 dip->di_version = 2; 2552 ip->i_d.di_onlink = 0; 2553 dip->di_onlink = 0; 2554 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 2555 memset(&(dip->di_pad[0]), 0, 2556 sizeof(dip->di_pad)); 2557 ASSERT(xfs_get_projid(ip) == 0); 2558 } 2559 } 2560 2561 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp); 2562 if (XFS_IFORK_Q(ip)) 2563 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 2564 xfs_inobp_check(mp, bp); 2565 2566 /* 2567 * We've recorded everything logged in the inode, so we'd like to clear 2568 * the ili_fields bits so we don't log and flush things unnecessarily. 2569 * However, we can't stop logging all this information until the data 2570 * we've copied into the disk buffer is written to disk. If we did we 2571 * might overwrite the copy of the inode in the log with all the data 2572 * after re-logging only part of it, and in the face of a crash we 2573 * wouldn't have all the data we need to recover. 2574 * 2575 * What we do is move the bits to the ili_last_fields field. When 2576 * logging the inode, these bits are moved back to the ili_fields field. 2577 * In the xfs_iflush_done() routine we clear ili_last_fields, since we 2578 * know that the information those bits represent is permanently on 2579 * disk. As long as the flush completes before the inode is logged 2580 * again, then both ili_fields and ili_last_fields will be cleared. 2581 * 2582 * We can play with the ili_fields bits here, because the inode lock 2583 * must be held exclusively in order to set bits there and the flush 2584 * lock protects the ili_last_fields bits. Set ili_logged so the flush 2585 * done routine can tell whether or not to look in the AIL. Also, store 2586 * the current LSN of the inode so that we can tell whether the item has 2587 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we 2588 * need the AIL lock, because it is a 64 bit value that cannot be read 2589 * atomically. 2590 */ 2591 if (iip != NULL && iip->ili_fields != 0) { 2592 iip->ili_last_fields = iip->ili_fields; 2593 iip->ili_fields = 0; 2594 iip->ili_logged = 1; 2595 2596 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 2597 &iip->ili_item.li_lsn); 2598 2599 /* 2600 * Attach the function xfs_iflush_done to the inode's 2601 * buffer. 
This will remove the inode from the AIL 2602 * and unlock the inode's flush lock when the inode is 2603 * completely written to disk. 2604 */ 2605 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); 2606 2607 ASSERT(bp->b_fspriv != NULL); 2608 ASSERT(bp->b_iodone != NULL); 2609 } else { 2610 /* 2611 * We're flushing an inode which is not in the AIL and has 2612 * not been logged. For this case we can immediately drop 2613 * the inode flush lock because we can avoid the whole 2614 * AIL state thing. It's OK to drop the flush lock now, 2615 * because we've already locked the buffer and to do anything 2616 * you really need both. 2617 */ 2618 if (iip != NULL) { 2619 ASSERT(iip->ili_logged == 0); 2620 ASSERT(iip->ili_last_fields == 0); 2621 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 2622 } 2623 xfs_ifunlock(ip); 2624 } 2625 2626 return 0; 2627 2628 corrupt_out: 2629 return XFS_ERROR(EFSCORRUPTED); 2630 } 2631 2632 /* 2633 * Return a pointer to the extent record at file index idx. 2634 */ 2635 xfs_bmbt_rec_host_t * 2636 xfs_iext_get_ext( 2637 xfs_ifork_t *ifp, /* inode fork pointer */ 2638 xfs_extnum_t idx) /* index of target extent */ 2639 { 2640 ASSERT(idx >= 0); 2641 ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); 2642 2643 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 2644 return ifp->if_u1.if_ext_irec->er_extbuf; 2645 } else if (ifp->if_flags & XFS_IFEXTIREC) { 2646 xfs_ext_irec_t *erp; /* irec pointer */ 2647 int erp_idx = 0; /* irec index */ 2648 xfs_extnum_t page_idx = idx; /* ext index in target list */ 2649 2650 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 2651 return &erp->er_extbuf[page_idx]; 2652 } else if (ifp->if_bytes) { 2653 return &ifp->if_u1.if_extents[idx]; 2654 } else { 2655 return NULL; 2656 } 2657 } 2658 2659 /* 2660 * Insert new item(s) into the extent records for incore inode 2661 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 2662 */ 2663 void 2664 xfs_iext_insert( 2665 xfs_inode_t *ip, /* incore inode pointer */ 2666 xfs_extnum_t idx, /* starting index of new items */ 2667 xfs_extnum_t count, /* number of inserted items */ 2668 xfs_bmbt_irec_t *new, /* items to insert */ 2669 int state) /* type of extent conversion */ 2670 { 2671 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; 2672 xfs_extnum_t i; /* extent record index */ 2673 2674 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_); 2675 2676 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 2677 xfs_iext_add(ifp, idx, count); 2678 for (i = idx; i < idx + count; i++, new++) 2679 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); 2680 } 2681 2682 /* 2683 * This is called when the amount of space required for incore file 2684 * extents needs to be increased. The ext_diff parameter stores the 2685 * number of new extents being added and the idx parameter contains 2686 * the extent index where the new extents will be added. If the new 2687 * extents are being appended, then we just need to (re)allocate and 2688 * initialize the space. Otherwise, if the new extents are being 2689 * inserted into the middle of the existing entries, a bit more work 2690 * is required to make room for the new extents to be inserted. The 2691 * caller is responsible for filling in the new extent entries upon 2692 * return. 
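 *
 * For example (illustrative numbers only): inserting one extent at idx 3
 * of a five-extent direct list moves records 3..4 up one slot and zeroes
 * the vacated slot 3 for the caller to fill in.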
 */
void
xfs_iext_add(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin adding exts */
	int		ext_diff)	/* number of extents to add */
{
	int		byte_diff;	/* new bytes being added */
	int		new_size;	/* size of extents after adding */
	xfs_extnum_t	nextents;	/* number of extents in file */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT((idx >= 0) && (idx <= nextents));
	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
	new_size = ifp->if_bytes + byte_diff;
	/*
	 * If the new number of extents (nextents + ext_diff)
	 * fits inside the inode, then continue to use the inline
	 * extent buffer.
	 */
	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
		if (idx < nextents) {
			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
				&ifp->if_u2.if_inline_ext[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
		}
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		ifp->if_real_bytes = 0;
	}
	/*
	 * Otherwise use a linear (direct) extent list.
	 * If the extents are currently inside the inode,
	 * xfs_iext_realloc_direct will switch us from
	 * inline to direct extent allocation mode.
	 */
	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, new_size);
		if (idx < nextents) {
			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
				&ifp->if_u1.if_extents[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
		}
	}
	/* Indirection array */
	else {
		xfs_ext_irec_t	*erp;
		int		erp_idx = 0;
		int		page_idx = idx;

		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
		if (ifp->if_flags & XFS_IFEXTIREC) {
			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
		} else {
			xfs_iext_irec_init(ifp);
			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
			erp = ifp->if_u1.if_ext_irec;
		}
		/* Extents fit in target extent page */
		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
			if (page_idx < erp->er_extcount) {
				memmove(&erp->er_extbuf[page_idx + ext_diff],
					&erp->er_extbuf[page_idx],
					(erp->er_extcount - page_idx) *
					sizeof(xfs_bmbt_rec_t));
				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
			}
			erp->er_extcount += ext_diff;
			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		}
		/* Insert a new extent page */
		else if (erp) {
			xfs_iext_add_indirect_multi(ifp,
				erp_idx, page_idx, ext_diff);
		}
		/*
		 * If extent(s) are being appended to the last page in
		 * the indirection array and the new extent(s) don't fit
		 * in the page, then erp is NULL and erp_idx is set to
		 * the next index needed in the indirection array.
		 */
		else {
			int	count = ext_diff;

			while (count) {
				erp = xfs_iext_irec_new(ifp, erp_idx);
				/*
				 * A new page holds at most XFS_LINEAR_EXTS
				 * records, so clamp er_extcount accordingly.
				 */
				erp->er_extcount = MIN(count,
						       (int)XFS_LINEAR_EXTS);
				count -= erp->er_extcount;
				if (count) {
					erp_idx++;
				}
			}
		}
	}
	ifp->if_bytes = new_size;
}

/*
 * This is called when incore extents are being added to the indirection
 * array and the new extents do not fit in the target extent list.  The
 * erp_idx parameter contains the irec index for the target extent list
 * in the indirection array, and the idx parameter contains the extent
 * index within the list.
 * The number of extents being added is stored in the count parameter.
 *
 *    |-------|   |-------|
 *    |       |   |       |    idx - number of extents before idx
 *    |  idx  |   | count |
 *    |       |   |       |    count - number of extents being inserted at idx
 *    |-------|   |-------|
 *    | count |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_add_indirect_multi(
	xfs_ifork_t	*ifp,			/* inode fork pointer */
	int		erp_idx,		/* target extent irec index */
	xfs_extnum_t	idx,			/* index within target list */
	int		count)			/* new extents being added */
{
	int		byte_diff;		/* new bytes being added */
	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
	xfs_extnum_t	ext_diff;		/* number of extents to add */
	xfs_extnum_t	ext_cnt;		/* new extents still needed */
	xfs_extnum_t	nex2;			/* extents after idx + count */
	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
	int		nlists;			/* number of irec's (lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	nex2 = erp->er_extcount - idx;
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/*
	 * Save second part of target extent list
	 * (all extents from idx to the end of the target list).
	 */
	if (nex2) {
		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
		erp->er_extcount -= nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
		memset(&erp->er_extbuf[idx], 0, byte_diff);
	}

	/*
	 * Add the new extents to the end of the target
	 * list, then allocate new irec record(s) and
	 * extent buffer(s) as needed to store the rest
	 * of the new extents.
	 */
	ext_cnt = count;
	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
	if (ext_diff) {
		erp->er_extcount += ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}
	while (ext_cnt) {
		erp_idx++;
		erp = xfs_iext_irec_new(ifp, erp_idx);
		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
		erp->er_extcount = ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}

	/* Add nex2 extents back to indirection array */
	if (nex2) {
		xfs_extnum_t	ext_avail;
		int		i;

		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		i = 0;
		/*
		 * If nex2 extents fit in the current page, append
		 * nex2_ep after the new extents.
		 */
		if (nex2 <= ext_avail) {
			i = erp->er_extcount;
		}
		/*
		 * Otherwise, check if space is available in the
		 * next page.
		 */
		else if ((erp_idx < nlists - 1) &&
			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
			erp_idx++;
			erp++;
			/* Create a hole for nex2 extents */
			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
		}
		/*
		 * Final choice, create a new extent page for
		 * nex2 extents.
2892 */ 2893 else { 2894 erp_idx++; 2895 erp = xfs_iext_irec_new(ifp, erp_idx); 2896 } 2897 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 2898 kmem_free(nex2_ep); 2899 erp->er_extcount += nex2; 2900 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 2901 } 2902 } 2903 2904 /* 2905 * This is called when the amount of space required for incore file 2906 * extents needs to be decreased. The ext_diff parameter stores the 2907 * number of extents to be removed and the idx parameter contains 2908 * the extent index where the extents will be removed from. 2909 * 2910 * If the amount of space needed has decreased below the linear 2911 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous 2912 * extent array. Otherwise, use kmem_realloc() to adjust the 2913 * size to what is needed. 2914 */ 2915 void 2916 xfs_iext_remove( 2917 xfs_inode_t *ip, /* incore inode pointer */ 2918 xfs_extnum_t idx, /* index to begin removing exts */ 2919 int ext_diff, /* number of extents to remove */ 2920 int state) /* type of extent conversion */ 2921 { 2922 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; 2923 xfs_extnum_t nextents; /* number of extents in file */ 2924 int new_size; /* size of extents after removal */ 2925 2926 trace_xfs_iext_remove(ip, idx, state, _RET_IP_); 2927 2928 ASSERT(ext_diff > 0); 2929 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2930 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 2931 2932 if (new_size == 0) { 2933 xfs_iext_destroy(ifp); 2934 } else if (ifp->if_flags & XFS_IFEXTIREC) { 2935 xfs_iext_remove_indirect(ifp, idx, ext_diff); 2936 } else if (ifp->if_real_bytes) { 2937 xfs_iext_remove_direct(ifp, idx, ext_diff); 2938 } else { 2939 xfs_iext_remove_inline(ifp, idx, ext_diff); 2940 } 2941 ifp->if_bytes = new_size; 2942 } 2943 2944 /* 2945 * This removes ext_diff extents from the inline buffer, beginning 2946 * at extent index idx. 2947 */ 2948 void 2949 xfs_iext_remove_inline( 2950 xfs_ifork_t *ifp, /* inode fork pointer */ 2951 xfs_extnum_t idx, /* index to begin removing exts */ 2952 int ext_diff) /* number of extents to remove */ 2953 { 2954 int nextents; /* number of extents in file */ 2955 2956 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 2957 ASSERT(idx < XFS_INLINE_EXTS); 2958 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2959 ASSERT(((nextents - ext_diff) > 0) && 2960 (nextents - ext_diff) < XFS_INLINE_EXTS); 2961 2962 if (idx + ext_diff < nextents) { 2963 memmove(&ifp->if_u2.if_inline_ext[idx], 2964 &ifp->if_u2.if_inline_ext[idx + ext_diff], 2965 (nextents - (idx + ext_diff)) * 2966 sizeof(xfs_bmbt_rec_t)); 2967 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], 2968 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 2969 } else { 2970 memset(&ifp->if_u2.if_inline_ext[idx], 0, 2971 ext_diff * sizeof(xfs_bmbt_rec_t)); 2972 } 2973 } 2974 2975 /* 2976 * This removes ext_diff extents from a linear (direct) extent list, 2977 * beginning at extent index idx. If the extents are being removed 2978 * from the end of the list (ie. truncate) then we just need to re- 2979 * allocate the list to remove the extra space. Otherwise, if the 2980 * extents are being removed from the middle of the existing extent 2981 * entries, then we first need to move the extent records beginning 2982 * at idx + ext_diff up in the list to overwrite the records being 2983 * removed, then remove the extra space via kmem_realloc. 
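 *
 * E.g. (illustrative numbers only): removing two extents at idx 1 from a
 * six-extent direct list moves records 3..5 down to idx 1, zeroes the last
 * two slots, and shrinks the allocation to four records via
 * xfs_iext_realloc_direct().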
2984 */ 2985 void 2986 xfs_iext_remove_direct( 2987 xfs_ifork_t *ifp, /* inode fork pointer */ 2988 xfs_extnum_t idx, /* index to begin removing exts */ 2989 int ext_diff) /* number of extents to remove */ 2990 { 2991 xfs_extnum_t nextents; /* number of extents in file */ 2992 int new_size; /* size of extents after removal */ 2993 2994 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 2995 new_size = ifp->if_bytes - 2996 (ext_diff * sizeof(xfs_bmbt_rec_t)); 2997 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2998 2999 if (new_size == 0) { 3000 xfs_iext_destroy(ifp); 3001 return; 3002 } 3003 /* Move extents up in the list (if needed) */ 3004 if (idx + ext_diff < nextents) { 3005 memmove(&ifp->if_u1.if_extents[idx], 3006 &ifp->if_u1.if_extents[idx + ext_diff], 3007 (nextents - (idx + ext_diff)) * 3008 sizeof(xfs_bmbt_rec_t)); 3009 } 3010 memset(&ifp->if_u1.if_extents[nextents - ext_diff], 3011 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3012 /* 3013 * Reallocate the direct extent list. If the extents 3014 * will fit inside the inode then xfs_iext_realloc_direct 3015 * will switch from direct to inline extent allocation 3016 * mode for us. 3017 */ 3018 xfs_iext_realloc_direct(ifp, new_size); 3019 ifp->if_bytes = new_size; 3020 } 3021 3022 /* 3023 * This is called when incore extents are being removed from the 3024 * indirection array and the extents being removed span multiple extent 3025 * buffers. The idx parameter contains the file extent index where we 3026 * want to begin removing extents, and the count parameter contains 3027 * how many extents need to be removed. 3028 * 3029 * |-------| |-------| 3030 * | nex1 | | | nex1 - number of extents before idx 3031 * |-------| | count | 3032 * | | | | count - number of extents being removed at idx 3033 * | count | |-------| 3034 * | | | nex2 | nex2 - number of extents after idx + count 3035 * |-------| |-------| 3036 */ 3037 void 3038 xfs_iext_remove_indirect( 3039 xfs_ifork_t *ifp, /* inode fork pointer */ 3040 xfs_extnum_t idx, /* index to begin removing extents */ 3041 int count) /* number of extents to remove */ 3042 { 3043 xfs_ext_irec_t *erp; /* indirection array pointer */ 3044 int erp_idx = 0; /* indirection array index */ 3045 xfs_extnum_t ext_cnt; /* extents left to remove */ 3046 xfs_extnum_t ext_diff; /* extents to remove in current list */ 3047 xfs_extnum_t nex1; /* number of extents before idx */ 3048 xfs_extnum_t nex2; /* extents after idx + count */ 3049 int page_idx = idx; /* index in target extent list */ 3050 3051 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3052 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3053 ASSERT(erp != NULL); 3054 nex1 = page_idx; 3055 ext_cnt = count; 3056 while (ext_cnt) { 3057 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); 3058 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); 3059 /* 3060 * Check for deletion of entire list; 3061 * xfs_iext_irec_remove() updates extent offsets. 
 */
		if (ext_diff == erp->er_extcount) {
			xfs_iext_irec_remove(ifp, erp_idx);
			ext_cnt -= ext_diff;
			nex1 = 0;
			if (ext_cnt) {
				ASSERT(erp_idx < ifp->if_real_bytes /
					XFS_IEXT_BUFSZ);
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
				continue;
			} else {
				break;
			}
		}
		/* Move extents up (if needed) */
		if (nex2) {
			memmove(&erp->er_extbuf[nex1],
				&erp->er_extbuf[nex1 + ext_diff],
				nex2 * sizeof(xfs_bmbt_rec_t));
		}
		/* Zero out rest of page */
		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
		/* Update remaining counters */
		erp->er_extcount -= ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
		ext_cnt -= ext_diff;
		nex1 = 0;
		erp_idx++;
		erp++;
	}
	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
	xfs_iext_irec_compact(ifp);
}

/*
 * Create, destroy, or resize a linear (direct) block of extents.
 */
void
xfs_iext_realloc_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extents */
{
	int		rnew_size;	/* real new size of extents */

	rnew_size = new_size;

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
		 (new_size != ifp->if_real_bytes)));

	/* Free extent records */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	}
	/* Resize direct extent list and zero any new bytes */
	else if (ifp->if_real_bytes) {
		/* Check if extents will fit inside the inode */
		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
			xfs_iext_direct_to_inline(ifp, new_size /
				(uint)sizeof(xfs_bmbt_rec_t));
			ifp->if_bytes = new_size;
			return;
		}
		if (!is_power_of_2(new_size)) {
			rnew_size = roundup_pow_of_two(new_size);
		}
		if (rnew_size != ifp->if_real_bytes) {
			ifp->if_u1.if_extents =
				kmem_realloc(ifp->if_u1.if_extents,
						rnew_size,
						ifp->if_real_bytes, KM_NOFS);
		}
		if (rnew_size > ifp->if_real_bytes) {
			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
				(uint)sizeof(xfs_bmbt_rec_t)], 0,
				rnew_size - ifp->if_real_bytes);
		}
	}
	/*
	 * Switch from the inline extent buffer to a direct
	 * extent list.  Be sure to include the inline extent
	 * bytes in new_size.
	 */
	else {
		new_size += ifp->if_bytes;
		/*
		 * Recompute rnew_size from the grown new_size; the value
		 * set on entry is stale here, and using it would leave
		 * if_real_bytes smaller than if_bytes whenever the grown
		 * size happens to be a power of two already.
		 */
		rnew_size = new_size;
		if (!is_power_of_2(new_size)) {
			rnew_size = roundup_pow_of_two(new_size);
		}
		xfs_iext_inline_to_direct(ifp, rnew_size);
	}
	ifp->if_real_bytes = rnew_size;
	ifp->if_bytes = new_size;
}

/*
 * Switch from linear (direct) extent records to inline buffer.
 */
void
xfs_iext_direct_to_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	nextents)	/* number of extents in file */
{
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(nextents <= XFS_INLINE_EXTS);
	/*
	 * The inline buffer was zeroed when we switched
	 * from inline to direct extent allocation mode,
	 * so we don't need to clear it here.
	 */
	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
		nextents * sizeof(xfs_bmbt_rec_t));
	kmem_free(ifp->if_u1.if_extents);
	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	ifp->if_real_bytes = 0;
}

/*
 * Switch from inline buffer to linear (direct) extent records.
3182 * new_size should already be rounded up to the next power of 2 3183 * by the caller (when appropriate), so use new_size as it is. 3184 * However, since new_size may be rounded up, we can't update 3185 * if_bytes here. It is the caller's responsibility to update 3186 * if_bytes upon return. 3187 */ 3188 void 3189 xfs_iext_inline_to_direct( 3190 xfs_ifork_t *ifp, /* inode fork pointer */ 3191 int new_size) /* number of extents in file */ 3192 { 3193 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS); 3194 memset(ifp->if_u1.if_extents, 0, new_size); 3195 if (ifp->if_bytes) { 3196 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, 3197 ifp->if_bytes); 3198 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3199 sizeof(xfs_bmbt_rec_t)); 3200 } 3201 ifp->if_real_bytes = new_size; 3202 } 3203 3204 /* 3205 * Resize an extent indirection array to new_size bytes. 3206 */ 3207 STATIC void 3208 xfs_iext_realloc_indirect( 3209 xfs_ifork_t *ifp, /* inode fork pointer */ 3210 int new_size) /* new indirection array size */ 3211 { 3212 int nlists; /* number of irec's (ex lists) */ 3213 int size; /* current indirection array size */ 3214 3215 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3216 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3217 size = nlists * sizeof(xfs_ext_irec_t); 3218 ASSERT(ifp->if_real_bytes); 3219 ASSERT((new_size >= 0) && (new_size != size)); 3220 if (new_size == 0) { 3221 xfs_iext_destroy(ifp); 3222 } else { 3223 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) 3224 kmem_realloc(ifp->if_u1.if_ext_irec, 3225 new_size, size, KM_NOFS); 3226 } 3227 } 3228 3229 /* 3230 * Switch from indirection array to linear (direct) extent allocations. 3231 */ 3232 STATIC void 3233 xfs_iext_indirect_to_direct( 3234 xfs_ifork_t *ifp) /* inode fork pointer */ 3235 { 3236 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 3237 xfs_extnum_t nextents; /* number of extents in file */ 3238 int size; /* size of file extents */ 3239 3240 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3241 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3242 ASSERT(nextents <= XFS_LINEAR_EXTS); 3243 size = nextents * sizeof(xfs_bmbt_rec_t); 3244 3245 xfs_iext_irec_compact_pages(ifp); 3246 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); 3247 3248 ep = ifp->if_u1.if_ext_irec->er_extbuf; 3249 kmem_free(ifp->if_u1.if_ext_irec); 3250 ifp->if_flags &= ~XFS_IFEXTIREC; 3251 ifp->if_u1.if_extents = ep; 3252 ifp->if_bytes = size; 3253 if (nextents < XFS_LINEAR_EXTS) { 3254 xfs_iext_realloc_direct(ifp, size); 3255 } 3256 } 3257 3258 /* 3259 * Free incore file extents. 3260 */ 3261 void 3262 xfs_iext_destroy( 3263 xfs_ifork_t *ifp) /* inode fork pointer */ 3264 { 3265 if (ifp->if_flags & XFS_IFEXTIREC) { 3266 int erp_idx; 3267 int nlists; 3268 3269 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3270 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { 3271 xfs_iext_irec_remove(ifp, erp_idx); 3272 } 3273 ifp->if_flags &= ~XFS_IFEXTIREC; 3274 } else if (ifp->if_real_bytes) { 3275 kmem_free(ifp->if_u1.if_extents); 3276 } else if (ifp->if_bytes) { 3277 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3278 sizeof(xfs_bmbt_rec_t)); 3279 } 3280 ifp->if_u1.if_extents = NULL; 3281 ifp->if_real_bytes = 0; 3282 ifp->if_bytes = 0; 3283 } 3284 3285 /* 3286 * Return a pointer to the extent record for file system block bno. 
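 *
 * If bno is not mapped, the record returned is the first extent past bno
 * (or NULL beyond the last extent) and *idxp is set to match, so a typical
 * lookup (a sketch, not code from this file) reads:
 *
 *	ep = xfs_iext_bno_to_ext(ifp, bno, &idx);
 *	if (ep && xfs_bmbt_get_startoff(ep) <= bno)
 *		bno lies within this extent record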
3287 */ 3288 xfs_bmbt_rec_host_t * /* pointer to found extent record */ 3289 xfs_iext_bno_to_ext( 3290 xfs_ifork_t *ifp, /* inode fork pointer */ 3291 xfs_fileoff_t bno, /* block number to search for */ 3292 xfs_extnum_t *idxp) /* index of target extent */ 3293 { 3294 xfs_bmbt_rec_host_t *base; /* pointer to first extent */ 3295 xfs_filblks_t blockcount = 0; /* number of blocks in extent */ 3296 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */ 3297 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3298 int high; /* upper boundary in search */ 3299 xfs_extnum_t idx = 0; /* index of target extent */ 3300 int low; /* lower boundary in search */ 3301 xfs_extnum_t nextents; /* number of file extents */ 3302 xfs_fileoff_t startoff = 0; /* start offset of extent */ 3303 3304 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3305 if (nextents == 0) { 3306 *idxp = 0; 3307 return NULL; 3308 } 3309 low = 0; 3310 if (ifp->if_flags & XFS_IFEXTIREC) { 3311 /* Find target extent list */ 3312 int erp_idx = 0; 3313 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); 3314 base = erp->er_extbuf; 3315 high = erp->er_extcount - 1; 3316 } else { 3317 base = ifp->if_u1.if_extents; 3318 high = nextents - 1; 3319 } 3320 /* Binary search extent records */ 3321 while (low <= high) { 3322 idx = (low + high) >> 1; 3323 ep = base + idx; 3324 startoff = xfs_bmbt_get_startoff(ep); 3325 blockcount = xfs_bmbt_get_blockcount(ep); 3326 if (bno < startoff) { 3327 high = idx - 1; 3328 } else if (bno >= startoff + blockcount) { 3329 low = idx + 1; 3330 } else { 3331 /* Convert back to file-based extent index */ 3332 if (ifp->if_flags & XFS_IFEXTIREC) { 3333 idx += erp->er_extoff; 3334 } 3335 *idxp = idx; 3336 return ep; 3337 } 3338 } 3339 /* Convert back to file-based extent index */ 3340 if (ifp->if_flags & XFS_IFEXTIREC) { 3341 idx += erp->er_extoff; 3342 } 3343 if (bno >= startoff + blockcount) { 3344 if (++idx == nextents) { 3345 ep = NULL; 3346 } else { 3347 ep = xfs_iext_get_ext(ifp, idx); 3348 } 3349 } 3350 *idxp = idx; 3351 return ep; 3352 } 3353 3354 /* 3355 * Return a pointer to the indirection array entry containing the 3356 * extent record for filesystem block bno. Store the index of the 3357 * target irec in *erp_idxp. 3358 */ 3359 xfs_ext_irec_t * /* pointer to found extent record */ 3360 xfs_iext_bno_to_irec( 3361 xfs_ifork_t *ifp, /* inode fork pointer */ 3362 xfs_fileoff_t bno, /* block number to search for */ 3363 int *erp_idxp) /* irec index of target ext list */ 3364 { 3365 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3366 xfs_ext_irec_t *erp_next; /* next indirection array entry */ 3367 int erp_idx; /* indirection array index */ 3368 int nlists; /* number of extent irec's (lists) */ 3369 int high; /* binary search upper limit */ 3370 int low; /* binary search lower limit */ 3371 3372 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3373 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3374 erp_idx = 0; 3375 low = 0; 3376 high = nlists - 1; 3377 while (low <= high) { 3378 erp_idx = (low + high) >> 1; 3379 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3380 erp_next = erp_idx < nlists - 1 ? 
erp + 1 : NULL; 3381 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { 3382 high = erp_idx - 1; 3383 } else if (erp_next && bno >= 3384 xfs_bmbt_get_startoff(erp_next->er_extbuf)) { 3385 low = erp_idx + 1; 3386 } else { 3387 break; 3388 } 3389 } 3390 *erp_idxp = erp_idx; 3391 return erp; 3392 } 3393 3394 /* 3395 * Return a pointer to the indirection array entry containing the 3396 * extent record at file extent index *idxp. Store the index of the 3397 * target irec in *erp_idxp and store the page index of the target 3398 * extent record in *idxp. 3399 */ 3400 xfs_ext_irec_t * 3401 xfs_iext_idx_to_irec( 3402 xfs_ifork_t *ifp, /* inode fork pointer */ 3403 xfs_extnum_t *idxp, /* extent index (file -> page) */ 3404 int *erp_idxp, /* pointer to target irec */ 3405 int realloc) /* new bytes were just added */ 3406 { 3407 xfs_ext_irec_t *prev; /* pointer to previous irec */ 3408 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ 3409 int erp_idx; /* indirection array index */ 3410 int nlists; /* number of irec's (ex lists) */ 3411 int high; /* binary search upper limit */ 3412 int low; /* binary search lower limit */ 3413 xfs_extnum_t page_idx = *idxp; /* extent index in target list */ 3414 3415 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3416 ASSERT(page_idx >= 0); 3417 ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); 3418 ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc); 3419 3420 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3421 erp_idx = 0; 3422 low = 0; 3423 high = nlists - 1; 3424 3425 /* Binary search extent irec's */ 3426 while (low <= high) { 3427 erp_idx = (low + high) >> 1; 3428 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3429 prev = erp_idx > 0 ? erp - 1 : NULL; 3430 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && 3431 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { 3432 high = erp_idx - 1; 3433 } else if (page_idx > erp->er_extoff + erp->er_extcount || 3434 (page_idx == erp->er_extoff + erp->er_extcount && 3435 !realloc)) { 3436 low = erp_idx + 1; 3437 } else if (page_idx == erp->er_extoff + erp->er_extcount && 3438 erp->er_extcount == XFS_LINEAR_EXTS) { 3439 ASSERT(realloc); 3440 page_idx = 0; 3441 erp_idx++; 3442 erp = erp_idx < nlists ? erp + 1 : NULL; 3443 break; 3444 } else { 3445 page_idx -= erp->er_extoff; 3446 break; 3447 } 3448 } 3449 *idxp = page_idx; 3450 *erp_idxp = erp_idx; 3451 return(erp); 3452 } 3453 3454 /* 3455 * Allocate and initialize an indirection array once the space needed 3456 * for incore extents increases above XFS_IEXT_BUFSZ. 
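 *
 * (With the usual 16 byte xfs_bmbt_rec_t and a 4096 byte XFS_IEXT_BUFSZ,
 * each extent buffer holds XFS_LINEAR_EXTS == 256 records, so the
 * indirection array takes over beyond 256 incore extents.)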
3457 */ 3458 void 3459 xfs_iext_irec_init( 3460 xfs_ifork_t *ifp) /* inode fork pointer */ 3461 { 3462 xfs_ext_irec_t *erp; /* indirection array pointer */ 3463 xfs_extnum_t nextents; /* number of extents in file */ 3464 3465 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3466 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3467 ASSERT(nextents <= XFS_LINEAR_EXTS); 3468 3469 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS); 3470 3471 if (nextents == 0) { 3472 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 3473 } else if (!ifp->if_real_bytes) { 3474 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); 3475 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { 3476 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); 3477 } 3478 erp->er_extbuf = ifp->if_u1.if_extents; 3479 erp->er_extcount = nextents; 3480 erp->er_extoff = 0; 3481 3482 ifp->if_flags |= XFS_IFEXTIREC; 3483 ifp->if_real_bytes = XFS_IEXT_BUFSZ; 3484 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); 3485 ifp->if_u1.if_ext_irec = erp; 3486 3487 return; 3488 } 3489 3490 /* 3491 * Allocate and initialize a new entry in the indirection array. 3492 */ 3493 xfs_ext_irec_t * 3494 xfs_iext_irec_new( 3495 xfs_ifork_t *ifp, /* inode fork pointer */ 3496 int erp_idx) /* index for new irec */ 3497 { 3498 xfs_ext_irec_t *erp; /* indirection array pointer */ 3499 int i; /* loop counter */ 3500 int nlists; /* number of irec's (ex lists) */ 3501 3502 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3503 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3504 3505 /* Resize indirection array */ 3506 xfs_iext_realloc_indirect(ifp, ++nlists * 3507 sizeof(xfs_ext_irec_t)); 3508 /* 3509 * Move records down in the array so the 3510 * new page can use erp_idx. 3511 */ 3512 erp = ifp->if_u1.if_ext_irec; 3513 for (i = nlists - 1; i > erp_idx; i--) { 3514 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); 3515 } 3516 ASSERT(i == erp_idx); 3517 3518 /* Initialize new extent record */ 3519 erp = ifp->if_u1.if_ext_irec; 3520 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 3521 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 3522 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); 3523 erp[erp_idx].er_extcount = 0; 3524 erp[erp_idx].er_extoff = erp_idx > 0 ? 3525 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; 3526 return (&erp[erp_idx]); 3527 } 3528 3529 /* 3530 * Remove a record from the indirection array. 3531 */ 3532 void 3533 xfs_iext_irec_remove( 3534 xfs_ifork_t *ifp, /* inode fork pointer */ 3535 int erp_idx) /* irec index to remove */ 3536 { 3537 xfs_ext_irec_t *erp; /* indirection array pointer */ 3538 int i; /* loop counter */ 3539 int nlists; /* number of irec's (ex lists) */ 3540 3541 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3542 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3543 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3544 if (erp->er_extbuf) { 3545 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, 3546 -erp->er_extcount); 3547 kmem_free(erp->er_extbuf); 3548 } 3549 /* Compact extent records */ 3550 erp = ifp->if_u1.if_ext_irec; 3551 for (i = erp_idx; i < nlists - 1; i++) { 3552 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); 3553 } 3554 /* 3555 * Manually free the last extent record from the indirection 3556 * array. A call to xfs_iext_realloc_indirect() with a size 3557 * of zero would result in a call to xfs_iext_destroy() which 3558 * would in turn call this function again, creating a nasty 3559 * infinite loop. 
 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}

/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array.  Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible.  The
 * compaction policy is as follows:
 *
 * Full Compaction: Extents fit into a single page (or inline buffer)
 * Partial Compaction: Extents occupy less than 50% of allocated space
 * No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}

/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memcpy(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}

/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists.  erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}
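/*
 * Illustrative sketch, not part of the original file: the indirection
 * array maintains the invariant that each irec's er_extoff equals the
 * sum of er_extcount over all earlier irecs, which is exactly what
 * xfs_iext_irec_update_extoffs() above preserves.  A debug-only checker
 * for that invariant could look like this:
 */
#ifdef DEBUG
STATIC void
xfs_iext_irec_check(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	extoff = 0;	/* expected er_extoff */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = 0; i < nlists; i++) {
		ASSERT(ifp->if_u1.if_ext_irec[i].er_extoff == extoff);
		extoff += ifp->if_u1.if_ext_irec[i].er_extcount;
	}
	ASSERT(extoff == ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
}
#endif /* DEBUG */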