/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */
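/*
 * Usage sketch (hypothetical caller, not part of this file): the hint is a
 * block count that allocation code can use to round requests.  Assuming an
 * inode ip and a request length alen in filesystem blocks:
 *
 *	xfs_extlen_t	extsz = xfs_get_extsz_hint(ip);
 *
 *	if (extsz)
 *		alen = roundup(alen, extsz);
 */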
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;
	int			ni;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			xfs_warn(mp,
				"%s: xfs_trans_read_buf() returned error %d.",
				__func__, error);
		} else {
			ASSERT(buf_flags & XBF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (iget_flags & XFS_IGET_UNTRUSTED) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
					     mp, dip);
#ifdef DEBUG
			xfs_emerg(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_magic));
			ASSERT(0);
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	*bpp = bp;
	*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}
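/*
 * Usage sketch: callers pair this with xfs_trans_brelse(), as xfs_iread()
 * below does.  A minimal sketch, assuming ip->i_imap was already filled in
 * by xfs_imap():
 *
 *	struct xfs_dinode	*dip;
 *	struct xfs_buf		*bp;
 *	int			error;
 *
 *	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, 0);
 *	if (error)
 *		return error;
 *	(use dip here)
 *	xfs_trans_brelse(tp, bp);
 */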
/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error = 0;
	xfs_fsize_t		di_size;

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
		     !ip->i_mount->m_rtdev_targp)) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, has realtime flag set.",
			ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);

	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);

		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
			xfs_warn(ip->i_mount,
				"corrupt inode %Lu (bad attr fork size %Ld).",
				(unsigned long long) ip->i_ino,
				(long long) size);
			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
					     XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}

		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}
/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}
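/*
 * Usage sketch: after xfs_iformat_local() succeeds, the fork contents are
 * always reachable through if_u1.if_data, whether they were copied into
 * the inline buffer or into a heap allocation.  A sketch for a shortform
 * directory (the xfs_dir2_sf_hdr_t cast is illustrative):
 *
 *	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
 *	ASSERT(ifp->if_flags & XFS_IFINLINE);
 *	sfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data;
 */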
/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork) ||
		     XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) ||
		     XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_t		*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}

void
xfs_dinode_to_disk(
	xfs_dinode_t		*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}
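/*
 * xfs_dinode_from_disk() and xfs_dinode_to_disk() are intended to be exact
 * inverses of each other.  A minimal round-trip sketch (a hypothetical
 * debug-only check, not something this file does):
 *
 *	struct xfs_icdinode	ic;
 *	struct xfs_dinode	d2;
 *
 *	xfs_dinode_from_disk(&ic, dip);
 *	xfs_dinode_to_disk(&d2, &ic);
 *	ASSERT(d2.di_magic == dip->di_magic);
 */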
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}
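/*
 * Usage sketch: these helpers let callers test user-visible flags without
 * knowing the on-disk encoding.  Assuming a locked in-core inode, a
 * hypothetical caller's policy check might read:
 *
 *	if (xfs_ip2xflags(ip) & XFS_XFLAG_IMMUTABLE)
 *		return XFS_ERROR(EPERM);
 */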
/*
 * Read the disk inode attributes into the in-core inode structure.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
#ifdef DEBUG
		xfs_alert(mp,
			"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
			__func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
#endif /* DEBUG */
		error = XFS_ERROR(EINVAL);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat(ip, dip);
		if (error)  {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}
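/*
 * Usage sketch: for a btree-format fork the extent list is read in lazily,
 * so the canonical pattern before walking the extents is (assuming tp, ip
 * and whichfork as in the surrounding code):
 *
 *	ifp = XFS_IFORK_PTR(ip, whichfork);
 *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *		error = xfs_iread_extents(tp, ip, whichfork);
 *		if (error)
 *			return error;
 *	}
 */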
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context will be
 * returned as NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation.  Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will return with a non-NULL ialloc_context
 * and a NULL inode.  The caller should then commit the current
 * transaction, start a new transaction, and call xfs_ialloc() again to
 * actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}
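/*
 * Caller-side sketch of the two-phase allocation described in the comment
 * above xfs_ialloc(); xfs_dir_ialloc() implements the full version of this
 * retry.  Variable names are illustrative:
 *
 *	error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *			   &ialloc_context, &ip);
 *	if (!error && !ip && ialloc_context) {
 *		(commit tp while holding ialloc_context, start a new
 *		 transaction, then call xfs_ialloc() a second time; the
 *		 second call returns the locked in-core inode)
 *	}
 */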
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}
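/*
 * The dup/commit/reserve sequence in the loop above is the standard
 * transaction "roll": it commits the extents freed so far while keeping a
 * permanent log reservation and the inode held.  Condensed from the code
 * above:
 *
 *	ntp = xfs_trans_dup(tp);		(new transaction, extra ticket ref)
 *	error = xfs_trans_commit(tp, 0);	(commit the old one)
 *	tp = ntp;
 *	xfs_trans_ijoin(tp, ip, 0);		(rejoin the held inode)
 *	xfs_log_ticket_put(tp->t_ticket);	(drop the extra reference)
 *	error = xfs_trans_reserve(tp, ...);	(fresh log reservation)
 */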
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}
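/*
 * Shape of the structure xfs_iunlink() maintains: the AGI holds an array
 * of list heads, and each on-disk inode carries the link to the next
 * entry in di_next_unlinked:
 *
 *	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
 *
 *	agi_unlinked[bucket_index] -> inode A -> inode B -> NULLAGINO
 *
 * Insertion is at the head: the new inode's di_next_unlinked takes over
 * the old head value, then the bucket is pointed at the new inode.
 */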
/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
	"%s: xfs_imap returned error %d.",
					 __func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
	"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list.  Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}
/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	xfs_inode_t	*free_ip,
	xfs_trans_t	*tp,
	xfs_ino_t	inum)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			nbufs;
	int			ninodes;
	int			i, j;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
					mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
	}

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,
					XBF_UNMAPPED);

		if (!bp)
			return ENOMEM;
		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale.  These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them.  By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		lip = bp->b_fspriv;
		while (lip) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
			lip = lip->li_bio_list;
		}


		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion.  This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < ninodes; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup.  We need to check under the
			 * i_flags_lock for a valid inode here.  Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale.  If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				rcu_read_unlock();
				delay(1);
				goto retry;
			}
			rcu_read_unlock();

			xfs_iflock(ip);
			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_fields;
			iip->ili_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
						  &iip->ili_item);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}

	xfs_perag_put(pag);
	return 0;
}
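/*
 * The lookup in the loop above is the standard RCU-safe inode cache
 * lookup pattern, condensed here for reference:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	if (ip) {
 *		spin_lock(&ip->i_flags_lock);
 *		(revalidate ip->i_ino and the XFS_ISTALE flag here, since
 *		 the inode may have been freed or reallocated meanwhile)
 *		spin_unlock(&ip->i_flags_lock);
 *	}
 *	rcu_read_unlock();
 */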
/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI.  We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist)
{
	int		error;
	int		delete;
	xfs_ino_t	first_ino;
	xfs_dinode_t	*dip;
	xfs_buf_t	*ibp;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error != 0) {
		return error;
	}

	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
	if (error != 0) {
		return error;
	}
	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	ip->i_d.di_gen++;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp,
			       0, 0);
	if (error)
		return error;

	/*
	 * Clear the on-disk di_mode.  This is to prevent xfs_bulkstat
	 * from picking up this inode when it is reclaimed (its incore state
	 * initialized but not flushed to disk yet).  The in-core di_mode is
	 * already cleared and a corresponding transaction logged.
	 * The hack here just synchronizes the in-core to on-disk
	 * di_mode value in advance before the actual inode sync to disk.
	 * This is OK because the inode is already unlinked and would never
	 * change its di_mode again for this inode generation.
	 * This is a temporary hack that would require a proper fix
	 * in the future.
	 */
	dip->di_mode = 0;

	if (delete) {
		error = xfs_ifree_cluster(ip, tp, first_ino);
	}

	return error;
}

/*
 * Reallocate the space for if_broot based on the number of records
 * being added or deleted as indicated in rec_diff.  Move the records
 * and pointers in if_broot to fit the new size.  When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If the if_broot is currently NULL, then
 * if we are adding records, one will be allocated.  The caller must also
 * not request that the number of records go below zero, although
 * it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * ext_diff -- the change in the number of records, positive or negative,
 *	 requested for the if_broot array.
 */
void
xfs_iroot_realloc(
	xfs_inode_t		*ip,
	int			rec_diff,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			cur_max;
	xfs_ifork_t		*ifp;
	struct xfs_btree_block	*new_broot;
	int			new_max;
	size_t			new_size;
	char			*np;
	char			*op;

	/*
	 * Handle the degenerate case quietly.
	 */
	if (rec_diff == 0) {
		return;
	}

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (rec_diff > 0) {
		/*
		 * If there wasn't any memory allocated before, just
		 * allocate it now and get out.
		 */
		if (ifp->if_broot_bytes == 0) {
			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
			ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
			ifp->if_broot_bytes = (int)new_size;
			return;
		}

		/*
		 * If there is already an existing if_broot, then we need
		 * to realloc() it and shift the pointers to their new
		 * location.  The records don't change location because
		 * they are kept butted up against the btree block header.
		 */
		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
		new_max = cur_max + rec_diff;
		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
				KM_SLEEP | KM_NOFS);
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     (int)new_size);
		ifp->if_broot_bytes = (int)new_size;
		ASSERT(ifp->if_broot_bytes <=
			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
		return;
	}

	/*
	 * rec_diff is less than 0.  In this case, we are shrinking the
	 * if_broot buffer.  It must already exist.
	 * If we go to zero records, just get rid of the root and clear the
	 * status bit.
	 */
	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
	new_max = cur_max + rec_diff;
	ASSERT(new_max >= 0);
	if (new_max > 0)
		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
	else
		new_size = 0;
	if (new_size > 0) {
		new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
		/*
		 * First copy over the btree block header.
		 */
		memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
	} else {
		new_broot = NULL;
		ifp->if_flags &= ~XFS_IFBROOT;
	}

	/*
	 * Only copy the records and pointers if there are any.
	 */
	if (new_max > 0) {
		/*
		 * First copy the records.
		 */
		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

		/*
		 * Then copy the pointers.
		 */
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
						     (int)new_size);
		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
	}
	kmem_free(ifp->if_broot);
	ifp->if_broot = new_broot;
	ifp->if_broot_bytes = (int)new_size;
	ASSERT(ifp->if_broot_bytes <=
		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
	return;
}
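/*
 * Layout that xfs_iroot_realloc() maintains for the incore root: a btree
 * block header, the records butted up against it, then the child pointers
 * at the offset XFS_BMAP_BROOT_PTR_ADDR() computes from the buffer size:
 *
 *	+--------+------------------+-------------------+
 *	| header | records 1..n     | pointers 1..n     |
 *	+--------+------------------+-------------------+
 *
 * Growing or shrinking by rec_diff therefore only has to move the pointer
 * array; the records stay where they are.
 */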
1899 */ 1900 real_size = roundup(new_size, 4); 1901 if (ifp->if_u1.if_data == NULL) { 1902 ASSERT(ifp->if_real_bytes == 0); 1903 ifp->if_u1.if_data = kmem_alloc(real_size, 1904 KM_SLEEP | KM_NOFS); 1905 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 1906 /* 1907 * Only do the realloc if the underlying size 1908 * is really changing. 1909 */ 1910 if (ifp->if_real_bytes != real_size) { 1911 ifp->if_u1.if_data = 1912 kmem_realloc(ifp->if_u1.if_data, 1913 real_size, 1914 ifp->if_real_bytes, 1915 KM_SLEEP | KM_NOFS); 1916 } 1917 } else { 1918 ASSERT(ifp->if_real_bytes == 0); 1919 ifp->if_u1.if_data = kmem_alloc(real_size, 1920 KM_SLEEP | KM_NOFS); 1921 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 1922 ifp->if_bytes); 1923 } 1924 } 1925 ifp->if_real_bytes = real_size; 1926 ifp->if_bytes = new_size; 1927 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 1928 } 1929 1930 void 1931 xfs_idestroy_fork( 1932 xfs_inode_t *ip, 1933 int whichfork) 1934 { 1935 xfs_ifork_t *ifp; 1936 1937 ifp = XFS_IFORK_PTR(ip, whichfork); 1938 if (ifp->if_broot != NULL) { 1939 kmem_free(ifp->if_broot); 1940 ifp->if_broot = NULL; 1941 } 1942 1943 /* 1944 * If the format is local, then we can't have an extents 1945 * array so just look for an inline data array. If we're 1946 * not local then we may or may not have an extents list, 1947 * so check and free it up if we do. 1948 */ 1949 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 1950 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 1951 (ifp->if_u1.if_data != NULL)) { 1952 ASSERT(ifp->if_real_bytes != 0); 1953 kmem_free(ifp->if_u1.if_data); 1954 ifp->if_u1.if_data = NULL; 1955 ifp->if_real_bytes = 0; 1956 } 1957 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 1958 ((ifp->if_flags & XFS_IFEXTIREC) || 1959 ((ifp->if_u1.if_extents != NULL) && 1960 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { 1961 ASSERT(ifp->if_real_bytes != 0); 1962 xfs_iext_destroy(ifp); 1963 } 1964 ASSERT(ifp->if_u1.if_extents == NULL || 1965 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 1966 ASSERT(ifp->if_real_bytes == 0); 1967 if (whichfork == XFS_ATTR_FORK) { 1968 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 1969 ip->i_afp = NULL; 1970 } 1971 } 1972 1973 /* 1974 * This is called to unpin an inode. The caller must have the inode locked 1975 * in at least shared mode so that the buffer cannot be subsequently pinned 1976 * once someone is waiting for it to be unpinned. 
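 *
 * A minimal sketch of the expected calling pattern (illustrative only):
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	xfs_iunpin_wait(ip);
 *	... the inode cannot be re-pinned while the lock is held ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);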
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}

/*
 * xfs_iextents_copy()
 *
 * This is called to copy the REAL extents (as opposed to the delayed
 * allocation extents) from the inode into the given buffer.  It
 * returns the number of bytes copied into the buffer.
 *
 * Each extent is examined in turn; delayed allocation extents are
 * skipped and everything else is translated to on-disk format and
 * copied, so the buffer only ever contains real extents.
 */
int
xfs_iextents_copy(
	xfs_inode_t		*ip,
	xfs_bmbt_rec_t		*dp,
	int			whichfork)
{
	int			copied;
	int			i;
	xfs_ifork_t		*ifp;
	int			nrecs;
	xfs_fsblock_t		start_block;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(ifp->if_bytes > 0);

	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
	ASSERT(nrecs > 0);

	/*
	 * Copy the extents one at a time, skipping any delayed
	 * allocation extents.  There must be at least one
	 * non-delayed extent.
	 */
	copied = 0;
	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		start_block = xfs_bmbt_get_startblock(ep);
		if (isnullstartblock(start_block)) {
			/*
			 * It's a delayed allocation extent, so skip it.
			 */
			continue;
		}

		/* Translate to on disk format */
		put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
		put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
		dp++;
		copied++;
	}
	ASSERT(copied != 0);
	xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));

	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
}

/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time.  While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
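 *
 * For example (illustrative): if a directory fork was logged while in
 * LOCAL format and then converted to EXTENTS format before the flush,
 * XFS_ILOG_DDATA may still be set, but the switch below dispatches on
 * the current fork format and never consults the stale DDATA flag in
 * the EXTENTS case.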
2085 */ 2086 /*ARGSUSED*/ 2087 STATIC void 2088 xfs_iflush_fork( 2089 xfs_inode_t *ip, 2090 xfs_dinode_t *dip, 2091 xfs_inode_log_item_t *iip, 2092 int whichfork, 2093 xfs_buf_t *bp) 2094 { 2095 char *cp; 2096 xfs_ifork_t *ifp; 2097 xfs_mount_t *mp; 2098 #ifdef XFS_TRANS_DEBUG 2099 int first; 2100 #endif 2101 static const short brootflag[2] = 2102 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2103 static const short dataflag[2] = 2104 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2105 static const short extflag[2] = 2106 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2107 2108 if (!iip) 2109 return; 2110 ifp = XFS_IFORK_PTR(ip, whichfork); 2111 /* 2112 * This can happen if we gave up in iformat in an error path, 2113 * for the attribute fork. 2114 */ 2115 if (!ifp) { 2116 ASSERT(whichfork == XFS_ATTR_FORK); 2117 return; 2118 } 2119 cp = XFS_DFORK_PTR(dip, whichfork); 2120 mp = ip->i_mount; 2121 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2122 case XFS_DINODE_FMT_LOCAL: 2123 if ((iip->ili_fields & dataflag[whichfork]) && 2124 (ifp->if_bytes > 0)) { 2125 ASSERT(ifp->if_u1.if_data != NULL); 2126 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2127 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 2128 } 2129 break; 2130 2131 case XFS_DINODE_FMT_EXTENTS: 2132 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2133 !(iip->ili_fields & extflag[whichfork])); 2134 if ((iip->ili_fields & extflag[whichfork]) && 2135 (ifp->if_bytes > 0)) { 2136 ASSERT(xfs_iext_get_ext(ifp, 0)); 2137 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2138 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2139 whichfork); 2140 } 2141 break; 2142 2143 case XFS_DINODE_FMT_BTREE: 2144 if ((iip->ili_fields & brootflag[whichfork]) && 2145 (ifp->if_broot_bytes > 0)) { 2146 ASSERT(ifp->if_broot != NULL); 2147 ASSERT(ifp->if_broot_bytes <= 2148 (XFS_IFORK_SIZE(ip, whichfork) + 2149 XFS_BROOT_SIZE_ADJ)); 2150 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, 2151 (xfs_bmdr_block_t *)cp, 2152 XFS_DFORK_SIZE(dip, mp, whichfork)); 2153 } 2154 break; 2155 2156 case XFS_DINODE_FMT_DEV: 2157 if (iip->ili_fields & XFS_ILOG_DEV) { 2158 ASSERT(whichfork == XFS_DATA_FORK); 2159 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); 2160 } 2161 break; 2162 2163 case XFS_DINODE_FMT_UUID: 2164 if (iip->ili_fields & XFS_ILOG_UUID) { 2165 ASSERT(whichfork == XFS_DATA_FORK); 2166 memcpy(XFS_DFORK_DPTR(dip), 2167 &ip->i_df.if_u2.if_uuid, 2168 sizeof(uuid_t)); 2169 } 2170 break; 2171 2172 default: 2173 ASSERT(0); 2174 break; 2175 } 2176 } 2177 2178 STATIC int 2179 xfs_iflush_cluster( 2180 xfs_inode_t *ip, 2181 xfs_buf_t *bp) 2182 { 2183 xfs_mount_t *mp = ip->i_mount; 2184 struct xfs_perag *pag; 2185 unsigned long first_index, mask; 2186 unsigned long inodes_per_cluster; 2187 int ilist_size; 2188 xfs_inode_t **ilist; 2189 xfs_inode_t *iq; 2190 int nr_found; 2191 int clcount = 0; 2192 int bufwasdelwri; 2193 int i; 2194 2195 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2196 2197 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; 2198 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); 2199 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); 2200 if (!ilist) 2201 goto out_put; 2202 2203 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); 2204 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 2205 rcu_read_lock(); 2206 /* really need a gang lookup range call here */ 2207 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, 2208 first_index, inodes_per_cluster); 2209 if (nr_found == 0) 2210 goto out_free; 2211 
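	/*
	 * The gang lookup returns up to inodes_per_cluster inodes starting
	 * at first_index, and those may extend beyond the end of this
	 * cluster; the mask check in the loop below filters out any strays.
	 */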
	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * Because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here.  Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&iq->i_flags_lock);
		if (!iq->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&iq->i_flags_lock);
			continue;
		}
		spin_unlock(&iq->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * Arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;

			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(xs_icluster_flushcnt);
		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was delwri, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, EIO);
			xfs_buf_ioend(bp, 0);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(iq, false);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
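 *
 * A minimal sketch of that contract (illustrative only, error handling
 * elided), modelled on the reclaim path:
 *
 *	error = xfs_iflush(ip, &bp);
 *	if (!error) {
 *		error = xfs_bwrite(bp);
 *		xfs_buf_relse(bp);
 *	}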
2338 */ 2339 int 2340 xfs_iflush( 2341 struct xfs_inode *ip, 2342 struct xfs_buf **bpp) 2343 { 2344 struct xfs_mount *mp = ip->i_mount; 2345 struct xfs_buf *bp; 2346 struct xfs_dinode *dip; 2347 int error; 2348 2349 XFS_STATS_INC(xs_iflush_count); 2350 2351 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2352 ASSERT(xfs_isiflocked(ip)); 2353 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 2354 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 2355 2356 *bpp = NULL; 2357 2358 xfs_iunpin_wait(ip); 2359 2360 /* 2361 * For stale inodes we cannot rely on the backing buffer remaining 2362 * stale in cache for the remaining life of the stale inode and so 2363 * xfs_imap_to_bp() below may give us a buffer that no longer contains 2364 * inodes below. We have to check this after ensuring the inode is 2365 * unpinned so that it is safe to reclaim the stale inode after the 2366 * flush call. 2367 */ 2368 if (xfs_iflags_test(ip, XFS_ISTALE)) { 2369 xfs_ifunlock(ip); 2370 return 0; 2371 } 2372 2373 /* 2374 * This may have been unpinned because the filesystem is shutting 2375 * down forcibly. If that's the case we must not write this inode 2376 * to disk, because the log record didn't make it to disk. 2377 * 2378 * We also have to remove the log item from the AIL in this case, 2379 * as we wait for an empty AIL as part of the unmount process. 2380 */ 2381 if (XFS_FORCED_SHUTDOWN(mp)) { 2382 error = XFS_ERROR(EIO); 2383 goto abort_out; 2384 } 2385 2386 /* 2387 * Get the buffer containing the on-disk inode. 2388 */ 2389 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK, 2390 0); 2391 if (error || !bp) { 2392 xfs_ifunlock(ip); 2393 return error; 2394 } 2395 2396 /* 2397 * First flush out the inode that xfs_iflush was called with. 2398 */ 2399 error = xfs_iflush_int(ip, bp); 2400 if (error) 2401 goto corrupt_out; 2402 2403 /* 2404 * If the buffer is pinned then push on the log now so we won't 2405 * get stuck waiting in the write for too long. 
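 *
 * (A pinned buffer cannot be written back until the log commits the
 * records that pinned it, so forcing the log here starts that work
 * early instead of blocking later in the buffer write path.)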
2406 */ 2407 if (xfs_buf_ispinned(bp)) 2408 xfs_log_force(mp, 0); 2409 2410 /* 2411 * inode clustering: 2412 * see if other inodes can be gathered into this write 2413 */ 2414 error = xfs_iflush_cluster(ip, bp); 2415 if (error) 2416 goto cluster_corrupt_out; 2417 2418 *bpp = bp; 2419 return 0; 2420 2421 corrupt_out: 2422 xfs_buf_relse(bp); 2423 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 2424 cluster_corrupt_out: 2425 error = XFS_ERROR(EFSCORRUPTED); 2426 abort_out: 2427 /* 2428 * Unlocks the flush lock 2429 */ 2430 xfs_iflush_abort(ip, false); 2431 return error; 2432 } 2433 2434 2435 STATIC int 2436 xfs_iflush_int( 2437 xfs_inode_t *ip, 2438 xfs_buf_t *bp) 2439 { 2440 xfs_inode_log_item_t *iip; 2441 xfs_dinode_t *dip; 2442 xfs_mount_t *mp; 2443 #ifdef XFS_TRANS_DEBUG 2444 int first; 2445 #endif 2446 2447 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2448 ASSERT(xfs_isiflocked(ip)); 2449 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 2450 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 2451 2452 iip = ip->i_itemp; 2453 mp = ip->i_mount; 2454 2455 /* set *dip = inode's place in the buffer */ 2456 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 2457 2458 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), 2459 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 2460 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2461 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p", 2462 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 2463 goto corrupt_out; 2464 } 2465 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 2466 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 2467 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2468 "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 2469 __func__, ip->i_ino, ip, ip->i_d.di_magic); 2470 goto corrupt_out; 2471 } 2472 if (S_ISREG(ip->i_d.di_mode)) { 2473 if (XFS_TEST_ERROR( 2474 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2475 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 2476 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 2477 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2478 "%s: Bad regular inode %Lu, ptr 0x%p", 2479 __func__, ip->i_ino, ip); 2480 goto corrupt_out; 2481 } 2482 } else if (S_ISDIR(ip->i_d.di_mode)) { 2483 if (XFS_TEST_ERROR( 2484 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2485 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 2486 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 2487 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 2488 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2489 "%s: Bad directory inode %Lu, ptr 0x%p", 2490 __func__, ip->i_ino, ip); 2491 goto corrupt_out; 2492 } 2493 } 2494 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 2495 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 2496 XFS_RANDOM_IFLUSH_5)) { 2497 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2498 "%s: detected corrupt incore inode %Lu, " 2499 "total extents = %d, nblocks = %Ld, ptr 0x%p", 2500 __func__, ip->i_ino, 2501 ip->i_d.di_nextents + ip->i_d.di_anextents, 2502 ip->i_d.di_nblocks, ip); 2503 goto corrupt_out; 2504 } 2505 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 2506 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 2507 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2508 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 2509 __func__, ip->i_ino, ip->i_d.di_forkoff, ip); 2510 goto corrupt_out; 2511 } 2512 /* 2513 * bump the flush iteration count, used to detect flushes which 2514 * postdate a log record during recovery. 
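 *
 * (During recovery the flush iteration count in a logged inode is
 * compared with the one already on disk, and log records older than
 * what is on disk are skipped; the DI_MAX_FLUSH wrap handling below
 * keeps that comparison sane.)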
2515 */ 2516 2517 ip->i_d.di_flushiter++; 2518 2519 /* 2520 * Copy the dirty parts of the inode into the on-disk 2521 * inode. We always copy out the core of the inode, 2522 * because if the inode is dirty at all the core must 2523 * be. 2524 */ 2525 xfs_dinode_to_disk(dip, &ip->i_d); 2526 2527 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 2528 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 2529 ip->i_d.di_flushiter = 0; 2530 2531 /* 2532 * If this is really an old format inode and the superblock version 2533 * has not been updated to support only new format inodes, then 2534 * convert back to the old inode format. If the superblock version 2535 * has been updated, then make the conversion permanent. 2536 */ 2537 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); 2538 if (ip->i_d.di_version == 1) { 2539 if (!xfs_sb_version_hasnlink(&mp->m_sb)) { 2540 /* 2541 * Convert it back. 2542 */ 2543 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 2544 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink); 2545 } else { 2546 /* 2547 * The superblock version has already been bumped, 2548 * so just make the conversion to the new inode 2549 * format permanent. 2550 */ 2551 ip->i_d.di_version = 2; 2552 dip->di_version = 2; 2553 ip->i_d.di_onlink = 0; 2554 dip->di_onlink = 0; 2555 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 2556 memset(&(dip->di_pad[0]), 0, 2557 sizeof(dip->di_pad)); 2558 ASSERT(xfs_get_projid(ip) == 0); 2559 } 2560 } 2561 2562 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp); 2563 if (XFS_IFORK_Q(ip)) 2564 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 2565 xfs_inobp_check(mp, bp); 2566 2567 /* 2568 * We've recorded everything logged in the inode, so we'd like to clear 2569 * the ili_fields bits so we don't log and flush things unnecessarily. 2570 * However, we can't stop logging all this information until the data 2571 * we've copied into the disk buffer is written to disk. If we did we 2572 * might overwrite the copy of the inode in the log with all the data 2573 * after re-logging only part of it, and in the face of a crash we 2574 * wouldn't have all the data we need to recover. 2575 * 2576 * What we do is move the bits to the ili_last_fields field. When 2577 * logging the inode, these bits are moved back to the ili_fields field. 2578 * In the xfs_iflush_done() routine we clear ili_last_fields, since we 2579 * know that the information those bits represent is permanently on 2580 * disk. As long as the flush completes before the inode is logged 2581 * again, then both ili_fields and ili_last_fields will be cleared. 2582 * 2583 * We can play with the ili_fields bits here, because the inode lock 2584 * must be held exclusively in order to set bits there and the flush 2585 * lock protects the ili_last_fields bits. Set ili_logged so the flush 2586 * done routine can tell whether or not to look in the AIL. Also, store 2587 * the current LSN of the inode so that we can tell whether the item has 2588 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we 2589 * need the AIL lock, because it is a 64 bit value that cannot be read 2590 * atomically. 2591 */ 2592 if (iip != NULL && iip->ili_fields != 0) { 2593 iip->ili_last_fields = iip->ili_fields; 2594 iip->ili_fields = 0; 2595 iip->ili_logged = 1; 2596 2597 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 2598 &iip->ili_item.li_lsn); 2599 2600 /* 2601 * Attach the function xfs_iflush_done to the inode's 2602 * buffer. 
This will remove the inode from the AIL 2603 * and unlock the inode's flush lock when the inode is 2604 * completely written to disk. 2605 */ 2606 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); 2607 2608 ASSERT(bp->b_fspriv != NULL); 2609 ASSERT(bp->b_iodone != NULL); 2610 } else { 2611 /* 2612 * We're flushing an inode which is not in the AIL and has 2613 * not been logged. For this case we can immediately drop 2614 * the inode flush lock because we can avoid the whole 2615 * AIL state thing. It's OK to drop the flush lock now, 2616 * because we've already locked the buffer and to do anything 2617 * you really need both. 2618 */ 2619 if (iip != NULL) { 2620 ASSERT(iip->ili_logged == 0); 2621 ASSERT(iip->ili_last_fields == 0); 2622 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 2623 } 2624 xfs_ifunlock(ip); 2625 } 2626 2627 return 0; 2628 2629 corrupt_out: 2630 return XFS_ERROR(EFSCORRUPTED); 2631 } 2632 2633 /* 2634 * Return a pointer to the extent record at file index idx. 2635 */ 2636 xfs_bmbt_rec_host_t * 2637 xfs_iext_get_ext( 2638 xfs_ifork_t *ifp, /* inode fork pointer */ 2639 xfs_extnum_t idx) /* index of target extent */ 2640 { 2641 ASSERT(idx >= 0); 2642 ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); 2643 2644 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 2645 return ifp->if_u1.if_ext_irec->er_extbuf; 2646 } else if (ifp->if_flags & XFS_IFEXTIREC) { 2647 xfs_ext_irec_t *erp; /* irec pointer */ 2648 int erp_idx = 0; /* irec index */ 2649 xfs_extnum_t page_idx = idx; /* ext index in target list */ 2650 2651 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 2652 return &erp->er_extbuf[page_idx]; 2653 } else if (ifp->if_bytes) { 2654 return &ifp->if_u1.if_extents[idx]; 2655 } else { 2656 return NULL; 2657 } 2658 } 2659 2660 /* 2661 * Insert new item(s) into the extent records for incore inode 2662 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 2663 */ 2664 void 2665 xfs_iext_insert( 2666 xfs_inode_t *ip, /* incore inode pointer */ 2667 xfs_extnum_t idx, /* starting index of new items */ 2668 xfs_extnum_t count, /* number of inserted items */ 2669 xfs_bmbt_irec_t *new, /* items to insert */ 2670 int state) /* type of extent conversion */ 2671 { 2672 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; 2673 xfs_extnum_t i; /* extent record index */ 2674 2675 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_); 2676 2677 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 2678 xfs_iext_add(ifp, idx, count); 2679 for (i = idx; i < idx + count; i++, new++) 2680 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); 2681 } 2682 2683 /* 2684 * This is called when the amount of space required for incore file 2685 * extents needs to be increased. The ext_diff parameter stores the 2686 * number of new extents being added and the idx parameter contains 2687 * the extent index where the new extents will be added. If the new 2688 * extents are being appended, then we just need to (re)allocate and 2689 * initialize the space. Otherwise, if the new extents are being 2690 * inserted into the middle of the existing entries, a bit more work 2691 * is required to make room for the new extents to be inserted. The 2692 * caller is responsible for filling in the new extent entries upon 2693 * return. 
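 *
 * Illustrative example (not from the original code): inserting two
 * extents at index 1 of a five-extent list shifts records 1-4 up by
 * two slots and zeroes the vacated slots for the caller to fill:
 *
 *	before: | e0 | e1 | e2 | e3 | e4 |
 *	after:  | e0 | __ | __ | e1 | e2 | e3 | e4 |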
2694 */ 2695 void 2696 xfs_iext_add( 2697 xfs_ifork_t *ifp, /* inode fork pointer */ 2698 xfs_extnum_t idx, /* index to begin adding exts */ 2699 int ext_diff) /* number of extents to add */ 2700 { 2701 int byte_diff; /* new bytes being added */ 2702 int new_size; /* size of extents after adding */ 2703 xfs_extnum_t nextents; /* number of extents in file */ 2704 2705 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2706 ASSERT((idx >= 0) && (idx <= nextents)); 2707 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); 2708 new_size = ifp->if_bytes + byte_diff; 2709 /* 2710 * If the new number of extents (nextents + ext_diff) 2711 * fits inside the inode, then continue to use the inline 2712 * extent buffer. 2713 */ 2714 if (nextents + ext_diff <= XFS_INLINE_EXTS) { 2715 if (idx < nextents) { 2716 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], 2717 &ifp->if_u2.if_inline_ext[idx], 2718 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 2719 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); 2720 } 2721 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 2722 ifp->if_real_bytes = 0; 2723 } 2724 /* 2725 * Otherwise use a linear (direct) extent list. 2726 * If the extents are currently inside the inode, 2727 * xfs_iext_realloc_direct will switch us from 2728 * inline to direct extent allocation mode. 2729 */ 2730 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { 2731 xfs_iext_realloc_direct(ifp, new_size); 2732 if (idx < nextents) { 2733 memmove(&ifp->if_u1.if_extents[idx + ext_diff], 2734 &ifp->if_u1.if_extents[idx], 2735 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 2736 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); 2737 } 2738 } 2739 /* Indirection array */ 2740 else { 2741 xfs_ext_irec_t *erp; 2742 int erp_idx = 0; 2743 int page_idx = idx; 2744 2745 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); 2746 if (ifp->if_flags & XFS_IFEXTIREC) { 2747 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); 2748 } else { 2749 xfs_iext_irec_init(ifp); 2750 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 2751 erp = ifp->if_u1.if_ext_irec; 2752 } 2753 /* Extents fit in target extent page */ 2754 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { 2755 if (page_idx < erp->er_extcount) { 2756 memmove(&erp->er_extbuf[page_idx + ext_diff], 2757 &erp->er_extbuf[page_idx], 2758 (erp->er_extcount - page_idx) * 2759 sizeof(xfs_bmbt_rec_t)); 2760 memset(&erp->er_extbuf[page_idx], 0, byte_diff); 2761 } 2762 erp->er_extcount += ext_diff; 2763 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 2764 } 2765 /* Insert a new extent page */ 2766 else if (erp) { 2767 xfs_iext_add_indirect_multi(ifp, 2768 erp_idx, page_idx, ext_diff); 2769 } 2770 /* 2771 * If extent(s) are being appended to the last page in 2772 * the indirection array and the new extent(s) don't fit 2773 * in the page, then erp is NULL and erp_idx is set to 2774 * the next index needed in the indirection array. 2775 */ 2776 else { 2777 int count = ext_diff; 2778 2779 while (count) { 2780 erp = xfs_iext_irec_new(ifp, erp_idx); 2781 erp->er_extcount = count; 2782 count -= MIN(count, (int)XFS_LINEAR_EXTS); 2783 if (count) { 2784 erp_idx++; 2785 } 2786 } 2787 } 2788 } 2789 ifp->if_bytes = new_size; 2790 } 2791 2792 /* 2793 * This is called when incore extents are being added to the indirection 2794 * array and the new extents do not fit in the target extent list. The 2795 * erp_idx parameter contains the irec index for the target extent list 2796 * in the indirection array, and the idx parameter contains the extent 2797 * index within the list. 
 * The number of extents being added is stored
 * in the count parameter.
 *
 *    |-------|   |-------|
 *    |       |   |       |    idx - number of extents before idx
 *    |  idx  |   | count |
 *    |       |   |       |    count - number of extents being inserted at idx
 *    |-------|   |-------|
 *    | count |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_add_indirect_multi(
	xfs_ifork_t	*ifp,			/* inode fork pointer */
	int		erp_idx,		/* target extent irec index */
	xfs_extnum_t	idx,			/* index within target list */
	int		count)			/* new extents being added */
{
	int		byte_diff;		/* new bytes being added */
	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
	xfs_extnum_t	ext_diff;		/* number of extents to add */
	xfs_extnum_t	ext_cnt;		/* new extents still needed */
	xfs_extnum_t	nex2;			/* extents after idx + count */
	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
	int		nlists;			/* number of irec's (lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	nex2 = erp->er_extcount - idx;
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/*
	 * Save the second part of the target extent list
	 * (all extents at or after idx).
	 */
	if (nex2) {
		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
		erp->er_extcount -= nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
		memset(&erp->er_extbuf[idx], 0, byte_diff);
	}

	/*
	 * Add the new extents to the end of the target
	 * list, then allocate new irec record(s) and
	 * extent buffer(s) as needed to store the rest
	 * of the new extents.
	 */
	ext_cnt = count;
	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
	if (ext_diff) {
		erp->er_extcount += ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}
	while (ext_cnt) {
		erp_idx++;
		erp = xfs_iext_irec_new(ifp, erp_idx);
		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
		erp->er_extcount = ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}

	/* Add nex2 extents back to indirection array */
	if (nex2) {
		xfs_extnum_t	ext_avail;
		int		i;

		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		i = 0;
		/*
		 * If nex2 extents fit in the current page, append
		 * nex2_ep after the new extents.
		 */
		if (nex2 <= ext_avail) {
			i = erp->er_extcount;
		}
		/*
		 * Otherwise, check if space is available in the
		 * next page.
		 */
		else if ((erp_idx < nlists - 1) &&
			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
			erp_idx++;
			erp++;
			/* Create a hole for nex2 extents */
			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
		}
		/*
		 * Final choice, create a new extent page for
		 * nex2 extents.
2893 */ 2894 else { 2895 erp_idx++; 2896 erp = xfs_iext_irec_new(ifp, erp_idx); 2897 } 2898 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 2899 kmem_free(nex2_ep); 2900 erp->er_extcount += nex2; 2901 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 2902 } 2903 } 2904 2905 /* 2906 * This is called when the amount of space required for incore file 2907 * extents needs to be decreased. The ext_diff parameter stores the 2908 * number of extents to be removed and the idx parameter contains 2909 * the extent index where the extents will be removed from. 2910 * 2911 * If the amount of space needed has decreased below the linear 2912 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous 2913 * extent array. Otherwise, use kmem_realloc() to adjust the 2914 * size to what is needed. 2915 */ 2916 void 2917 xfs_iext_remove( 2918 xfs_inode_t *ip, /* incore inode pointer */ 2919 xfs_extnum_t idx, /* index to begin removing exts */ 2920 int ext_diff, /* number of extents to remove */ 2921 int state) /* type of extent conversion */ 2922 { 2923 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; 2924 xfs_extnum_t nextents; /* number of extents in file */ 2925 int new_size; /* size of extents after removal */ 2926 2927 trace_xfs_iext_remove(ip, idx, state, _RET_IP_); 2928 2929 ASSERT(ext_diff > 0); 2930 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2931 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 2932 2933 if (new_size == 0) { 2934 xfs_iext_destroy(ifp); 2935 } else if (ifp->if_flags & XFS_IFEXTIREC) { 2936 xfs_iext_remove_indirect(ifp, idx, ext_diff); 2937 } else if (ifp->if_real_bytes) { 2938 xfs_iext_remove_direct(ifp, idx, ext_diff); 2939 } else { 2940 xfs_iext_remove_inline(ifp, idx, ext_diff); 2941 } 2942 ifp->if_bytes = new_size; 2943 } 2944 2945 /* 2946 * This removes ext_diff extents from the inline buffer, beginning 2947 * at extent index idx. 2948 */ 2949 void 2950 xfs_iext_remove_inline( 2951 xfs_ifork_t *ifp, /* inode fork pointer */ 2952 xfs_extnum_t idx, /* index to begin removing exts */ 2953 int ext_diff) /* number of extents to remove */ 2954 { 2955 int nextents; /* number of extents in file */ 2956 2957 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 2958 ASSERT(idx < XFS_INLINE_EXTS); 2959 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2960 ASSERT(((nextents - ext_diff) > 0) && 2961 (nextents - ext_diff) < XFS_INLINE_EXTS); 2962 2963 if (idx + ext_diff < nextents) { 2964 memmove(&ifp->if_u2.if_inline_ext[idx], 2965 &ifp->if_u2.if_inline_ext[idx + ext_diff], 2966 (nextents - (idx + ext_diff)) * 2967 sizeof(xfs_bmbt_rec_t)); 2968 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], 2969 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 2970 } else { 2971 memset(&ifp->if_u2.if_inline_ext[idx], 0, 2972 ext_diff * sizeof(xfs_bmbt_rec_t)); 2973 } 2974 } 2975 2976 /* 2977 * This removes ext_diff extents from a linear (direct) extent list, 2978 * beginning at extent index idx. If the extents are being removed 2979 * from the end of the list (ie. truncate) then we just need to re- 2980 * allocate the list to remove the extra space. Otherwise, if the 2981 * extents are being removed from the middle of the existing extent 2982 * entries, then we first need to move the extent records beginning 2983 * at idx + ext_diff up in the list to overwrite the records being 2984 * removed, then remove the extra space via kmem_realloc. 
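 *
 * Illustrative example (not from the original code): removing two
 * extents at index 1 of a five-extent list:
 *
 *	before: | e0 | e1 | e2 | e3 | e4 |
 *	after:  | e0 | e3 | e4 |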
2985 */ 2986 void 2987 xfs_iext_remove_direct( 2988 xfs_ifork_t *ifp, /* inode fork pointer */ 2989 xfs_extnum_t idx, /* index to begin removing exts */ 2990 int ext_diff) /* number of extents to remove */ 2991 { 2992 xfs_extnum_t nextents; /* number of extents in file */ 2993 int new_size; /* size of extents after removal */ 2994 2995 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 2996 new_size = ifp->if_bytes - 2997 (ext_diff * sizeof(xfs_bmbt_rec_t)); 2998 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2999 3000 if (new_size == 0) { 3001 xfs_iext_destroy(ifp); 3002 return; 3003 } 3004 /* Move extents up in the list (if needed) */ 3005 if (idx + ext_diff < nextents) { 3006 memmove(&ifp->if_u1.if_extents[idx], 3007 &ifp->if_u1.if_extents[idx + ext_diff], 3008 (nextents - (idx + ext_diff)) * 3009 sizeof(xfs_bmbt_rec_t)); 3010 } 3011 memset(&ifp->if_u1.if_extents[nextents - ext_diff], 3012 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3013 /* 3014 * Reallocate the direct extent list. If the extents 3015 * will fit inside the inode then xfs_iext_realloc_direct 3016 * will switch from direct to inline extent allocation 3017 * mode for us. 3018 */ 3019 xfs_iext_realloc_direct(ifp, new_size); 3020 ifp->if_bytes = new_size; 3021 } 3022 3023 /* 3024 * This is called when incore extents are being removed from the 3025 * indirection array and the extents being removed span multiple extent 3026 * buffers. The idx parameter contains the file extent index where we 3027 * want to begin removing extents, and the count parameter contains 3028 * how many extents need to be removed. 3029 * 3030 * |-------| |-------| 3031 * | nex1 | | | nex1 - number of extents before idx 3032 * |-------| | count | 3033 * | | | | count - number of extents being removed at idx 3034 * | count | |-------| 3035 * | | | nex2 | nex2 - number of extents after idx + count 3036 * |-------| |-------| 3037 */ 3038 void 3039 xfs_iext_remove_indirect( 3040 xfs_ifork_t *ifp, /* inode fork pointer */ 3041 xfs_extnum_t idx, /* index to begin removing extents */ 3042 int count) /* number of extents to remove */ 3043 { 3044 xfs_ext_irec_t *erp; /* indirection array pointer */ 3045 int erp_idx = 0; /* indirection array index */ 3046 xfs_extnum_t ext_cnt; /* extents left to remove */ 3047 xfs_extnum_t ext_diff; /* extents to remove in current list */ 3048 xfs_extnum_t nex1; /* number of extents before idx */ 3049 xfs_extnum_t nex2; /* extents after idx + count */ 3050 int page_idx = idx; /* index in target extent list */ 3051 3052 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3053 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3054 ASSERT(erp != NULL); 3055 nex1 = page_idx; 3056 ext_cnt = count; 3057 while (ext_cnt) { 3058 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); 3059 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); 3060 /* 3061 * Check for deletion of entire list; 3062 * xfs_iext_irec_remove() updates extent offsets. 
3063 */ 3064 if (ext_diff == erp->er_extcount) { 3065 xfs_iext_irec_remove(ifp, erp_idx); 3066 ext_cnt -= ext_diff; 3067 nex1 = 0; 3068 if (ext_cnt) { 3069 ASSERT(erp_idx < ifp->if_real_bytes / 3070 XFS_IEXT_BUFSZ); 3071 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3072 nex1 = 0; 3073 continue; 3074 } else { 3075 break; 3076 } 3077 } 3078 /* Move extents up (if needed) */ 3079 if (nex2) { 3080 memmove(&erp->er_extbuf[nex1], 3081 &erp->er_extbuf[nex1 + ext_diff], 3082 nex2 * sizeof(xfs_bmbt_rec_t)); 3083 } 3084 /* Zero out rest of page */ 3085 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - 3086 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); 3087 /* Update remaining counters */ 3088 erp->er_extcount -= ext_diff; 3089 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); 3090 ext_cnt -= ext_diff; 3091 nex1 = 0; 3092 erp_idx++; 3093 erp++; 3094 } 3095 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); 3096 xfs_iext_irec_compact(ifp); 3097 } 3098 3099 /* 3100 * Create, destroy, or resize a linear (direct) block of extents. 3101 */ 3102 void 3103 xfs_iext_realloc_direct( 3104 xfs_ifork_t *ifp, /* inode fork pointer */ 3105 int new_size) /* new size of extents */ 3106 { 3107 int rnew_size; /* real new size of extents */ 3108 3109 rnew_size = new_size; 3110 3111 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || 3112 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && 3113 (new_size != ifp->if_real_bytes))); 3114 3115 /* Free extent records */ 3116 if (new_size == 0) { 3117 xfs_iext_destroy(ifp); 3118 } 3119 /* Resize direct extent list and zero any new bytes */ 3120 else if (ifp->if_real_bytes) { 3121 /* Check if extents will fit inside the inode */ 3122 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { 3123 xfs_iext_direct_to_inline(ifp, new_size / 3124 (uint)sizeof(xfs_bmbt_rec_t)); 3125 ifp->if_bytes = new_size; 3126 return; 3127 } 3128 if (!is_power_of_2(new_size)){ 3129 rnew_size = roundup_pow_of_two(new_size); 3130 } 3131 if (rnew_size != ifp->if_real_bytes) { 3132 ifp->if_u1.if_extents = 3133 kmem_realloc(ifp->if_u1.if_extents, 3134 rnew_size, 3135 ifp->if_real_bytes, KM_NOFS); 3136 } 3137 if (rnew_size > ifp->if_real_bytes) { 3138 memset(&ifp->if_u1.if_extents[ifp->if_bytes / 3139 (uint)sizeof(xfs_bmbt_rec_t)], 0, 3140 rnew_size - ifp->if_real_bytes); 3141 } 3142 } 3143 /* 3144 * Switch from the inline extent buffer to a direct 3145 * extent list. Be sure to include the inline extent 3146 * bytes in new_size. 3147 */ 3148 else { 3149 new_size += ifp->if_bytes; 3150 if (!is_power_of_2(new_size)) { 3151 rnew_size = roundup_pow_of_two(new_size); 3152 } 3153 xfs_iext_inline_to_direct(ifp, rnew_size); 3154 } 3155 ifp->if_real_bytes = rnew_size; 3156 ifp->if_bytes = new_size; 3157 } 3158 3159 /* 3160 * Switch from linear (direct) extent records to inline buffer. 3161 */ 3162 void 3163 xfs_iext_direct_to_inline( 3164 xfs_ifork_t *ifp, /* inode fork pointer */ 3165 xfs_extnum_t nextents) /* number of extents in file */ 3166 { 3167 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3168 ASSERT(nextents <= XFS_INLINE_EXTS); 3169 /* 3170 * The inline buffer was zeroed when we switched 3171 * from inline to direct extent allocation mode, 3172 * so we don't need to clear it here. 3173 */ 3174 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, 3175 nextents * sizeof(xfs_bmbt_rec_t)); 3176 kmem_free(ifp->if_u1.if_extents); 3177 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3178 ifp->if_real_bytes = 0; 3179 } 3180 3181 /* 3182 * Switch from inline buffer to linear (direct) extent records. 
3183 * new_size should already be rounded up to the next power of 2 3184 * by the caller (when appropriate), so use new_size as it is. 3185 * However, since new_size may be rounded up, we can't update 3186 * if_bytes here. It is the caller's responsibility to update 3187 * if_bytes upon return. 3188 */ 3189 void 3190 xfs_iext_inline_to_direct( 3191 xfs_ifork_t *ifp, /* inode fork pointer */ 3192 int new_size) /* number of extents in file */ 3193 { 3194 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS); 3195 memset(ifp->if_u1.if_extents, 0, new_size); 3196 if (ifp->if_bytes) { 3197 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, 3198 ifp->if_bytes); 3199 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3200 sizeof(xfs_bmbt_rec_t)); 3201 } 3202 ifp->if_real_bytes = new_size; 3203 } 3204 3205 /* 3206 * Resize an extent indirection array to new_size bytes. 3207 */ 3208 STATIC void 3209 xfs_iext_realloc_indirect( 3210 xfs_ifork_t *ifp, /* inode fork pointer */ 3211 int new_size) /* new indirection array size */ 3212 { 3213 int nlists; /* number of irec's (ex lists) */ 3214 int size; /* current indirection array size */ 3215 3216 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3217 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3218 size = nlists * sizeof(xfs_ext_irec_t); 3219 ASSERT(ifp->if_real_bytes); 3220 ASSERT((new_size >= 0) && (new_size != size)); 3221 if (new_size == 0) { 3222 xfs_iext_destroy(ifp); 3223 } else { 3224 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) 3225 kmem_realloc(ifp->if_u1.if_ext_irec, 3226 new_size, size, KM_NOFS); 3227 } 3228 } 3229 3230 /* 3231 * Switch from indirection array to linear (direct) extent allocations. 3232 */ 3233 STATIC void 3234 xfs_iext_indirect_to_direct( 3235 xfs_ifork_t *ifp) /* inode fork pointer */ 3236 { 3237 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 3238 xfs_extnum_t nextents; /* number of extents in file */ 3239 int size; /* size of file extents */ 3240 3241 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3242 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3243 ASSERT(nextents <= XFS_LINEAR_EXTS); 3244 size = nextents * sizeof(xfs_bmbt_rec_t); 3245 3246 xfs_iext_irec_compact_pages(ifp); 3247 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); 3248 3249 ep = ifp->if_u1.if_ext_irec->er_extbuf; 3250 kmem_free(ifp->if_u1.if_ext_irec); 3251 ifp->if_flags &= ~XFS_IFEXTIREC; 3252 ifp->if_u1.if_extents = ep; 3253 ifp->if_bytes = size; 3254 if (nextents < XFS_LINEAR_EXTS) { 3255 xfs_iext_realloc_direct(ifp, size); 3256 } 3257 } 3258 3259 /* 3260 * Free incore file extents. 3261 */ 3262 void 3263 xfs_iext_destroy( 3264 xfs_ifork_t *ifp) /* inode fork pointer */ 3265 { 3266 if (ifp->if_flags & XFS_IFEXTIREC) { 3267 int erp_idx; 3268 int nlists; 3269 3270 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3271 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { 3272 xfs_iext_irec_remove(ifp, erp_idx); 3273 } 3274 ifp->if_flags &= ~XFS_IFEXTIREC; 3275 } else if (ifp->if_real_bytes) { 3276 kmem_free(ifp->if_u1.if_extents); 3277 } else if (ifp->if_bytes) { 3278 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3279 sizeof(xfs_bmbt_rec_t)); 3280 } 3281 ifp->if_u1.if_extents = NULL; 3282 ifp->if_real_bytes = 0; 3283 ifp->if_bytes = 0; 3284 } 3285 3286 /* 3287 * Return a pointer to the extent record for file system block bno. 
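 *
 * Illustrative example (not from the original code): with extents
 * covering blocks [0, 10), [10, 15) and [20, 28), a lookup of bno 12
 * hits the second record and sets *idxp to 1, while a lookup of bno 17,
 * which falls in the hole, returns the following record ([20, 28))
 * with *idxp set to 2.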
3288 */ 3289 xfs_bmbt_rec_host_t * /* pointer to found extent record */ 3290 xfs_iext_bno_to_ext( 3291 xfs_ifork_t *ifp, /* inode fork pointer */ 3292 xfs_fileoff_t bno, /* block number to search for */ 3293 xfs_extnum_t *idxp) /* index of target extent */ 3294 { 3295 xfs_bmbt_rec_host_t *base; /* pointer to first extent */ 3296 xfs_filblks_t blockcount = 0; /* number of blocks in extent */ 3297 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */ 3298 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3299 int high; /* upper boundary in search */ 3300 xfs_extnum_t idx = 0; /* index of target extent */ 3301 int low; /* lower boundary in search */ 3302 xfs_extnum_t nextents; /* number of file extents */ 3303 xfs_fileoff_t startoff = 0; /* start offset of extent */ 3304 3305 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3306 if (nextents == 0) { 3307 *idxp = 0; 3308 return NULL; 3309 } 3310 low = 0; 3311 if (ifp->if_flags & XFS_IFEXTIREC) { 3312 /* Find target extent list */ 3313 int erp_idx = 0; 3314 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); 3315 base = erp->er_extbuf; 3316 high = erp->er_extcount - 1; 3317 } else { 3318 base = ifp->if_u1.if_extents; 3319 high = nextents - 1; 3320 } 3321 /* Binary search extent records */ 3322 while (low <= high) { 3323 idx = (low + high) >> 1; 3324 ep = base + idx; 3325 startoff = xfs_bmbt_get_startoff(ep); 3326 blockcount = xfs_bmbt_get_blockcount(ep); 3327 if (bno < startoff) { 3328 high = idx - 1; 3329 } else if (bno >= startoff + blockcount) { 3330 low = idx + 1; 3331 } else { 3332 /* Convert back to file-based extent index */ 3333 if (ifp->if_flags & XFS_IFEXTIREC) { 3334 idx += erp->er_extoff; 3335 } 3336 *idxp = idx; 3337 return ep; 3338 } 3339 } 3340 /* Convert back to file-based extent index */ 3341 if (ifp->if_flags & XFS_IFEXTIREC) { 3342 idx += erp->er_extoff; 3343 } 3344 if (bno >= startoff + blockcount) { 3345 if (++idx == nextents) { 3346 ep = NULL; 3347 } else { 3348 ep = xfs_iext_get_ext(ifp, idx); 3349 } 3350 } 3351 *idxp = idx; 3352 return ep; 3353 } 3354 3355 /* 3356 * Return a pointer to the indirection array entry containing the 3357 * extent record for filesystem block bno. Store the index of the 3358 * target irec in *erp_idxp. 3359 */ 3360 xfs_ext_irec_t * /* pointer to found extent record */ 3361 xfs_iext_bno_to_irec( 3362 xfs_ifork_t *ifp, /* inode fork pointer */ 3363 xfs_fileoff_t bno, /* block number to search for */ 3364 int *erp_idxp) /* irec index of target ext list */ 3365 { 3366 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3367 xfs_ext_irec_t *erp_next; /* next indirection array entry */ 3368 int erp_idx; /* indirection array index */ 3369 int nlists; /* number of extent irec's (lists) */ 3370 int high; /* binary search upper limit */ 3371 int low; /* binary search lower limit */ 3372 3373 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3374 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3375 erp_idx = 0; 3376 low = 0; 3377 high = nlists - 1; 3378 while (low <= high) { 3379 erp_idx = (low + high) >> 1; 3380 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3381 erp_next = erp_idx < nlists - 1 ? 
erp + 1 : NULL; 3382 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { 3383 high = erp_idx - 1; 3384 } else if (erp_next && bno >= 3385 xfs_bmbt_get_startoff(erp_next->er_extbuf)) { 3386 low = erp_idx + 1; 3387 } else { 3388 break; 3389 } 3390 } 3391 *erp_idxp = erp_idx; 3392 return erp; 3393 } 3394 3395 /* 3396 * Return a pointer to the indirection array entry containing the 3397 * extent record at file extent index *idxp. Store the index of the 3398 * target irec in *erp_idxp and store the page index of the target 3399 * extent record in *idxp. 3400 */ 3401 xfs_ext_irec_t * 3402 xfs_iext_idx_to_irec( 3403 xfs_ifork_t *ifp, /* inode fork pointer */ 3404 xfs_extnum_t *idxp, /* extent index (file -> page) */ 3405 int *erp_idxp, /* pointer to target irec */ 3406 int realloc) /* new bytes were just added */ 3407 { 3408 xfs_ext_irec_t *prev; /* pointer to previous irec */ 3409 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ 3410 int erp_idx; /* indirection array index */ 3411 int nlists; /* number of irec's (ex lists) */ 3412 int high; /* binary search upper limit */ 3413 int low; /* binary search lower limit */ 3414 xfs_extnum_t page_idx = *idxp; /* extent index in target list */ 3415 3416 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3417 ASSERT(page_idx >= 0); 3418 ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); 3419 ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc); 3420 3421 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3422 erp_idx = 0; 3423 low = 0; 3424 high = nlists - 1; 3425 3426 /* Binary search extent irec's */ 3427 while (low <= high) { 3428 erp_idx = (low + high) >> 1; 3429 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3430 prev = erp_idx > 0 ? erp - 1 : NULL; 3431 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && 3432 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { 3433 high = erp_idx - 1; 3434 } else if (page_idx > erp->er_extoff + erp->er_extcount || 3435 (page_idx == erp->er_extoff + erp->er_extcount && 3436 !realloc)) { 3437 low = erp_idx + 1; 3438 } else if (page_idx == erp->er_extoff + erp->er_extcount && 3439 erp->er_extcount == XFS_LINEAR_EXTS) { 3440 ASSERT(realloc); 3441 page_idx = 0; 3442 erp_idx++; 3443 erp = erp_idx < nlists ? erp + 1 : NULL; 3444 break; 3445 } else { 3446 page_idx -= erp->er_extoff; 3447 break; 3448 } 3449 } 3450 *idxp = page_idx; 3451 *erp_idxp = erp_idx; 3452 return(erp); 3453 } 3454 3455 /* 3456 * Allocate and initialize an indirection array once the space needed 3457 * for incore extents increases above XFS_IEXT_BUFSZ. 
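 *
 * For scale (assuming the usual 4096-byte XFS_IEXT_BUFSZ and 16-byte
 * xfs_bmbt_rec_t, i.e. XFS_LINEAR_EXTS == 256): the indirection array
 * takes over once a fork needs more than 256 incore extent records.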
3458 */ 3459 void 3460 xfs_iext_irec_init( 3461 xfs_ifork_t *ifp) /* inode fork pointer */ 3462 { 3463 xfs_ext_irec_t *erp; /* indirection array pointer */ 3464 xfs_extnum_t nextents; /* number of extents in file */ 3465 3466 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3467 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3468 ASSERT(nextents <= XFS_LINEAR_EXTS); 3469 3470 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS); 3471 3472 if (nextents == 0) { 3473 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 3474 } else if (!ifp->if_real_bytes) { 3475 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); 3476 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { 3477 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); 3478 } 3479 erp->er_extbuf = ifp->if_u1.if_extents; 3480 erp->er_extcount = nextents; 3481 erp->er_extoff = 0; 3482 3483 ifp->if_flags |= XFS_IFEXTIREC; 3484 ifp->if_real_bytes = XFS_IEXT_BUFSZ; 3485 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); 3486 ifp->if_u1.if_ext_irec = erp; 3487 3488 return; 3489 } 3490 3491 /* 3492 * Allocate and initialize a new entry in the indirection array. 3493 */ 3494 xfs_ext_irec_t * 3495 xfs_iext_irec_new( 3496 xfs_ifork_t *ifp, /* inode fork pointer */ 3497 int erp_idx) /* index for new irec */ 3498 { 3499 xfs_ext_irec_t *erp; /* indirection array pointer */ 3500 int i; /* loop counter */ 3501 int nlists; /* number of irec's (ex lists) */ 3502 3503 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3504 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3505 3506 /* Resize indirection array */ 3507 xfs_iext_realloc_indirect(ifp, ++nlists * 3508 sizeof(xfs_ext_irec_t)); 3509 /* 3510 * Move records down in the array so the 3511 * new page can use erp_idx. 3512 */ 3513 erp = ifp->if_u1.if_ext_irec; 3514 for (i = nlists - 1; i > erp_idx; i--) { 3515 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); 3516 } 3517 ASSERT(i == erp_idx); 3518 3519 /* Initialize new extent record */ 3520 erp = ifp->if_u1.if_ext_irec; 3521 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 3522 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 3523 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); 3524 erp[erp_idx].er_extcount = 0; 3525 erp[erp_idx].er_extoff = erp_idx > 0 ? 3526 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; 3527 return (&erp[erp_idx]); 3528 } 3529 3530 /* 3531 * Remove a record from the indirection array. 3532 */ 3533 void 3534 xfs_iext_irec_remove( 3535 xfs_ifork_t *ifp, /* inode fork pointer */ 3536 int erp_idx) /* irec index to remove */ 3537 { 3538 xfs_ext_irec_t *erp; /* indirection array pointer */ 3539 int i; /* loop counter */ 3540 int nlists; /* number of irec's (ex lists) */ 3541 3542 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3543 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3544 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3545 if (erp->er_extbuf) { 3546 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, 3547 -erp->er_extcount); 3548 kmem_free(erp->er_extbuf); 3549 } 3550 /* Compact extent records */ 3551 erp = ifp->if_u1.if_ext_irec; 3552 for (i = erp_idx; i < nlists - 1; i++) { 3553 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); 3554 } 3555 /* 3556 * Manually free the last extent record from the indirection 3557 * array. A call to xfs_iext_realloc_indirect() with a size 3558 * of zero would result in a call to xfs_iext_destroy() which 3559 * would in turn call this function again, creating a nasty 3560 * infinite loop. 
3561 */ 3562 if (--nlists) { 3563 xfs_iext_realloc_indirect(ifp, 3564 nlists * sizeof(xfs_ext_irec_t)); 3565 } else { 3566 kmem_free(ifp->if_u1.if_ext_irec); 3567 } 3568 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 3569 } 3570 3571 /* 3572 * This is called to clean up large amounts of unused memory allocated 3573 * by the indirection array. Before compacting anything though, verify 3574 * that the indirection array is still needed and switch back to the 3575 * linear extent list (or even the inline buffer) if possible. The 3576 * compaction policy is as follows: 3577 * 3578 * Full Compaction: Extents fit into a single page (or inline buffer) 3579 * Partial Compaction: Extents occupy less than 50% of allocated space 3580 * No Compaction: Extents occupy at least 50% of allocated space 3581 */ 3582 void 3583 xfs_iext_irec_compact( 3584 xfs_ifork_t *ifp) /* inode fork pointer */ 3585 { 3586 xfs_extnum_t nextents; /* number of extents in file */ 3587 int nlists; /* number of irec's (ex lists) */ 3588 3589 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3590 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3591 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3592 3593 if (nextents == 0) { 3594 xfs_iext_destroy(ifp); 3595 } else if (nextents <= XFS_INLINE_EXTS) { 3596 xfs_iext_indirect_to_direct(ifp); 3597 xfs_iext_direct_to_inline(ifp, nextents); 3598 } else if (nextents <= XFS_LINEAR_EXTS) { 3599 xfs_iext_indirect_to_direct(ifp); 3600 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { 3601 xfs_iext_irec_compact_pages(ifp); 3602 } 3603 } 3604 3605 /* 3606 * Combine extents from neighboring extent pages. 3607 */ 3608 void 3609 xfs_iext_irec_compact_pages( 3610 xfs_ifork_t *ifp) /* inode fork pointer */ 3611 { 3612 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */ 3613 int erp_idx = 0; /* indirection array index */ 3614 int nlists; /* number of irec's (ex lists) */ 3615 3616 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3617 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3618 while (erp_idx < nlists - 1) { 3619 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3620 erp_next = erp + 1; 3621 if (erp_next->er_extcount <= 3622 (XFS_LINEAR_EXTS - erp->er_extcount)) { 3623 memcpy(&erp->er_extbuf[erp->er_extcount], 3624 erp_next->er_extbuf, erp_next->er_extcount * 3625 sizeof(xfs_bmbt_rec_t)); 3626 erp->er_extcount += erp_next->er_extcount; 3627 /* 3628 * Free page before removing extent record 3629 * so er_extoffs don't get modified in 3630 * xfs_iext_irec_remove. 3631 */ 3632 kmem_free(erp_next->er_extbuf); 3633 erp_next->er_extbuf = NULL; 3634 xfs_iext_irec_remove(ifp, erp_idx + 1); 3635 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3636 } else { 3637 erp_idx++; 3638 } 3639 } 3640 } 3641 3642 /* 3643 * This is called to update the er_extoff field in the indirection 3644 * array when extents have been added or removed from one of the 3645 * extent lists. erp_idx contains the irec index to begin updating 3646 * at and ext_diff contains the number of extents that were added 3647 * or removed. 3648 */ 3649 void 3650 xfs_iext_irec_update_extoffs( 3651 xfs_ifork_t *ifp, /* inode fork pointer */ 3652 int erp_idx, /* irec index to update */ 3653 int ext_diff) /* number of new extents */ 3654 { 3655 int i; /* loop counter */ 3656 int nlists; /* number of irec's (ex lists */ 3657 3658 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3659 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3660 for (i = erp_idx; i < nlists; i++) { 3661 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff; 3662 } 3663 } 3664
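
/*
 * Worked example of the compaction policy in xfs_iext_irec_compact()
 * above (illustrative only, assuming XFS_LINEAR_EXTS == 256): with
 * nlists == 4 there are 1024 allocated extent slots.  200 extents fit
 * in a single page, so the fork is switched back to a direct list;
 * 400 extents occupy less than half of the 1024 slots, so neighboring
 * pages are merged; 600 extents occupy more than half, so nothing is
 * compacted.
 */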