// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"

/*
 * Write new AG headers to disk. Non-transactional, but need to be
 * written and completed prior to the growfs transaction being logged.
 * To do this, we use a delayed write buffer list and wait for
 * submission and IO completion of the list as a whole. This allows the
 * IO subsystem to merge all the AG headers in a single AG into a single
 * IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
        struct xfs_trans        *tp,
        struct aghdr_init_data  *id,
        xfs_agnumber_t          oagcount,
        xfs_agnumber_t          nagcount,
        xfs_rfsblock_t          delta,
        bool                    *lastag_extended)
{
        struct xfs_mount        *mp = tp->t_mountp;
        xfs_rfsblock_t          nb = mp->m_sb.sb_dblocks + delta;
        int                     error;

        *lastag_extended = false;

        INIT_LIST_HEAD(&id->buffer_list);
        for (id->agno = nagcount - 1;
             id->agno >= oagcount;
             id->agno--, delta -= id->agsize) {

                if (id->agno == nagcount - 1)
                        id->agsize = nb - (id->agno *
                                        (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
                else
                        id->agsize = mp->m_sb.sb_agblocks;

                error = xfs_ag_init_headers(mp, id);
                if (error) {
                        xfs_buf_delwri_cancel(&id->buffer_list);
                        return error;
                }
        }

        error = xfs_buf_delwri_submit(&id->buffer_list);
        if (error)
                return error;

        if (delta) {
                *lastag_extended = true;
                error = xfs_ag_extend_space(mp, tp, id, delta);
        }
        return error;
}
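/*
 * Worked example for the sizing loop above, using hypothetical numbers
 * rather than any real geometry: with sb_agblocks = 1000, sb_dblocks =
 * 3500 (four AGs, the last a short one of 500 blocks) and delta = 1300,
 * we get nb = 4800 and nagcount = 5. The loop runs once, for the new
 * AG 4, which gets agsize = 4800 - 4 * 1000 = 800 blocks, leaving
 * delta = 1300 - 800 = 500 on exit. Those 500 blocks then extend the
 * old last AG (id->agno has decremented to 3) to its full 1000 blocks
 * via xfs_ag_extend_space().
 */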
/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
        struct xfs_mount        *mp,            /* mount point for filesystem */
        struct xfs_growfs_data  *in)            /* growfs data input struct */
{
        struct xfs_buf          *bp;
        int                     error;
        xfs_agnumber_t          nagcount;
        xfs_agnumber_t          nagimax = 0;
        xfs_rfsblock_t          nb, nb_div, nb_mod;
        int64_t                 delta;
        bool                    lastag_extended;
        xfs_agnumber_t          oagcount;
        struct xfs_trans        *tp;
        struct aghdr_init_data  id = {};

        nb = in->newblocks;
        error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
        if (error)
                return error;

        if (nb > mp->m_sb.sb_dblocks) {
                error = xfs_buf_read_uncached(mp->m_ddev_targp,
                                XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
                                XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
                if (error)
                        return error;
                xfs_buf_relse(bp);
        }

        nb_div = nb;
        nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
        nagcount = nb_div + (nb_mod != 0);
        if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
                nagcount--;
                nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
        }
        delta = nb - mp->m_sb.sb_dblocks;

        /*
         * Reject filesystems with a single AG because they are not
         * supported, and reject a shrink operation that would cause a
         * filesystem to become unsupported.
         */
        if (delta < 0 && nagcount < 2)
                return -EINVAL;

        oagcount = mp->m_sb.sb_agcount;

        /* allocate the new per-ag structures */
        if (nagcount > oagcount) {
                error = xfs_initialize_perag(mp, nagcount, &nagimax);
                if (error)
                        return error;
        } else if (nagcount < oagcount) {
                /* TODO: shrinking away entire AGs is not yet supported */
                return -EINVAL;
        }

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
                        (delta > 0 ? XFS_GROWFS_SPACE_RES(mp) : -delta), 0,
                        XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;

        if (delta > 0) {
                error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
                                                  delta, &lastag_extended);
        } else {
                static struct ratelimit_state shrink_warning =
                        RATELIMIT_STATE_INIT("shrink_warning", 86400 * HZ, 1);

                ratelimit_set_flags(&shrink_warning, RATELIMIT_MSG_ON_RELEASE);

                if (__ratelimit(&shrink_warning))
                        xfs_alert(mp,
        "EXPERIMENTAL online shrink feature in use. Use at your own risk!");

                error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
        }
        if (error)
                goto out_trans_cancel;

        /*
         * Update changed superblock fields transactionally. These are not
         * seen by the rest of the world until the transaction commit applies
         * them atomically to the superblock.
         */
        if (nagcount > oagcount)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
        if (delta)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
        if (id.nfree)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

        /*
         * Sync sb counters now to reflect the updated values. This is
         * particularly important for shrink because the write verifier
         * will fail if sb_fdblocks is ever larger than sb_dblocks.
         */
        if (xfs_has_lazysbcount(mp))
                xfs_log_sb(tp);

        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp);
        if (error)
                return error;

        /* New allocation groups fully initialized, so update mount struct */
        if (nagimax)
                mp->m_maxagi = nagimax;
        xfs_set_low_space_thresholds(mp);
        mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

        if (delta > 0) {
                /*
                 * If we expanded the last AG, free the per-AG reservation
                 * so we can reinitialize it with the new size.
                 */
                if (lastag_extended) {
                        struct xfs_perag        *pag;

                        pag = xfs_perag_get(mp, id.agno);
                        error = xfs_ag_resv_free(pag);
                        xfs_perag_put(pag);
                        if (error)
                                return error;
                }

                /*
                 * Reserve AG metadata blocks. ENOSPC here does not mean there
                 * was a growfs failure, just that there still isn't space for
                 * new user data after the grow has been run.
                 */
                error = xfs_fs_reserve_ag_blocks(mp);
                if (error == -ENOSPC)
                        error = 0;
        }
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}
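/*
 * Worked example for the AG count rounding in xfs_growfs_data_private()
 * above, again with hypothetical numbers: sb_agblocks = 1000 and a
 * requested nb = 5020 give nb_div = 5 and nb_mod = 20. A 20 block runt
 * AG would be smaller than XFS_MIN_AG_BLOCKS, so nagcount is trimmed
 * back to 5 and nb to 5000; the grow target is rounded down to a whole
 * number of AGs rather than failing the request.
 */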
static int
xfs_growfs_log_private(
        struct xfs_mount        *mp,    /* mount point for filesystem */
        struct xfs_growfs_log   *in)    /* growfs log input struct */
{
        xfs_extlen_t            nb;

        nb = in->newblocks;
        if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
                return -EINVAL;
        if (nb == mp->m_sb.sb_logblocks &&
            in->isint == (mp->m_sb.sb_logstart != 0))
                return -EINVAL;

        /*
         * Moving the log is hard: it needs new interfaces to sync the log
         * first and to hold off all activity while it is moved. The result
         * could be a shorter or longer log in the same space, or an internal
         * log transformed into an external one or vice versa.
         */
        return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
        struct xfs_mount        *mp,
        __u32                   imaxpct)
{
        struct xfs_trans        *tp;
        int                     dpct;
        int                     error;

        if (imaxpct > 100)
                return -EINVAL;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
                        XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;

        dpct = imaxpct - mp->m_sb.sb_imax_pct;
        xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
        xfs_trans_set_sync(tp);
        return xfs_trans_commit(tp);
}

/*
 * Protected versions of the growfs functions; these acquire and release
 * locks on the mount point. Exported through the XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT ioctls.
 */
int
xfs_growfs_data(
        struct xfs_mount        *mp,
        struct xfs_growfs_data  *in)
{
        int                     error = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (!mutex_trylock(&mp->m_growlock))
                return -EWOULDBLOCK;

        /* update imaxpct separately from the physical grow of the filesystem */
        if (in->imaxpct != mp->m_sb.sb_imax_pct) {
                error = xfs_growfs_imaxpct(mp, in->imaxpct);
                if (error)
                        goto out_error;
        }

        if (in->newblocks != mp->m_sb.sb_dblocks) {
                error = xfs_growfs_data_private(mp, in);
                if (error)
                        goto out_error;
        }

        /* Post growfs calculations needed to reflect new state in operations */
        if (mp->m_sb.sb_imax_pct) {
                uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;

                do_div(icount, 100);
                M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
        } else
                M_IGEO(mp)->maxicount = 0;

        /* Update secondary superblocks now the physical grow has completed */
        error = xfs_update_secondary_sbs(mp);

out_error:
        /*
         * Increment the generation unconditionally, the error could be from
         * updating the secondary superblocks, in which case the new size
         * is live already.
         */
        mp->m_generation++;
        mutex_unlock(&mp->m_growlock);
        return error;
}
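/*
 * Illustrative userspace sketch (not part of this file) of driving a
 * data grow through the ioctl that xfs_growfs_data() backs; the fd,
 * target size and error handling are hypothetical:
 *
 *      int fd = open("/mnt", O_RDONLY);        // any fd on the filesystem
 *      struct xfs_growfs_data in = {
 *              .newblocks = 2621440,           // new sb_dblocks, in fs blocks
 *              .imaxpct   = 25,                // new inode max percentage
 *      };
 *
 *      if (ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0)
 *              err(1, "XFS_IOC_FSGROWFSDATA");
 *
 * xfs_growfs(8) is the usual frontend for this ioctl.
 */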
int
xfs_growfs_log(
        xfs_mount_t             *mp,
        struct xfs_growfs_log   *in)
{
        int                     error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (!mutex_trylock(&mp->m_growlock))
                return -EWOULDBLOCK;
        error = xfs_growfs_log_private(mp, in);
        mutex_unlock(&mp->m_growlock);
        return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */
void
xfs_fs_counts(
        xfs_mount_t             *mp,
        xfs_fsop_counts_t       *cnt)
{
        cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
        cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
        cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
                                                mp->m_alloc_set_aside;

        spin_lock(&mp->m_sb_lock);
        cnt->freertx = mp->m_sb.sb_frextents;
        spin_unlock(&mp->m_sb_lock);
}

/*
 * exported through ioctls XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks in the in-core mount
 * table. The number of unused reserved blocks is kept in
 * m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */
int
xfs_reserve_blocks(
        xfs_mount_t             *mp,
        uint64_t                *inval,
        xfs_fsop_resblks_t      *outval)
{
        int64_t                 lcounter, delta;
        int64_t                 fdblks_delta = 0;
        uint64_t                request;
        int64_t                 free;
        int                     error = 0;

        /* If inval is null, report current values and return */
        if (!inval) {
                if (!outval)
                        return -EINVAL;
                outval->resblks = mp->m_resblks;
                outval->resblks_avail = mp->m_resblks_avail;
                return 0;
        }

        request = *inval;

        /*
         * With per-cpu counters this becomes an interesting problem. We need
         * to work out whether we are freeing or allocating blocks first, then
         * we can do the modification as necessary.
         *
         * We do this under the m_sb_lock so that if we are near ENOSPC, we
         * will hold out any changes while we work out what to do. This means
         * that the amount of free space can change while we do this, so we
         * need to retry if we end up trying to reserve more space than is
         * available.
         */
        spin_lock(&mp->m_sb_lock);

        /*
         * If our previous reservation was larger than the current value,
         * then move any unused blocks back to the free pool. Modify the
         * resblks counters directly since we shouldn't have any problems
         * unreserving space.
         */
        if (mp->m_resblks > request) {
                lcounter = mp->m_resblks_avail - request;
                if (lcounter > 0) {             /* release unused blocks */
                        fdblks_delta = lcounter;
                        mp->m_resblks_avail -= lcounter;
                }
                mp->m_resblks = request;
                if (fdblks_delta) {
                        spin_unlock(&mp->m_sb_lock);
                        error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
                        spin_lock(&mp->m_sb_lock);
                }

                goto out;
        }

        /*
         * If the request is larger than the current reservation, reserve the
         * blocks before we update the reserve counters. Sample m_fdblocks and
         * perform a partial reservation if the request exceeds free space.
         */
        error = -ENOSPC;
        do {
                free = percpu_counter_sum(&mp->m_fdblocks) -
                                                mp->m_alloc_set_aside;
                if (free <= 0)
                        break;

                delta = request - mp->m_resblks;
                lcounter = free - delta;
                if (lcounter < 0)
                        /* We can't satisfy the request, just get what we can */
                        fdblks_delta = free;
                else
                        fdblks_delta = delta;

                /*
                 * We'll either succeed in getting space from the free block
                 * count or we'll get an ENOSPC. If we get an ENOSPC, it means
                 * things changed while we were calculating fdblks_delta and
                 * so we should try again to see if there is anything left to
                 * reserve.
                 *
                 * Don't set the reserved flag here - we don't want to reserve
                 * the extra reserve blocks from the reserve itself.
                 */
                spin_unlock(&mp->m_sb_lock);
                error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
                spin_lock(&mp->m_sb_lock);
        } while (error == -ENOSPC);

        /*
         * Update the reserve counters if blocks have been successfully
         * allocated.
         */
        if (!error && fdblks_delta) {
                mp->m_resblks += fdblks_delta;
                mp->m_resblks_avail += fdblks_delta;
        }

out:
        if (outval) {
                outval->resblks = mp->m_resblks;
                outval->resblks_avail = mp->m_resblks_avail;
        }

        spin_unlock(&mp->m_sb_lock);
        return error;
}
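/*
 * Illustrative userspace sketch (not part of this file) of the two
 * ioctls backed by xfs_reserve_blocks(); the request size and error
 * handling are hypothetical:
 *
 *      struct xfs_fsop_resblks res;
 *
 *      ioctl(fd, XFS_IOC_GET_RESBLKS, &res);   // query only, changes nothing
 *      res.resblks = 8192;                     // request 8192 reserved blocks
 *      ioctl(fd, XFS_IOC_SET_RESBLKS, &res);   // on return, res reports what
 *                                              // was actually reserved
 */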
int
xfs_fs_goingdown(
        xfs_mount_t     *mp,
        uint32_t        inflags)
{
        switch (inflags) {
        case XFS_FSOP_GOING_FLAGS_DEFAULT: {
                if (!freeze_bdev(mp->m_super->s_bdev)) {
                        xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
                        thaw_bdev(mp->m_super->s_bdev);
                }
                break;
        }
        case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
                xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
                break;
        case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
                xfs_force_shutdown(mp,
                                SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
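/*
 * Illustrative userspace sketch (not part of this file): the shutdown
 * modes above are selected through the XFS_IOC_GOINGDOWN ioctl, e.g.
 *
 *      uint32_t flags = XFS_FSOP_GOING_FLAGS_LOGFLUSH;
 *
 *      if (ioctl(fd, XFS_IOC_GOINGDOWN, &flags) < 0)
 *              err(1, "XFS_IOC_GOINGDOWN");
 *
 * xfs_io's "shutdown" command is the usual frontend for this ioctl.
 */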
/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shut down the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 *
 * The shutdown state change is atomic, resulting in the first and only the
 * first shutdown call processing the shutdown. This means we only shut down
 * the log once as it requires, and we don't spam the logs when multiple
 * concurrent shutdowns race to set the shutdown flags.
 */
void
xfs_do_force_shutdown(
        struct xfs_mount *mp,
        int             flags,
        char            *fname,
        int             lnnum)
{
        int             tag;
        const char      *why;

        if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate))
                return;
        if (mp->m_sb_bp)
                mp->m_sb_bp->b_flags |= XBF_DONE;

        if (flags & SHUTDOWN_FORCE_UMOUNT)
                xfs_alert(mp, "User initiated shutdown received.");

        if (xlog_force_shutdown(mp->m_log, flags)) {
                tag = XFS_PTAG_SHUTDOWN_LOGERROR;
                why = "Log I/O Error";
        } else if (flags & SHUTDOWN_CORRUPT_INCORE) {
                tag = XFS_PTAG_SHUTDOWN_CORRUPT;
                why = "Corruption of in-memory data";
        } else {
                tag = XFS_PTAG_SHUTDOWN_IOERROR;
                why = "Metadata I/O Error";
        }

        trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);

        xfs_alert_tag(mp, tag,
"%s (0x%x) detected at %pS (%s:%d). Shutting down filesystem.",
                why, flags, __return_address, fname, lnnum);
        xfs_alert(mp,
                "Please unmount the filesystem and rectify the problem(s)");
        if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
                xfs_stack_trace();
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
        struct xfs_mount        *mp)
{
        xfs_agnumber_t          agno;
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     err2;

        mp->m_finobt_nores = false;
        for_each_perag(mp, agno, pag) {
                err2 = xfs_ag_resv_init(pag, NULL);
                if (err2 && !error)
                        error = err2;
        }

        if (error && error != -ENOSPC) {
                xfs_warn(mp,
        "Error %d reserving per-AG metadata reserve pool.", error);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        }

        return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
int
xfs_fs_unreserve_ag_blocks(
        struct xfs_mount        *mp)
{
        xfs_agnumber_t          agno;
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     err2;

        for_each_perag(mp, agno, pag) {
                err2 = xfs_ag_resv_free(pag);
                if (err2 && !error)
                        error = err2;
        }

        if (error)
                xfs_warn(mp,
        "Error %d freeing per-AG metadata reserve pool.", error);

        return error;
}