// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_health.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers run only during mount/umount or
 * quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
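
/*
 * Illustrative usage (see xfs_qm_dqpurge_all() below for a real caller):
 * the walker takes a per-dquot callback and an opaque data pointer, e.g.
 *
 *	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 *
 * A callback returns 0 to continue, -EAGAIN to have the whole walk retried
 * after a short delay, or another negative errno that is recorded and
 * returned once the walk completes (-EFSCORRUPTED aborts the walk).
 */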

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care about the
	 * dirty metadata sitting in this dquot.  OTOH, if we're unmounting,
	 * we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
static void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp)
{
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}
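
/*
 * Note: xfs_qm_unmount() below runs this purge before tearing down the
 * quotainfo structure, so by the time xfs_qm_destroy_quotainfo() is called
 * the radix trees and the LRU should no longer reference any dquots.
 */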

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but made
	 * the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked.  This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
	 * if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}
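
/*
 * Per-scan state for the dquot shrinker: "buffers" collects the delwri
 * buffers of dquots flushed during the scan (submitted in one batch by
 * xfs_qm_shrink_scan()), and "dispose" collects the dquots that were
 * isolated from the LRU and are ready to be freed.
 */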

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * If something else is freeing this dquot and hasn't yet removed it
	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being freed from under us here.
	 */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_miss_unlock;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_miss_unlock;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_unlock:
	xfs_dqunlock(dqp);
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = shrink->private_data;
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	/*
	 * Only reclaim when we may both touch the filesystem and block for
	 * direct reclaim; anything less risks deadlocking on fs locks.
	 */
	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
			(__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = shrink->private_data;

	return list_lru_shrink_count(&qi->qi_lru, sc);
}
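
/*
 * The two functions above form the count/scan pair that gets wired up to
 * qi_shrinker in xfs_qm_init_quotainfo(); memory pressure on the dquot
 * cache is therefore handled entirely through the LRU isolation above.
 */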

STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have already been set, so let's just set the
	 * default limits for this quota type.
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to a user or
	 * group before they can no longer write.  If it is zero, a default
	 * is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;

	xfs_qm_dqdestroy(dqp);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
					GFP_KERNEL | __GFP_NOFAIL);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are set up, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
					   mp->m_super->s_id);
	if (!qinf->qi_shrinker) {
		error = -ENOMEM;
		goto out_free_inos;
	}

	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker->private_data = qinf;

	shrinker_register(qinf->qi_shrinker);

	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kfree(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	shrinker_free(qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kfree(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With superblocks that don't have a separate pquotino, we share an
	 * inode between gquota and pquota. If the on-disk superblock has
	 * GQUOTA and the filesystem is now mounted with PQUOTA, just use
	 * sb_gquotino for sb_pquotino and vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO)) {
				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
				return -EFSCORRUPTED;
			}
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO)) {
				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
				return -EFSCORRUPTED;
			}
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL,
					ino, S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc) {
		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
		xfs_finish_inode_setup(*ipp);
	}
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		if (xfs_has_crc(mp)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
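
/*
 * Dquot ids within a chunk are consecutive, so a buffer that starts at
 * dquot "id" covers ids [id, id + qi_dqperchunk); this is why the caller
 * below advances firstid by qi_dqperchunk for every block it processes.
 */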

STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
			GFP_KERNEL | __GFP_NOFAIL);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kfree(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done,
 * we can just log all the buffers, as opposed to logging numerous updates
 * to individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
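
/*
 * When the quotacheck walk completes, each dquot's q_ino.count is the
 * number of inodes it owns and q_blk.count/q_rtb.count are the data and
 * realtime blocks those inodes hold -- rebuilt purely from the per-inode
 * adjustments made above.
 */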

/*
 * Callback routine supplied to the inode walk (xfs_iwalk_threaded()).
 * Given an inumber, find its dquots and update them to account for
 * resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	/*
	 * Reload the incore unlinked list to avoid failure in inodegc.
	 * Use an unlocked check here because unrecovered unlinked inodes
	 * should be somewhat rare.
	 */
	if (xfs_inode_unlinked_incomplete(ip)) {
		error = xfs_inode_reload_unlinked(ip);
		if (error) {
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			goto error0;
		}
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto error0;

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
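
/*
 * Quotacheck proceeds in three phases: (1) reset every on-disk dquot's
 * counters via xfs_qm_reset_dqcounts_buf(), (2) walk all inodes and
 * re-derive usage via xfs_qm_dqusage_adjust(), and (3) flush the dirty
 * incore dquots back to their buffers with xfs_qm_flush_one() and submit
 * the whole delwri list at once.
 */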

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_ON(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	xfs_set_quotacheck_running(mp);
	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	xfs_clear_quotacheck_running(mp);

	/*
	 * On error, the inode walk may have partially populated the dquot
	 * caches.  We must purge them before disabling quota and tearing down
	 * the quotainfo, or else the dquots will leak.
	 */
	if (error)
		goto error_purge;

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error)
		goto error_purge;

	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
	} else {
		xfs_notice(mp, "Quotacheck: Done.");
		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
	}

	return error;

error_purge:
	/*
	 * On error, we may have inodes queued for inactivation. This may try
	 * to attach dquots to the inode before running cleanup operations on
	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
	 * pending inodegc operations before we purge the dquots from memory,
	 * ensuring that background inactivation is idle whilst we turn off
	 * quotas.
	 */
	xfs_inodegc_flush(mp);
	xfs_qm_dqpurge_all(mp);
	goto error_return;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes are not supported, disable quotas
	 * immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on-disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota, gquota and pquota inodes, as required.
	 */
	if (xfs_has_quota(mp)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}

STATIC void
xfs_qm_destroy_quotainos(
	struct xfs_quotainfo	*qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
}
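
/*
 * Free a dquot that the shrinker has isolated: called from the dispose
 * loop in xfs_qm_shrink_scan() once the dquot is off the LRU, this drops
 * it from the radix tree and destroys it.
 */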

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
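
/*
 * Illustrative note (assumed from the usual XFS create path, not spelled
 * out in this file): callers typically pass the new file's uid/gid/prid
 * to xfs_qm_vop_dqalloc() up front, then hand the returned references to
 * xfs_qm_vop_create_dqattach() below once the inode has been allocated.
 */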

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_nblocks));
	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_nblocks);
	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot.  We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation.  Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
	prevdq->q_blk.reserved -= ip->i_delayed_blks;
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
	}

	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
}

/* Decide if this inode's dquot is near an enforcement boundary. */
bool
xfs_inode_near_dquot_enforcement(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;
	int64_t			freesp;

	/* We only care for quotas that are enabled and enforced. */
	dqp = xfs_inode_dquot(ip, type);
	if (!dqp || !xfs_dquot_is_enforced(dqp))
		return false;

	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
	    xfs_dquot_res_over_limits(&dqp->q_rtb))
		return true;

	/* For space on the data device, check the various thresholds. */
	if (!dqp->q_prealloc_hi_wmark)
		return false;

	if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
		return false;

	if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
		return true;

	/*
	 * Between the lo and hi watermarks: report "near" once the room left
	 * below the hi watermark drops under the 5% low-space threshold.
	 */
	freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
	if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
		return true;

	return false;
}