// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are invoked only during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			/* advance the cursor past this id for the next gang lookup */
			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
static void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp)
{
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * If something else is freeing this dquot and hasn't yet removed it
	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being freed from under us here.
	 */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_miss_unlock;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_miss_unlock;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_unlock:
	xfs_dqunlock(dqp);
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have already been set; just set the
	 * default limits for this quota type.
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to
	 * a user or group before they can no longer write.
	 * If it is zero, a default is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;

	xfs_qm_dqdestroy(dqp);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
				  mp->m_super->s_id);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
					S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ipp);
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		if (xfs_has_crc(mp)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto error0;

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_ON(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	if (error) {
		/*
		 * The inode walk may have partially populated the dquot
		 * caches.  We must purge them before disabling quota and
		 * tearing down the quotainfo, or else the dquots will leak.
		 */
		xfs_qm_dqpurge_all(mp);
		goto error_return;
	}

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quota on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_has_quota(mp)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}

STATIC void
xfs_qm_destroy_quotainos(
	struct xfs_quotainfo	*qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
					XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot. Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot.  We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation.  Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
	prevdq->q_blk.reserved -= ip->i_delayed_blks;
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}

/* Decide if this inode's dquot is near an enforcement boundary. */
bool
xfs_inode_near_dquot_enforcement(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;
	int64_t			freesp;

	/* We only care for quotas that are enabled and enforced. */
	dqp = xfs_inode_dquot(ip, type);
	if (!dqp || !xfs_dquot_is_enforced(dqp))
		return false;

	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
	    xfs_dquot_res_over_limits(&dqp->q_rtb))
		return true;

	/* For space on the data device, check the various thresholds. */
	if (!dqp->q_prealloc_hi_wmark)
		return false;

	if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
		return false;

	if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
		return true;

	freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
	if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
		return true;

	return false;
}