// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_error.h"

STATIC void xfs_trans_alloc_dqinfo(struct xfs_trans *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(dqp->q_logitem.qli_dquot == dqp);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing to be logged when the
 * transaction is committed.  The dquot must already be associated with the
 * given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary case,
 * this gets called via xfs_trans_commit, after the transaction is already
 * dirty.  However, there's nothing to stop this from getting called directly,
 * as done by xfs_qm_scall_setqlim.  Hence the TRANS_DIRTY flag.
 */
void
xfs_trans_log_dquot(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /* Upgrade the dquot to bigtime format if possible. */
        if (dqp->q_id != 0 &&
            xfs_has_bigtime(tp->t_mountp) &&
            !(dqp->q_type & XFS_DQTYPE_BIGTIME))
                dqp->q_type |= XFS_DQTYPE_BIGTIME;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction.
 */
void
xfs_trans_dup_dqinfo(
        struct xfs_trans        *otp,
        struct xfs_trans        *ntp)
{
        struct xfs_dqtrx        *oq, *nq;
        int                     i, j;
        struct xfs_dqtrx        *oqa, *nqa;
        uint64_t                blk_res_used;

        if (!otp->t_dqinfo)
                return;

        xfs_trans_alloc_dqinfo(ntp);

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                oqa = otp->t_dqinfo->dqs[j];
                nqa = ntp->t_dqinfo->dqs[j];
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        blk_res_used = 0;

                        if (oqa[i].qt_dquot == NULL)
                                break;
                        oq = &oqa[i];
                        nq = &nqa[i];

                        if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
                                blk_res_used = oq->qt_bcount_delta;

                        nq->qt_dquot = oq->qt_dquot;
                        nq->qt_bcount_delta = nq->qt_icount_delta = 0;
                        nq->qt_rtbcount_delta = 0;

                        /*
                         * Transfer whatever is left of the reservations.
                         */
                        nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
                        oq->qt_blk_res = blk_res_used;

                        nq->qt_rtblk_res = oq->qt_rtblk_res -
                                oq->qt_rtblk_res_used;
                        oq->qt_rtblk_res = oq->qt_rtblk_res_used;

                        nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
                        oq->qt_ino_res = oq->qt_ino_res_used;
                }
        }
}
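
/*
 * For example (illustrative numbers, not from the original source): if the
 * old transaction holds qt_blk_res = 8 blocks and has consumed
 * qt_bcount_delta = 3 of them, xfs_trans_dup_dqinfo() leaves the old
 * transaction holding exactly the 3 blocks it used and hands the remaining
 * 5 to the new transaction, so the total reservation is unchanged across a
 * transaction roll.
 */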

/*
 * Wrap around mod_dquot to account for user, group and project quotas.
 */
void
xfs_trans_mod_dquot_byino(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        uint                    field,
        int64_t                 delta)
{
        struct xfs_mount        *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_ON(mp) ||
            xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return;

        if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
                xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
        if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
                xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
        if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
                xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        int                     i;
        struct xfs_dqtrx        *qa;

        switch (xfs_dquot_type(dqp)) {
        case XFS_DQTYPE_USER:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
                break;
        case XFS_DQTYPE_GROUP:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
                break;
        case XFS_DQTYPE_PROJ:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
                break;
        default:
                return NULL;
        }

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                if (qa[i].qt_dquot == NULL ||
                    qa[i].qt_dquot == dqp)
                        return &qa[i];
        }

        return NULL;
}

/*
 * Make the changes in the transaction structure.  The moral equivalent to
 * xfs_trans_mod_sb().  We don't touch any fields in the dquot, so we don't
 * care if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp,
        uint                    field,
        int64_t                 delta)
{
        struct xfs_dqtrx        *qtrx;

        ASSERT(tp);
        ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp));

        if (!delta)
                return;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);
        /*
         * Find either the first free slot or the slot that belongs
         * to this dquot.
         */
        qtrx = xfs_trans_get_dqtrx(tp, dqp);
        ASSERT(qtrx);
        if (qtrx->qt_dquot == NULL)
                qtrx->qt_dquot = dqp;

        trace_xfs_trans_mod_dquot_before(qtrx);
        trace_xfs_trans_mod_dquot(tp, dqp, field, delta);

        switch (field) {
        /* regular disk blk reservation */
        case XFS_TRANS_DQ_RES_BLKS:
                qtrx->qt_blk_res += delta;
                break;

        /* inode reservation */
        case XFS_TRANS_DQ_RES_INOS:
                qtrx->qt_ino_res += delta;
                break;

        /* disk blocks used */
        case XFS_TRANS_DQ_BCOUNT:
                qtrx->qt_bcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELBCOUNT:
                qtrx->qt_delbcnt_delta += delta;
                break;

        /* inode count */
        case XFS_TRANS_DQ_ICOUNT:
                if (qtrx->qt_ino_res && delta > 0) {
                        qtrx->qt_ino_res_used += delta;
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                }
                qtrx->qt_icount_delta += delta;
                break;

        /* rtblk reservation */
        case XFS_TRANS_DQ_RES_RTBLKS:
                qtrx->qt_rtblk_res += delta;
                break;

        /* rtblk count */
        case XFS_TRANS_DQ_RTBCOUNT:
                if (qtrx->qt_rtblk_res && delta > 0) {
                        qtrx->qt_rtblk_res_used += delta;
                        ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
                }
                qtrx->qt_rtbcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELRTBCOUNT:
                qtrx->qt_delrtb_delta += delta;
                break;

        default:
                ASSERT(0);
        }

        trace_xfs_trans_mod_dquot_after(qtrx);
}
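
/*
 * For example (illustrative numbers): a transaction that reserved 10 blocks
 * up front and then allocated 4 of them to a file ends up doing the
 * equivalent of
 *
 *      xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 10);
 *      xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 4);
 *
 * (in real callers the first comes from xfs_trans_dqresv() and the second
 * usually via xfs_trans_mod_dquot_byino()), leaving qt_blk_res = 10 and
 * qt_bcount_delta = 4.  The 6 unused blocks of the reservation are given
 * back when the deltas are applied at commit time.
 */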

/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that
 * the highest number of dquot types - usr, grp and prj - involved in a
 * transaction is 3, so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
        struct xfs_trans        *tp,
        struct xfs_dqtrx        *q)
{
        ASSERT(q[0].qt_dquot != NULL);
        if (q[1].qt_dquot == NULL) {
                xfs_dqlock(q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
        } else {
                ASSERT(XFS_QM_TRANS_MAXDQS == 2);
                xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[1].qt_dquot);
        }
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
        struct xfs_dquot_res    *res,
        uint64_t                reserved,
        int64_t                 res_used,
        int64_t                 count_delta)
{
        if (reserved != 0) {
                /*
                 * Subtle math here: If reserved > res_used (the normal case),
                 * we're simply subtracting the unused transaction quota
                 * reservation from the dquot reservation.
                 *
                 * If, however, res_used > reserved, then we have allocated
                 * more quota blocks than were reserved for the transaction.
                 * We must add that excess to the dquot reservation since it
                 * tracks (usage + resv) and by definition we didn't reserve
                 * that excess.
                 */
                res->reserved -= abs(reserved - res_used);
        } else if (count_delta != 0) {
                /*
                 * These blks were never reserved, either inside a transaction
                 * or outside one (in a delayed allocation).  Also, this isn't
                 * always a negative number since we sometimes deliberately
                 * skip quota reservations.
                 */
                res->reserved += count_delta;
        }
}

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
        struct xfs_trans        *tp)
{
        int                     i, j;
        struct xfs_dquot        *dqp;
        struct xfs_dqtrx        *qtrx, *qa;
        int64_t                 totalbdelta;
        int64_t                 totalrtbdelta;

        if (!tp->t_dqinfo)
                return;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];
                if (qa[0].qt_dquot == NULL)
                        continue;

                /*
                 * Lock all of the dquots and join them to the transaction.
                 */
                xfs_trans_dqlockedjoin(tp, qa);

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        uint64_t        blk_res_used;

                        qtrx = &qa[i];
                        /*
                         * The array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        dqp = qtrx->qt_dquot;
                        if (dqp == NULL)
                                break;

                        ASSERT(XFS_DQ_IS_LOCKED(dqp));

                        /*
                         * Adjust the actual number of blocks used.
                         *
                         * The issue here is - sometimes we intentionally
                         * skip the blkquota reservation, to be fair to users
                         * (when the amount is small).  On the other hand,
                         * delayed allocs do make reservations, but that's
                         * outside of a transaction, so we have no idea how
                         * much was really reserved.
                         * So, here we've accumulated delayed allocation blks
                         * and non-delay blks.  The assumption is that the
                         * delayed ones are always reserved (outside of a
                         * transaction), and the others may or may not have
                         * quota reservations.
                         */
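                        /*
                         * For example (illustrative numbers): with
                         * qt_bcount_delta = 2 and qt_delbcnt_delta = 8, the
                         * dquot's q_blk.count below grows by totalbdelta =
                         * 10, but only the 2 transaction-reserved blocks
                         * count against qt_blk_res when the leftover
                         * reservation is trimmed further down.
                         */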
                        totalbdelta = qtrx->qt_bcount_delta +
                                qtrx->qt_delbcnt_delta;
                        totalrtbdelta = qtrx->qt_rtbcount_delta +
                                qtrx->qt_delrtb_delta;

                        if (totalbdelta != 0 || totalrtbdelta != 0 ||
                            qtrx->qt_icount_delta != 0) {
                                trace_xfs_trans_apply_dquot_deltas_before(dqp);
                                trace_xfs_trans_apply_dquot_deltas(qtrx);
                        }

#ifdef DEBUG
                        if (totalbdelta < 0)
                                ASSERT(dqp->q_blk.count >= -totalbdelta);

                        if (totalrtbdelta < 0)
                                ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

                        if (qtrx->qt_icount_delta < 0)
                                ASSERT(dqp->q_ino.count >=
                                       -qtrx->qt_icount_delta);
#endif
                        if (totalbdelta)
                                dqp->q_blk.count += totalbdelta;

                        if (qtrx->qt_icount_delta)
                                dqp->q_ino.count += qtrx->qt_icount_delta;

                        if (totalrtbdelta)
                                dqp->q_rtb.count += totalrtbdelta;

                        if (totalbdelta != 0 || totalrtbdelta != 0 ||
                            qtrx->qt_icount_delta != 0)
                                trace_xfs_trans_apply_dquot_deltas_after(dqp);

                        /*
                         * Get any default limits in use.
                         * Start/reset the timer(s) if needed.
                         */
                        if (dqp->q_id) {
                                xfs_qm_adjust_dqlimits(dqp);
                                xfs_qm_adjust_dqtimers(dqp);
                        }

                        dqp->q_flags |= XFS_DQFLAG_DIRTY;
                        /*
                         * add this to the list of items to get logged
                         */
                        xfs_trans_log_dquot(tp, dqp);
                        /*
                         * Take off what's left of the original reservation.
                         * In case of delayed allocations, there's no
                         * reservation that a transaction structure knows of.
                         */
                        blk_res_used = max_t(int64_t, 0,
                                             qtrx->qt_bcount_delta);
                        xfs_apply_quota_reservation_deltas(&dqp->q_blk,
                                        qtrx->qt_blk_res, blk_res_used,
                                        qtrx->qt_bcount_delta);

                        /*
                         * Adjust the RT reservation.
                         */
                        xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
                                        qtrx->qt_rtblk_res,
                                        qtrx->qt_rtblk_res_used,
                                        qtrx->qt_rtbcount_delta);

                        /*
                         * Adjust the inode reservation.
                         */
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                        xfs_apply_quota_reservation_deltas(&dqp->q_ino,
                                        qtrx->qt_ino_res,
                                        qtrx->qt_ino_res_used,
                                        qtrx->qt_icount_delta);

                        ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
                        ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
                        ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
                }
        }
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted.  If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
        struct xfs_trans        *tp)
{
        int                     i, j;
        struct xfs_dquot        *dqp;
        struct xfs_dqtrx        *qtrx, *qa;
        bool                    locked;

        if (!tp->t_dqinfo)
                return;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * We assume that the array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        dqp = qtrx->qt_dquot;
                        if (dqp == NULL)
                                break;
                        /*
                         * Unreserve the original reservation.  We don't care
                         * about the number of blocks used field, or deltas.
                         * Also we don't bother to zero the fields.
                         */
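                        /*
                         * Note that the dquot lock is taken lazily below:
                         * we only lock the dquot once we find a reservation
                         * to strip, so dquots this transaction never
                         * reserved against are not locked at all.
                         */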
                        locked = false;
                        if (qtrx->qt_blk_res) {
                                xfs_dqlock(dqp);
                                locked = true;
                                dqp->q_blk.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_blk_res;
                        }
                        if (qtrx->qt_ino_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_ino.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_ino_res;
                        }

                        if (qtrx->qt_rtblk_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_rtb.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_rtblk_res;
                        }
                        if (locked)
                                xfs_dqunlock(dqp);
                }
        }
}

STATIC void
xfs_quota_warn(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        int                     type)
{
        enum quota_type         qtype;

        switch (xfs_dquot_type(dqp)) {
        case XFS_DQTYPE_PROJ:
                qtype = PRJQUOTA;
                break;
        case XFS_DQTYPE_USER:
                qtype = USRQUOTA;
                break;
        case XFS_DQTYPE_GROUP:
                qtype = GRPQUOTA;
                break;
        default:
                return;
        }

        quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
                           mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and
 * block warning codes will always be 3, since it's userspace ABI now.  We
 * also never decrease the quota reservation here, so the *BELOW messages
 * are irrelevant.
 */
static inline int
xfs_dqresv_check(
        struct xfs_dquot_res    *res,
        struct xfs_quota_limits *qlim,
        int64_t                 delta,
        bool                    *fatal)
{
        xfs_qcnt_t              hardlimit = res->hardlimit;
        xfs_qcnt_t              softlimit = res->softlimit;
        xfs_qcnt_t              total_count = res->reserved + delta;

        BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN     + 3);
        BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
        BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN     + 3);

        *fatal = false;
        if (delta <= 0)
                return QUOTA_NL_NOWARN;

        if (!hardlimit)
                hardlimit = qlim->hard;
        if (!softlimit)
                softlimit = qlim->soft;

        if (hardlimit && total_count > hardlimit) {
                *fatal = true;
                return QUOTA_NL_IHARDWARN;
        }

        if (softlimit && total_count > softlimit) {
                time64_t        now = ktime_get_real_seconds();

                if ((res->timer != 0 && now > res->timer) ||
                    (res->warnings != 0 && res->warnings >= qlim->warn)) {
                        *fatal = true;
                        return QUOTA_NL_ISOFTLONGWARN;
                }

                res->warnings++;
                return QUOTA_NL_ISOFTWARN;
        }

        return QUOTA_NL_NOWARN;
}
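
/*
 * Worked example for xfs_dqresv_check() (illustrative numbers): with
 * res->reserved = 95 blocks, softlimit = 90 and hardlimit = 100, asking for
 * delta = 10 makes total_count = 105, which exceeds the hardlimit and fails
 * fatally with QUOTA_NL_IHARDWARN (mapped to the block variant by the +3
 * offset in the caller).  Asking for delta = 3 only crosses the softlimit,
 * so - provided the grace timer has not expired and the warning limit is
 * not hit - it merely bumps res->warnings and returns a non-fatal
 * QUOTA_NL_ISOFTWARN.
 */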

/*
 * This reserves disk blocks and inodes against a dquot.  Flags indicate
 * whether the blk reservation is for RT or regular blocks.  Passing the
 * XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        int64_t                 nblks,
        long                    ninos,
        uint                    flags)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_def_quota    *defq;
        struct xfs_dquot_res    *blkres;
        struct xfs_quota_limits *qlim;

        xfs_dqlock(dqp);

        defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                blkres = &dqp->q_blk;
                qlim = &defq->blk;
        } else {
                blkres = &dqp->q_rtb;
                qlim = &defq->rtb;
        }

        if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
            xfs_dquot_is_enforced(dqp)) {
                int             quota_nl;
                bool            fatal;

                /*
                 * dquot is locked already.  See if we'd go over the
                 * hardlimit or exceed the timelimit if we'd reserve
                 * resources.
                 */
                quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
                if (quota_nl != QUOTA_NL_NOWARN) {
                        /*
                         * Quota block warning codes are 3 more than the
                         * inode codes, which we check above.
                         */
                        xfs_quota_warn(mp, dqp, quota_nl + 3);
                        if (fatal)
                                goto error_return;
                }

                quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
                                            &fatal);
                if (quota_nl != QUOTA_NL_NOWARN) {
                        xfs_quota_warn(mp, dqp, quota_nl);
                        if (fatal)
                                goto error_return;
                }
        }

        /*
         * Change the reservation, but not the actual usage.
         * Note that q_blk.reserved = q_blk.count + resv
         */
        blkres->reserved += (xfs_qcnt_t)nblks;
        dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

        /*
         * note the reservation amt in the trans struct too,
         * so that the transaction knows how much was reserved by
         * it against this particular dquot.
         * We don't do this when we are reserving for a delayed allocation,
         * because we don't have the luxury of a transaction envelope then.
         */
        if (tp) {
                ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
                xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
                                    nblks);
                xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
        }

        if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
            XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
            XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
                goto error_corrupt;

        xfs_dqunlock(dqp);
        return 0;

error_return:
        xfs_dqunlock(dqp);
        if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
                return -ENOSPC;
        return -EDQUOT;
error_corrupt:
        xfs_dqunlock(dqp);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        return -EFSCORRUPTED;
}
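
/*
 * Note on the error convention above: project quota failures are reported
 * as -ENOSPC rather than -EDQUOT because, to a user writing into a
 * project-controlled directory tree, hitting the project limit looks like
 * the filesystem running out of space rather than a personal quota limit.
 */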

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and project
 * quotas is important, because this follows an all-or-nothing approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *         XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *         XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * Project quota failures return -ENOSPC instead of -EDQUOT (see
 * xfs_trans_dqresv).  dquots are unlocked on return.
 */
int
xfs_trans_reserve_quota_bydquots(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        int64_t                 nblks,
        long                    ninos,
        uint                    flags)
{
        int                     error;

        if (!XFS_IS_QUOTA_ON(mp))
                return 0;

        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

        if (udqp) {
                error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
                if (error)
                        return error;
        }

        if (gdqp) {
                error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_usr;
        }

        if (pdqp) {
                error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_grp;
        }

        /*
         * Didn't change anything critical, so, no need to log
         */
        return 0;

unwind_grp:
        flags |= XFS_QMOPT_FORCE_RES;
        if (gdqp)
                xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
        flags |= XFS_QMOPT_FORCE_RES;
        if (udqp)
                xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
        return error;
}

/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int64_t                 dblocks,
        int64_t                 rblocks,
        bool                    force)
{
        struct xfs_mount        *mp = ip->i_mount;
        unsigned int            qflags = 0;
        int                     error;

        if (!XFS_IS_QUOTA_ON(mp))
                return 0;

        ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (force)
                qflags |= XFS_QMOPT_FORCE_RES;

        /* Reserve data device quota against the inode's dquots. */
        error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
                        ip->i_gdquot, ip->i_pdquot, dblocks, 0,
                        XFS_QMOPT_RES_REGBLKS | qflags);
        if (error)
                return error;

        /* Do the same but for realtime blocks. */
        error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
                        ip->i_gdquot, ip->i_pdquot, rblocks, 0,
                        XFS_QMOPT_RES_RTBLKS | qflags);
        if (error) {
                xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
                                ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
                                XFS_QMOPT_RES_REGBLKS);
                return error;
        }

        return 0;
}

/* Change the quota reservations for an inode creation activity. */
int
xfs_trans_reserve_quota_icreate(
        struct xfs_trans        *tp,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        int64_t                 dblocks)
{
        struct xfs_mount        *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_ON(mp))
                return 0;

        return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
                        dblocks, 1, XFS_QMOPT_RES_REGBLKS);
}

STATIC void
xfs_trans_alloc_dqinfo(
        struct xfs_trans        *tp)
{
        tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
                                         GFP_KERNEL | __GFP_NOFAIL);
}

void
xfs_trans_free_dqinfo(
        struct xfs_trans        *tp)
{
        if (!tp->t_dqinfo)
                return;
        kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
        tp->t_dqinfo = NULL;
}