// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_error.h"

STATIC void     xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(dqp->q_logitem.qli_dquot == dqp);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing to be logged when the
 * transaction is committed.  The dquot must already be associated with the
 * given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary case,
 * this gets called via xfs_trans_commit, after the transaction is already
 * dirty.  However, there's nothing to stop this from getting called directly,
 * as done by xfs_qm_scall_setqlim.  Hence, the TRANS_DIRTY flag.
 */
void
xfs_trans_log_dquot(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /* Upgrade the dquot to bigtime format if possible. */
        if (dqp->q_id != 0 &&
            xfs_sb_version_hasbigtime(&tp->t_mountp->m_sb) &&
            !(dqp->q_type & XFS_DQTYPE_BIGTIME))
                dqp->q_type |= XFS_DQTYPE_BIGTIME;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction.
 */
void
xfs_trans_dup_dqinfo(
        struct xfs_trans        *otp,
        struct xfs_trans        *ntp)
{
        struct xfs_dqtrx        *oq, *nq;
        int                     i, j;
        struct xfs_dqtrx        *oqa, *nqa;
        uint64_t                blk_res_used;

        if (!otp->t_dqinfo)
                return;

        xfs_trans_alloc_dqinfo(ntp);

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                oqa = otp->t_dqinfo->dqs[j];
                nqa = ntp->t_dqinfo->dqs[j];
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        blk_res_used = 0;

                        if (oqa[i].qt_dquot == NULL)
                                break;
                        oq = &oqa[i];
                        nq = &nqa[i];

                        if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
                                blk_res_used = oq->qt_bcount_delta;

                        nq->qt_dquot = oq->qt_dquot;
                        nq->qt_bcount_delta = nq->qt_icount_delta = 0;
                        nq->qt_rtbcount_delta = 0;

                        /*
                         * Transfer whatever is left of the reservations.
                         */
                        nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
                        oq->qt_blk_res = blk_res_used;

                        nq->qt_rtblk_res = oq->qt_rtblk_res -
                                oq->qt_rtblk_res_used;
                        oq->qt_rtblk_res = oq->qt_rtblk_res_used;

                        nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
                        oq->qt_ino_res = oq->qt_ino_res_used;
                }
        }
}
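/*
 * Illustrative sketch (not part of the original file): a caller that
 * modifies a dquot outside of the normal reservation path, in the spirit
 * of xfs_qm_scall_setqlim, uses the join/log pair above like this.
 * "new_limit" is a hypothetical value for illustration only; the dquot
 * arrives locked (e.g. from xfs_qm_dqget) and the commit releases it via
 * the log item:
 *
 *	xfs_trans_dqjoin(tp, dqp);		// join the locked dquot
 *	dqp->q_blk.hardlimit = new_limit;	// hypothetical change
 *	xfs_trans_log_dquot(tp, dqp);		// dirty item + transaction
 *	error = xfs_trans_commit(tp);
 */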
/*
 * Wrapper around xfs_trans_mod_dquot() that applies the same delta to each
 * of the inode's attached dquots: user, group, and project.
 */
void
xfs_trans_mod_dquot_byino(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            field,
        int64_t         delta)
{
        xfs_mount_t     *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) ||
            !XFS_IS_QUOTA_ON(mp) ||
            xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return;

        if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
        if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
        if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        int                     i;
        struct xfs_dqtrx        *qa;

        switch (xfs_dquot_type(dqp)) {
        case XFS_DQTYPE_USER:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
                break;
        case XFS_DQTYPE_GROUP:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
                break;
        case XFS_DQTYPE_PROJ:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
                break;
        default:
                return NULL;
        }

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                if (qa[i].qt_dquot == NULL ||
                    qa[i].qt_dquot == dqp)
                        return &qa[i];
        }

        return NULL;
}
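/*
 * Illustrative sketch (not from the original sources): t_dqinfo->dqs is a
 * small two-dimensional table, indexed first by quota type and then by slot.
 * Slots of one type fill sequentially, never sparsely, which is why the
 * lookup above can stop at the first NULL entry:
 *
 *	dqs[XFS_QM_TRANS_USR][0 .. XFS_QM_TRANS_MAXDQS - 1]
 *	dqs[XFS_QM_TRANS_GRP][0 .. XFS_QM_TRANS_MAXDQS - 1]
 *	dqs[XFS_QM_TRANS_PRJ][0 .. XFS_QM_TRANS_MAXDQS - 1]
 */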
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp,
        uint                    field,
        int64_t                 delta)
{
        struct xfs_dqtrx        *qtrx;

        ASSERT(tp);
        ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
        qtrx = NULL;

        if (!delta)
                return;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);
        /*
         * Find either the first free slot or the slot that belongs
         * to this dquot.
         */
        qtrx = xfs_trans_get_dqtrx(tp, dqp);
        ASSERT(qtrx);
        if (qtrx->qt_dquot == NULL)
                qtrx->qt_dquot = dqp;

        trace_xfs_trans_mod_dquot_before(qtrx);
        trace_xfs_trans_mod_dquot(tp, dqp, field, delta);

        switch (field) {
        /* regular disk blk reservation */
        case XFS_TRANS_DQ_RES_BLKS:
                qtrx->qt_blk_res += delta;
                break;

        /* inode reservation */
        case XFS_TRANS_DQ_RES_INOS:
                qtrx->qt_ino_res += delta;
                break;

        /* disk blocks used */
        case XFS_TRANS_DQ_BCOUNT:
                qtrx->qt_bcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELBCOUNT:
                qtrx->qt_delbcnt_delta += delta;
                break;

        /* inode count */
        case XFS_TRANS_DQ_ICOUNT:
                if (qtrx->qt_ino_res && delta > 0) {
                        qtrx->qt_ino_res_used += delta;
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                }
                qtrx->qt_icount_delta += delta;
                break;

        /* rtblk reservation */
        case XFS_TRANS_DQ_RES_RTBLKS:
                qtrx->qt_rtblk_res += delta;
                break;

        /* rtblk count */
        case XFS_TRANS_DQ_RTBCOUNT:
                if (qtrx->qt_rtblk_res && delta > 0) {
                        qtrx->qt_rtblk_res_used += delta;
                        ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
                }
                qtrx->qt_rtbcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELRTBCOUNT:
                qtrx->qt_delrtb_delta += delta;
                break;

        default:
                ASSERT(0);
        }

        trace_xfs_trans_mod_dquot_after(qtrx);
}


/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that
 * the highest number of dquots of one type - usr, grp and prj - involved in
 * a transaction is 2, so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
        struct xfs_trans        *tp,
        struct xfs_dqtrx        *q)
{
        ASSERT(q[0].qt_dquot != NULL);
        if (q[1].qt_dquot == NULL) {
                xfs_dqlock(q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
        } else {
                ASSERT(XFS_QM_TRANS_MAXDQS == 2);
                xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[1].qt_dquot);
        }
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
        struct xfs_dquot_res    *res,
        uint64_t                reserved,
        int64_t                 res_used,
        int64_t                 count_delta)
{
        if (reserved != 0) {
                /*
                 * Subtle math here: If reserved > res_used (the normal case),
                 * we're simply subtracting the unused transaction quota
                 * reservation from the dquot reservation.
                 *
                 * If, however, res_used > reserved, then we have allocated
                 * more quota blocks than were reserved for the transaction.
                 * We must add that excess to the dquot reservation since it
                 * tracks (usage + resv) and by definition we didn't reserve
                 * that excess.
                 */
                res->reserved -= abs(reserved - res_used);
        } else if (count_delta != 0) {
                /*
                 * These blks were never reserved, either inside a transaction
                 * or outside one (in a delayed allocation).  Also, this isn't
                 * always a negative number since we sometimes deliberately
                 * skip quota reservations.
                 */
                res->reserved += count_delta;
        }
}
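/*
 * Worked example (illustrative numbers, not from the original sources): in
 * the normal case a transaction reserved 8 blocks against a dquot up front
 * (reserved == 8) but ended up allocating only 5 (res_used == 5).  The dquot
 * reservation already includes all 8, so the helper above trims the unused
 * portion:
 *
 *	res->reserved -= abs(8 - 5);	// give back the 3 unused blocks
 *
 * For a delayed allocation there is no transaction reservation at all
 * (reserved == 0), so count_delta is folded into res->reserved directly.
 */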
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
        struct xfs_trans        *tp)
{
        int                     i, j;
        struct xfs_dquot        *dqp;
        struct xfs_dqtrx        *qtrx, *qa;
        int64_t                 totalbdelta;
        int64_t                 totalrtbdelta;

        if (!tp->t_dqinfo)
                return;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];
                if (qa[0].qt_dquot == NULL)
                        continue;

                /*
                 * Lock all of the dquots and join them to the transaction.
                 */
                xfs_trans_dqlockedjoin(tp, qa);

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        uint64_t        blk_res_used;

                        qtrx = &qa[i];
                        /*
                         * The array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;

                        ASSERT(XFS_DQ_IS_LOCKED(dqp));

                        /*
                         * Adjust the actual number of blocks used.
                         *
                         * Sometimes we deliberately skip making a blkquota
                         * reservation to be fair to users (when the amount
                         * is small).  Delayed allocs, on the other hand, do
                         * make reservations, but outside of a transaction,
                         * so we have no idea how much was really reserved.
                         * So, here we've accumulated delayed allocation blks
                         * and non-delay blks.  The assumption is that the
                         * delayed ones are always reserved (outside of a
                         * transaction), and the others may or may not have
                         * quota reservations.
                         */
                        totalbdelta = qtrx->qt_bcount_delta +
                                qtrx->qt_delbcnt_delta;
                        totalrtbdelta = qtrx->qt_rtbcount_delta +
                                qtrx->qt_delrtb_delta;

                        if (totalbdelta != 0 || totalrtbdelta != 0 ||
                            qtrx->qt_icount_delta != 0) {
                                trace_xfs_trans_apply_dquot_deltas_before(dqp);
                                trace_xfs_trans_apply_dquot_deltas(qtrx);
                        }

#ifdef DEBUG
                        if (totalbdelta < 0)
                                ASSERT(dqp->q_blk.count >= -totalbdelta);

                        if (totalrtbdelta < 0)
                                ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

                        if (qtrx->qt_icount_delta < 0)
                                ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
                        if (totalbdelta)
                                dqp->q_blk.count += totalbdelta;

                        if (qtrx->qt_icount_delta)
                                dqp->q_ino.count += qtrx->qt_icount_delta;

                        if (totalrtbdelta)
                                dqp->q_rtb.count += totalrtbdelta;

                        if (totalbdelta != 0 || totalrtbdelta != 0 ||
                            qtrx->qt_icount_delta != 0)
                                trace_xfs_trans_apply_dquot_deltas_after(dqp);

                        /*
                         * Get any default limits in use.
                         * Start/reset the timer(s) if needed.
                         */
                        if (dqp->q_id) {
                                xfs_qm_adjust_dqlimits(dqp);
                                xfs_qm_adjust_dqtimers(dqp);
                        }

                        dqp->q_flags |= XFS_DQFLAG_DIRTY;
                        /*
                         * Add this to the list of items to get logged.
                         */
                        xfs_trans_log_dquot(tp, dqp);
                        /*
                         * Take off what's left of the original reservation.
                         * In the case of delayed allocations, there's no
                         * reservation that a transaction structure knows of.
                         */
                        blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
                        xfs_apply_quota_reservation_deltas(&dqp->q_blk,
                                        qtrx->qt_blk_res, blk_res_used,
                                        qtrx->qt_bcount_delta);

                        /*
                         * Adjust the RT reservation.
                         */
                        xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
                                        qtrx->qt_rtblk_res,
                                        qtrx->qt_rtblk_res_used,
                                        qtrx->qt_rtbcount_delta);

                        /*
                         * Adjust the inode reservation.
                         */
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                        xfs_apply_quota_reservation_deltas(&dqp->q_ino,
                                        qtrx->qt_ino_res,
                                        qtrx->qt_ino_res_used,
                                        qtrx->qt_icount_delta);

                        ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
                        ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
                        ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
                }
        }
}
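/*
 * Illustrative sketch (not from the original sources): the usual life cycle
 * of the deltas accumulated above, assuming the common commit path:
 *
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, nblks);
 *	...
 *	xfs_trans_commit(tp);
 *		-> xfs_trans_apply_dquot_deltas(tp)	// fold deltas into
 *							// the incore dquots
 *
 * On abort, xfs_trans_cancel() instead backs out the reservations via
 * xfs_trans_unreserve_and_mod_dquots() below and the deltas are discarded.
 */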
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted.  If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
        struct xfs_trans        *tp)
{
        int                     i, j;
        struct xfs_dquot        *dqp;
        struct xfs_dqtrx        *qtrx, *qa;
        bool                    locked;

        if (!tp->t_dqinfo)
                return;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * We assume that the array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;
                        /*
                         * Unreserve the original reservation.  We don't care
                         * about the number of blocks used field, or deltas.
                         * Also we don't bother to zero the fields.
                         */
                        locked = false;
                        if (qtrx->qt_blk_res) {
                                xfs_dqlock(dqp);
                                locked = true;
                                dqp->q_blk.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_blk_res;
                        }
                        if (qtrx->qt_ino_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_ino.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_ino_res;
                        }

                        if (qtrx->qt_rtblk_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_rtb.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_rtblk_res;
                        }
                        if (locked)
                                xfs_dqunlock(dqp);
                }
        }
}

STATIC void
xfs_quota_warn(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        int                     type)
{
        enum quota_type         qtype;

        switch (xfs_dquot_type(dqp)) {
        case XFS_DQTYPE_PROJ:
                qtype = PRJQUOTA;
                break;
        case XFS_DQTYPE_USER:
                qtype = USRQUOTA;
                break;
        case XFS_DQTYPE_GROUP:
                qtype = GRPQUOTA;
                break;
        default:
                return;
        }

        quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
                           mp->m_super->s_dev, type);
}
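/*
 * Worked example (illustrative, not from the original sources): the
 * QUOTA_NL_* codes in the VFS quota netlink ABI are laid out so that each
 * block-quota code is exactly 3 greater than the corresponding inode-quota
 * code, e.g.:
 *
 *	QUOTA_NL_IHARDWARN + 3 == QUOTA_NL_BHARDWARN
 *
 * xfs_dqresv_check() below therefore always returns the inode flavor of a
 * code, and the block-reservation caller converts it by adding 3; the
 * BUILD_BUG_ON()s below pin that assumption at compile time.
 */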
/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3 since it's userspace ABI now.  We never
 * decrease the quota reservation here, so the *BELOW messages are irrelevant.
 */
static inline int
xfs_dqresv_check(
        struct xfs_dquot_res    *res,
        struct xfs_quota_limits *qlim,
        int64_t                 delta,
        bool                    *fatal)
{
        xfs_qcnt_t              hardlimit = res->hardlimit;
        xfs_qcnt_t              softlimit = res->softlimit;
        xfs_qcnt_t              total_count = res->reserved + delta;

        BUILD_BUG_ON(QUOTA_NL_BHARDWARN != QUOTA_NL_IHARDWARN + 3);
        BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
        BUILD_BUG_ON(QUOTA_NL_BSOFTWARN != QUOTA_NL_ISOFTWARN + 3);

        *fatal = false;
        if (delta <= 0)
                return QUOTA_NL_NOWARN;

        if (!hardlimit)
                hardlimit = qlim->hard;
        if (!softlimit)
                softlimit = qlim->soft;

        if (hardlimit && total_count > hardlimit) {
                *fatal = true;
                return QUOTA_NL_IHARDWARN;
        }

        if (softlimit && total_count > softlimit) {
                time64_t        now = ktime_get_real_seconds();

                if ((res->timer != 0 && now > res->timer) ||
                    (res->warnings != 0 && res->warnings >= qlim->warn)) {
                        *fatal = true;
                        return QUOTA_NL_ISOFTLONGWARN;
                }

                res->warnings++;
                return QUOTA_NL_ISOFTWARN;
        }

        return QUOTA_NL_NOWARN;
}
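/*
 * Worked example (hypothetical numbers): with a hardlimit of 100 blocks, a
 * softlimit of 80, and res->reserved currently 75, a request for delta = 30
 * gives total_count = 105 > 100, so the check above sets *fatal and returns
 * QUOTA_NL_IHARDWARN.  A request for delta = 10 gives total_count = 85: over
 * the softlimit only, so the reservation is allowed but QUOTA_NL_ISOFTWARN
 * is returned, escalating to the fatal QUOTA_NL_ISOFTLONGWARN once the grace
 * timer has expired or the warning limit is hit.
 */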
/*
 * This reserves disk blocks and inodes against a dquot.  Flags indicate
 * whether the blk reservation is for RT or regular disk blocks.  Sending in
 * the XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        int64_t                 nblks,
        long                    ninos,
        uint                    flags)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_def_quota    *defq;
        struct xfs_dquot_res    *blkres;
        struct xfs_quota_limits *qlim;

        xfs_dqlock(dqp);

        defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                blkres = &dqp->q_blk;
                qlim = &defq->blk;
        } else {
                blkres = &dqp->q_rtb;
                qlim = &defq->rtb;
        }

        if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
            xfs_dquot_is_enforced(dqp)) {
                int             quota_nl;
                bool            fatal;

                /*
                 * dquot is locked already.  See if we'd go over the hardlimit
                 * or exceed the timelimit if we'd reserve resources.
                 */
                quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
                if (quota_nl != QUOTA_NL_NOWARN) {
                        /*
                         * Quota block warning codes are 3 more than the inode
                         * codes, which we check above.
                         */
                        xfs_quota_warn(mp, dqp, quota_nl + 3);
                        if (fatal)
                                goto error_return;
                }

                quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
                                &fatal);
                if (quota_nl != QUOTA_NL_NOWARN) {
                        xfs_quota_warn(mp, dqp, quota_nl);
                        if (fatal)
                                goto error_return;
                }
        }

        /*
         * Change the reservation, but not the actual usage.
         * Note that q_blk.reserved = q_blk.count + resv
         */
        blkres->reserved += (xfs_qcnt_t)nblks;
        dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

        /*
         * Note the reservation amount in the trans structure too, so that
         * the transaction knows how much was reserved by it against this
         * particular dquot.
         * We don't do this when we are reserving for a delayed allocation,
         * because we don't have the luxury of a transaction envelope then.
         */
        if (tp) {
                ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
                xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
                                    nblks);
                xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
        }

        if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
            XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
            XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
                goto error_corrupt;

        xfs_dqunlock(dqp);
        return 0;

error_return:
        xfs_dqunlock(dqp);
        if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
                return -ENOSPC;
        return -EDQUOT;
error_corrupt:
        xfs_dqunlock(dqp);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        return -EFSCORRUPTED;
}


/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and project
 * quotas is important, because this follows an all-or-nothing approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement.  Used by chown.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks.
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks.
 * Project quota failures return -ENOSPC instead of -EDQUOT (see
 * xfs_trans_dqresv above).
 * dquots are unlocked on return, if they were not locked by the caller.
 */
int
xfs_trans_reserve_quota_bydquots(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        int64_t                 nblks,
        long                    ninos,
        uint                    flags)
{
        int                     error;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

        if (udqp) {
                error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
                if (error)
                        return error;
        }

        if (gdqp) {
                error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_usr;
        }

        if (pdqp) {
                error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_grp;
        }

        /*
         * Didn't change anything critical, so, no need to log.
         */
        return 0;

unwind_grp:
        flags |= XFS_QMOPT_FORCE_RES;
        if (gdqp)
                xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
        flags |= XFS_QMOPT_FORCE_RES;
        if (udqp)
                xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
        return error;
}
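/*
 * Illustrative sketch (not from the original sources): a caller holding all
 * three dquots for a new inode, in the spirit of
 * xfs_trans_reserve_quota_icreate() below, reserves blocks and one inode
 * against them in one shot:
 *
 *	error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
 *			dblocks, 1, XFS_QMOPT_RES_REGBLKS);
 *	if (error)
 *		return error;	// nothing remains reserved; unwind is done
 *
 * Either all three reservations succeed or none remain held, which is what
 * the unwind_grp/unwind_usr labels above guarantee.
 */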
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int64_t                 dblocks,
        int64_t                 rblocks,
        bool                    force)
{
        struct xfs_mount        *mp = ip->i_mount;
        unsigned int            qflags = 0;
        int                     error;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (force)
                qflags |= XFS_QMOPT_FORCE_RES;

        /* Reserve data device quota against the inode's dquots. */
        error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
                        ip->i_gdquot, ip->i_pdquot, dblocks, 0,
                        XFS_QMOPT_RES_REGBLKS | qflags);
        if (error)
                return error;

        /* Do the same but for realtime blocks. */
        error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
                        ip->i_gdquot, ip->i_pdquot, rblocks, 0,
                        XFS_QMOPT_RES_RTBLKS | qflags);
        if (error) {
                xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
                                ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
                                XFS_QMOPT_RES_REGBLKS);
                return error;
        }

        return 0;
}

/* Change the quota reservations for an inode creation activity. */
int
xfs_trans_reserve_quota_icreate(
        struct xfs_trans        *tp,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        int64_t                 dblocks)
{
        struct xfs_mount        *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
                        dblocks, 1, XFS_QMOPT_RES_REGBLKS);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
        struct xfs_trans        *tp,
        struct xfs_qoff_logitem *startqoff,
        uint                    flags)
{
        struct xfs_qoff_logitem *q;

        ASSERT(tp != NULL);

        q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
        ASSERT(q != NULL);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &q->qql_item);
        return q;
}


/*
 * This is called to mark the quotaoff logitem as needing to be logged when
 * the transaction is committed.  The logitem must already be associated with
 * the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
        struct xfs_trans        *tp,
        struct xfs_qoff_logitem *qlp)
{
        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

STATIC void
xfs_trans_alloc_dqinfo(
        xfs_trans_t     *tp)
{
        tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
                                         GFP_KERNEL | __GFP_NOFAIL);
}

void
xfs_trans_free_dqinfo(
        xfs_trans_t     *tp)
{
        if (!tp->t_dqinfo)
                return;
        kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
        tp->t_dqinfo = NULL;
}
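/*
 * Illustrative sketch (not from the original sources): a data-device write
 * path would typically reserve quota right after locking the inode and
 * joining it to the transaction, roughly like this ("resblks" is a
 * hypothetical block count):
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, false);
 *	if (error)
 *		goto out_trans_cancel;	// cancel backs out the reservation
 */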