// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/* Forward declaration; the allocator is defined at the bottom of this file. */
STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/* Dirty both the transaction and the dquot's log item. */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	/* Nothing to carry over if the old transaction tracked no dquots. */
	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			/* Slots are filled sequentially, so stop at first hole. */
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			/*
			 * Blocks already counted against the old reservation
			 * must stay with the old transaction.
			 */
			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;

		}
	}
}

/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	/* Quota must be enabled, and quota inodes themselves are exempt. */
	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	/* Apply the same delta to each attached dquot type that is enabled. */
	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

/*
 * Find the dqtrx slot tracking @dqp in @tp, or the first free slot in the
 * array for this dquot's type.  Returns NULL for an unknown dquot type or
 * if all slots are occupied by other dquots.
 */
STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
		break;
	case XFS_DQTYPE_GROUP:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
		break;
	case XFS_DQTYPE_PROJ:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
		break;
	default:
		return NULL;
	}

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	if (delta) {
		trace_xfs_trans_mod_dquot_before(qtrx);
		trace_xfs_trans_mod_dquot(tp, dqp, field, delta);
	}

	/* Accumulate the delta into the field's in-transaction counter. */
	switch (field) {

	/*
	 * regular disk blk reservation
	 */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/*
	 * inode reservation
	 */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/*
	 * disk blocks used.
	 */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/*
	 * Inode Count
	 */
	case XFS_TRANS_DQ_ICOUNT:
		/* Positive icount changes consume the inode reservation. */
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/*
	 * rtblk reservation
	 */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/*
	 * rtblk count
	 */
	case XFS_TRANS_DQ_RTBCOUNT:
		/* Positive rtblk changes consume the rtblk reservation. */
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	if (delta)
		trace_xfs_trans_mod_dquot_after(qtrx);

	/* NOTE(review): set unconditionally, even when delta == 0. */
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}


/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 3 so we don't need to make this very generic.
293 */ 294 STATIC void 295 xfs_trans_dqlockedjoin( 296 struct xfs_trans *tp, 297 struct xfs_dqtrx *q) 298 { 299 ASSERT(q[0].qt_dquot != NULL); 300 if (q[1].qt_dquot == NULL) { 301 xfs_dqlock(q[0].qt_dquot); 302 xfs_trans_dqjoin(tp, q[0].qt_dquot); 303 } else { 304 ASSERT(XFS_QM_TRANS_MAXDQS == 2); 305 xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot); 306 xfs_trans_dqjoin(tp, q[0].qt_dquot); 307 xfs_trans_dqjoin(tp, q[1].qt_dquot); 308 } 309 } 310 311 /* Apply dqtrx changes to the quota reservation counters. */ 312 static inline void 313 xfs_apply_quota_reservation_deltas( 314 struct xfs_dquot_res *res, 315 uint64_t reserved, 316 int64_t res_used, 317 int64_t count_delta) 318 { 319 if (reserved != 0) { 320 /* 321 * Subtle math here: If reserved > res_used (the normal case), 322 * we're simply subtracting the unused transaction quota 323 * reservation from the dquot reservation. 324 * 325 * If, however, res_used > reserved, then we have allocated 326 * more quota blocks than were reserved for the transaction. 327 * We must add that excess to the dquot reservation since it 328 * tracks (usage + resv) and by definition we didn't reserve 329 * that excess. 330 */ 331 res->reserved -= abs(reserved - res_used); 332 } else if (count_delta != 0) { 333 /* 334 * These blks were never reserved, either inside a transaction 335 * or outside one (in a delayed allocation). Also, this isn't 336 * always a negative number since we sometimes deliberately 337 * skip quota reservations. 338 */ 339 res->reserved += count_delta; 340 } 341 } 342 343 /* 344 * Called by xfs_trans_commit() and similar in spirit to 345 * xfs_trans_apply_sb_deltas(). 346 * Go thru all the dquots belonging to this transaction and modify the 347 * INCORE dquot to reflect the actual usages. 348 * Unreserve just the reservations done by this transaction. 349 * dquot is still left locked at exit. 
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			uint64_t	blk_res_used;

			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			/*
			 * adjust the actual number of blocks used
			 */

			/*
			 * The issue here is - sometimes we don't make a blkquota
			 * reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0) {
				trace_xfs_trans_apply_dquot_deltas_before(dqp);
				trace_xfs_trans_apply_dquot_deltas(qtrx);
			}

#ifdef DEBUG
			/* Usage counters must never go negative. */
			if (totalbdelta < 0)
				ASSERT(dqp->q_blk.count >= -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				dqp->q_blk.count += totalbdelta;

			if (qtrx->qt_icount_delta)
				dqp->q_ino.count += qtrx->qt_icount_delta;

			if (totalrtbdelta)
				dqp->q_rtb.count += totalrtbdelta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0)
				trace_xfs_trans_apply_dquot_deltas_after(dqp);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 * (q_id == 0 is the default-limits dquot; skip it.)
			 */
			if (dqp->q_id) {
				xfs_qm_adjust_dqlimits(dqp);
				xfs_qm_adjust_dqtimers(dqp);
			}

			dqp->q_flags |= XFS_DQFLAG_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
					qtrx->qt_blk_res, blk_res_used,
					qtrx->qt_bcount_delta);

			/*
			 * Adjust the RT reservation.
			 */
			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
					qtrx->qt_rtblk_res,
					qtrx->qt_rtblk_res_used,
					qtrx->qt_rtbcount_delta);

			/*
			 * Adjust the inode reservation.
			 */
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
					qtrx->qt_ino_res,
					qtrx->qt_ino_res_used,
					qtrx->qt_icount_delta);

			/* Invariant: reservations always cover actual usage. */
			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
		}
	}
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 * Lock lazily: only take the dquot lock if at least
			 * one reservation actually needs to be backed out.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
	}
}

/*
 * Send a quota warning to userspace via the VFS quota netlink interface,
 * mapping the XFS dquot type to the generic quota_type.
 */
STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_PROJ:
		qtype = PRJQUOTA;
		break;
	case XFS_DQTYPE_USER:
		qtype = USRQUOTA;
		break;
	case XFS_DQTYPE_GROUP:
		qtype = GRPQUOTA;
		break;
	default:
		return;
	}

	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
			   mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3 since it's userspace ABI now, and will never
 * decrease the quota reservation, so the *BELOW messages are irrelevant.
586 */ 587 static inline int 588 xfs_dqresv_check( 589 struct xfs_dquot_res *res, 590 struct xfs_quota_limits *qlim, 591 int64_t delta, 592 bool *fatal) 593 { 594 xfs_qcnt_t hardlimit = res->hardlimit; 595 xfs_qcnt_t softlimit = res->softlimit; 596 xfs_qcnt_t total_count = res->reserved + delta; 597 598 BUILD_BUG_ON(QUOTA_NL_BHARDWARN != QUOTA_NL_IHARDWARN + 3); 599 BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3); 600 BUILD_BUG_ON(QUOTA_NL_BSOFTWARN != QUOTA_NL_ISOFTWARN + 3); 601 602 *fatal = false; 603 if (delta <= 0) 604 return QUOTA_NL_NOWARN; 605 606 if (!hardlimit) 607 hardlimit = qlim->hard; 608 if (!softlimit) 609 softlimit = qlim->soft; 610 611 if (hardlimit && total_count > hardlimit) { 612 *fatal = true; 613 return QUOTA_NL_IHARDWARN; 614 } 615 616 if (softlimit && total_count > softlimit) { 617 time64_t now = ktime_get_real_seconds(); 618 619 if ((res->timer != 0 && now > res->timer) || 620 (res->warnings != 0 && res->warnings >= qlim->warn)) { 621 *fatal = true; 622 return QUOTA_NL_ISOFTLONGWARN; 623 } 624 625 res->warnings++; 626 return QUOTA_NL_ISOFTWARN; 627 } 628 629 return QUOTA_NL_NOWARN; 630 } 631 632 /* 633 * This reserves disk blocks and inodes against a dquot. 634 * Flags indicate if the dquot is to be locked here and also 635 * if the blk reservation is for RT or regular blocks. 636 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. 
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	/* Select the regular or realtime block counters/limits. */
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	/*
	 * Enforce limits unless the caller forces the reservation, this is
	 * the default-limits dquot (q_id == 0), or enforcement is off.
	 */
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    xfs_dquot_is_enforced(dqp)) {
		int		quota_nl;
		bool		fatal;

		/*
		 * dquot is locked already. See if we'd go over the hardlimit
		 * or exceed the timelimit if we'd reserve resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
	ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
	ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	/* Project quota failures report ENOSPC, not EDQUOT. */
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;
	return -EDQUOT;
}


/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows a all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int		error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

	/*
	 * Unwind in reverse order with FORCE_RES so backing out a
	 * reservation can never itself fail a limit check.
	 */
unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}


/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	/* Exactly one block reservation type, optionally forced. */
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);

	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						ip->i_pdquot,
						nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*startqoff,
	uint			flags)
{
	struct xfs_qoff_logitem	*q;

	ASSERT(tp != NULL);

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(q != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
	return q;
}


/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*qlp)
{
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

/* Allocate the per-transaction dquot tracking structure (never fails). */
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
					 GFP_KERNEL | __GFP_NOFAIL);
}

/* Free the per-transaction dquot tracking structure, if any. */
void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}