// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_health.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/* Upgrade the dquot to bigtime format if possible. */
	if (dqp->q_id != 0 &&
	    xfs_has_bigtime(tp->t_mountp) &&
	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
		dqp->q_type |= XFS_DQTYPE_BIGTIME;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanking new transaction.
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}
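
/*
 * Worked example for the carry-forward above (illustrative numbers, not
 * from the code): if the old transaction reserved qt_blk_res = 10 blocks
 * and actually allocated 4 of them (qt_bcount_delta = 4), the new
 * transaction inherits the 6 unused blocks (nq->qt_blk_res = 10 - 4) and
 * the old transaction keeps only the 4 it used, which its commit then
 * converts into real usage.
 */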

#ifdef CONFIG_XFS_LIVE_HOOKS
/*
 * Use a static key here to reduce the overhead of quota live updates.  If the
 * compiler supports jump labels, the static branch will be replaced by a nop
 * sled when there are no hook users.  Online fsck is currently the only
 * caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_dqtrx_hooks_switch);

void
xfs_dqtrx_hook_disable(void)
{
	xfs_hooks_switch_off(&xfs_dqtrx_hooks_switch);
}

void
xfs_dqtrx_hook_enable(void)
{
	xfs_hooks_switch_on(&xfs_dqtrx_hooks_switch);
}

/* Schedule a transactional dquot update on behalf of an inode. */
void
xfs_trans_mod_ino_dquot(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*dqp,
	unsigned int		field,
	int64_t			delta)
{
	xfs_trans_mod_dquot(tp, dqp, field, delta);

	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_mod_ino_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.ino		= ip->i_ino,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
			.delta		= delta
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_mod_ino_dqtrx_hooks, field, &p);
	}
}

/* Call the specified functions during a dquot counter update. */
int
xfs_dqtrx_hook_add(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	int			error;

	/*
	 * Transactional dquot updates first call the mod hook when changes
	 * are attached to the transaction and then call the apply hook when
	 * those changes are committed (or canceled).
	 *
	 * The apply hook must be installed before the mod hook so that we
	 * never fail to catch the end of a quota update sequence.
	 */
	error = xfs_hooks_add(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
	if (error)
		goto out;

	error = xfs_hooks_add(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	if (error)
		goto out_apply;

	return 0;

out_apply:
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
out:
	return error;
}

/* Stop calling the specified function during a dquot counter update. */
void
xfs_dqtrx_hook_del(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	/*
	 * The mod hook must be removed before the apply hook to avoid giving
	 * the hook consumer an incomplete update.  No hooks should be running
	 * after these functions return.
	 */
	xfs_hooks_del(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
}

/* Configure dquot update hook functions. */
void
xfs_dqtrx_hook_setup(
	struct xfs_dqtrx_hook	*hook,
	notifier_fn_t		mod_fn,
	notifier_fn_t		apply_fn)
{
	xfs_hook_setup(&hook->mod_hook, mod_fn);
	xfs_hook_setup(&hook->apply_hook, apply_fn);
}
#endif /* CONFIG_XFS_LIVE_HOOKS */
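
/*
 * Sketch of a hook consumer (hypothetical; online fsck is the only real
 * user today).  The callback names "my_mod_fn" and "my_apply_fn" are
 * assumptions for illustration:
 *
 *	static struct xfs_dqtrx_hook	my_hook;
 *
 *	xfs_dqtrx_hook_setup(&my_hook, my_mod_fn, my_apply_fn);
 *	error = xfs_dqtrx_hook_add(qi, &my_hook);
 *	...watch dquot updates...
 *	xfs_dqtrx_hook_del(qi, &my_hook);
 *
 * Per the ordering rules above, _add installs the apply hook first and
 * _del removes the mod hook first, so the consumer never observes a mod
 * without the matching apply.
 */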

/*
 * Wrap around mod_dquot to account for user, group and project quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_pdquot, field, delta);
}

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
		break;
	case XFS_DQTYPE_GROUP:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
		break;
	case XFS_DQTYPE_PROJ:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
		break;
	default:
		return NULL;
	}

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent of xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp));
	qtrx = NULL;

	if (!delta)
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	trace_xfs_trans_mod_dquot_before(qtrx);
	trace_xfs_trans_mod_dquot(tp, dqp, field, delta);

	switch (field) {
	/* regular disk blk reservation */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/* inode reservation */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/* disk blocks used. */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/* Inode Count */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/* rtblk reservation */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/* rtblk count */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	trace_xfs_trans_mod_dquot_after(qtrx);
}
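
/*
 * Worked example (illustrative numbers): a transaction that reserves
 * 8 blocks against a dquot and then allocates 5 of them would arrive
 * here as
 *
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 8);
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 5);
 *
 * leaving qt_blk_res = 8 and qt_bcount_delta = 5 in the dqtrx slot.
 * Nothing is applied to the dquot itself until commit time.
 */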

/*
 * Given an array of dqtrx structures, lock all the associated dquots and join
 * them to the transaction, provided they have been modified.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	unsigned int		i;

	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else if (q[2].qt_dquot == NULL) {
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	} else {
		xfs_dqlockn(q);
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			if (q[i].qt_dquot == NULL)
				break;
			xfs_trans_dqjoin(tp, q[i].qt_dquot);
		}
	}
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
	struct xfs_dquot_res	*res,
	uint64_t		reserved,
	int64_t			res_used,
	int64_t			count_delta)
{
	if (reserved != 0) {
		/*
		 * Subtle math here: If reserved > res_used (the normal case),
		 * we're simply subtracting the unused transaction quota
		 * reservation from the dquot reservation.
		 *
		 * If, however, res_used > reserved, then we have allocated
		 * more quota blocks than were reserved for the transaction.
		 * We must add that excess to the dquot reservation since it
		 * tracks (usage + resv) and by definition we didn't reserve
		 * that excess.
		 */
		res->reserved -= abs(reserved - res_used);
	} else if (count_delta != 0) {
		/*
		 * These blks were never reserved, either inside a transaction
		 * or outside one (in a delayed allocation). Also, this isn't
		 * always a negative number since we sometimes deliberately
		 * skip quota reservations.
		 */
		res->reserved += count_delta;
	}
}
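
/*
 * Worked example for the subtle math above (illustrative numbers): with
 * reserved = 10 and res_used = 4, the dquot reservation already counts
 * all 10 blocks but only 4 became real usage, so the 6 unused blocks
 * are released: res->reserved -= abs(10 - 4).  With reserved = 0 and
 * count_delta = 3, the 3 blocks were never reserved at all, so they are
 * added straight into res->reserved to keep it tracking (usage + resv).
 */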

#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to apply dquot deltas. */
static inline void
xfs_trans_apply_dquot_deltas_hook(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_COMMIT, &p);
	}
}
#else
# define xfs_trans_apply_dquot_deltas_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!tp->t_dqinfo)
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			uint64_t	blk_res_used;

			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			xfs_trans_apply_dquot_deltas_hook(tp, dqp);

			/*
			 * adjust the actual number of blocks used
			 */

			/*
			 * The issue here is that sometimes we intentionally
			 * skip making a blkquota reservation, to be fair to
			 * users (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no idea how
			 * much was really reserved.
			 * So, here we've accumulated delayed allocation blks
			 * and non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0) {
				trace_xfs_trans_apply_dquot_deltas_before(dqp);
				trace_xfs_trans_apply_dquot_deltas(qtrx);
			}

#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(dqp->q_blk.count >= -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				dqp->q_blk.count += totalbdelta;

			if (qtrx->qt_icount_delta)
				dqp->q_ino.count += qtrx->qt_icount_delta;

			if (totalrtbdelta)
				dqp->q_rtb.count += totalrtbdelta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0)
				trace_xfs_trans_apply_dquot_deltas_after(dqp);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (dqp->q_id) {
				xfs_qm_adjust_dqlimits(dqp);
				xfs_qm_adjust_dqtimers(dqp);
			}

			dqp->q_flags |= XFS_DQFLAG_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
					qtrx->qt_blk_res, blk_res_used,
					qtrx->qt_bcount_delta);

			/*
			 * Adjust the RT reservation.
			 */
			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
					qtrx->qt_rtblk_res,
					qtrx->qt_rtblk_res_used,
					qtrx->qt_rtbcount_delta);

			/*
			 * Adjust the inode reservation.
			 */
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
					qtrx->qt_ino_res,
					qtrx->qt_ino_res_used,
					qtrx->qt_icount_delta);

			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
		}
	}
}
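
/*
 * Worked example for the delayed/non-delayed split above (illustrative
 * numbers): qt_bcount_delta = 5 transactional blocks plus
 * qt_delbcnt_delta = 3 delalloc blocks yield totalbdelta = 8, all of
 * which lands in q_blk.count; only the 5 transactional blocks count as
 * blk_res_used against qt_blk_res, because the delalloc blocks were
 * reserved outside of any transaction.
 */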

#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to cancel dquot deltas. */
static inline void
xfs_trans_unreserve_and_mod_dquots_hook(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_UNRESERVE, &p);
	}
}
#else
# define xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo)
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp);

			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
	}
}

STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_PROJ:
		qtype = PRJQUOTA;
		break;
	case XFS_DQTYPE_USER:
		qtype = USRQUOTA;
		break;
	case XFS_DQTYPE_GROUP:
		qtype = GRPQUOTA;
		break;
	default:
		return;
	}

	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
			   mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3 since it's userspace ABI now, and will never
 * decrease the quota reservation, so the *BELOW messages are irrelevant.
 */
static inline int
xfs_dqresv_check(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int64_t			delta,
	bool			*fatal)
{
	xfs_qcnt_t		hardlimit = res->hardlimit;
	xfs_qcnt_t		softlimit = res->softlimit;
	xfs_qcnt_t		total_count = res->reserved + delta;

	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN     + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN     + 3);

	*fatal = false;
	if (delta <= 0)
		return QUOTA_NL_NOWARN;

	if (!hardlimit)
		hardlimit = qlim->hard;
	if (!softlimit)
		softlimit = qlim->soft;

	if (hardlimit && total_count > hardlimit) {
		*fatal = true;
		return QUOTA_NL_IHARDWARN;
	}

	if (softlimit && total_count > softlimit) {
		time64_t	now = ktime_get_real_seconds();

		if (res->timer != 0 && now > res->timer) {
			*fatal = true;
			return QUOTA_NL_ISOFTLONGWARN;
		}

		return QUOTA_NL_ISOFTWARN;
	}

	return QUOTA_NL_NOWARN;
}
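
/*
 * Worked example (illustrative numbers): with softlimit = 100,
 * hardlimit = 200 and res->reserved = 95, a delta of 10 brings
 * total_count to 105: over the soft limit but under the hard limit, so
 * the result is QUOTA_NL_ISOFTWARN (non-fatal) unless the grace timer
 * has already expired, in which case it is QUOTA_NL_ISOFTLONGWARN and
 * fatal.  A delta of 110 would exceed the hard limit and always be
 * fatal (QUOTA_NL_IHARDWARN).
 */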

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    xfs_dquot_is_enforced(dqp)) {
		int		quota_nl;
		bool		fatal;

		/*
		 * dquot is locked already. See if we'd go over the hardlimit
		 * or exceed the timelimit if we'd reserve resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
				    nblks);
		xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
	}

	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
	return -EFSCORRUPTED;
}
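
/*
 * Note on the error returns above: a failed project quota reservation
 * maps to -ENOSPC rather than -EDQUOT because project quotas are
 * presented to userspace as directory tree size limits, so "out of
 * space" is the expected failure mode rather than a per-user quota
 * error.
 */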

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int			error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}
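
/*
 * Sketch of the all-or-nothing behavior above (hypothetical scenario):
 * if the user and group reservations succeed but the project dquot is
 * over its limit, the unwind path re-runs xfs_trans_dqresv() with
 * negated counts and XFS_QMOPT_FORCE_RES set, so the earlier
 * reservations are backed out unconditionally and the caller sees a
 * clean failure with no partial state left behind.
 */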

/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			dblocks,
	int64_t			rblocks,
	bool			force)
{
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		qflags = 0;
	int			error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	if (force)
		qflags |= XFS_QMOPT_FORCE_RES;

	/* Reserve data device quota against the inode's dquots. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, dblocks, 0,
			XFS_QMOPT_RES_REGBLKS | qflags);
	if (error)
		return error;

	/* Do the same but for realtime blocks. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, rblocks, 0,
			XFS_QMOPT_RES_RTBLKS | qflags);
	if (error) {
		xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
				XFS_QMOPT_RES_REGBLKS);
		return error;
	}

	return 0;
}

/* Change the quota reservations for an inode creation activity. */
int
xfs_trans_reserve_quota_icreate(
	struct xfs_trans	*tp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			dblocks)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
			dblocks, 1, XFS_QMOPT_RES_REGBLKS);
}

STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_cache_zalloc(xfs_dqtrx_cache,
					 GFP_KERNEL | __GFP_NOFAIL);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_cache_free(xfs_dqtrx_cache, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}