1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2003 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_format.h" 9 #include "xfs_log_format.h" 10 #include "xfs_shared.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_mount.h" 14 #include "xfs_defer.h" 15 #include "xfs_inode.h" 16 #include "xfs_bmap.h" 17 #include "xfs_quota.h" 18 #include "xfs_trans.h" 19 #include "xfs_buf_item.h" 20 #include "xfs_trans_space.h" 21 #include "xfs_trans_priv.h" 22 #include "xfs_qm.h" 23 #include "xfs_trace.h" 24 #include "xfs_log.h" 25 #include "xfs_bmap_btree.h" 26 #include "xfs_error.h" 27 #include "xfs_health.h" 28 29 /* 30 * Lock order: 31 * 32 * ip->i_lock 33 * qi->qi_tree_lock 34 * dquot->q_qlock (xfs_dqlock() and friends) 35 * dquot->q_flush (xfs_dqflock() and friends) 36 * qi->qi_lru_lock 37 * 38 * If two dquots need to be locked the order is user before group/project, 39 * otherwise by the lowest id first, see xfs_dqlock2. 40 */ 41 42 struct kmem_cache *xfs_dqtrx_cache; 43 static struct kmem_cache *xfs_dquot_cache; 44 45 static struct lock_class_key xfs_dquot_group_class; 46 static struct lock_class_key xfs_dquot_project_class; 47 48 /* Record observations of quota corruption with the health tracking system. */ 49 static void 50 xfs_dquot_mark_sick( 51 struct xfs_dquot *dqp) 52 { 53 struct xfs_mount *mp = dqp->q_mount; 54 55 switch (dqp->q_type) { 56 case XFS_DQTYPE_USER: 57 xfs_fs_mark_sick(mp, XFS_SICK_FS_UQUOTA); 58 break; 59 case XFS_DQTYPE_GROUP: 60 xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA); 61 break; 62 case XFS_DQTYPE_PROJ: 63 xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA); 64 break; 65 default: 66 ASSERT(0); 67 break; 68 } 69 } 70 71 /* 72 * Detach the dquot buffer if it's still attached, because we can get called 73 * through dqpurge after a log shutdown. Caller must hold the dqflock or have 74 * otherwise isolated the dquot. 75 */ 76 void 77 xfs_dquot_detach_buf( 78 struct xfs_dquot *dqp) 79 { 80 struct xfs_dq_logitem *qlip = &dqp->q_logitem; 81 struct xfs_buf *bp = NULL; 82 83 spin_lock(&qlip->qli_lock); 84 if (qlip->qli_item.li_buf) { 85 bp = qlip->qli_item.li_buf; 86 qlip->qli_item.li_buf = NULL; 87 } 88 spin_unlock(&qlip->qli_lock); 89 if (bp) { 90 list_del_init(&qlip->qli_item.li_bio_list); 91 xfs_buf_rele(bp); 92 } 93 } 94 95 /* 96 * This is called to free all the memory associated with a dquot 97 */ 98 void 99 xfs_qm_dqdestroy( 100 struct xfs_dquot *dqp) 101 { 102 ASSERT(list_empty(&dqp->q_lru)); 103 ASSERT(dqp->q_logitem.qli_item.li_buf == NULL); 104 105 kvfree(dqp->q_logitem.qli_item.li_lv_shadow); 106 mutex_destroy(&dqp->q_qlock); 107 108 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot); 109 kmem_cache_free(xfs_dquot_cache, dqp); 110 } 111 112 /* 113 * If default limits are in force, push them into the dquot now. 114 * We overwrite the dquot limits only if they are zero and this 115 * is not the root dquot. 116 */ 117 void 118 xfs_qm_adjust_dqlimits( 119 struct xfs_dquot *dq) 120 { 121 struct xfs_mount *mp = dq->q_mount; 122 struct xfs_quotainfo *q = mp->m_quotainfo; 123 struct xfs_def_quota *defq; 124 int prealloc = 0; 125 126 ASSERT(dq->q_id); 127 defq = xfs_get_defquota(q, xfs_dquot_type(dq)); 128 129 if (!dq->q_blk.softlimit) { 130 dq->q_blk.softlimit = defq->blk.soft; 131 prealloc = 1; 132 } 133 if (!dq->q_blk.hardlimit) { 134 dq->q_blk.hardlimit = defq->blk.hard; 135 prealloc = 1; 136 } 137 if (!dq->q_ino.softlimit) 138 dq->q_ino.softlimit = defq->ino.soft; 139 if (!dq->q_ino.hardlimit) 140 dq->q_ino.hardlimit = defq->ino.hard; 141 if (!dq->q_rtb.softlimit) 142 dq->q_rtb.softlimit = defq->rtb.soft; 143 if (!dq->q_rtb.hardlimit) 144 dq->q_rtb.hardlimit = defq->rtb.hard; 145 146 if (prealloc) 147 xfs_dquot_set_prealloc_limits(dq); 148 } 149 150 /* Set the expiration time of a quota's grace period. */ 151 time64_t 152 xfs_dquot_set_timeout( 153 struct xfs_mount *mp, 154 time64_t timeout) 155 { 156 struct xfs_quotainfo *qi = mp->m_quotainfo; 157 158 return clamp_t(time64_t, timeout, qi->qi_expiry_min, 159 qi->qi_expiry_max); 160 } 161 162 /* Set the length of the default grace period. */ 163 time64_t 164 xfs_dquot_set_grace_period( 165 time64_t grace) 166 { 167 return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX); 168 } 169 170 /* 171 * Determine if this quota counter is over either limit and set the quota 172 * timers as appropriate. 173 */ 174 static inline void 175 xfs_qm_adjust_res_timer( 176 struct xfs_mount *mp, 177 struct xfs_dquot_res *res, 178 struct xfs_quota_limits *qlim) 179 { 180 ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit); 181 182 if ((res->softlimit && res->count > res->softlimit) || 183 (res->hardlimit && res->count > res->hardlimit)) { 184 if (res->timer == 0) 185 res->timer = xfs_dquot_set_timeout(mp, 186 ktime_get_real_seconds() + qlim->time); 187 } else { 188 res->timer = 0; 189 } 190 } 191 192 /* 193 * Check the limits and timers of a dquot and start or reset timers 194 * if necessary. 195 * This gets called even when quota enforcement is OFF, which makes our 196 * life a little less complicated. (We just don't reject any quota 197 * reservations in that case, when enforcement is off). 198 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when 199 * enforcement's off. 200 * In contrast, warnings are a little different in that they don't 201 * 'automatically' get started when limits get exceeded. They do 202 * get reset to zero, however, when we find the count to be under 203 * the soft limit (they are only ever set non-zero via userspace). 204 */ 205 void 206 xfs_qm_adjust_dqtimers( 207 struct xfs_dquot *dq) 208 { 209 struct xfs_mount *mp = dq->q_mount; 210 struct xfs_quotainfo *qi = mp->m_quotainfo; 211 struct xfs_def_quota *defq; 212 213 ASSERT(dq->q_id); 214 defq = xfs_get_defquota(qi, xfs_dquot_type(dq)); 215 216 xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk); 217 xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino); 218 xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb); 219 } 220 221 /* 222 * initialize a buffer full of dquots and log the whole thing 223 */ 224 void 225 xfs_qm_init_dquot_blk( 226 struct xfs_trans *tp, 227 xfs_dqid_t id, 228 xfs_dqtype_t type, 229 struct xfs_buf *bp) 230 { 231 struct xfs_mount *mp = tp->t_mountp; 232 struct xfs_quotainfo *q = mp->m_quotainfo; 233 struct xfs_dqblk *d; 234 xfs_dqid_t curid; 235 unsigned int qflag; 236 unsigned int blftype; 237 int i; 238 239 ASSERT(tp); 240 ASSERT(xfs_buf_islocked(bp)); 241 242 switch (type) { 243 case XFS_DQTYPE_USER: 244 qflag = XFS_UQUOTA_CHKD; 245 blftype = XFS_BLF_UDQUOT_BUF; 246 break; 247 case XFS_DQTYPE_PROJ: 248 qflag = XFS_PQUOTA_CHKD; 249 blftype = XFS_BLF_PDQUOT_BUF; 250 break; 251 case XFS_DQTYPE_GROUP: 252 qflag = XFS_GQUOTA_CHKD; 253 blftype = XFS_BLF_GDQUOT_BUF; 254 break; 255 default: 256 ASSERT(0); 257 return; 258 } 259 260 d = bp->b_addr; 261 262 /* 263 * ID of the first dquot in the block - id's are zero based. 264 */ 265 curid = id - (id % q->qi_dqperchunk); 266 memset(d, 0, BBTOB(q->qi_dqchunklen)); 267 for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) { 268 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); 269 d->dd_diskdq.d_version = XFS_DQUOT_VERSION; 270 d->dd_diskdq.d_id = cpu_to_be32(curid); 271 d->dd_diskdq.d_type = type; 272 if (curid > 0 && xfs_has_bigtime(mp)) 273 d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME; 274 if (xfs_has_crc(mp)) { 275 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid); 276 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk), 277 XFS_DQUOT_CRC_OFF); 278 } 279 } 280 281 xfs_trans_dquot_buf(tp, bp, blftype); 282 283 /* 284 * quotacheck uses delayed writes to update all the dquots on disk in an 285 * efficient manner instead of logging the individual dquot changes as 286 * they are made. However if we log the buffer allocated here and crash 287 * after quotacheck while the logged initialisation is still in the 288 * active region of the log, log recovery can replay the dquot buffer 289 * initialisation over the top of the checked dquots and corrupt quota 290 * accounting. 291 * 292 * To avoid this problem, quotacheck cannot log the initialised buffer. 293 * We must still dirty the buffer and write it back before the 294 * allocation transaction clears the log. Therefore, mark the buffer as 295 * ordered instead of logging it directly. This is safe for quotacheck 296 * because it detects and repairs allocated but initialized dquot blocks 297 * in the quota inodes. 298 */ 299 if (!(mp->m_qflags & qflag)) 300 xfs_trans_ordered_buf(tp, bp); 301 else 302 xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); 303 } 304 305 static void 306 xfs_dquot_set_prealloc( 307 struct xfs_dquot_pre *pre, 308 const struct xfs_dquot_res *res) 309 { 310 xfs_qcnt_t space; 311 312 pre->q_prealloc_hi_wmark = res->hardlimit; 313 pre->q_prealloc_lo_wmark = res->softlimit; 314 315 space = div_u64(pre->q_prealloc_hi_wmark, 100); 316 if (!pre->q_prealloc_lo_wmark) 317 pre->q_prealloc_lo_wmark = space * 95; 318 319 pre->q_low_space[XFS_QLOWSP_1_PCNT] = space; 320 pre->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3; 321 pre->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5; 322 } 323 324 /* 325 * Initialize the dynamic speculative preallocation thresholds. The lo/hi 326 * watermarks correspond to the soft and hard limits by default. If a soft limit 327 * is not specified, we use 95% of the hard limit. 328 */ 329 void 330 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp) 331 { 332 xfs_dquot_set_prealloc(&dqp->q_blk_prealloc, &dqp->q_blk); 333 xfs_dquot_set_prealloc(&dqp->q_rtb_prealloc, &dqp->q_rtb); 334 } 335 336 /* 337 * Ensure that the given in-core dquot has a buffer on disk backing it, and 338 * return the buffer locked and held. This is called when the bmapi finds a 339 * hole. 340 */ 341 STATIC int 342 xfs_dquot_disk_alloc( 343 struct xfs_dquot *dqp, 344 struct xfs_buf **bpp) 345 { 346 struct xfs_bmbt_irec map; 347 struct xfs_trans *tp; 348 struct xfs_mount *mp = dqp->q_mount; 349 struct xfs_buf *bp; 350 xfs_dqtype_t qtype = xfs_dquot_type(dqp); 351 struct xfs_inode *quotip = xfs_quota_inode(mp, qtype); 352 int nmaps = 1; 353 int error; 354 355 trace_xfs_dqalloc(dqp); 356 357 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc, 358 XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp); 359 if (error) 360 return error; 361 362 xfs_ilock(quotip, XFS_ILOCK_EXCL); 363 xfs_trans_ijoin(tp, quotip, 0); 364 365 if (!xfs_this_quota_on(dqp->q_mount, qtype)) { 366 /* 367 * Return if this type of quotas is turned off while we didn't 368 * have an inode lock 369 */ 370 error = -ESRCH; 371 goto err_cancel; 372 } 373 374 error = xfs_iext_count_extend(tp, quotip, XFS_DATA_FORK, 375 XFS_IEXT_ADD_NOSPLIT_CNT); 376 if (error) 377 goto err_cancel; 378 379 /* Create the block mapping. */ 380 error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset, 381 XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map, 382 &nmaps); 383 if (error) 384 goto err_cancel; 385 386 ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); 387 ASSERT((map.br_startblock != DELAYSTARTBLOCK) && 388 (map.br_startblock != HOLESTARTBLOCK)); 389 390 /* 391 * Keep track of the blkno to save a lookup later 392 */ 393 dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); 394 395 /* now we can just get the buffer (there's nothing to read yet) */ 396 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno, 397 mp->m_quotainfo->qi_dqchunklen, 0, &bp); 398 if (error) 399 goto err_cancel; 400 bp->b_ops = &xfs_dquot_buf_ops; 401 402 /* 403 * Make a chunk of dquots out of this buffer and log 404 * the entire thing. 405 */ 406 xfs_qm_init_dquot_blk(tp, dqp->q_id, qtype, bp); 407 xfs_buf_set_ref(bp, XFS_DQUOT_REF); 408 409 /* 410 * Hold the buffer and join it to the dfops so that we'll still own 411 * the buffer when we return to the caller. The buffer disposal on 412 * error must be paid attention to very carefully, as it has been 413 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota 414 * code when allocating a new dquot record" in 2005, and the later 415 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep 416 * the buffer locked across the _defer_finish call. We can now do 417 * this correctly with xfs_defer_bjoin. 418 * 419 * Above, we allocated a disk block for the dquot information and used 420 * get_buf to initialize the dquot. If the _defer_finish fails, the old 421 * transaction is gone but the new buffer is not joined or held to any 422 * transaction, so we must _buf_relse it. 423 * 424 * If everything succeeds, the caller of this function is returned a 425 * buffer that is locked and held to the transaction. The caller 426 * is responsible for unlocking any buffer passed back, either 427 * manually or by committing the transaction. On error, the buffer is 428 * released and not passed back. 429 * 430 * Keep the quota inode ILOCKed until after the transaction commit to 431 * maintain the atomicity of bmap/rmap updates. 432 */ 433 xfs_trans_bhold(tp, bp); 434 error = xfs_trans_commit(tp); 435 xfs_iunlock(quotip, XFS_ILOCK_EXCL); 436 if (error) { 437 xfs_buf_relse(bp); 438 return error; 439 } 440 441 *bpp = bp; 442 return 0; 443 444 err_cancel: 445 xfs_trans_cancel(tp); 446 xfs_iunlock(quotip, XFS_ILOCK_EXCL); 447 return error; 448 } 449 450 /* 451 * Read in the in-core dquot's on-disk metadata and return the buffer. 452 * Returns ENOENT to signal a hole. 453 */ 454 STATIC int 455 xfs_dquot_disk_read( 456 struct xfs_mount *mp, 457 struct xfs_dquot *dqp, 458 struct xfs_buf **bpp) 459 { 460 struct xfs_bmbt_irec map; 461 struct xfs_buf *bp; 462 xfs_dqtype_t qtype = xfs_dquot_type(dqp); 463 struct xfs_inode *quotip = xfs_quota_inode(mp, qtype); 464 uint lock_mode; 465 int nmaps = 1; 466 int error; 467 468 lock_mode = xfs_ilock_data_map_shared(quotip); 469 if (!xfs_this_quota_on(mp, qtype)) { 470 /* 471 * Return if this type of quotas is turned off while we 472 * didn't have the quota inode lock. 473 */ 474 xfs_iunlock(quotip, lock_mode); 475 return -ESRCH; 476 } 477 478 /* 479 * Find the block map; no allocations yet 480 */ 481 error = xfs_bmapi_read(quotip, dqp->q_fileoffset, 482 XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); 483 xfs_iunlock(quotip, lock_mode); 484 if (error) 485 return error; 486 487 ASSERT(nmaps == 1); 488 ASSERT(map.br_blockcount >= 1); 489 ASSERT(map.br_startblock != DELAYSTARTBLOCK); 490 if (map.br_startblock == HOLESTARTBLOCK) 491 return -ENOENT; 492 493 trace_xfs_dqtobp_read(dqp); 494 495 /* 496 * store the blkno etc so that we don't have to do the 497 * mapping all the time 498 */ 499 dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); 500 501 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, 502 mp->m_quotainfo->qi_dqchunklen, 0, &bp, 503 &xfs_dquot_buf_ops); 504 if (xfs_metadata_is_sick(error)) 505 xfs_dquot_mark_sick(dqp); 506 if (error) { 507 ASSERT(bp == NULL); 508 return error; 509 } 510 511 ASSERT(xfs_buf_islocked(bp)); 512 xfs_buf_set_ref(bp, XFS_DQUOT_REF); 513 *bpp = bp; 514 515 return 0; 516 } 517 518 /* Allocate and initialize everything we need for an incore dquot. */ 519 STATIC struct xfs_dquot * 520 xfs_dquot_alloc( 521 struct xfs_mount *mp, 522 xfs_dqid_t id, 523 xfs_dqtype_t type) 524 { 525 struct xfs_dquot *dqp; 526 527 dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL); 528 529 dqp->q_type = type; 530 dqp->q_id = id; 531 dqp->q_mount = mp; 532 INIT_LIST_HEAD(&dqp->q_lru); 533 mutex_init(&dqp->q_qlock); 534 init_waitqueue_head(&dqp->q_pinwait); 535 dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; 536 /* 537 * Offset of dquot in the (fixed sized) dquot chunk. 538 */ 539 dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * 540 sizeof(struct xfs_dqblk); 541 542 /* 543 * Because we want to use a counting completion, complete 544 * the flush completion once to allow a single access to 545 * the flush completion without blocking. 546 */ 547 init_completion(&dqp->q_flush); 548 complete(&dqp->q_flush); 549 550 /* 551 * Make sure group quotas have a different lock class than user 552 * quotas. 553 */ 554 switch (type) { 555 case XFS_DQTYPE_USER: 556 /* uses the default lock class */ 557 break; 558 case XFS_DQTYPE_GROUP: 559 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class); 560 break; 561 case XFS_DQTYPE_PROJ: 562 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class); 563 break; 564 default: 565 ASSERT(0); 566 break; 567 } 568 569 xfs_qm_dquot_logitem_init(dqp); 570 571 XFS_STATS_INC(mp, xs_qm_dquot); 572 return dqp; 573 } 574 575 /* Check the ondisk dquot's id and type match what the incore dquot expects. */ 576 static bool 577 xfs_dquot_check_type( 578 struct xfs_dquot *dqp, 579 struct xfs_disk_dquot *ddqp) 580 { 581 uint8_t ddqp_type; 582 uint8_t dqp_type; 583 584 ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK; 585 dqp_type = xfs_dquot_type(dqp); 586 587 if (be32_to_cpu(ddqp->d_id) != dqp->q_id) 588 return false; 589 590 /* 591 * V5 filesystems always expect an exact type match. V4 filesystems 592 * expect an exact match for user dquots and for non-root group and 593 * project dquots. 594 */ 595 if (xfs_has_crc(dqp->q_mount) || 596 dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0) 597 return ddqp_type == dqp_type; 598 599 /* 600 * V4 filesystems support either group or project quotas, but not both 601 * at the same time. The non-user quota file can be switched between 602 * group and project quota uses depending on the mount options, which 603 * means that we can encounter the other type when we try to load quota 604 * defaults. Quotacheck will soon reset the entire quota file 605 * (including the root dquot) anyway, but don't log scary corruption 606 * reports to dmesg. 607 */ 608 return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ; 609 } 610 611 /* Copy the in-core quota fields in from the on-disk buffer. */ 612 STATIC int 613 xfs_dquot_from_disk( 614 struct xfs_dquot *dqp, 615 struct xfs_buf *bp) 616 { 617 struct xfs_dqblk *dqb = xfs_buf_offset(bp, dqp->q_bufoffset); 618 struct xfs_disk_dquot *ddqp = &dqb->dd_diskdq; 619 620 /* 621 * Ensure that we got the type and ID we were looking for. 622 * Everything else was checked by the dquot buffer verifier. 623 */ 624 if (!xfs_dquot_check_type(dqp, ddqp)) { 625 xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR, 626 "Metadata corruption detected at %pS, quota %u", 627 __this_address, dqp->q_id); 628 xfs_alert(bp->b_mount, "Unmount and run xfs_repair"); 629 xfs_dquot_mark_sick(dqp); 630 return -EFSCORRUPTED; 631 } 632 633 /* copy everything from disk dquot to the incore dquot */ 634 dqp->q_type = ddqp->d_type; 635 dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); 636 dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit); 637 dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); 638 dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit); 639 dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); 640 dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit); 641 642 dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount); 643 dqp->q_ino.count = be64_to_cpu(ddqp->d_icount); 644 dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount); 645 646 dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer); 647 dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer); 648 dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer); 649 650 /* 651 * Reservation counters are defined as reservation plus current usage 652 * to avoid having to add every time. 653 */ 654 dqp->q_blk.reserved = dqp->q_blk.count; 655 dqp->q_ino.reserved = dqp->q_ino.count; 656 dqp->q_rtb.reserved = dqp->q_rtb.count; 657 658 /* initialize the dquot speculative prealloc thresholds */ 659 xfs_dquot_set_prealloc_limits(dqp); 660 return 0; 661 } 662 663 /* Copy the in-core quota fields into the on-disk buffer. */ 664 void 665 xfs_dquot_to_disk( 666 struct xfs_disk_dquot *ddqp, 667 struct xfs_dquot *dqp) 668 { 669 ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); 670 ddqp->d_version = XFS_DQUOT_VERSION; 671 ddqp->d_type = dqp->q_type; 672 ddqp->d_id = cpu_to_be32(dqp->q_id); 673 ddqp->d_pad0 = 0; 674 ddqp->d_pad = 0; 675 676 ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit); 677 ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit); 678 ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit); 679 ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit); 680 ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit); 681 ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit); 682 683 ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count); 684 ddqp->d_icount = cpu_to_be64(dqp->q_ino.count); 685 ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count); 686 687 ddqp->d_bwarns = 0; 688 ddqp->d_iwarns = 0; 689 ddqp->d_rtbwarns = 0; 690 691 ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer); 692 ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer); 693 ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer); 694 } 695 696 /* 697 * Read in the ondisk dquot using dqtobp() then copy it to an incore version, 698 * and release the buffer immediately. If @can_alloc is true, fill any 699 * holes in the on-disk metadata. 700 */ 701 static int 702 xfs_qm_dqread( 703 struct xfs_mount *mp, 704 xfs_dqid_t id, 705 xfs_dqtype_t type, 706 bool can_alloc, 707 struct xfs_dquot **dqpp) 708 { 709 struct xfs_dquot *dqp; 710 struct xfs_buf *bp; 711 int error; 712 713 dqp = xfs_dquot_alloc(mp, id, type); 714 trace_xfs_dqread(dqp); 715 716 /* Try to read the buffer, allocating if necessary. */ 717 error = xfs_dquot_disk_read(mp, dqp, &bp); 718 if (error == -ENOENT && can_alloc) 719 error = xfs_dquot_disk_alloc(dqp, &bp); 720 if (error) 721 goto err; 722 723 /* 724 * At this point we should have a clean locked buffer. Copy the data 725 * to the incore dquot and release the buffer since the incore dquot 726 * has its own locking protocol so we needn't tie up the buffer any 727 * further. 728 */ 729 ASSERT(xfs_buf_islocked(bp)); 730 error = xfs_dquot_from_disk(dqp, bp); 731 xfs_buf_relse(bp); 732 if (error) 733 goto err; 734 735 *dqpp = dqp; 736 return error; 737 738 err: 739 trace_xfs_dqread_fail(dqp); 740 xfs_qm_dqdestroy(dqp); 741 *dqpp = NULL; 742 return error; 743 } 744 745 /* 746 * Advance to the next id in the current chunk, or if at the 747 * end of the chunk, skip ahead to first id in next allocated chunk 748 * using the SEEK_DATA interface. 749 */ 750 static int 751 xfs_dq_get_next_id( 752 struct xfs_mount *mp, 753 xfs_dqtype_t type, 754 xfs_dqid_t *id) 755 { 756 struct xfs_inode *quotip = xfs_quota_inode(mp, type); 757 xfs_dqid_t next_id = *id + 1; /* simple advance */ 758 uint lock_flags; 759 struct xfs_bmbt_irec got; 760 struct xfs_iext_cursor cur; 761 xfs_fsblock_t start; 762 int error = 0; 763 764 /* If we'd wrap past the max ID, stop */ 765 if (next_id < *id) 766 return -ENOENT; 767 768 /* If new ID is within the current chunk, advancing it sufficed */ 769 if (next_id % mp->m_quotainfo->qi_dqperchunk) { 770 *id = next_id; 771 return 0; 772 } 773 774 /* Nope, next_id is now past the current chunk, so find the next one */ 775 start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk; 776 777 lock_flags = xfs_ilock_data_map_shared(quotip); 778 error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK); 779 if (error) 780 return error; 781 782 if (xfs_iext_lookup_extent(quotip, "ip->i_df, start, &cur, &got)) { 783 /* contiguous chunk, bump startoff for the id calculation */ 784 if (got.br_startoff < start) 785 got.br_startoff = start; 786 *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk; 787 } else { 788 error = -ENOENT; 789 } 790 791 xfs_iunlock(quotip, lock_flags); 792 793 return error; 794 } 795 796 /* 797 * Look up the dquot in the in-core cache. If found, the dquot is returned 798 * locked and ready to go. 799 */ 800 static struct xfs_dquot * 801 xfs_qm_dqget_cache_lookup( 802 struct xfs_mount *mp, 803 struct xfs_quotainfo *qi, 804 struct radix_tree_root *tree, 805 xfs_dqid_t id) 806 { 807 struct xfs_dquot *dqp; 808 809 restart: 810 mutex_lock(&qi->qi_tree_lock); 811 dqp = radix_tree_lookup(tree, id); 812 if (!dqp) { 813 mutex_unlock(&qi->qi_tree_lock); 814 XFS_STATS_INC(mp, xs_qm_dqcachemisses); 815 return NULL; 816 } 817 818 xfs_dqlock(dqp); 819 if (dqp->q_flags & XFS_DQFLAG_FREEING) { 820 xfs_dqunlock(dqp); 821 mutex_unlock(&qi->qi_tree_lock); 822 trace_xfs_dqget_freeing(dqp); 823 delay(1); 824 goto restart; 825 } 826 827 dqp->q_nrefs++; 828 mutex_unlock(&qi->qi_tree_lock); 829 830 trace_xfs_dqget_hit(dqp); 831 XFS_STATS_INC(mp, xs_qm_dqcachehits); 832 return dqp; 833 } 834 835 /* 836 * Try to insert a new dquot into the in-core cache. If an error occurs the 837 * caller should throw away the dquot and start over. Otherwise, the dquot 838 * is returned locked (and held by the cache) as if there had been a cache 839 * hit. 840 * 841 * The insert needs to be done under memalloc_nofs context because the radix 842 * tree can do memory allocation during insert. The qi->qi_tree_lock is taken in 843 * memory reclaim when freeing unused dquots, so we cannot have the radix tree 844 * node allocation recursing into filesystem reclaim whilst we hold the 845 * qi_tree_lock. 846 */ 847 static int 848 xfs_qm_dqget_cache_insert( 849 struct xfs_mount *mp, 850 struct xfs_quotainfo *qi, 851 struct radix_tree_root *tree, 852 xfs_dqid_t id, 853 struct xfs_dquot *dqp) 854 { 855 unsigned int nofs_flags; 856 int error; 857 858 nofs_flags = memalloc_nofs_save(); 859 mutex_lock(&qi->qi_tree_lock); 860 error = radix_tree_insert(tree, id, dqp); 861 if (unlikely(error)) { 862 /* Duplicate found! Caller must try again. */ 863 trace_xfs_dqget_dup(dqp); 864 goto out_unlock; 865 } 866 867 /* Return a locked dquot to the caller, with a reference taken. */ 868 xfs_dqlock(dqp); 869 dqp->q_nrefs = 1; 870 qi->qi_dquots++; 871 872 out_unlock: 873 mutex_unlock(&qi->qi_tree_lock); 874 memalloc_nofs_restore(nofs_flags); 875 return error; 876 } 877 878 /* Check our input parameters. */ 879 static int 880 xfs_qm_dqget_checks( 881 struct xfs_mount *mp, 882 xfs_dqtype_t type) 883 { 884 switch (type) { 885 case XFS_DQTYPE_USER: 886 if (!XFS_IS_UQUOTA_ON(mp)) 887 return -ESRCH; 888 return 0; 889 case XFS_DQTYPE_GROUP: 890 if (!XFS_IS_GQUOTA_ON(mp)) 891 return -ESRCH; 892 return 0; 893 case XFS_DQTYPE_PROJ: 894 if (!XFS_IS_PQUOTA_ON(mp)) 895 return -ESRCH; 896 return 0; 897 default: 898 WARN_ON_ONCE(0); 899 return -EINVAL; 900 } 901 } 902 903 /* 904 * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a 905 * locked dquot, doing an allocation (if requested) as needed. 906 */ 907 int 908 xfs_qm_dqget( 909 struct xfs_mount *mp, 910 xfs_dqid_t id, 911 xfs_dqtype_t type, 912 bool can_alloc, 913 struct xfs_dquot **O_dqpp) 914 { 915 struct xfs_quotainfo *qi = mp->m_quotainfo; 916 struct radix_tree_root *tree = xfs_dquot_tree(qi, type); 917 struct xfs_dquot *dqp; 918 int error; 919 920 error = xfs_qm_dqget_checks(mp, type); 921 if (error) 922 return error; 923 924 restart: 925 dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id); 926 if (dqp) { 927 *O_dqpp = dqp; 928 return 0; 929 } 930 931 error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp); 932 if (error) 933 return error; 934 935 error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp); 936 if (error) { 937 /* 938 * Duplicate found. Just throw away the new dquot and start 939 * over. 940 */ 941 xfs_qm_dqdestroy(dqp); 942 XFS_STATS_INC(mp, xs_qm_dquot_dups); 943 goto restart; 944 } 945 946 trace_xfs_dqget_miss(dqp); 947 *O_dqpp = dqp; 948 return 0; 949 } 950 951 /* 952 * Given a dquot id and type, read and initialize a dquot from the on-disk 953 * metadata. This function is only for use during quota initialization so 954 * it ignores the dquot cache assuming that the dquot shrinker isn't set up. 955 * The caller is responsible for _qm_dqdestroy'ing the returned dquot. 956 */ 957 int 958 xfs_qm_dqget_uncached( 959 struct xfs_mount *mp, 960 xfs_dqid_t id, 961 xfs_dqtype_t type, 962 struct xfs_dquot **dqpp) 963 { 964 int error; 965 966 error = xfs_qm_dqget_checks(mp, type); 967 if (error) 968 return error; 969 970 return xfs_qm_dqread(mp, id, type, 0, dqpp); 971 } 972 973 /* Return the quota id for a given inode and type. */ 974 xfs_dqid_t 975 xfs_qm_id_for_quotatype( 976 struct xfs_inode *ip, 977 xfs_dqtype_t type) 978 { 979 switch (type) { 980 case XFS_DQTYPE_USER: 981 return i_uid_read(VFS_I(ip)); 982 case XFS_DQTYPE_GROUP: 983 return i_gid_read(VFS_I(ip)); 984 case XFS_DQTYPE_PROJ: 985 return ip->i_projid; 986 } 987 ASSERT(0); 988 return 0; 989 } 990 991 /* 992 * Return the dquot for a given inode and type. If @can_alloc is true, then 993 * allocate blocks if needed. The inode's ILOCK must be held and it must not 994 * have already had an inode attached. 995 */ 996 int 997 xfs_qm_dqget_inode( 998 struct xfs_inode *ip, 999 xfs_dqtype_t type, 1000 bool can_alloc, 1001 struct xfs_dquot **O_dqpp) 1002 { 1003 struct xfs_mount *mp = ip->i_mount; 1004 struct xfs_quotainfo *qi = mp->m_quotainfo; 1005 struct radix_tree_root *tree = xfs_dquot_tree(qi, type); 1006 struct xfs_dquot *dqp; 1007 xfs_dqid_t id; 1008 int error; 1009 1010 error = xfs_qm_dqget_checks(mp, type); 1011 if (error) 1012 return error; 1013 1014 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); 1015 ASSERT(xfs_inode_dquot(ip, type) == NULL); 1016 ASSERT(!xfs_is_metadir_inode(ip)); 1017 1018 id = xfs_qm_id_for_quotatype(ip, type); 1019 1020 restart: 1021 dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id); 1022 if (dqp) { 1023 *O_dqpp = dqp; 1024 return 0; 1025 } 1026 1027 /* 1028 * Dquot cache miss. We don't want to keep the inode lock across 1029 * a (potential) disk read. Also we don't want to deal with the lock 1030 * ordering between quotainode and this inode. OTOH, dropping the inode 1031 * lock here means dealing with a chown that can happen before 1032 * we re-acquire the lock. 1033 */ 1034 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1035 error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp); 1036 xfs_ilock(ip, XFS_ILOCK_EXCL); 1037 if (error) 1038 return error; 1039 1040 /* 1041 * A dquot could be attached to this inode by now, since we had 1042 * dropped the ilock. 1043 */ 1044 if (xfs_this_quota_on(mp, type)) { 1045 struct xfs_dquot *dqp1; 1046 1047 dqp1 = xfs_inode_dquot(ip, type); 1048 if (dqp1) { 1049 xfs_qm_dqdestroy(dqp); 1050 dqp = dqp1; 1051 xfs_dqlock(dqp); 1052 goto dqret; 1053 } 1054 } else { 1055 /* inode stays locked on return */ 1056 xfs_qm_dqdestroy(dqp); 1057 return -ESRCH; 1058 } 1059 1060 error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp); 1061 if (error) { 1062 /* 1063 * Duplicate found. Just throw away the new dquot and start 1064 * over. 1065 */ 1066 xfs_qm_dqdestroy(dqp); 1067 XFS_STATS_INC(mp, xs_qm_dquot_dups); 1068 goto restart; 1069 } 1070 1071 dqret: 1072 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); 1073 trace_xfs_dqget_miss(dqp); 1074 *O_dqpp = dqp; 1075 return 0; 1076 } 1077 1078 /* 1079 * Starting at @id and progressing upwards, look for an initialized incore 1080 * dquot, lock it, and return it. 1081 */ 1082 int 1083 xfs_qm_dqget_next( 1084 struct xfs_mount *mp, 1085 xfs_dqid_t id, 1086 xfs_dqtype_t type, 1087 struct xfs_dquot **dqpp) 1088 { 1089 struct xfs_dquot *dqp; 1090 int error = 0; 1091 1092 *dqpp = NULL; 1093 for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) { 1094 error = xfs_qm_dqget(mp, id, type, false, &dqp); 1095 if (error == -ENOENT) 1096 continue; 1097 else if (error != 0) 1098 break; 1099 1100 if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) { 1101 *dqpp = dqp; 1102 return 0; 1103 } 1104 1105 xfs_qm_dqput(dqp); 1106 } 1107 1108 return error; 1109 } 1110 1111 /* 1112 * Release a reference to the dquot (decrement ref-count) and unlock it. 1113 * 1114 * If there is a group quota attached to this dquot, carefully release that 1115 * too without tripping over deadlocks'n'stuff. 1116 */ 1117 void 1118 xfs_qm_dqput( 1119 struct xfs_dquot *dqp) 1120 { 1121 ASSERT(dqp->q_nrefs > 0); 1122 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1123 1124 trace_xfs_dqput(dqp); 1125 1126 if (--dqp->q_nrefs == 0) { 1127 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo; 1128 trace_xfs_dqput_free(dqp); 1129 1130 if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru)) 1131 XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused); 1132 } 1133 xfs_dqunlock(dqp); 1134 } 1135 1136 /* 1137 * Release a dquot. Flush it if dirty, then dqput() it. 1138 * dquot must not be locked. 1139 */ 1140 void 1141 xfs_qm_dqrele( 1142 struct xfs_dquot *dqp) 1143 { 1144 if (!dqp) 1145 return; 1146 1147 trace_xfs_dqrele(dqp); 1148 1149 xfs_dqlock(dqp); 1150 /* 1151 * We don't care to flush it if the dquot is dirty here. 1152 * That will create stutters that we want to avoid. 1153 * Instead we do a delayed write when we try to reclaim 1154 * a dirty dquot. Also xfs_sync will take part of the burden... 1155 */ 1156 xfs_qm_dqput(dqp); 1157 } 1158 1159 /* 1160 * This is the dquot flushing I/O completion routine. It is called 1161 * from interrupt level when the buffer containing the dquot is 1162 * flushed to disk. It is responsible for removing the dquot logitem 1163 * from the AIL if it has not been re-logged, and unlocking the dquot's 1164 * flush lock. This behavior is very similar to that of inodes.. 1165 */ 1166 static void 1167 xfs_qm_dqflush_done( 1168 struct xfs_log_item *lip) 1169 { 1170 struct xfs_dq_logitem *qlip = 1171 container_of(lip, struct xfs_dq_logitem, qli_item); 1172 struct xfs_dquot *dqp = qlip->qli_dquot; 1173 struct xfs_ail *ailp = lip->li_ailp; 1174 struct xfs_buf *bp = NULL; 1175 xfs_lsn_t tail_lsn; 1176 1177 /* 1178 * We only want to pull the item from the AIL if its 1179 * location in the log has not changed since we started the flush. 1180 * Thus, we only bother if the dquot's lsn has 1181 * not changed. First we check the lsn outside the lock 1182 * since it's cheaper, and then we recheck while 1183 * holding the lock before removing the dquot from the AIL. 1184 */ 1185 if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) && 1186 (lip->li_lsn == qlip->qli_flush_lsn || 1187 test_bit(XFS_LI_FAILED, &lip->li_flags))) { 1188 1189 spin_lock(&ailp->ail_lock); 1190 xfs_clear_li_failed(lip); 1191 if (lip->li_lsn == qlip->qli_flush_lsn) { 1192 /* xfs_ail_update_finish() drops the AIL lock */ 1193 tail_lsn = xfs_ail_delete_one(ailp, lip); 1194 xfs_ail_update_finish(ailp, tail_lsn); 1195 } else { 1196 spin_unlock(&ailp->ail_lock); 1197 } 1198 } 1199 1200 /* 1201 * If this dquot hasn't been dirtied since initiating the last dqflush, 1202 * release the buffer reference. We already unlinked this dquot item 1203 * from the buffer. 1204 */ 1205 spin_lock(&qlip->qli_lock); 1206 if (!qlip->qli_dirty) { 1207 bp = lip->li_buf; 1208 lip->li_buf = NULL; 1209 } 1210 spin_unlock(&qlip->qli_lock); 1211 if (bp) 1212 xfs_buf_rele(bp); 1213 1214 /* 1215 * Release the dq's flush lock since we're done with it. 1216 */ 1217 xfs_dqfunlock(dqp); 1218 } 1219 1220 void 1221 xfs_buf_dquot_iodone( 1222 struct xfs_buf *bp) 1223 { 1224 struct xfs_log_item *lip, *n; 1225 1226 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { 1227 list_del_init(&lip->li_bio_list); 1228 xfs_qm_dqflush_done(lip); 1229 } 1230 } 1231 1232 void 1233 xfs_buf_dquot_io_fail( 1234 struct xfs_buf *bp) 1235 { 1236 struct xfs_log_item *lip; 1237 1238 spin_lock(&bp->b_mount->m_ail->ail_lock); 1239 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) 1240 set_bit(XFS_LI_FAILED, &lip->li_flags); 1241 spin_unlock(&bp->b_mount->m_ail->ail_lock); 1242 } 1243 1244 /* Check incore dquot for errors before we flush. */ 1245 static xfs_failaddr_t 1246 xfs_qm_dqflush_check( 1247 struct xfs_dquot *dqp) 1248 { 1249 xfs_dqtype_t type = xfs_dquot_type(dqp); 1250 1251 if (type != XFS_DQTYPE_USER && 1252 type != XFS_DQTYPE_GROUP && 1253 type != XFS_DQTYPE_PROJ) 1254 return __this_address; 1255 1256 if (dqp->q_id == 0) 1257 return NULL; 1258 1259 if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit && 1260 !dqp->q_blk.timer) 1261 return __this_address; 1262 1263 if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit && 1264 !dqp->q_ino.timer) 1265 return __this_address; 1266 1267 if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit && 1268 !dqp->q_rtb.timer) 1269 return __this_address; 1270 1271 /* bigtime flag should never be set on root dquots */ 1272 if (dqp->q_type & XFS_DQTYPE_BIGTIME) { 1273 if (!xfs_has_bigtime(dqp->q_mount)) 1274 return __this_address; 1275 if (dqp->q_id == 0) 1276 return __this_address; 1277 } 1278 1279 return NULL; 1280 } 1281 1282 /* 1283 * Get the buffer containing the on-disk dquot. 1284 * 1285 * Requires dquot flush lock, will clear the dirty flag, delete the quota log 1286 * item from the AIL, and shut down the system if something goes wrong. 1287 */ 1288 static int 1289 xfs_dquot_read_buf( 1290 struct xfs_trans *tp, 1291 struct xfs_dquot *dqp, 1292 struct xfs_buf **bpp) 1293 { 1294 struct xfs_mount *mp = dqp->q_mount; 1295 struct xfs_buf *bp = NULL; 1296 int error; 1297 1298 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno, 1299 mp->m_quotainfo->qi_dqchunklen, 0, 1300 &bp, &xfs_dquot_buf_ops); 1301 if (xfs_metadata_is_sick(error)) 1302 xfs_dquot_mark_sick(dqp); 1303 if (error) 1304 goto out_abort; 1305 1306 *bpp = bp; 1307 return 0; 1308 1309 out_abort: 1310 dqp->q_flags &= ~XFS_DQFLAG_DIRTY; 1311 xfs_trans_ail_delete(&dqp->q_logitem.qli_item, 0); 1312 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1313 return error; 1314 } 1315 1316 /* 1317 * Attach a dquot buffer to this dquot to avoid allocating a buffer during a 1318 * dqflush, since dqflush can be called from reclaim context. 1319 */ 1320 int 1321 xfs_dquot_attach_buf( 1322 struct xfs_trans *tp, 1323 struct xfs_dquot *dqp) 1324 { 1325 struct xfs_dq_logitem *qlip = &dqp->q_logitem; 1326 struct xfs_log_item *lip = &qlip->qli_item; 1327 int error; 1328 1329 spin_lock(&qlip->qli_lock); 1330 if (!lip->li_buf) { 1331 struct xfs_buf *bp = NULL; 1332 1333 spin_unlock(&qlip->qli_lock); 1334 error = xfs_dquot_read_buf(tp, dqp, &bp); 1335 if (error) 1336 return error; 1337 1338 /* 1339 * Attach the dquot to the buffer so that the AIL does not have 1340 * to read the dquot buffer to push this item. 1341 */ 1342 xfs_buf_hold(bp); 1343 spin_lock(&qlip->qli_lock); 1344 lip->li_buf = bp; 1345 xfs_trans_brelse(tp, bp); 1346 } 1347 qlip->qli_dirty = true; 1348 spin_unlock(&qlip->qli_lock); 1349 1350 return 0; 1351 } 1352 1353 /* 1354 * Get a new reference the dquot buffer attached to this dquot for a dqflush 1355 * operation. 1356 * 1357 * Returns 0 and a NULL bp if none was attached to the dquot; 0 and a locked 1358 * bp; or -EAGAIN if the buffer could not be locked. 1359 */ 1360 int 1361 xfs_dquot_use_attached_buf( 1362 struct xfs_dquot *dqp, 1363 struct xfs_buf **bpp) 1364 { 1365 struct xfs_buf *bp = dqp->q_logitem.qli_item.li_buf; 1366 1367 /* 1368 * A NULL buffer can happen if the dquot dirty flag was set but the 1369 * filesystem shut down before transaction commit happened. In that 1370 * case we're not going to flush anyway. 1371 */ 1372 if (!bp) { 1373 ASSERT(xfs_is_shutdown(dqp->q_mount)); 1374 1375 *bpp = NULL; 1376 return 0; 1377 } 1378 1379 if (!xfs_buf_trylock(bp)) 1380 return -EAGAIN; 1381 1382 xfs_buf_hold(bp); 1383 *bpp = bp; 1384 return 0; 1385 } 1386 1387 /* 1388 * Write a modified dquot to disk. 1389 * The dquot must be locked and the flush lock too taken by caller. 1390 * The flush lock will not be unlocked until the dquot reaches the disk, 1391 * but the dquot is free to be unlocked and modified by the caller 1392 * in the interim. Dquot is still locked on return. This behavior is 1393 * identical to that of inodes. 1394 */ 1395 int 1396 xfs_qm_dqflush( 1397 struct xfs_dquot *dqp, 1398 struct xfs_buf *bp) 1399 { 1400 struct xfs_mount *mp = dqp->q_mount; 1401 struct xfs_dq_logitem *qlip = &dqp->q_logitem; 1402 struct xfs_log_item *lip = &qlip->qli_item; 1403 struct xfs_dqblk *dqblk; 1404 xfs_failaddr_t fa; 1405 int error; 1406 1407 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1408 ASSERT(!completion_done(&dqp->q_flush)); 1409 1410 trace_xfs_dqflush(dqp); 1411 1412 xfs_qm_dqunpin_wait(dqp); 1413 1414 fa = xfs_qm_dqflush_check(dqp); 1415 if (fa) { 1416 xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS", 1417 dqp->q_id, fa); 1418 xfs_dquot_mark_sick(dqp); 1419 error = -EFSCORRUPTED; 1420 goto out_abort; 1421 } 1422 1423 /* Flush the incore dquot to the ondisk buffer. */ 1424 dqblk = xfs_buf_offset(bp, dqp->q_bufoffset); 1425 xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp); 1426 1427 /* 1428 * Clear the dirty field and remember the flush lsn for later use. 1429 */ 1430 dqp->q_flags &= ~XFS_DQFLAG_DIRTY; 1431 1432 /* 1433 * We hold the dquot lock, so nobody can dirty it while we're 1434 * scheduling the write out. Clear the dirty-since-flush flag. 1435 */ 1436 spin_lock(&qlip->qli_lock); 1437 qlip->qli_dirty = false; 1438 spin_unlock(&qlip->qli_lock); 1439 1440 xfs_trans_ail_copy_lsn(mp->m_ail, &qlip->qli_flush_lsn, &lip->li_lsn); 1441 1442 /* 1443 * copy the lsn into the on-disk dquot now while we have the in memory 1444 * dquot here. This can't be done later in the write verifier as we 1445 * can't get access to the log item at that point in time. 1446 * 1447 * We also calculate the CRC here so that the on-disk dquot in the 1448 * buffer always has a valid CRC. This ensures there is no possibility 1449 * of a dquot without an up-to-date CRC getting to disk. 1450 */ 1451 if (xfs_has_crc(mp)) { 1452 dqblk->dd_lsn = cpu_to_be64(lip->li_lsn); 1453 xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk), 1454 XFS_DQUOT_CRC_OFF); 1455 } 1456 1457 /* 1458 * Attach the dquot to the buffer so that we can remove this dquot from 1459 * the AIL and release the flush lock once the dquot is synced to disk. 1460 */ 1461 bp->b_flags |= _XBF_DQUOTS; 1462 list_add_tail(&lip->li_bio_list, &bp->b_li_list); 1463 1464 /* 1465 * If the buffer is pinned then push on the log so we won't 1466 * get stuck waiting in the write for too long. 1467 */ 1468 if (xfs_buf_ispinned(bp)) { 1469 trace_xfs_dqflush_force(dqp); 1470 xfs_log_force(mp, 0); 1471 } 1472 1473 trace_xfs_dqflush_done(dqp); 1474 return 0; 1475 1476 out_abort: 1477 dqp->q_flags &= ~XFS_DQFLAG_DIRTY; 1478 xfs_trans_ail_delete(lip, 0); 1479 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1480 xfs_dqfunlock(dqp); 1481 return error; 1482 } 1483 1484 /* 1485 * Lock two xfs_dquot structures. 1486 * 1487 * To avoid deadlocks we always lock the quota structure with 1488 * the lowerd id first. 1489 */ 1490 void 1491 xfs_dqlock2( 1492 struct xfs_dquot *d1, 1493 struct xfs_dquot *d2) 1494 { 1495 if (d1 && d2) { 1496 ASSERT(d1 != d2); 1497 if (d1->q_id > d2->q_id) { 1498 mutex_lock(&d2->q_qlock); 1499 mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); 1500 } else { 1501 mutex_lock(&d1->q_qlock); 1502 mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); 1503 } 1504 } else if (d1) { 1505 mutex_lock(&d1->q_qlock); 1506 } else if (d2) { 1507 mutex_lock(&d2->q_qlock); 1508 } 1509 } 1510 1511 static int 1512 xfs_dqtrx_cmp( 1513 const void *a, 1514 const void *b) 1515 { 1516 const struct xfs_dqtrx *qa = a; 1517 const struct xfs_dqtrx *qb = b; 1518 1519 if (qa->qt_dquot->q_id > qb->qt_dquot->q_id) 1520 return 1; 1521 if (qa->qt_dquot->q_id < qb->qt_dquot->q_id) 1522 return -1; 1523 return 0; 1524 } 1525 1526 void 1527 xfs_dqlockn( 1528 struct xfs_dqtrx *q) 1529 { 1530 unsigned int i; 1531 1532 BUILD_BUG_ON(XFS_QM_TRANS_MAXDQS > MAX_LOCKDEP_SUBCLASSES); 1533 1534 /* Sort in order of dquot id, do not allow duplicates */ 1535 for (i = 0; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++) { 1536 unsigned int j; 1537 1538 for (j = 0; j < i; j++) 1539 ASSERT(q[i].qt_dquot != q[j].qt_dquot); 1540 } 1541 if (i == 0) 1542 return; 1543 1544 sort(q, i, sizeof(struct xfs_dqtrx), xfs_dqtrx_cmp, NULL); 1545 1546 mutex_lock(&q[0].qt_dquot->q_qlock); 1547 for (i = 1; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++) 1548 mutex_lock_nested(&q[i].qt_dquot->q_qlock, 1549 XFS_QLOCK_NESTED + i - 1); 1550 } 1551 1552 int __init 1553 xfs_qm_init(void) 1554 { 1555 xfs_dquot_cache = kmem_cache_create("xfs_dquot", 1556 sizeof(struct xfs_dquot), 1557 0, 0, NULL); 1558 if (!xfs_dquot_cache) 1559 goto out; 1560 1561 xfs_dqtrx_cache = kmem_cache_create("xfs_dqtrx", 1562 sizeof(struct xfs_dquot_acct), 1563 0, 0, NULL); 1564 if (!xfs_dqtrx_cache) 1565 goto out_free_dquot_cache; 1566 1567 return 0; 1568 1569 out_free_dquot_cache: 1570 kmem_cache_destroy(xfs_dquot_cache); 1571 out: 1572 return -ENOMEM; 1573 } 1574 1575 void 1576 xfs_qm_exit(void) 1577 { 1578 kmem_cache_destroy(xfs_dqtrx_cache); 1579 kmem_cache_destroy(xfs_dquot_cache); 1580 } 1581