Lines Matching +full:ip +full:- +full:blocks
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
10 * per-node file) and then are periodically synced to the quota file. This
32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
76 #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
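The mask works because the hash table size is a power of two: ANDing with (size - 1) keeps the low bits of the hash, which selects the same bucket as hash % size but without a division. A minimal user-space sketch, with a hypothetical 256-bucket table (the real GFS2_QD_HASH_SIZE is defined outside the matched lines):

    #include <stdio.h>

    #define QD_HASH_SIZE (1u << 8)           /* hypothetical; must be a power of two */
    #define QD_HASH_MASK (QD_HASH_SIZE - 1)  /* same shape as GFS2_QD_HASH_MASK */

    int main(void)
    {
            unsigned int hash = 0xdeadbeef;

            /* masking and modulo agree for power-of-two sizes */
            printf("mask: %u  mod: %u\n",
                   hash & QD_HASH_MASK, hash % QD_HASH_SIZE);
            return 0;
    }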
78 /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
79 /* -> sd_bitmap_lock */
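The point of documenting a single acquisition order (qd_lock, then the bucket lock, then qd->lockref.lock, then the lru lock, then sd_bitmap_lock) is that two paths can never hold the same pair of locks in opposite order, which is the classic recipe for an AB-BA deadlock. A user-space sketch of the discipline, with pthread mutexes standing in for the spinlocks:

    #include <pthread.h>

    /* Stand-ins for qd_lock -> bucket lock -> qd->lockref.lock. */
    static pthread_mutex_t qd_lock     = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t ref_lock    = PTHREAD_MUTEX_INITIALIZER;

    /* Paths may skip locks they don't need, but never take them out of order. */
    static void dispose_path(void)
    {
            pthread_mutex_lock(&qd_lock);          /* 1st */
            pthread_mutex_lock(&bucket_lock);      /* 2nd */
            /* ... unhash the quota data ... */
            pthread_mutex_unlock(&bucket_lock);
            pthread_mutex_unlock(&qd_lock);
    }

    static void ref_path(void)
    {
            pthread_mutex_lock(&bucket_lock);      /* 2nd (1st not needed) */
            pthread_mutex_lock(&ref_lock);         /* 3rd */
            /* ... adjust the lockref ... */
            pthread_mutex_unlock(&ref_lock);
            pthread_mutex_unlock(&bucket_lock);
    }

    int main(void)
    {
            dispose_path();
            ref_path();
            return 0;
    }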
109 struct gfs2_sbd *sdp = qd->qd_sbd; in gfs2_qd_dealloc()
112 if (atomic_dec_and_test(&sdp->sd_quota_count)) in gfs2_qd_dealloc()
113 wake_up(&sdp->sd_kill_wait); in gfs2_qd_dealloc()
118 struct gfs2_sbd *sdp = qd->qd_sbd; in gfs2_qd_dispose()
121 list_del(&qd->qd_list); in gfs2_qd_dispose()
124 spin_lock_bucket(qd->qd_hash); in gfs2_qd_dispose()
125 hlist_bl_del_rcu(&qd->qd_hlist); in gfs2_qd_dispose()
126 spin_unlock_bucket(qd->qd_hash); in gfs2_qd_dispose()
129 gfs2_assert_warn(sdp, !qd->qd_change); in gfs2_qd_dispose()
130 gfs2_assert_warn(sdp, !qd->qd_slot_ref); in gfs2_qd_dispose()
131 gfs2_assert_warn(sdp, !qd->qd_bh_count); in gfs2_qd_dispose()
134 gfs2_glock_put(qd->qd_gl); in gfs2_qd_dispose()
135 call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); in gfs2_qd_dispose()
144 list_del(&qd->qd_lru); in gfs2_qd_list_dispose()
159 if (!spin_trylock(&qd->qd_lockref.lock)) in gfs2_qd_isolate()
163 if (qd->qd_lockref.count == 0) { in gfs2_qd_isolate()
164 lockref_mark_dead(&qd->qd_lockref); in gfs2_qd_isolate()
165 list_lru_isolate_move(lru, &qd->qd_lru, dispose); in gfs2_qd_isolate()
169 spin_unlock(&qd->qd_lockref.lock); in gfs2_qd_isolate()
179 if (!(sc->gfp_mask & __GFP_FS)) in gfs2_qd_shrink_scan()
200 gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd"); in gfs2_qd_shrinker_init()
202 return -ENOMEM; in gfs2_qd_shrinker_init()
204 gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count; in gfs2_qd_shrinker_init()
205 gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan; in gfs2_qd_shrinker_init()
219 struct kqid qid = qd->qd_id; in qd2index()
238 qd->qd_sbd = sdp; in qd_alloc()
239 lockref_init(&qd->qd_lockref); in qd_alloc()
240 qd->qd_id = qid; in qd_alloc()
241 qd->qd_slot = -1; in qd_alloc()
242 INIT_LIST_HEAD(&qd->qd_lru); in qd_alloc()
243 qd->qd_hash = hash; in qd_alloc()
246 &gfs2_quota_glops, CREATE, &qd->qd_gl); in qd_alloc()
265 if (!qid_eq(qd->qd_id, qid)) in gfs2_qd_search_bucket()
267 if (qd->qd_sbd != sdp) in gfs2_qd_search_bucket()
269 if (lockref_get_not_dead(&qd->qd_lockref)) { in gfs2_qd_search_bucket()
270 list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru); in gfs2_qd_search_bucket()
294 return -ENOMEM; in qd_get()
301 list_add(&new_qd->qd_list, &sdp->sd_quota_list); in qd_get()
302 hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]); in qd_get()
303 atomic_inc(&sdp->sd_quota_count); in qd_get()
309 gfs2_glock_put(new_qd->qd_gl); in qd_get()
319 struct gfs2_sbd *sdp = qd->qd_sbd; in __qd_hold()
320 gfs2_assert(sdp, qd->qd_lockref.count > 0); in __qd_hold()
321 qd->qd_lockref.count++; in __qd_hold()
328 if (lockref_put_or_lock(&qd->qd_lockref)) in qd_put()
331 BUG_ON(__lockref_is_dead(&qd->qd_lockref)); in qd_put()
332 sdp = qd->qd_sbd; in qd_put()
333 if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) { in qd_put()
334 lockref_mark_dead(&qd->qd_lockref); in qd_put()
335 spin_unlock(&qd->qd_lockref.lock); in qd_put()
341 qd->qd_lockref.count = 0; in qd_put()
342 list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru); in qd_put()
343 spin_unlock(&qd->qd_lockref.lock); in qd_put()
348 struct gfs2_sbd *sdp = qd->qd_sbd; in slot_get()
352 spin_lock(&sdp->sd_bitmap_lock); in slot_get()
353 if (qd->qd_slot_ref == 0) { in slot_get()
354 bit = find_first_zero_bit(sdp->sd_quota_bitmap, in slot_get()
355 sdp->sd_quota_slots); in slot_get()
356 if (bit >= sdp->sd_quota_slots) { in slot_get()
357 error = -ENOSPC; in slot_get()
360 set_bit(bit, sdp->sd_quota_bitmap); in slot_get()
361 qd->qd_slot = bit; in slot_get()
363 qd->qd_slot_ref++; in slot_get()
365 spin_unlock(&sdp->sd_bitmap_lock); in slot_get()
371 struct gfs2_sbd *sdp = qd->qd_sbd; in slot_hold()
373 spin_lock(&sdp->sd_bitmap_lock); in slot_hold()
374 gfs2_assert(sdp, qd->qd_slot_ref); in slot_hold()
375 qd->qd_slot_ref++; in slot_hold()
376 spin_unlock(&sdp->sd_bitmap_lock); in slot_hold()
381 struct gfs2_sbd *sdp = qd->qd_sbd; in slot_put()
383 spin_lock(&sdp->sd_bitmap_lock); in slot_put()
384 gfs2_assert(sdp, qd->qd_slot_ref); in slot_put()
385 if (!--qd->qd_slot_ref) { in slot_put()
386 BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap)); in slot_put()
387 qd->qd_slot = -1; in slot_put()
389 spin_unlock(&sdp->sd_bitmap_lock); in slot_put()
394 struct gfs2_sbd *sdp = qd->qd_sbd; in bh_get()
395 struct inode *inode = sdp->sd_qc_inode; in bh_get()
396 struct gfs2_inode *ip = GFS2_I(inode); in bh_get() local
402 spin_lock(&qd->qd_lockref.lock); in bh_get()
403 if (qd->qd_bh_count) { in bh_get()
404 qd->qd_bh_count++; in bh_get()
405 spin_unlock(&qd->qd_lockref.lock); in bh_get()
408 spin_unlock(&qd->qd_lockref.lock); in bh_get()
410 block = qd->qd_slot / sdp->sd_qc_per_block; in bh_get()
411 offset = qd->qd_slot % sdp->sd_qc_per_block; in bh_get()
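The division/modulo pair above turns a quota-change slot number into a position in the per-node quota-change file: the quotient picks the block, the remainder picks the entry within it. A worked sketch, assuming a hypothetical 60 entries per block (the real sd_qc_per_block depends on the filesystem block size):

    #include <stdio.h>

    int main(void)
    {
            unsigned int qc_per_block = 60;   /* hypothetical sd_qc_per_block */
            unsigned int slot = 137;          /* hypothetical qd->qd_slot */

            unsigned int block  = slot / qc_per_block;   /* -> block 2 */
            unsigned int offset = slot % qc_per_block;   /* -> entry 17 */

            printf("slot %u -> block %u, entry %u\n", slot, block, offset);
            return 0;
    }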
414 (loff_t)block << inode->i_blkbits, in bh_get()
418 error = -ENOENT; in bh_get()
422 error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits, in bh_get()
426 error = -EIO; in bh_get()
430 spin_lock(&qd->qd_lockref.lock); in bh_get()
431 if (qd->qd_bh == NULL) { in bh_get()
432 qd->qd_bh = bh; in bh_get()
433 qd->qd_bh_qc = (struct gfs2_quota_change *) in bh_get()
434 (bh->b_data + sizeof(struct gfs2_meta_header) + in bh_get()
438 qd->qd_bh_count++; in bh_get()
439 spin_unlock(&qd->qd_lockref.lock); in bh_get()
449 struct gfs2_sbd *sdp = qd->qd_sbd; in bh_put()
452 spin_lock(&qd->qd_lockref.lock); in bh_put()
453 gfs2_assert(sdp, qd->qd_bh_count); in bh_put()
454 if (!--qd->qd_bh_count) { in bh_put()
455 bh = qd->qd_bh; in bh_put()
456 qd->qd_bh = NULL; in bh_put()
457 qd->qd_bh_qc = NULL; in bh_put()
459 spin_unlock(&qd->qd_lockref.lock); in bh_put()
468 spin_lock(&qd->qd_lockref.lock); in qd_grab_sync()
469 if (test_bit(QDF_LOCKED, &qd->qd_flags) || in qd_grab_sync()
470 !test_bit(QDF_CHANGE, &qd->qd_flags) || in qd_grab_sync()
471 qd->qd_sync_gen >= sync_gen) in qd_grab_sync()
474 if (__lockref_is_dead(&qd->qd_lockref)) in qd_grab_sync()
476 qd->qd_lockref.count++; in qd_grab_sync()
478 list_move_tail(&qd->qd_list, &sdp->sd_quota_list); in qd_grab_sync()
479 set_bit(QDF_LOCKED, &qd->qd_flags); in qd_grab_sync()
480 qd->qd_change_sync = qd->qd_change; in qd_grab_sync()
485 spin_unlock(&qd->qd_lockref.lock); in qd_grab_sync()
491 clear_bit(QDF_LOCKED, &qd->qd_flags); in qd_ungrab_sync()
505 spin_lock(&qd->qd_lockref.lock); in qd_unlock()
506 gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags)); in qd_unlock()
507 clear_bit(QDF_LOCKED, &qd->qd_flags); in qd_unlock()
508 spin_unlock(&qd->qd_lockref.lock); in qd_unlock()
539 * gfs2_qa_get - make sure we have a quota allocations data structure,
541 * @ip: the inode for this reservation
543 int gfs2_qa_get(struct gfs2_inode *ip) in gfs2_qa_get() argument
545 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_qa_get()
546 struct inode *inode = &ip->i_inode; in gfs2_qa_get()
548 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_qa_get()
551 spin_lock(&inode->i_lock); in gfs2_qa_get()
552 if (ip->i_qadata == NULL) { in gfs2_qa_get()
555 spin_unlock(&inode->i_lock); in gfs2_qa_get()
558 return -ENOMEM; in gfs2_qa_get()
560 spin_lock(&inode->i_lock); in gfs2_qa_get()
561 if (ip->i_qadata == NULL) in gfs2_qa_get()
562 ip->i_qadata = tmp; in gfs2_qa_get()
566 ip->i_qadata->qa_ref++; in gfs2_qa_get()
567 spin_unlock(&inode->i_lock); in gfs2_qa_get()
571 void gfs2_qa_put(struct gfs2_inode *ip) in gfs2_qa_put() argument
573 struct inode *inode = &ip->i_inode; in gfs2_qa_put()
575 spin_lock(&inode->i_lock); in gfs2_qa_put()
576 if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) { in gfs2_qa_put()
577 kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata); in gfs2_qa_put()
578 ip->i_qadata = NULL; in gfs2_qa_put()
580 spin_unlock(&inode->i_lock); in gfs2_qa_put()
583 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) in gfs2_quota_hold() argument
585 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_hold()
589 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_quota_hold()
592 error = gfs2_qa_get(ip); in gfs2_quota_hold()
596 qd = ip->i_qadata->qa_qd; in gfs2_quota_hold()
598 if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) || in gfs2_quota_hold()
599 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) { in gfs2_quota_hold()
600 error = -EIO; in gfs2_quota_hold()
601 gfs2_qa_put(ip); in gfs2_quota_hold()
605 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd); in gfs2_quota_hold()
608 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
611 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd); in gfs2_quota_hold()
614 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
618 !uid_eq(uid, ip->i_inode.i_uid)) { in gfs2_quota_hold()
622 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
627 !gid_eq(gid, ip->i_inode.i_gid)) { in gfs2_quota_hold()
631 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
637 gfs2_quota_unhold(ip); in gfs2_quota_hold()
642 void gfs2_quota_unhold(struct gfs2_inode *ip) in gfs2_quota_unhold() argument
644 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_unhold()
647 if (ip->i_qadata == NULL) in gfs2_quota_unhold()
650 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)); in gfs2_quota_unhold()
652 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_unhold()
653 qdsb_put(ip->i_qadata->qa_qd[x]); in gfs2_quota_unhold()
654 ip->i_qadata->qa_qd[x] = NULL; in gfs2_quota_unhold()
656 ip->i_qadata->qa_qd_num = 0; in gfs2_quota_unhold()
657 gfs2_qa_put(ip); in gfs2_quota_unhold()
665 if (qid_lt(qd_a->qd_id, qd_b->qd_id)) in sort_qd()
666 return -1; in sort_qd()
667 if (qid_lt(qd_b->qd_id, qd_a->qd_id)) in sort_qd()
674 struct gfs2_sbd *sdp = qd->qd_sbd; in do_qc()
675 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); in do_qc() local
676 struct gfs2_quota_change *qc = qd->qd_bh_qc; in do_qc()
680 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); in do_qc()
684 * is used. Here, we use the value of qc->qc_change when the slot is in do_qc()
688 spin_lock(&qd->qd_lockref.lock); in do_qc()
691 if (test_bit(QDF_CHANGE, &qd->qd_flags)) in do_qc()
692 x = be64_to_cpu(qc->qc_change); in do_qc()
694 qd->qd_change += change; in do_qc()
696 if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) { in do_qc()
698 clear_bit(QDF_CHANGE, &qd->qd_flags); in do_qc()
699 qc->qc_flags = 0; in do_qc()
700 qc->qc_id = 0; in do_qc()
702 } else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) { in do_qc()
704 set_bit(QDF_CHANGE, &qd->qd_flags); in do_qc()
708 qc->qc_flags = 0; in do_qc()
709 if (qd->qd_id.type == USRQUOTA) in do_qc()
710 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER); in do_qc()
711 qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id)); in do_qc()
713 qc->qc_change = cpu_to_be64(x); in do_qc()
715 spin_unlock(&qd->qd_lockref.lock); in do_qc()
721 if (change < 0) /* Reset quiet flag if we freed some blocks */ in do_qc()
722 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); in do_qc()
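Taken together, the do_qc() fragments above keep a quota-change slot claimed only while the accumulated delta is nonzero: when qc_change returns to zero the QDF_CHANGE flag, slot and buffer references are dropped, and when a zero entry first goes nonzero the on-disk entry is initialized and the references are taken. (The line that folds change into x sits between the matched lines.) A condensed sketch of that lifecycle:

    #include <stdbool.h>
    #include <stdio.h>

    /* Condensed shape of the do_qc() slot lifecycle. */
    struct qc_entry {
            long change;      /* accumulated delta, as stored in qc_change */
            bool active;      /* stands in for the QDF_CHANGE flag */
    };

    static void apply_change(struct qc_entry *qc, long change)
    {
            long x = qc->change + change;

            if (x == 0 && qc->active) {
                    qc->active = false;       /* delta back to zero: release slot */
                    printf("slot released\n");
            } else if (x != 0 && !qc->active) {
                    qc->active = true;        /* first nonzero delta: claim slot */
                    printf("slot claimed\n");
            }
            qc->change = x;
    }

    int main(void)
    {
            struct qc_entry qc = { 0, false };

            apply_change(&qc, +10);   /* claim */
            apply_change(&qc, -10);   /* release */
            return 0;
    }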
728 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in gfs2_write_buf_to_page() local
729 struct inode *inode = &ip->i_inode; in gfs2_write_buf_to_page()
730 struct address_space *mapping = inode->i_mapping; in gfs2_write_buf_to_page()
734 unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0; in gfs2_write_buf_to_page()
737 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift); in gfs2_write_buf_to_page()
750 bh = bh->b_this_page; in gfs2_write_buf_to_page()
762 bh->b_size); in gfs2_write_buf_to_page()
768 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_write_buf_to_page()
771 if (to_write > (bsize - boff)) { in gfs2_write_buf_to_page()
772 pg_off += (bsize - boff); in gfs2_write_buf_to_page()
773 to_write -= (bsize - boff); in gfs2_write_buf_to_page()
791 return -EIO; in gfs2_write_buf_to_page()
809 overflow = (pg_off + nbytes) - PAGE_SIZE; in gfs2_write_disk_quota()
813 nbytes - overflow); in gfs2_write_disk_quota()
817 ptr + nbytes - overflow, in gfs2_write_disk_quota()
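These three fragments split a quota record that straddles a page boundary: the first nbytes - overflow bytes finish off the current page, and the remaining overflow bytes, starting at ptr + nbytes - overflow, go to the front of the next page. A worked sketch of the arithmetic with a hypothetical 4096-byte page:

    #include <stdio.h>

    int main(void)
    {
            long page_size = 4096;   /* hypothetical PAGE_SIZE */
            long pg_off = 4080;      /* record starts 16 bytes before page end */
            long nbytes = 32;        /* record length */

            long overflow = (pg_off + nbytes) - page_size;   /* 16 */
            if (overflow > 0) {
                    printf("page n:   %ld bytes at offset %ld\n",
                           nbytes - overflow, pg_off);
                    printf("page n+1: %ld bytes at offset 0\n", overflow);
            }
            return 0;
    }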
823 * gfs2_adjust_quota - adjust record of current block usage
833 * Returns: 0 or -ve on error
840 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in gfs2_adjust_quota() local
841 struct inode *inode = &ip->i_inode; in gfs2_adjust_quota()
846 if (gfs2_is_stuffed(ip)) { in gfs2_adjust_quota()
847 err = gfs2_unstuff_dinode(ip); in gfs2_adjust_quota()
853 err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q)); in gfs2_adjust_quota()
857 loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */ in gfs2_adjust_quota()
861 spin_lock(&qd->qd_lockref.lock); in gfs2_adjust_quota()
862 qd->qd_qb.qb_value = q.qu_value; in gfs2_adjust_quota()
864 if (fdq->d_fieldmask & QC_SPC_SOFT) { in gfs2_adjust_quota()
865 q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift); in gfs2_adjust_quota()
866 qd->qd_qb.qb_warn = q.qu_warn; in gfs2_adjust_quota()
868 if (fdq->d_fieldmask & QC_SPC_HARD) { in gfs2_adjust_quota()
869 q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift); in gfs2_adjust_quota()
870 qd->qd_qb.qb_limit = q.qu_limit; in gfs2_adjust_quota()
872 if (fdq->d_fieldmask & QC_SPACE) { in gfs2_adjust_quota()
873 q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift); in gfs2_adjust_quota()
874 qd->qd_qb.qb_value = q.qu_value; in gfs2_adjust_quota()
877 spin_unlock(&qd->qd_lockref.lock); in gfs2_adjust_quota()
882 if (size > inode->i_size) in gfs2_adjust_quota()
886 set_bit(QDF_REFRESH, &qd->qd_flags); in gfs2_adjust_quota()
895 struct gfs2_sbd *sdp = (*qda)->qd_sbd; in do_sync()
896 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in do_sync() local
904 unsigned int nalloc = 0, blocks; in do_sync() local
907 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), in do_sync()
912 return -ENOMEM; in do_sync()
915 inode_lock(&ip->i_inode); in do_sync()
917 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, in do_sync()
923 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in do_sync()
929 if (gfs2_write_alloc_required(ip, offset, in do_sync()
942 * two blocks need to be updated instead of 1 */ in do_sync()
943 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3; in do_sync()
947 error = gfs2_inplace_reserve(ip, &ap); in do_sync()
952 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS; in do_sync()
954 error = gfs2_trans_begin(sdp, blocks, 0); in do_sync()
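The reservation passed to gfs2_trans_begin() above is assembled in two steps: a base of data blocks and per-quota overhead plus three extra blocks (the comment fragment at 942 mentions one reason: a quota straddling a block boundary dirties two blocks instead of one), then resource-group and indirect-block metadata when new allocation is required. A rough arithmetic sketch with illustrative inputs (the RES_* sizes below are assumptions, not taken from the matched lines):

    #include <stdio.h>

    int main(void)
    {
            /* illustrative inputs; the real ones come from
               gfs2_write_calc_reserv() and the size of the sync batch */
            unsigned int num_qd = 4, data_blocks = 1, ind_blocks = 2;
            unsigned int nalloc = 1, rg_blocks = 8;
            unsigned int res_dinode = 1, res_statfs = 1;   /* assumed */

            unsigned int blocks = num_qd * data_blocks + res_dinode + num_qd + 3;
            blocks += rg_blocks + nalloc * ind_blocks + res_statfs;  /* alloc path */

            printf("reserve %u blocks\n", blocks);   /* 4+1+4+3 + 8+2+1 = 23 */
            return 0;
    }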
961 error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd, in do_sync()
966 do_qc(qd, -qd->qd_change_sync); in do_sync()
967 set_bit(QDF_REFRESH, &qd->qd_flags); in do_sync()
973 gfs2_inplace_release(ip); in do_sync()
977 while (qx--) in do_sync()
979 inode_unlock(&ip->i_inode); in do_sync()
981 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, in do_sync()
986 spin_lock(&qd->qd_lockref.lock); in do_sync()
987 if (qd->qd_sync_gen < sync_gen) in do_sync()
988 qd->qd_sync_gen = sync_gen; in do_sync()
989 spin_unlock(&qd->qd_lockref.lock); in do_sync()
997 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in update_qd() local
1005 error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q)); in update_qd()
1009 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; in update_qd()
1010 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); in update_qd()
1011 qlvb->__pad = 0; in update_qd()
1012 qlvb->qb_limit = q.qu_limit; in update_qd()
1013 qlvb->qb_warn = q.qu_warn; in update_qd()
1014 qlvb->qb_value = q.qu_value; in update_qd()
1015 spin_lock(&qd->qd_lockref.lock); in update_qd()
1016 qd->qd_qb = *qlvb; in update_qd()
1017 spin_unlock(&qd->qd_lockref.lock); in update_qd()
1025 struct gfs2_sbd *sdp = qd->qd_sbd; in do_glock()
1026 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in do_glock() local
1030 gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd); in do_glock()
1032 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); in do_glock()
1036 if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags)) in do_glock()
1039 spin_lock(&qd->qd_lockref.lock); in do_glock()
1040 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; in do_glock()
1041 spin_unlock(&qd->qd_lockref.lock); in do_glock()
1043 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { in do_glock()
1045 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, in do_glock()
1050 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); in do_glock()
1073 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) in gfs2_quota_lock() argument
1075 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_lock()
1080 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_quota_lock()
1083 error = gfs2_quota_hold(ip, uid, gid); in gfs2_quota_lock()
1087 sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num, in gfs2_quota_lock()
1090 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_lock()
1091 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_lock()
1092 error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_lock()
1098 set_bit(GIF_QD_LOCKED, &ip->i_flags); in gfs2_quota_lock()
1100 while (x--) in gfs2_quota_lock()
1101 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_lock()
1102 gfs2_quota_unhold(ip); in gfs2_quota_lock()
1110 struct gfs2_sbd *sdp = qd->qd_sbd; in need_sync()
1111 struct gfs2_tune *gt = &sdp->sd_tune; in need_sync()
1116 spin_lock(&qd->qd_lockref.lock); in need_sync()
1117 if (!qd->qd_qb.qb_limit) in need_sync()
1120 change = qd->qd_change; in need_sync()
1123 value = (s64)be64_to_cpu(qd->qd_qb.qb_value); in need_sync()
1124 limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); in need_sync()
1128 spin_lock(&gt->gt_spin); in need_sync()
1129 num = gt->gt_quota_scale_num; in need_sync()
1130 den = gt->gt_quota_scale_den; in need_sync()
1131 spin_unlock(&gt->gt_spin); in need_sync()
1140 spin_unlock(&qd->qd_lockref.lock); in need_sync()
1144 void gfs2_quota_unlock(struct gfs2_inode *ip) in gfs2_quota_unlock() argument
1146 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_unlock()
1151 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags)) in gfs2_quota_unlock()
1154 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_unlock()
1159 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_unlock()
1162 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_unlock()
1173 gfs2_assert_warn(sdp, qd->qd_change_sync); in gfs2_quota_unlock()
1184 u64 sync_gen = READ_ONCE(sdp->sd_quota_sync_gen); in gfs2_quota_unlock()
1191 gfs2_quota_unhold(ip); in gfs2_quota_unlock()
1198 struct gfs2_sbd *sdp = qd->qd_sbd; in print_message()
1200 if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) { in print_message()
1203 (qd->qd_id.type == USRQUOTA) ? "user" : "group", in print_message()
1204 from_kqid(&init_user_ns, qd->qd_id)); in print_message()
1209 * gfs2_quota_check - check if allocating new blocks will exceed quota
1210 * @ip: The inode for which this check is being performed
1213 * @ap: The allocation parameters. ap->target contains the requested
1214 * blocks. ap->min_target, if set, contains the minimum blks
1218 * min_req = ap->min_target ? ap->min_target : ap->target;
1220 * ap->allowed is set to the number of blocks allowed
1222 * -EDQUOT otherwise, quota violation. ap->allowed is set to number
1223 * of blocks available.
1225 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, in gfs2_quota_check() argument
1228 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_check()
1234 ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ in gfs2_quota_check()
1235 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags)) in gfs2_quota_check()
1238 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_check()
1239 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_check()
1241 if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) || in gfs2_quota_check()
1242 qid_eq(qd->qd_id, make_kqid_gid(gid)))) in gfs2_quota_check()
1245 spin_lock(&qd->qd_lockref.lock); in gfs2_quota_check()
1246 warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn); in gfs2_quota_check()
1247 limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); in gfs2_quota_check()
1248 value = (s64)be64_to_cpu(qd->qd_qb.qb_value); in gfs2_quota_check()
1249 value += qd->qd_change; in gfs2_quota_check()
1250 spin_unlock(&qd->qd_lockref.lock); in gfs2_quota_check()
1252 if (limit > 0 && (limit - value) < ap->allowed) in gfs2_quota_check()
1253 ap->allowed = limit - value; in gfs2_quota_check()
1255 if (limit && limit < (value + (s64)ap->target)) { in gfs2_quota_check()
1257 * min_target, return -EDQUOT */ in gfs2_quota_check()
1258 if (!ap->min_target || ap->min_target > ap->allowed) { in gfs2_quota_check()
1260 &qd->qd_flags)) { in gfs2_quota_check()
1262 quota_send_warning(qd->qd_id, in gfs2_quota_check()
1263 sdp->sd_vfs->s_dev, in gfs2_quota_check()
1266 error = -EDQUOT; in gfs2_quota_check()
1270 time_after_eq(jiffies, qd->qd_last_warn + in gfs2_quota_check()
1273 quota_send_warning(qd->qd_id, in gfs2_quota_check()
1274 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); in gfs2_quota_check()
1277 qd->qd_last_warn = jiffies; in gfs2_quota_check()
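The checks above implement a two-tier policy: pushing usage past the hard limit fails with -EDQUOT (unless the caller's smaller min_target still fits), while crossing the soft warn value only triggers a rate-limited netlink warning. A condensed user-space sketch of the decision with hypothetical numbers:

    #include <stdio.h>

    /* Condensed shape of the gfs2_quota_check() decision. */
    static int quota_check(long value, long limit, long warn, long target)
    {
            if (limit && value + target > limit)
                    return -1;               /* hard limit: deny (-EDQUOT) */
            if (warn && value + target > warn)
                    printf("soft limit crossed: warn the user\n");
            return 0;                        /* allocation may proceed */
    }

    int main(void)
    {
            long value = 90, limit = 100, warn = 80;

            printf("target 5:  %s\n",
                   quota_check(value, limit, warn, 5)  ? "denied" : "ok");
            printf("target 20: %s\n",
                   quota_check(value, limit, warn, 20) ? "denied" : "ok");
            return 0;
    }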
1283 void gfs2_quota_change(struct gfs2_inode *ip, s64 change, in gfs2_quota_change() argument
1288 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_change()
1290 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF || in gfs2_quota_change()
1293 if (ip->i_diskflags & GFS2_DIF_SYSTEM) in gfs2_quota_change()
1296 if (gfs2_assert_withdraw(sdp, ip->i_qadata && in gfs2_quota_change()
1297 ip->i_qadata->qa_ref > 0)) in gfs2_quota_change()
1299 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_change()
1300 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_change()
1302 if (qid_eq(qd->qd_id, make_kqid_uid(uid)) || in gfs2_quota_change()
1303 qid_eq(qd->qd_id, make_kqid_gid(gid))) { in gfs2_quota_change()
1311 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_quota_sync()
1317 if (sb_rdonly(sdp->sd_vfs)) in gfs2_quota_sync()
1322 return -ENOMEM; in gfs2_quota_sync()
1324 mutex_lock(&sdp->sd_quota_sync_mutex); in gfs2_quota_sync()
1325 sync_gen = sdp->sd_quota_sync_gen + 1; in gfs2_quota_sync()
1333 list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) { in gfs2_quota_sync()
1351 qd_ungrab_sync(qda[--num_qd]); in gfs2_quota_sync()
1356 WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen); in gfs2_quota_sync()
1364 mutex_unlock(&sdp->sd_quota_sync_mutex); in gfs2_quota_sync()
1390 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); in gfs2_quota_init() local
1391 u64 size = i_size_read(sdp->sd_qc_inode); in gfs2_quota_init()
1392 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift; in gfs2_quota_init() local
1402 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20)) in gfs2_quota_init()
1403 return -EIO; in gfs2_quota_init()
1405 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; in gfs2_quota_init()
1406 bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long)); in gfs2_quota_init()
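DIV_ROUND_UP here counts how many unsigned longs are needed to give every quota slot one bitmap bit (8 * sizeof(unsigned long) bits per word); a following line outside the matched set presumably scales that count into bytes before the allocation at 1409. The rounding itself:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long slots = 1000;   /* hypothetical sd_quota_slots */
            unsigned long bits_per_word = 8 * sizeof(unsigned long);

            /* 1000 bits need ceil(1000 / 64) = 16 words on a 64-bit build */
            unsigned long words = DIV_ROUND_UP(slots, bits_per_word);

            printf("%lu slots -> %lu bitmap words (%lu bytes)\n",
                   slots, words, words * sizeof(unsigned long));
            return 0;
    }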
1408 error = -ENOMEM; in gfs2_quota_init()
1409 sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN); in gfs2_quota_init()
1410 if (sdp->sd_quota_bitmap == NULL) in gfs2_quota_init()
1411 sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS | in gfs2_quota_init()
1413 if (!sdp->sd_quota_bitmap) in gfs2_quota_init()
1416 for (x = 0; x < blocks; x++) { in gfs2_quota_init()
1422 error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen); in gfs2_quota_init()
1426 error = -EIO; in gfs2_quota_init()
1427 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); in gfs2_quota_init()
1433 qc = (struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); in gfs2_quota_init()
1434 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; in gfs2_quota_init()
1437 s64 qc_change = be64_to_cpu(qc->qc_change); in gfs2_quota_init()
1438 u32 qc_flags = be32_to_cpu(qc->qc_flags); in gfs2_quota_init()
1442 be32_to_cpu(qc->qc_id)); in gfs2_quota_init()
1452 qd->qd_lockref.count = 0; in gfs2_quota_init()
1453 set_bit(QDF_CHANGE, &qd->qd_flags); in gfs2_quota_init()
1454 qd->qd_change = qc_change; in gfs2_quota_init()
1455 qd->qd_slot = slot; in gfs2_quota_init()
1456 qd->qd_slot_ref = 1; in gfs2_quota_init()
1465 sdp->sd_jdesc->jd_jid, slot); in gfs2_quota_init()
1471 gfs2_glock_put(qd->qd_gl); in gfs2_quota_init()
1482 BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap)); in gfs2_quota_init()
1483 list_add(&qd->qd_list, &sdp->sd_quota_list); in gfs2_quota_init()
1484 atomic_inc(&sdp->sd_quota_count); in gfs2_quota_init()
1485 hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]); in gfs2_quota_init()
1496 extlen--; in gfs2_quota_init()
1519 BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) && in gfs2_quota_cleanup()
1520 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)); in gfs2_quota_cleanup()
1523 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { in gfs2_quota_cleanup()
1524 spin_lock(&qd->qd_lockref.lock); in gfs2_quota_cleanup()
1525 if (qd->qd_lockref.count != 0) { in gfs2_quota_cleanup()
1526 spin_unlock(&qd->qd_lockref.lock); in gfs2_quota_cleanup()
1529 lockref_mark_dead(&qd->qd_lockref); in gfs2_quota_cleanup()
1530 spin_unlock(&qd->qd_lockref.lock); in gfs2_quota_cleanup()
1532 list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru); in gfs2_quota_cleanup()
1533 list_add(&qd->qd_lru, &dispose); in gfs2_quota_cleanup()
1539 wait_event_timeout(sdp->sd_kill_wait, in gfs2_quota_cleanup()
1540 (count = atomic_read(&sdp->sd_quota_count)) == 0, in gfs2_quota_cleanup()
1544 fs_err(sdp, "%d left-over quota data objects\n", count); in gfs2_quota_cleanup()
1546 kvfree(sdp->sd_quota_bitmap); in gfs2_quota_cleanup()
1547 sdp->sd_quota_bitmap = NULL; in gfs2_quota_cleanup()
1552 if (error == 0 || error == -EROFS) in quotad_error()
1555 if (!cmpxchg(&sdp->sd_log_error, 0, error)) in quotad_error()
1557 wake_up(&sdp->sd_logd_waitq); in quotad_error()
1567 int error = fxn(sdp->sd_vfs, 0); in quotad_check_timeo()
1569 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; in quotad_check_timeo()
1571 *timeo -= t; in quotad_check_timeo()
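quotad_check_timeo() is a simple countdown: when the remaining budget is used up the sync function runs and the timeout reloads from the tunables; otherwise the elapsed time t is subtracted. The guarding condition is not among the matched lines, so it is reconstructed in this sketch:

    #include <stdio.h>

    /* Countdown pattern: run work() when the budget expires, else burn it down. */
    static void check_timeo(unsigned long *timeo, unsigned long quantum,
                            unsigned long t, void (*work)(void))
    {
            if (t >= *timeo) {
                    work();              /* e.g. a quota or statfs sync */
                    *timeo = quantum;    /* reload from the tunable */
            } else {
                    *timeo -= t;         /* not yet: consume the elapsed time */
            }
    }

    static void sync_work(void) { printf("sync!\n"); }

    int main(void)
    {
            unsigned long timeo = 60, quantum = 60;

            check_timeo(&timeo, quantum, 25, sync_work);  /* timeo -> 35 */
            check_timeo(&timeo, quantum, 40, sync_work);  /* fires, reloads */
            printf("remaining: %lu\n", timeo);            /* 60 */
            return 0;
    }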
1576 if (!sdp->sd_statfs_force_sync) { in gfs2_wake_up_statfs()
1577 sdp->sd_statfs_force_sync = 1; in gfs2_wake_up_statfs()
1578 wake_up(&sdp->sd_quota_wait); in gfs2_wake_up_statfs()
1584 * gfs2_quotad - Write cached quota changes into the quota file
1592 struct gfs2_tune *tune = &sdp->sd_tune; in gfs2_quotad()
1603 if (sdp->sd_statfs_force_sync) { in gfs2_quotad()
1604 int error = gfs2_statfs_sync(sdp->sd_vfs, 0); in gfs2_quotad()
1611 &tune->gt_statfs_quantum); in gfs2_quotad()
1615 &quotad_timeo, &tune->gt_quota_quantum); in gfs2_quotad()
1619 t = wait_event_freezable_timeout(sdp->sd_quota_wait, in gfs2_quotad()
1620 sdp->sd_statfs_force_sync || in gfs2_quotad()
1625 if (sdp->sd_statfs_force_sync) in gfs2_quotad()
1634 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_quota_get_state()
1638 switch (sdp->sd_args.ar_quota) { in gfs2_quota_get_state()
1642 state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; in gfs2_quota_get_state()
1643 state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; in gfs2_quota_get_state()
1646 state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED | in gfs2_quota_get_state()
1648 state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED | in gfs2_quota_get_state()
1654 if (sdp->sd_quota_inode) { in gfs2_quota_get_state()
1655 state->s_state[USRQUOTA].ino = in gfs2_quota_get_state()
1656 GFS2_I(sdp->sd_quota_inode)->i_no_addr; in gfs2_quota_get_state()
1657 state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks; in gfs2_quota_get_state()
1659 state->s_state[USRQUOTA].nextents = 1; /* unsupported */ in gfs2_quota_get_state()
1660 state->s_state[GRPQUOTA] = state->s_state[USRQUOTA]; in gfs2_quota_get_state()
1661 state->s_incoredqs = list_lru_count(&gfs2_qd_lru); in gfs2_quota_get_state()
1668 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_get_dqblk()
1676 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_get_dqblk()
1677 return -ESRCH; /* Crazy XFS error code */ in gfs2_get_dqblk()
1681 return -EINVAL; in gfs2_get_dqblk()
1690 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; in gfs2_get_dqblk()
1691 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; in gfs2_get_dqblk()
1692 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; in gfs2_get_dqblk()
1693 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; in gfs2_get_dqblk()
1707 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_set_dqblk()
1708 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in gfs2_set_dqblk() local
1712 unsigned int blocks = 0; in gfs2_set_dqblk() local
1717 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_set_dqblk()
1718 return -ESRCH; /* Crazy XFS error code */ in gfs2_set_dqblk()
1722 return -EINVAL; in gfs2_set_dqblk()
1724 if (fdq->d_fieldmask & ~GFS2_FIELDMASK) in gfs2_set_dqblk()
1725 return -EINVAL; in gfs2_set_dqblk()
1731 error = gfs2_qa_get(ip); in gfs2_set_dqblk()
1735 inode_lock(&ip->i_inode); in gfs2_set_dqblk()
1736 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh); in gfs2_set_dqblk()
1739 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in gfs2_set_dqblk()
1743 /* Check for existing entry, if none then alloc new blocks */ in gfs2_set_dqblk()
1748 /* If nothing has changed, this is a no-op */ in gfs2_set_dqblk()
1749 if ((fdq->d_fieldmask & QC_SPC_SOFT) && in gfs2_set_dqblk()
1750 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) in gfs2_set_dqblk()
1751 fdq->d_fieldmask ^= QC_SPC_SOFT; in gfs2_set_dqblk()
1753 if ((fdq->d_fieldmask & QC_SPC_HARD) && in gfs2_set_dqblk()
1754 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) in gfs2_set_dqblk()
1755 fdq->d_fieldmask ^= QC_SPC_HARD; in gfs2_set_dqblk()
1757 if ((fdq->d_fieldmask & QC_SPACE) && in gfs2_set_dqblk()
1758 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) in gfs2_set_dqblk()
1759 fdq->d_fieldmask ^= QC_SPACE; in gfs2_set_dqblk()
1761 if (fdq->d_fieldmask == 0) in gfs2_set_dqblk()
1765 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); in gfs2_set_dqblk()
1766 if (gfs2_is_stuffed(ip)) in gfs2_set_dqblk()
1770 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), in gfs2_set_dqblk()
1772 blocks = 1 + data_blocks + ind_blocks; in gfs2_set_dqblk()
1773 ap.target = blocks; in gfs2_set_dqblk()
1774 error = gfs2_inplace_reserve(ip, &ap); in gfs2_set_dqblk()
1777 blocks += gfs2_rg_blocks(ip, blocks); in gfs2_set_dqblk()
1780 /* Some quotas span block boundaries and can update two blocks, in gfs2_set_dqblk()
1782 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0); in gfs2_set_dqblk()
1789 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); in gfs2_set_dqblk()
1794 gfs2_inplace_release(ip); in gfs2_set_dqblk()
1800 gfs2_qa_put(ip); in gfs2_set_dqblk()
1801 inode_unlock(&ip->i_inode); in gfs2_set_dqblk()