// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_DQUOT_H__
#define __XFS_DQUOT_H__

/*
 * Dquots are structures that hold quota information about a user or a group,
 * much like inodes are for files. In fact, dquots share many characteristics
 * with inodes. However, dquots can also be a centralized resource, relative
 * to a collection of inodes. In this respect, dquots share some
 * characteristics with the superblock.
 * XFS dquots exploit both of these characteristics in their algorithms. They
 * make every attempt not to be a bottleneck when quotas are on and to have
 * minimal impact, if any, when quotas are off.
 */

struct xfs_mount;
struct xfs_trans;

enum {
	XFS_QLOWSP_1_PCNT = 0,
	XFS_QLOWSP_3_PCNT,
	XFS_QLOWSP_5_PCNT,
	XFS_QLOWSP_MAX
};

struct xfs_dquot_res {
	/* Total resources allocated and reserved. */
	xfs_qcnt_t		reserved;

	/* Total resources allocated. */
	xfs_qcnt_t		count;

	/* Absolute and preferred limits. */
	xfs_qcnt_t		hardlimit;
	xfs_qcnt_t		softlimit;

	/*
	 * For root dquots, this is the default grace period, in seconds.
	 * Otherwise, this is when the quota grace period expires,
	 * in seconds since the Unix epoch.
	 */
	time64_t		timer;
};

static inline bool
xfs_dquot_res_over_limits(
	const struct xfs_dquot_res	*qres)
{
	if ((qres->softlimit && qres->softlimit < qres->reserved) ||
	    (qres->hardlimit && qres->hardlimit < qres->reserved))
		return true;
	return false;
}

struct xfs_dquot_pre {
	xfs_qcnt_t		q_prealloc_lo_wmark;
	xfs_qcnt_t		q_prealloc_hi_wmark;
	int64_t			q_low_space[XFS_QLOWSP_MAX];
};

/*
 * The incore dquot structure
 */
struct xfs_dquot {
	struct list_head	q_lru;
	struct xfs_mount	*q_mount;
	xfs_dqtype_t		q_type;
	uint16_t		q_flags;
	xfs_dqid_t		q_id;
	uint			q_nrefs;
	int			q_bufoffset;
	xfs_daddr_t		q_blkno;
	xfs_fileoff_t		q_fileoffset;

	struct xfs_dquot_res	q_blk;	/* regular blocks */
	struct xfs_dquot_res	q_ino;	/* inodes */
	struct xfs_dquot_res	q_rtb;	/* realtime blocks */

	struct xfs_dq_logitem	q_logitem;

	struct xfs_dquot_pre	q_blk_prealloc;
	struct xfs_dquot_pre	q_rtb_prealloc;

	struct mutex		q_qlock;
	struct completion	q_flush;
	atomic_t		q_pincount;
	struct wait_queue_head	q_pinwait;
};

/*
 * Lock hierarchy for q_qlock:
 *	XFS_QLOCK_NORMAL is the implicit default,
 *	XFS_QLOCK_NESTED is used for the dquot with the higher id in
 *	xfs_dqlock2().
 */
enum {
	XFS_QLOCK_NORMAL = 0,
	XFS_QLOCK_NESTED,
};
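
/*
 * Illustrative sketch (not part of this header's API): given the hierarchy
 * above, a helper that locks two dquots is expected to take the lower-id
 * dquot's q_qlock in the normal class first and annotate the higher-id
 * dquot's lock as nested for lockdep, roughly:
 *
 *	if (d1->q_id > d2->q_id) {
 *		mutex_lock(&d2->q_qlock);
 *		mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
 *	} else {
 *		mutex_lock(&d1->q_qlock);
 *		mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
 *	}
 *
 * Callers should use xfs_dqlock2() (declared below) rather than open-coding
 * this ordering.
 */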

/*
 * Manage the q_flush completion queue embedded in the dquot. This completion
 * queue synchronizes processes attempting to flush the in-core dquot back to
 * disk.
 */
static inline void xfs_dqflock(struct xfs_dquot *dqp)
{
	wait_for_completion(&dqp->q_flush);
}

static inline bool xfs_dqflock_nowait(struct xfs_dquot *dqp)
{
	return try_wait_for_completion(&dqp->q_flush);
}

static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
{
	complete(&dqp->q_flush);
}

static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
{
	return mutex_trylock(&dqp->q_qlock);
}

static inline void xfs_dqlock(struct xfs_dquot *dqp)
{
	mutex_lock(&dqp->q_qlock);
}

static inline void xfs_dqunlock(struct xfs_dquot *dqp)
{
	mutex_unlock(&dqp->q_qlock);
}

static inline int
xfs_dquot_type(const struct xfs_dquot *dqp)
{
	return dqp->q_type & XFS_DQTYPE_REC_MASK;
}

static inline int xfs_this_quota_on(struct xfs_mount *mp, xfs_dqtype_t type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return XFS_IS_UQUOTA_ON(mp);
	case XFS_DQTYPE_GROUP:
		return XFS_IS_GQUOTA_ON(mp);
	case XFS_DQTYPE_PROJ:
		return XFS_IS_PQUOTA_ON(mp);
	default:
		return 0;
	}
}

static inline struct xfs_dquot *xfs_inode_dquot(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	if (xfs_is_metadir_inode(ip))
		return NULL;

	switch (type) {
	case XFS_DQTYPE_USER:
		return ip->i_udquot;
	case XFS_DQTYPE_GROUP:
		return ip->i_gdquot;
	case XFS_DQTYPE_PROJ:
		return ip->i_pdquot;
	default:
		return NULL;
	}
}

/* Decide if the dquot's limits are actually being enforced. */
static inline bool
xfs_dquot_is_enforced(
	const struct xfs_dquot	*dqp)
{
	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		return XFS_IS_UQUOTA_ENFORCED(dqp->q_mount);
	case XFS_DQTYPE_GROUP:
		return XFS_IS_GQUOTA_ENFORCED(dqp->q_mount);
	case XFS_DQTYPE_PROJ:
		return XFS_IS_PQUOTA_ENFORCED(dqp->q_mount);
	}
	ASSERT(0);
	return false;
}
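
/*
 * Illustrative sketch (not a definitive sequence): a non-blocking walker
 * that wants to push a dirty dquot to disk might combine the trylock
 * helpers above roughly as follows, skipping any dquot it cannot lock
 * immediately:
 *
 *	if (!xfs_dqlock_nowait(dqp))
 *		return;
 *	if (!xfs_dqflock_nowait(dqp)) {
 *		xfs_dqunlock(dqp);
 *		return;
 *	}
 *	... flush the dquot, e.g. via xfs_qm_dqflush() ...
 *	xfs_dqunlock(dqp);
 *
 * The flush "lock" is the q_flush completion: xfs_dqflock_nowait() takes it
 * by consuming the completion, and xfs_dqfunlock() releases it by reposting
 * the completion once the flushed dquot has been written back.
 */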

/*
 * Check whether a dquot is under low free space conditions. We assume the
 * quota is enabled and enforced.
 */
static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp)
{
	int64_t freesp;

	freesp = dqp->q_blk.hardlimit - dqp->q_blk.reserved;
	if (freesp < dqp->q_blk_prealloc.q_low_space[XFS_QLOWSP_1_PCNT])
		return true;

	freesp = dqp->q_rtb.hardlimit - dqp->q_rtb.reserved;
	if (freesp < dqp->q_rtb_prealloc.q_low_space[XFS_QLOWSP_1_PCNT])
		return true;

	return false;
}

void xfs_dquot_to_disk(struct xfs_disk_dquot *ddqp, struct xfs_dquot *dqp);

#define XFS_DQ_IS_LOCKED(dqp)	(mutex_is_locked(&((dqp)->q_qlock)))
#define XFS_DQ_IS_DIRTY(dqp)	((dqp)->q_flags & XFS_DQFLAG_DIRTY)

void		xfs_qm_dqdestroy(struct xfs_dquot *dqp);
int		xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf *bp);
void		xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
void		xfs_qm_adjust_dqtimers(struct xfs_dquot *d);
void		xfs_qm_adjust_dqlimits(struct xfs_dquot *d);
xfs_dqid_t	xfs_qm_id_for_quotatype(struct xfs_inode *ip,
				xfs_dqtype_t type);
int		xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
				xfs_dqtype_t type, bool can_alloc,
				struct xfs_dquot **dqpp);
int		xfs_qm_dqget_inode(struct xfs_inode *ip, xfs_dqtype_t type,
				bool can_alloc, struct xfs_dquot **dqpp);
int		xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
				xfs_dqtype_t type, struct xfs_dquot **dqpp);
int		xfs_qm_dqget_uncached(struct xfs_mount *mp,
				xfs_dqid_t id, xfs_dqtype_t type,
				struct xfs_dquot **dqpp);
void		xfs_qm_dqput(struct xfs_dquot *dqp);

void		xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
void		xfs_dqlockn(struct xfs_dqtrx *q);

void		xfs_dquot_set_prealloc_limits(struct xfs_dquot *);

int xfs_dquot_attach_buf(struct xfs_trans *tp, struct xfs_dquot *dqp);
int xfs_dquot_use_attached_buf(struct xfs_dquot *dqp, struct xfs_buf **bpp);
void xfs_dquot_detach_buf(struct xfs_dquot *dqp);

static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
	xfs_dqlock(dqp);
	dqp->q_nrefs++;
	xfs_dqunlock(dqp);
	return dqp;
}

time64_t xfs_dquot_set_timeout(struct xfs_mount *mp, time64_t timeout);
time64_t xfs_dquot_set_grace_period(time64_t grace);

void xfs_qm_init_dquot_blk(struct xfs_trans *tp, xfs_dqid_t id,
		xfs_dqtype_t type, struct xfs_buf *bp);

#endif /* __XFS_DQUOT_H__ */