xref: /linux/fs/xfs/xfs_qm.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 #include "xfs_ag.h"
27 #include "xfs_ialloc.h"
28 #include "xfs_log_priv.h"
29 #include "xfs_health.h"
30 
31 /*
32  * The global quota manager. There is only one of these for the entire
33  * system, _not_ one per file system. XQM keeps track of the overall
34  * quota functionality, including maintaining the freelist and hash
35  * tables of dquots.
36  */
37 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
38 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
39 
40 STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
41 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
42 /*
43  * We use the batch lookup interface to iterate over the dquots as it
44  * currently is the only interface into the radix tree code that allows
45  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
46  * operations is fine as all callers are used either during mount/umount
47  * or quotaoff.
48  */
49 #define XFS_DQ_LOOKUP_BATCH	32
50 
51 STATIC int
52 xfs_qm_dquot_walk(
53 	struct xfs_mount	*mp,
54 	xfs_dqtype_t		type,
55 	int			(*execute)(struct xfs_dquot *dqp, void *data),
56 	void			*data)
57 {
58 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
59 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
60 	uint32_t		next_index;
61 	int			last_error = 0;
62 	int			skipped;
63 	int			nr_found;
64 
65 restart:
66 	skipped = 0;
67 	next_index = 0;
68 	nr_found = 0;
69 
70 	while (1) {
71 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
72 		int		error;
73 		int		i;
74 
75 		mutex_lock(&qi->qi_tree_lock);
76 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
77 					next_index, XFS_DQ_LOOKUP_BATCH);
78 		if (!nr_found) {
79 			mutex_unlock(&qi->qi_tree_lock);
80 			break;
81 		}
82 
83 		for (i = 0; i < nr_found; i++) {
84 			struct xfs_dquot *dqp = batch[i];
85 
86 			next_index = dqp->q_id + 1;
87 
88 			error = execute(batch[i], data);
89 			if (error == -EAGAIN) {
90 				skipped++;
91 				continue;
92 			}
93 			if (error && last_error != -EFSCORRUPTED)
94 				last_error = error;
95 		}
96 
97 		mutex_unlock(&qi->qi_tree_lock);
98 
99 		/* bail out if the filesystem is corrupted.  */
100 		if (last_error == -EFSCORRUPTED) {
101 			skipped = 0;
102 			break;
103 		}
104 		/* we're done if id overflows back to zero */
105 		if (!next_index)
106 			break;
107 	}
108 
109 	if (skipped) {
110 		delay(1);
111 		goto restart;
112 	}
113 
114 	return last_error;
115 }
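/*
 * Editorial sketch (not part of xfs_qm.c): the gang-lookup walk above in
 * miniature.  The walk resumes from the id after the last dquot returned;
 * since dquot ids are 32 bits wide, incrementing past 0xffffffff wraps
 * next_index to zero, which is the termination condition checked above.
 * This counting helper is hypothetical, but uses only structures and calls
 * already seen in this file.
 */
static int
xfs_qm_count_dquots_sketch(
	struct xfs_quotainfo	*qi,
	xfs_dqtype_t		type)
{
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*batch[XFS_DQ_LOOKUP_BATCH];
	uint32_t		next_index = 0;
	int			count = 0;
	int			nr_found;

	mutex_lock(&qi->qi_tree_lock);
	while ((nr_found = radix_tree_gang_lookup(tree, (void **)batch,
			next_index, XFS_DQ_LOOKUP_BATCH)) > 0) {
		count += nr_found;
		/* gang lookup returns entries in ascending id order */
		next_index = batch[nr_found - 1]->q_id + 1;
		if (!next_index)	/* id space wrapped; we are done */
			break;
	}
	mutex_unlock(&qi->qi_tree_lock);
	return count;
}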
116 
117 
118 /*
119  * Purge a dquot from all tracking data structures and free it.
120  */
121 STATIC int
122 xfs_qm_dqpurge(
123 	struct xfs_dquot	*dqp,
124 	void			*data)
125 {
126 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
127 	int			error = -EAGAIN;
128 
129 	xfs_dqlock(dqp);
130 	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
131 		goto out_unlock;
132 
133 	dqp->q_flags |= XFS_DQFLAG_FREEING;
134 
135 	xfs_dqflock(dqp);
136 
137 	/*
138 	 * If we are turning this type of quota off, we don't care
139 	 * about the dirty metadata sitting in this dquot. OTOH, if
140 	 * we're unmounting, we do care, so we flush it and wait.
141 	 */
142 	if (XFS_DQ_IS_DIRTY(dqp)) {
143 		struct xfs_buf	*bp = NULL;
144 
145 		/*
146 		 * We don't care about getting disk errors here. We need
147 		 * to purge this dquot anyway, so we go ahead regardless.
148 		 */
149 		error = xfs_qm_dqflush(dqp, &bp);
150 		if (!error) {
151 			error = xfs_bwrite(bp);
152 			xfs_buf_relse(bp);
153 		} else if (error == -EAGAIN) {
154 			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
155 			goto out_unlock;
156 		}
157 		xfs_dqflock(dqp);
158 	}
159 
160 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
161 	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
162 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
163 
164 	xfs_dqfunlock(dqp);
165 	xfs_dqunlock(dqp);
166 
167 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
168 	qi->qi_dquots--;
169 
170 	/*
171 	 * We move dquots to the freelist as soon as their reference count
172 	 * hits zero, so it really should be on the freelist here.
173 	 */
174 	ASSERT(!list_empty(&dqp->q_lru));
175 	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
176 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
177 
178 	xfs_qm_dqdestroy(dqp);
179 	return 0;
180 
181 out_unlock:
182 	xfs_dqunlock(dqp);
183 	return error;
184 }
185 
186 /*
187  * Purge the dquot cache.
188  */
189 static void
190 xfs_qm_dqpurge_all(
191 	struct xfs_mount	*mp)
192 {
193 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
194 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
195 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
196 }
197 
198 /*
199  * Just destroy the quotainfo structure.
200  */
201 void
202 xfs_qm_unmount(
203 	struct xfs_mount	*mp)
204 {
205 	if (mp->m_quotainfo) {
206 		xfs_qm_dqpurge_all(mp);
207 		xfs_qm_destroy_quotainfo(mp);
208 	}
209 }
210 
211 /*
212  * Called from the vfsops layer.
213  */
214 void
215 xfs_qm_unmount_quotas(
216 	xfs_mount_t	*mp)
217 {
218 	/*
219 	 * Release the dquots that root inode, et al might be holding,
220 	 * before we flush quotas and blow away the quotainfo structure.
221 	 */
222 	ASSERT(mp->m_rootip);
223 	xfs_qm_dqdetach(mp->m_rootip);
224 	if (mp->m_rbmip)
225 		xfs_qm_dqdetach(mp->m_rbmip);
226 	if (mp->m_rsumip)
227 		xfs_qm_dqdetach(mp->m_rsumip);
228 
229 	/*
230 	 * Release the quota inodes.
231 	 */
232 	if (mp->m_quotainfo) {
233 		if (mp->m_quotainfo->qi_uquotaip) {
234 			xfs_irele(mp->m_quotainfo->qi_uquotaip);
235 			mp->m_quotainfo->qi_uquotaip = NULL;
236 		}
237 		if (mp->m_quotainfo->qi_gquotaip) {
238 			xfs_irele(mp->m_quotainfo->qi_gquotaip);
239 			mp->m_quotainfo->qi_gquotaip = NULL;
240 		}
241 		if (mp->m_quotainfo->qi_pquotaip) {
242 			xfs_irele(mp->m_quotainfo->qi_pquotaip);
243 			mp->m_quotainfo->qi_pquotaip = NULL;
244 		}
245 	}
246 }
247 
248 STATIC int
249 xfs_qm_dqattach_one(
250 	struct xfs_inode	*ip,
251 	xfs_dqtype_t		type,
252 	bool			doalloc,
253 	struct xfs_dquot	**IO_idqpp)
254 {
255 	struct xfs_dquot	*dqp;
256 	int			error;
257 
258 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
259 	error = 0;
260 
261 	/*
262 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
263 	 * or &i_gdquot. This made the code look weird, but made the logic a lot
264 	 * simpler.
265 	 */
266 	dqp = *IO_idqpp;
267 	if (dqp) {
268 		trace_xfs_dqattach_found(dqp);
269 		return 0;
270 	}
271 
272 	/*
273 	 * Find the dquot from somewhere. This bumps the reference count of the
274 	 * dquot and returns it locked.  This can return ENOENT if the dquot didn't
275 	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
276 	 * turned off suddenly.
277 	 */
278 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
279 	if (error)
280 		return error;
281 
282 	trace_xfs_dqattach_get(dqp);
283 
284 	/*
285 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
286 	 * that the dquot returned is the one that should go in the inode.
287 	 */
288 	*IO_idqpp = dqp;
289 	xfs_dqunlock(dqp);
290 	return 0;
291 }
292 
293 static bool
294 xfs_qm_need_dqattach(
295 	struct xfs_inode	*ip)
296 {
297 	struct xfs_mount	*mp = ip->i_mount;
298 
299 	if (!XFS_IS_QUOTA_ON(mp))
300 		return false;
301 	if (!XFS_NOT_DQATTACHED(mp, ip))
302 		return false;
303 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
304 		return false;
305 	return true;
306 }
307 
308 /*
309  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
310  * into account.
311  * If @doalloc is true, the dquot(s) will be allocated if needed.
312  * Inode may get unlocked and relocked in here, and the caller must deal with
313  * the consequences.
314  */
315 int
316 xfs_qm_dqattach_locked(
317 	xfs_inode_t	*ip,
318 	bool		doalloc)
319 {
320 	xfs_mount_t	*mp = ip->i_mount;
321 	int		error = 0;
322 
323 	if (!xfs_qm_need_dqattach(ip))
324 		return 0;
325 
326 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
327 
328 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
329 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
330 				doalloc, &ip->i_udquot);
331 		if (error)
332 			goto done;
333 		ASSERT(ip->i_udquot);
334 	}
335 
336 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
337 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
338 				doalloc, &ip->i_gdquot);
339 		if (error)
340 			goto done;
341 		ASSERT(ip->i_gdquot);
342 	}
343 
344 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
345 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
346 				doalloc, &ip->i_pdquot);
347 		if (error)
348 			goto done;
349 		ASSERT(ip->i_pdquot);
350 	}
351 
352 done:
353 	/*
354 	 * Don't worry about the dquots that we may have attached before any
355 	 * error - they'll get detached later if it has not already been done.
356 	 */
357 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
358 	return error;
359 }
360 
361 int
362 xfs_qm_dqattach(
363 	struct xfs_inode	*ip)
364 {
365 	int			error;
366 
367 	if (!xfs_qm_need_dqattach(ip))
368 		return 0;
369 
370 	xfs_ilock(ip, XFS_ILOCK_EXCL);
371 	error = xfs_qm_dqattach_locked(ip, false);
372 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
373 
374 	return error;
375 }
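/*
 * Usage sketch (editorial, not part of xfs_qm.c): callers that already hold
 * the ILOCK call xfs_qm_dqattach_locked() directly; everyone else uses the
 * xfs_qm_dqattach() wrapper above, which cycles XFS_ILOCK_EXCL itself.  The
 * helper below is hypothetical.
 */
static int
xfs_qm_prepare_write_sketch(
	struct xfs_inode	*ip)
{
	int			error;

	/* attach u/g/p dquots before any quota reservation is attempted */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/* ... transaction allocation and block reservation would follow ... */
	return 0;
}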
376 
377 /*
378  * Release dquots (and their references) if any.
379  * The inode should be locked EXCL except when this is called by
380  * xfs_ireclaim.
381  */
382 void
383 xfs_qm_dqdetach(
384 	xfs_inode_t	*ip)
385 {
386 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
387 		return;
388 
389 	trace_xfs_dquot_dqdetach(ip);
390 
391 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
392 	if (ip->i_udquot) {
393 		xfs_qm_dqrele(ip->i_udquot);
394 		ip->i_udquot = NULL;
395 	}
396 	if (ip->i_gdquot) {
397 		xfs_qm_dqrele(ip->i_gdquot);
398 		ip->i_gdquot = NULL;
399 	}
400 	if (ip->i_pdquot) {
401 		xfs_qm_dqrele(ip->i_pdquot);
402 		ip->i_pdquot = NULL;
403 	}
404 }
405 
406 struct xfs_qm_isolate {
407 	struct list_head	buffers;
408 	struct list_head	dispose;
409 };
410 
411 static enum lru_status
412 xfs_qm_dquot_isolate(
413 	struct list_head	*item,
414 	struct list_lru_one	*lru,
415 	spinlock_t		*lru_lock,
416 	void			*arg)
417 		__releases(lru_lock) __acquires(lru_lock)
418 {
419 	struct xfs_dquot	*dqp = container_of(item,
420 						struct xfs_dquot, q_lru);
421 	struct xfs_qm_isolate	*isol = arg;
422 
423 	if (!xfs_dqlock_nowait(dqp))
424 		goto out_miss_busy;
425 
426 	/*
427 	 * If something else is freeing this dquot and hasn't yet removed it
428 	 * from the LRU, leave it for the freeing task to complete the freeing
429 	 * process rather than risk it being freed from under us here.
430 	 */
431 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
432 		goto out_miss_unlock;
433 
434 	/*
435 	 * This dquot has acquired a reference in the meantime; remove it from
436 	 * the freelist and try again.
437 	 */
438 	if (dqp->q_nrefs) {
439 		xfs_dqunlock(dqp);
440 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
441 
442 		trace_xfs_dqreclaim_want(dqp);
443 		list_lru_isolate(lru, &dqp->q_lru);
444 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
445 		return LRU_REMOVED;
446 	}
447 
448 	/*
449 	 * If the dquot is dirty, flush it. If it's already being flushed, just
450 	 * skip it so there is time for the IO to complete before we try to
451 	 * reclaim it again on the next LRU pass.
452 	 */
453 	if (!xfs_dqflock_nowait(dqp))
454 		goto out_miss_unlock;
455 
456 	if (XFS_DQ_IS_DIRTY(dqp)) {
457 		struct xfs_buf	*bp = NULL;
458 		int		error;
459 
460 		trace_xfs_dqreclaim_dirty(dqp);
461 
462 		/* we have to drop the LRU lock to flush the dquot */
463 		spin_unlock(lru_lock);
464 
465 		error = xfs_qm_dqflush(dqp, &bp);
466 		if (error)
467 			goto out_unlock_dirty;
468 
469 		xfs_buf_delwri_queue(bp, &isol->buffers);
470 		xfs_buf_relse(bp);
471 		goto out_unlock_dirty;
472 	}
473 	xfs_dqfunlock(dqp);
474 
475 	/*
476 	 * Prevent lookups now that we are past the point of no return.
477 	 */
478 	dqp->q_flags |= XFS_DQFLAG_FREEING;
479 	xfs_dqunlock(dqp);
480 
481 	ASSERT(dqp->q_nrefs == 0);
482 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
483 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
484 	trace_xfs_dqreclaim_done(dqp);
485 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
486 	return LRU_REMOVED;
487 
488 out_miss_unlock:
489 	xfs_dqunlock(dqp);
490 out_miss_busy:
491 	trace_xfs_dqreclaim_busy(dqp);
492 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
493 	return LRU_SKIP;
494 
495 out_unlock_dirty:
496 	trace_xfs_dqreclaim_busy(dqp);
497 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
498 	xfs_dqunlock(dqp);
499 	spin_lock(lru_lock);
500 	return LRU_RETRY;
501 }
502 
503 static unsigned long
504 xfs_qm_shrink_scan(
505 	struct shrinker		*shrink,
506 	struct shrink_control	*sc)
507 {
508 	struct xfs_quotainfo	*qi = shrink->private_data;
509 	struct xfs_qm_isolate	isol;
510 	unsigned long		freed;
511 	int			error;
512 
513 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
514 		return 0;
515 
516 	INIT_LIST_HEAD(&isol.buffers);
517 	INIT_LIST_HEAD(&isol.dispose);
518 
519 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
520 				     xfs_qm_dquot_isolate, &isol);
521 
522 	error = xfs_buf_delwri_submit(&isol.buffers);
523 	if (error)
524 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
525 
526 	while (!list_empty(&isol.dispose)) {
527 		struct xfs_dquot	*dqp;
528 
529 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
530 		list_del_init(&dqp->q_lru);
531 		xfs_qm_dqfree_one(dqp);
532 	}
533 
534 	return freed;
535 }
536 
537 static unsigned long
538 xfs_qm_shrink_count(
539 	struct shrinker		*shrink,
540 	struct shrink_control	*sc)
541 {
542 	struct xfs_quotainfo	*qi = shrink->private_data;
543 
544 	return list_lru_shrink_count(&qi->qi_lru, sc);
545 }
546 
547 STATIC void
548 xfs_qm_set_defquota(
549 	struct xfs_mount	*mp,
550 	xfs_dqtype_t		type,
551 	struct xfs_quotainfo	*qinf)
552 {
553 	struct xfs_dquot	*dqp;
554 	struct xfs_def_quota	*defq;
555 	int			error;
556 
557 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
558 	if (error)
559 		return;
560 
561 	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
562 
563 	/*
564 	 * Timers and warnings have been already set, let's just set the
565 	 * default limits for this quota type
566 	 */
567 	defq->blk.hard = dqp->q_blk.hardlimit;
568 	defq->blk.soft = dqp->q_blk.softlimit;
569 	defq->ino.hard = dqp->q_ino.hardlimit;
570 	defq->ino.soft = dqp->q_ino.softlimit;
571 	defq->rtb.hard = dqp->q_rtb.hardlimit;
572 	defq->rtb.soft = dqp->q_rtb.softlimit;
573 	xfs_qm_dqdestroy(dqp);
574 }
575 
576 /* Initialize quota time limits from the root dquot. */
577 static void
578 xfs_qm_init_timelimits(
579 	struct xfs_mount	*mp,
580 	xfs_dqtype_t		type)
581 {
582 	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
583 	struct xfs_def_quota	*defq;
584 	struct xfs_dquot	*dqp;
585 	int			error;
586 
587 	defq = xfs_get_defquota(qinf, type);
588 
589 	defq->blk.time = XFS_QM_BTIMELIMIT;
590 	defq->ino.time = XFS_QM_ITIMELIMIT;
591 	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
592 
593 	/*
594 	 * We try to get the limits from the superuser's limits fields.
595 	 * This is quite hacky, but it is standard quota practice.
596 	 *
597 	 * Since we may not have done a quotacheck by this point, just read
598 	 * the dquot without attaching it to any hashtables or lists.
599 	 */
600 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
601 	if (error)
602 		return;
603 
604 	/*
605 	 * The warnings and timers set the grace period given to
606 	 * a user or group before they are no longer allowed to
607 	 * write. If it is zero, a default is used.
608 	 */
609 	if (dqp->q_blk.timer)
610 		defq->blk.time = dqp->q_blk.timer;
611 	if (dqp->q_ino.timer)
612 		defq->ino.time = dqp->q_ino.timer;
613 	if (dqp->q_rtb.timer)
614 		defq->rtb.time = dqp->q_rtb.timer;
615 
616 	xfs_qm_dqdestroy(dqp);
617 }
618 
619 /*
620  * This initializes all the quota information that's kept in the
621  * mount structure
622  */
623 STATIC int
624 xfs_qm_init_quotainfo(
625 	struct xfs_mount	*mp)
626 {
627 	struct xfs_quotainfo	*qinf;
628 	int			error;
629 
630 	ASSERT(XFS_IS_QUOTA_ON(mp));
631 
632 	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
633 					GFP_KERNEL | __GFP_NOFAIL);
634 
635 	error = list_lru_init(&qinf->qi_lru);
636 	if (error)
637 		goto out_free_qinf;
638 
639 	/*
640 	 * See if quotainodes are setup, and if not, allocate them,
641 	 * and change the superblock accordingly.
642 	 */
643 	error = xfs_qm_init_quotainos(mp);
644 	if (error)
645 		goto out_free_lru;
646 
647 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
648 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
649 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
650 	mutex_init(&qinf->qi_tree_lock);
651 
652 	/* mutex used to serialize quotaoffs */
653 	mutex_init(&qinf->qi_quotaofflock);
654 
655 	/* Precalc some constants */
656 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
657 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
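	/*
	 * Worked example (editorial note, assuming 4 KiB filesystem blocks):
	 * XFS_DQUOT_CLUSTER_SIZE_FSB is 1, so qi_dqchunklen is 8 basic
	 * (512-byte) blocks; at sizeof(struct xfs_dqblk) == 136 bytes this
	 * yields qi_dqperchunk = 4096 / 136 = 30 dquots per chunk.
	 */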
658 	if (xfs_has_bigtime(mp)) {
659 		qinf->qi_expiry_min =
660 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
661 		qinf->qi_expiry_max =
662 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
663 	} else {
664 		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
665 		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
666 	}
667 	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
668 			qinf->qi_expiry_max);
669 
670 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
671 
672 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
673 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
674 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
675 
676 	if (XFS_IS_UQUOTA_ON(mp))
677 		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
678 	if (XFS_IS_GQUOTA_ON(mp))
679 		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
680 	if (XFS_IS_PQUOTA_ON(mp))
681 		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
682 
683 	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
684 					   mp->m_super->s_id);
685 	if (!qinf->qi_shrinker) {
686 		error = -ENOMEM;
687 		goto out_free_inos;
688 	}
689 
690 	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
691 	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
692 	qinf->qi_shrinker->private_data = qinf;
693 
694 	shrinker_register(qinf->qi_shrinker);
695 
696 	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
697 	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
698 
699 	return 0;
700 
701 out_free_inos:
702 	mutex_destroy(&qinf->qi_quotaofflock);
703 	mutex_destroy(&qinf->qi_tree_lock);
704 	xfs_qm_destroy_quotainos(qinf);
705 out_free_lru:
706 	list_lru_destroy(&qinf->qi_lru);
707 out_free_qinf:
708 	kfree(qinf);
709 	mp->m_quotainfo = NULL;
710 	return error;
711 }
712 
713 /*
714  * Gets called when unmounting a filesystem or when all quotas get
715  * turned off.
716  * This purges the quota inodes, destroys locks and frees itself.
717  */
718 void
719 xfs_qm_destroy_quotainfo(
720 	struct xfs_mount	*mp)
721 {
722 	struct xfs_quotainfo	*qi;
723 
724 	qi = mp->m_quotainfo;
725 	ASSERT(qi != NULL);
726 
727 	shrinker_free(qi->qi_shrinker);
728 	list_lru_destroy(&qi->qi_lru);
729 	xfs_qm_destroy_quotainos(qi);
730 	mutex_destroy(&qi->qi_tree_lock);
731 	mutex_destroy(&qi->qi_quotaofflock);
732 	kfree(qi);
733 	mp->m_quotainfo = NULL;
734 }
735 
736 /*
737  * Create an inode and return with a reference already taken, but unlocked.
738  * This is how we create quota inodes.
739  */
740 STATIC int
741 xfs_qm_qino_alloc(
742 	struct xfs_mount	*mp,
743 	struct xfs_inode	**ipp,
744 	unsigned int		flags)
745 {
746 	struct xfs_trans	*tp;
747 	int			error;
748 	bool			need_alloc = true;
749 
750 	*ipp = NULL;
751 	/*
752 	 * With a superblock that doesn't have a separate pquotino, we
753 	 * share an inode between gquota and pquota. If the on-disk
754 	 * superblock has GQUOTA and the filesystem is now mounted
755 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
756 	 * vice-versa.
757 	 */
758 	if (!xfs_has_pquotino(mp) &&
759 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
760 		xfs_ino_t ino = NULLFSINO;
761 
762 		if ((flags & XFS_QMOPT_PQUOTA) &&
763 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
764 			ino = mp->m_sb.sb_gquotino;
765 			if (XFS_IS_CORRUPT(mp,
766 					   mp->m_sb.sb_pquotino != NULLFSINO)) {
767 				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
768 				return -EFSCORRUPTED;
769 			}
770 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
771 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
772 			ino = mp->m_sb.sb_pquotino;
773 			if (XFS_IS_CORRUPT(mp,
774 					   mp->m_sb.sb_gquotino != NULLFSINO)) {
775 				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
776 				return -EFSCORRUPTED;
777 			}
778 		}
779 		if (ino != NULLFSINO) {
780 			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
781 			if (error)
782 				return error;
783 			mp->m_sb.sb_gquotino = NULLFSINO;
784 			mp->m_sb.sb_pquotino = NULLFSINO;
785 			need_alloc = false;
786 		}
787 	}
788 
789 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
790 			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
791 			0, 0, &tp);
792 	if (error)
793 		return error;
794 
795 	if (need_alloc) {
796 		struct xfs_icreate_args	args = {
797 			.mode		= S_IFREG,
798 			.flags		= XFS_ICREATE_UNLINKABLE,
799 		};
800 		xfs_ino_t	ino;
801 
802 		error = xfs_dialloc(&tp, &args, &ino);
803 		if (!error)
804 			error = xfs_icreate(tp, ino, &args, ipp);
805 		if (error) {
806 			xfs_trans_cancel(tp);
807 			return error;
808 		}
809 	}
810 
811 	/*
812 	 * Make the changes in the superblock, and log those too.
813 	 * sbfields arg may contain fields other than *QUOTINO;
814 	 * VERSIONNUM for example.
815 	 */
816 	spin_lock(&mp->m_sb_lock);
817 	if (flags & XFS_QMOPT_SBVERSION) {
818 		ASSERT(!xfs_has_quota(mp));
819 
820 		xfs_add_quota(mp);
821 		mp->m_sb.sb_uquotino = NULLFSINO;
822 		mp->m_sb.sb_gquotino = NULLFSINO;
823 		mp->m_sb.sb_pquotino = NULLFSINO;
824 
825 		/* qflags will get updated fully _after_ quotacheck */
826 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
827 	}
828 	if (flags & XFS_QMOPT_UQUOTA)
829 		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
830 	else if (flags & XFS_QMOPT_GQUOTA)
831 		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
832 	else
833 		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
834 	spin_unlock(&mp->m_sb_lock);
835 	xfs_log_sb(tp);
836 
837 	error = xfs_trans_commit(tp);
838 	if (error) {
839 		ASSERT(xfs_is_shutdown(mp));
840 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
841 	}
842 	if (need_alloc) {
843 		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
844 		xfs_finish_inode_setup(*ipp);
845 	}
846 	return error;
847 }
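/*
 * Worked example (editorial note): consider a V4 superblock (no separate
 * sb_pquotino) that previously ran with group quota, so sb_gquotino is set
 * and sb_pquotino is NULLFSINO.  Remounting with project quota takes the
 * sharing branch above: the existing sb_gquotino inode is grabbed via
 * xfs_iget(), both superblock fields are cleared, need_alloc is cleared,
 * and the superblock update at the end stores that inode number in
 * sb_pquotino instead.
 */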
848 
849 
850 STATIC void
851 xfs_qm_reset_dqcounts(
852 	struct xfs_mount	*mp,
853 	struct xfs_buf		*bp,
854 	xfs_dqid_t		id,
855 	xfs_dqtype_t		type)
856 {
857 	struct xfs_dqblk	*dqb;
858 	int			j;
859 
860 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
861 
862 	/*
863 	 * Reset all counters and timers. They'll be
864 	 * started afresh by xfs_qm_quotacheck.
865 	 */
866 #ifdef DEBUG
867 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
868 		sizeof(struct xfs_dqblk);
869 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
870 #endif
871 	dqb = bp->b_addr;
872 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
873 		struct xfs_disk_dquot	*ddq;
874 
875 		ddq = (struct xfs_disk_dquot *)&dqb[j];
876 
877 		/*
878 		 * Do a sanity check, and if needed, repair the dqblk. Don't
879 		 * output any warnings because it's perfectly possible to
880 		 * find uninitialised dquot blks. See comment in
881 		 * xfs_dquot_verify.
882 		 */
883 		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
884 		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
885 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
886 
887 		/*
888 		 * Reset type in case we are reusing group quota file for
889 		 * project quotas or vice versa
890 		 */
891 		ddq->d_type = type;
892 		ddq->d_bcount = 0;
893 		ddq->d_icount = 0;
894 		ddq->d_rtbcount = 0;
895 
896 		/*
897 		 * dquot id 0 stores the default grace period and the maximum
898 		 * warning limit that were set by the administrator, so we
899 		 * should not reset them.
900 		 */
901 		if (ddq->d_id != 0) {
902 			ddq->d_btimer = 0;
903 			ddq->d_itimer = 0;
904 			ddq->d_rtbtimer = 0;
905 			ddq->d_bwarns = 0;
906 			ddq->d_iwarns = 0;
907 			ddq->d_rtbwarns = 0;
908 			if (xfs_has_bigtime(mp))
909 				ddq->d_type |= XFS_DQTYPE_BIGTIME;
910 		}
911 
912 		if (xfs_has_crc(mp)) {
913 			xfs_update_cksum((char *)&dqb[j],
914 					 sizeof(struct xfs_dqblk),
915 					 XFS_DQUOT_CRC_OFF);
916 		}
917 	}
918 }
919 
920 STATIC int
921 xfs_qm_reset_dqcounts_all(
922 	struct xfs_mount	*mp,
923 	xfs_dqid_t		firstid,
924 	xfs_fsblock_t		bno,
925 	xfs_filblks_t		blkcnt,
926 	xfs_dqtype_t		type,
927 	struct list_head	*buffer_list)
928 {
929 	struct xfs_buf		*bp;
930 	int			error = 0;
931 
932 	ASSERT(blkcnt > 0);
933 
934 	/*
935 	 * Blkcnt arg can be a very big number, and might even be
936 	 * larger than the log itself. So, we have to break it up into
937 	 * manageable-sized transactions.
938 	 * Note that we don't start a permanent transaction here; we might
939 	 * not be able to get a log reservation for the whole thing up front,
940 	 * and we don't really care to either, because we just discard
941 	 * everything if we were to crash in the middle of this loop.
942 	 */
943 	while (blkcnt--) {
944 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
945 			      XFS_FSB_TO_DADDR(mp, bno),
946 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
947 			      &xfs_dquot_buf_ops);
948 
949 		/*
950 		 * CRC and validation errors will return an EFSCORRUPTED here. If
951 		 * this occurs, re-read without CRC validation so that we can
952 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
953 		 * will leave a trace in the log indicating corruption has
954 		 * been detected.
955 		 */
956 		if (error == -EFSCORRUPTED) {
957 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
958 				      XFS_FSB_TO_DADDR(mp, bno),
959 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
960 				      NULL);
961 		}
962 
963 		if (error)
964 			break;
965 
966 		/*
967 		 * A corrupt buffer might not have a verifier attached, so
968 		 * make sure we have the correct one attached before writeback
969 		 * occurs.
970 		 */
971 		bp->b_ops = &xfs_dquot_buf_ops;
972 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
973 		xfs_buf_delwri_queue(bp, buffer_list);
974 		xfs_buf_relse(bp);
975 
976 		/* go to the next block. */
977 		bno++;
978 		firstid += mp->m_quotainfo->qi_dqperchunk;
979 	}
980 
981 	return error;
982 }
983 
984 /*
985  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
986  * counters for every chunk of dquots that we find.
987  */
988 STATIC int
989 xfs_qm_reset_dqcounts_buf(
990 	struct xfs_mount	*mp,
991 	struct xfs_inode	*qip,
992 	xfs_dqtype_t		type,
993 	struct list_head	*buffer_list)
994 {
995 	struct xfs_bmbt_irec	*map;
996 	int			i, nmaps;	/* number of map entries */
997 	int			error;		/* return value */
998 	xfs_fileoff_t		lblkno;
999 	xfs_filblks_t		maxlblkcnt;
1000 	xfs_dqid_t		firstid;
1001 	xfs_fsblock_t		rablkno;
1002 	xfs_filblks_t		rablkcnt;
1003 
1004 	error = 0;
1005 	/*
1006 	 * This looks racy, but we can't keep an inode lock across a
1007 	 * trans_reserve. But, this gets called during quotacheck, and that
1008 	 * happens only at mount time which is single threaded.
1009 	 */
1010 	if (qip->i_nblocks == 0)
1011 		return 0;
1012 
1013 	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
1014 			GFP_KERNEL | __GFP_NOFAIL);
1015 
1016 	lblkno = 0;
1017 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1018 	do {
1019 		uint		lock_mode;
1020 
1021 		nmaps = XFS_DQITER_MAP_SIZE;
1022 		/*
1023 		 * We aren't changing the inode itself. Just changing
1024 		 * some of its data. No new blocks are added here, and
1025 		 * the inode is never added to the transaction.
1026 		 */
1027 		lock_mode = xfs_ilock_data_map_shared(qip);
1028 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1029 				       map, &nmaps, 0);
1030 		xfs_iunlock(qip, lock_mode);
1031 		if (error)
1032 			break;
1033 
1034 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1035 		for (i = 0; i < nmaps; i++) {
1036 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1037 			ASSERT(map[i].br_blockcount);
1038 
1039 
1040 			lblkno += map[i].br_blockcount;
1041 
1042 			if (map[i].br_startblock == HOLESTARTBLOCK)
1043 				continue;
1044 
1045 			firstid = (xfs_dqid_t) map[i].br_startoff *
1046 				mp->m_quotainfo->qi_dqperchunk;
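			/*
			 * Worked example (editorial note): dquot ids map
			 * linearly onto the quota file: id = br_startoff *
			 * qi_dqperchunk.  With 30 dquots per chunk, an
			 * extent starting at file offset 2 FSBs covers
			 * dquot ids 60 and up.
			 */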
1047 			/*
1048 			 * Do a read-ahead on the next extent.
1049 			 */
1050 			if ((i+1 < nmaps) &&
1051 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1052 				rablkcnt =  map[i+1].br_blockcount;
1053 				rablkno = map[i+1].br_startblock;
1054 				while (rablkcnt--) {
1055 					xfs_buf_readahead(mp->m_ddev_targp,
1056 					       XFS_FSB_TO_DADDR(mp, rablkno),
1057 					       mp->m_quotainfo->qi_dqchunklen,
1058 					       &xfs_dquot_buf_ops);
1059 					rablkno++;
1060 				}
1061 			}
1062 			/*
1063 			 * Iterate thru all the blks in the extent and
1064 			 * reset the counters of all the dquots inside them.
1065 			 */
1066 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1067 						   map[i].br_startblock,
1068 						   map[i].br_blockcount,
1069 						   type, buffer_list);
1070 			if (error)
1071 				goto out;
1072 		}
1073 	} while (nmaps > 0);
1074 
1075 out:
1076 	kfree(map);
1077 	return error;
1078 }
1079 
1080 /*
1081  * Called by dqusage_adjust in doing a quotacheck.
1082  *
1083  * Given the inode and a dquot id, this updates both the incore dquot as well
1084  * as the buffer copy. This is so that once the quotacheck is done, we can
1085  * just log all the buffers, as opposed to logging numerous updates to
1086  * individual dquots.
1087  */
1088 STATIC int
1089 xfs_qm_quotacheck_dqadjust(
1090 	struct xfs_inode	*ip,
1091 	xfs_dqtype_t		type,
1092 	xfs_qcnt_t		nblks,
1093 	xfs_qcnt_t		rtblks)
1094 {
1095 	struct xfs_mount	*mp = ip->i_mount;
1096 	struct xfs_dquot	*dqp;
1097 	xfs_dqid_t		id;
1098 	int			error;
1099 
1100 	id = xfs_qm_id_for_quotatype(ip, type);
1101 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1102 	if (error) {
1103 		/*
1104 		 * Shouldn't be able to turn off quotas here.
1105 		 */
1106 		ASSERT(error != -ESRCH);
1107 		ASSERT(error != -ENOENT);
1108 		return error;
1109 	}
1110 
1111 	trace_xfs_dqadjust(dqp);
1112 
1113 	/*
1114 	 * Adjust the inode count and the block count to reflect this inode's
1115 	 * resource usage.
1116 	 */
1117 	dqp->q_ino.count++;
1118 	dqp->q_ino.reserved++;
1119 	if (nblks) {
1120 		dqp->q_blk.count += nblks;
1121 		dqp->q_blk.reserved += nblks;
1122 	}
1123 	if (rtblks) {
1124 		dqp->q_rtb.count += rtblks;
1125 		dqp->q_rtb.reserved += rtblks;
1126 	}
1127 
1128 	/*
1129 	 * Set default limits, adjust timers (since we changed usages)
1130 	 *
1131 	 * There are no timers for the default values set in the root dquot.
1132 	 */
1133 	if (dqp->q_id) {
1134 		xfs_qm_adjust_dqlimits(dqp);
1135 		xfs_qm_adjust_dqtimers(dqp);
1136 	}
1137 
1138 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1139 	xfs_qm_dqput(dqp);
1140 	return 0;
1141 }
1142 
1143 /*
1144  * Callback routine supplied to bulkstat(). Given an inumber, find its
1145  * dquots and update them to account for resources taken by that inode.
1146  */
1147 /* ARGSUSED */
1148 STATIC int
1149 xfs_qm_dqusage_adjust(
1150 	struct xfs_mount	*mp,
1151 	struct xfs_trans	*tp,
1152 	xfs_ino_t		ino,
1153 	void			*data)
1154 {
1155 	struct xfs_inode	*ip;
1156 	xfs_qcnt_t		nblks;
1157 	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1158 	int			error;
1159 
1160 	ASSERT(XFS_IS_QUOTA_ON(mp));
1161 
1162 	/*
1163 	 * rootino must have its resources accounted for, not so with the quota
1164 	 * inodes.
1165 	 */
1166 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1167 		return 0;
1168 
1169 	/*
1170 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1171 	 * at mount time and therefore nobody will be racing chown/chproj.
1172 	 */
1173 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1174 	if (error == -EINVAL || error == -ENOENT)
1175 		return 0;
1176 	if (error)
1177 		return error;
1178 
1179 	/*
1180 	 * Reload the incore unlinked list to avoid failure in inodegc.
1181 	 * Use an unlocked check here because unrecovered unlinked inodes
1182 	 * should be somewhat rare.
1183 	 */
1184 	if (xfs_inode_unlinked_incomplete(ip)) {
1185 		error = xfs_inode_reload_unlinked(ip);
1186 		if (error) {
1187 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1188 			goto error0;
1189 		}
1190 	}
1191 
1192 	ASSERT(ip->i_delayed_blks == 0);
1193 
1194 	if (XFS_IS_REALTIME_INODE(ip)) {
1195 		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1196 
1197 		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1198 		if (error)
1199 			goto error0;
1200 
1201 		xfs_bmap_count_leaves(ifp, &rtblks);
1202 	}
1203 
1204 	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1205 	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1206 
1207 	/*
1208 	 * Add the (disk blocks and inode) resources occupied by this
1209 	 * inode to its dquots. We do this adjustment in the incore dquot,
1210 	 * and also copy the changes to its buffer.
1211 	 * We don't care about putting these changes in a transaction
1212 	 * envelope because if we crash in the middle of a 'quotacheck'
1213 	 * we have to start from the beginning anyway.
1214 	 * Once we're done, we'll log all the dquot bufs.
1215 	 *
1216 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1217 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1218 	 */
1219 	if (XFS_IS_UQUOTA_ON(mp)) {
1220 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1221 				rtblks);
1222 		if (error)
1223 			goto error0;
1224 	}
1225 
1226 	if (XFS_IS_GQUOTA_ON(mp)) {
1227 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1228 				rtblks);
1229 		if (error)
1230 			goto error0;
1231 	}
1232 
1233 	if (XFS_IS_PQUOTA_ON(mp)) {
1234 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1235 				rtblks);
1236 		if (error)
1237 			goto error0;
1238 	}
1239 
1240 error0:
1241 	xfs_irele(ip);
1242 	return error;
1243 }
1244 
1245 STATIC int
1246 xfs_qm_flush_one(
1247 	struct xfs_dquot	*dqp,
1248 	void			*data)
1249 {
1250 	struct xfs_mount	*mp = dqp->q_mount;
1251 	struct list_head	*buffer_list = data;
1252 	struct xfs_buf		*bp = NULL;
1253 	int			error = 0;
1254 
1255 	xfs_dqlock(dqp);
1256 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1257 		goto out_unlock;
1258 	if (!XFS_DQ_IS_DIRTY(dqp))
1259 		goto out_unlock;
1260 
1261 	/*
1262 	 * The only way the dquot is already flush locked by the time quotacheck
1263 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1264 	 * it for the final time. Quotacheck collects all dquot bufs in the
1265 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1266 	 * possibly queued it for I/O. The only way out is to push the buffer to
1267 	 * cycle the flush lock.
1268 	 */
1269 	if (!xfs_dqflock_nowait(dqp)) {
1270 		/* buf is pinned in-core by delwri list */
1271 		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1272 				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1273 		if (error)
1274 			goto out_unlock;
1275 
1276 		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1277 			error = -EAGAIN;
1278 			xfs_buf_relse(bp);
1279 			goto out_unlock;
1280 		}
1281 		xfs_buf_unlock(bp);
1282 
1283 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1284 		xfs_buf_rele(bp);
1285 
1286 		error = -EAGAIN;
1287 		goto out_unlock;
1288 	}
1289 
1290 	error = xfs_qm_dqflush(dqp, &bp);
1291 	if (error)
1292 		goto out_unlock;
1293 
1294 	xfs_buf_delwri_queue(bp, buffer_list);
1295 	xfs_buf_relse(bp);
1296 out_unlock:
1297 	xfs_dqunlock(dqp);
1298 	return error;
1299 }
1300 
1301 /*
1302  * Walk thru all the filesystem inodes and construct a consistent view
1303  * of the disk quota world. If the quotacheck fails, disable quotas.
1304  */
1305 STATIC int
1306 xfs_qm_quotacheck(
1307 	xfs_mount_t	*mp)
1308 {
1309 	int			error, error2;
1310 	uint			flags;
1311 	LIST_HEAD		(buffer_list);
1312 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1313 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1314 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1315 
1316 	flags = 0;
1317 
1318 	ASSERT(uip || gip || pip);
1319 	ASSERT(XFS_IS_QUOTA_ON(mp));
1320 
1321 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1322 
1323 	/*
1324 	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1325 	 * their counters to zero. We need a clean slate.
1326 	 * We don't log our changes till later.
1327 	 */
1328 	if (uip) {
1329 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1330 					 &buffer_list);
1331 		if (error)
1332 			goto error_return;
1333 		flags |= XFS_UQUOTA_CHKD;
1334 	}
1335 
1336 	if (gip) {
1337 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1338 					 &buffer_list);
1339 		if (error)
1340 			goto error_return;
1341 		flags |= XFS_GQUOTA_CHKD;
1342 	}
1343 
1344 	if (pip) {
1345 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1346 					 &buffer_list);
1347 		if (error)
1348 			goto error_return;
1349 		flags |= XFS_PQUOTA_CHKD;
1350 	}
1351 
1352 	xfs_set_quotacheck_running(mp);
1353 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1354 			NULL);
1355 	xfs_clear_quotacheck_running(mp);
1356 
1357 	/*
1358 	 * On error, the inode walk may have partially populated the dquot
1359 	 * caches.  We must purge them before disabling quota and tearing down
1360 	 * the quotainfo, or else the dquots will leak.
1361 	 */
1362 	if (error)
1363 		goto error_purge;
1364 
1365 	/*
1366 	 * We've made all the changes that we need to make incore.  Flush them
1367 	 * down to disk buffers if everything was updated successfully.
1368 	 */
1369 	if (XFS_IS_UQUOTA_ON(mp)) {
1370 		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1371 					  &buffer_list);
1372 	}
1373 	if (XFS_IS_GQUOTA_ON(mp)) {
1374 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1375 					   &buffer_list);
1376 		if (!error)
1377 			error = error2;
1378 	}
1379 	if (XFS_IS_PQUOTA_ON(mp)) {
1380 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1381 					   &buffer_list);
1382 		if (!error)
1383 			error = error2;
1384 	}
1385 
1386 	error2 = xfs_buf_delwri_submit(&buffer_list);
1387 	if (!error)
1388 		error = error2;
1389 
1390 	/*
1391 	 * We can get this error if we couldn't do a dquot allocation inside
1392 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1393 	 * dirty dquots that might be cached, we just want to get rid of them
1394 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1395 	 * at this point (because we intentionally didn't in dqget_noattach).
1396 	 */
1397 	if (error)
1398 		goto error_purge;
1399 
1400 	/*
1401  * If one type of quota is off, then it will lose its
1402 	 * quotachecked status, since we won't be doing accounting for
1403 	 * that type anymore.
1404 	 */
1405 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1406 	mp->m_qflags |= flags;
1407 
1408 error_return:
1409 	xfs_buf_delwri_cancel(&buffer_list);
1410 
1411 	if (error) {
1412 		xfs_warn(mp,
1413 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1414 			error);
1415 		/*
1416 		 * We must turn off quotas.
1417 		 */
1418 		ASSERT(mp->m_quotainfo != NULL);
1419 		xfs_qm_destroy_quotainfo(mp);
1420 		if (xfs_mount_reset_sbqflags(mp)) {
1421 			xfs_warn(mp,
1422 				"Quotacheck: Failed to reset quota flags.");
1423 		}
1424 		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1425 	} else {
1426 		xfs_notice(mp, "Quotacheck: Done.");
1427 		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1428 	}
1429 
1430 	return error;
1431 
1432 error_purge:
1433 	/*
1434 	 * On error, we may have inodes queued for inactivation. This may try
1435 	 * to attach dquots to the inode before running cleanup operations on
1436 	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1437 	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
1438 	 * pending inodegc operations before we purge the dquots from memory,
1439 	 * ensuring that background inactivation is idle whilst we turn off
1440 	 * quotas.
1441 	 */
1442 	xfs_inodegc_flush(mp);
1443 	xfs_qm_dqpurge_all(mp);
1444 	goto error_return;
1445 
1446 }
1447 
1448 /*
1449  * This is called from xfs_mountfs to start quotas and initialize all
1450  * necessary data structures like quotainfo.  This is also responsible for
1451  * running a quotacheck as necessary.  We are guaranteed that the superblock
1452  * is consistently read in at this point.
1453  *
1454  * If we fail here, the mount will continue with quota turned off. We don't
1455  * need to indicate success or failure at all.
1456  */
1457 void
1458 xfs_qm_mount_quotas(
1459 	struct xfs_mount	*mp)
1460 {
1461 	int			error = 0;
1462 	uint			sbf;
1463 
1464 	/*
1465 	 * If quotas are not supported on realtime volumes, we disable
1466 	 * quotas immediately.
1467 	 */
1468 	if (mp->m_sb.sb_rextents) {
1469 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1470 		mp->m_qflags = 0;
1471 		goto write_changes;
1472 	}
1473 
1474 	ASSERT(XFS_IS_QUOTA_ON(mp));
1475 
1476 	/*
1477 	 * Allocate the quotainfo structure inside the mount struct, and
1478 	 * create quotainode(s), and change/rev superblock if necessary.
1479 	 */
1480 	error = xfs_qm_init_quotainfo(mp);
1481 	if (error) {
1482 		/*
1483 		 * We must turn off quotas.
1484 		 */
1485 		ASSERT(mp->m_quotainfo == NULL);
1486 		mp->m_qflags = 0;
1487 		goto write_changes;
1488 	}
1489 	/*
1490 	 * If any of the quotas are not consistent, do a quotacheck.
1491 	 */
1492 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1493 		error = xfs_qm_quotacheck(mp);
1494 		if (error) {
1495 			/* Quotacheck failed and disabled quotas. */
1496 			return;
1497 		}
1498 	}
1499 	/*
1500 	 * If one type of quotas is off, then it will lose its
1501 	 * If one type of quota is off, then it will lose its
1502 	 * that type anymore.
1503 	 */
1504 	if (!XFS_IS_UQUOTA_ON(mp))
1505 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1506 	if (!XFS_IS_GQUOTA_ON(mp))
1507 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1508 	if (!XFS_IS_PQUOTA_ON(mp))
1509 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1510 
1511  write_changes:
1512 	/*
1513 	 * We actually don't have to acquire the m_sb_lock at all.
1514 	 * This can only be called from mount, and that's single threaded. XXX
1515 	 */
1516 	spin_lock(&mp->m_sb_lock);
1517 	sbf = mp->m_sb.sb_qflags;
1518 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1519 	spin_unlock(&mp->m_sb_lock);
1520 
1521 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1522 		if (xfs_sync_sb(mp, false)) {
1523 			/*
1524 			 * We could only have been turning quotas off.
1525 			 * We aren't in very good shape actually because
1526 			 * the incore structures are convinced that quotas are
1527 			 * off, but the on-disk superblock doesn't know that!
1528 			 */
1529 			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1530 			xfs_alert(mp, "%s: Superblock update failed!",
1531 				__func__);
1532 		}
1533 	}
1534 
1535 	if (error) {
1536 		xfs_warn(mp, "Failed to initialize disk quotas.");
1537 		return;
1538 	}
1539 }
1540 
1541 /*
1542  * Load the inode for a given type of quota, assuming that the sb fields have
1543  * been sorted out.  This is not true when switching quota types on a V4
1544  * filesystem, so do not use this function for that.
1545  *
1546  * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
1547  * success; or a negative errno.
1548  */
1549 int
1550 xfs_qm_qino_load(
1551 	struct xfs_mount	*mp,
1552 	xfs_dqtype_t		type,
1553 	struct xfs_inode	**ipp)
1554 {
1555 	xfs_ino_t		ino = NULLFSINO;
1556 
1557 	switch (type) {
1558 	case XFS_DQTYPE_USER:
1559 		ino = mp->m_sb.sb_uquotino;
1560 		break;
1561 	case XFS_DQTYPE_GROUP:
1562 		ino = mp->m_sb.sb_gquotino;
1563 		break;
1564 	case XFS_DQTYPE_PROJ:
1565 		ino = mp->m_sb.sb_pquotino;
1566 		break;
1567 	default:
1568 		ASSERT(0);
1569 		return -EFSCORRUPTED;
1570 	}
1571 
1572 	if (ino == NULLFSINO)
1573 		return -ENOENT;
1574 
1575 	return xfs_iget(mp, NULL, ino, 0, 0, ipp);
1576 }
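/*
 * Usage sketch (editorial, not part of xfs_qm.c): a hypothetical caller that
 * treats a missing user quota inode as "not allocated yet" rather than as a
 * hard error.
 */
static int
xfs_qm_load_uquota_sketch(
	struct xfs_mount	*mp,
	struct xfs_inode	**uipp)
{
	int			error;

	error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, uipp);
	if (error == -ENOENT) {
		/* sb_uquotino was NULLFSINO: no user quota inode exists */
		*uipp = NULL;
		return 0;
	}
	return error;
}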
1577 
1578 /*
1579  * This is called after the superblock has been read in and we're ready to
1580  * iget the quota inodes.
1581  */
1582 STATIC int
1583 xfs_qm_init_quotainos(
1584 	xfs_mount_t	*mp)
1585 {
1586 	struct xfs_inode	*uip = NULL;
1587 	struct xfs_inode	*gip = NULL;
1588 	struct xfs_inode	*pip = NULL;
1589 	int			error;
1590 	uint			flags = 0;
1591 
1592 	ASSERT(mp->m_quotainfo);
1593 
1594 	/*
1595 	 * Get the uquota and gquota inodes
1596 	 */
1597 	if (xfs_has_quota(mp)) {
1598 		if (XFS_IS_UQUOTA_ON(mp) &&
1599 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1600 			ASSERT(mp->m_sb.sb_uquotino > 0);
1601 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
1602 			if (error)
1603 				return error;
1604 		}
1605 		if (XFS_IS_GQUOTA_ON(mp) &&
1606 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1607 			ASSERT(mp->m_sb.sb_gquotino > 0);
1608 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
1609 			if (error)
1610 				goto error_rele;
1611 		}
1612 		if (XFS_IS_PQUOTA_ON(mp) &&
1613 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1614 			ASSERT(mp->m_sb.sb_pquotino > 0);
1615 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
1616 			if (error)
1617 				goto error_rele;
1618 		}
1619 	} else {
1620 		flags |= XFS_QMOPT_SBVERSION;
1621 	}
1622 
1623 	/*
1624 	 * Create the three inodes, if they don't exist already. The changes
1625 	 * made above will get added to a transaction and logged in one of
1626 	 * the qino_alloc calls below.  If the device is readonly,
1627 	 * temporarily switch to read-write to do this.
1628 	 */
1629 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1630 		error = xfs_qm_qino_alloc(mp, &uip,
1631 					      flags | XFS_QMOPT_UQUOTA);
1632 		if (error)
1633 			goto error_rele;
1634 
1635 		flags &= ~XFS_QMOPT_SBVERSION;
1636 	}
1637 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1638 		error = xfs_qm_qino_alloc(mp, &gip,
1639 					  flags | XFS_QMOPT_GQUOTA);
1640 		if (error)
1641 			goto error_rele;
1642 
1643 		flags &= ~XFS_QMOPT_SBVERSION;
1644 	}
1645 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1646 		error = xfs_qm_qino_alloc(mp, &pip,
1647 					  flags | XFS_QMOPT_PQUOTA);
1648 		if (error)
1649 			goto error_rele;
1650 	}
1651 
1652 	mp->m_quotainfo->qi_uquotaip = uip;
1653 	mp->m_quotainfo->qi_gquotaip = gip;
1654 	mp->m_quotainfo->qi_pquotaip = pip;
1655 
1656 	return 0;
1657 
1658 error_rele:
1659 	if (uip)
1660 		xfs_irele(uip);
1661 	if (gip)
1662 		xfs_irele(gip);
1663 	if (pip)
1664 		xfs_irele(pip);
1665 	return error;
1666 }
1667 
1668 STATIC void
1669 xfs_qm_destroy_quotainos(
1670 	struct xfs_quotainfo	*qi)
1671 {
1672 	if (qi->qi_uquotaip) {
1673 		xfs_irele(qi->qi_uquotaip);
1674 		qi->qi_uquotaip = NULL; /* paranoia */
1675 	}
1676 	if (qi->qi_gquotaip) {
1677 		xfs_irele(qi->qi_gquotaip);
1678 		qi->qi_gquotaip = NULL;
1679 	}
1680 	if (qi->qi_pquotaip) {
1681 		xfs_irele(qi->qi_pquotaip);
1682 		qi->qi_pquotaip = NULL;
1683 	}
1684 }
1685 
1686 STATIC void
1687 xfs_qm_dqfree_one(
1688 	struct xfs_dquot	*dqp)
1689 {
1690 	struct xfs_mount	*mp = dqp->q_mount;
1691 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1692 
1693 	mutex_lock(&qi->qi_tree_lock);
1694 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1695 
1696 	qi->qi_dquots--;
1697 	mutex_unlock(&qi->qi_tree_lock);
1698 
1699 	xfs_qm_dqdestroy(dqp);
1700 }
1701 
1702 /* --------------- utility functions for vnodeops ---------------- */
1703 
1704 
1705 /*
1706  * Given an inode, a uid, gid and prid, make sure that we have
1707  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1708  * quotas by creating this file.
1709  * This also attaches dquot(s) to the given inode after locking it,
1710  * and returns the dquots corresponding to the uid and/or gid.
1711  *
1712  * in	: inode (unlocked)
1713  * out	: udquot, gdquot with references taken and unlocked
1714  */
1715 int
1716 xfs_qm_vop_dqalloc(
1717 	struct xfs_inode	*ip,
1718 	kuid_t			uid,
1719 	kgid_t			gid,
1720 	prid_t			prid,
1721 	uint			flags,
1722 	struct xfs_dquot	**O_udqpp,
1723 	struct xfs_dquot	**O_gdqpp,
1724 	struct xfs_dquot	**O_pdqpp)
1725 {
1726 	struct xfs_mount	*mp = ip->i_mount;
1727 	struct inode		*inode = VFS_I(ip);
1728 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1729 	struct xfs_dquot	*uq = NULL;
1730 	struct xfs_dquot	*gq = NULL;
1731 	struct xfs_dquot	*pq = NULL;
1732 	int			error;
1733 	uint			lockflags;
1734 
1735 	if (!XFS_IS_QUOTA_ON(mp))
1736 		return 0;
1737 
1738 	lockflags = XFS_ILOCK_EXCL;
1739 	xfs_ilock(ip, lockflags);
1740 
1741 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1742 		gid = inode->i_gid;
1743 
1744 	/*
1745 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1746 	 * if necessary. The dquot(s) will not be locked.
1747 	 */
1748 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1749 		error = xfs_qm_dqattach_locked(ip, true);
1750 		if (error) {
1751 			xfs_iunlock(ip, lockflags);
1752 			return error;
1753 		}
1754 	}
1755 
1756 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1757 		ASSERT(O_udqpp);
1758 		if (!uid_eq(inode->i_uid, uid)) {
1759 			/*
1760 			 * What we need is the dquot that has this uid, and
1761 			 * if we send the inode to dqget, the uid of the inode
1762 			 * takes priority over what's sent in the uid argument.
1763 			 * We must unlock the inode here before calling dqget if
1764 			 * we're not sending the inode, because otherwise
1765 			 * we'll deadlock by doing trans_reserve while
1766 			 * holding ilock.
1767 			 */
1768 			xfs_iunlock(ip, lockflags);
1769 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1770 					XFS_DQTYPE_USER, true, &uq);
1771 			if (error) {
1772 				ASSERT(error != -ENOENT);
1773 				return error;
1774 			}
1775 			/*
1776 			 * Get the ilock in the right order.
1777 			 */
1778 			xfs_dqunlock(uq);
1779 			lockflags = XFS_ILOCK_SHARED;
1780 			xfs_ilock(ip, lockflags);
1781 		} else {
1782 			/*
1783 			 * Take an extra reference, because we'll return
1784 			 * this to the caller.
1785 			 */
1786 			ASSERT(ip->i_udquot);
1787 			uq = xfs_qm_dqhold(ip->i_udquot);
1788 		}
1789 	}
1790 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1791 		ASSERT(O_gdqpp);
1792 		if (!gid_eq(inode->i_gid, gid)) {
1793 			xfs_iunlock(ip, lockflags);
1794 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1795 					XFS_DQTYPE_GROUP, true, &gq);
1796 			if (error) {
1797 				ASSERT(error != -ENOENT);
1798 				goto error_rele;
1799 			}
1800 			xfs_dqunlock(gq);
1801 			lockflags = XFS_ILOCK_SHARED;
1802 			xfs_ilock(ip, lockflags);
1803 		} else {
1804 			ASSERT(ip->i_gdquot);
1805 			gq = xfs_qm_dqhold(ip->i_gdquot);
1806 		}
1807 	}
1808 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1809 		ASSERT(O_pdqpp);
1810 		if (ip->i_projid != prid) {
1811 			xfs_iunlock(ip, lockflags);
1812 			error = xfs_qm_dqget(mp, prid,
1813 					XFS_DQTYPE_PROJ, true, &pq);
1814 			if (error) {
1815 				ASSERT(error != -ENOENT);
1816 				goto error_rele;
1817 			}
1818 			xfs_dqunlock(pq);
1819 			lockflags = XFS_ILOCK_SHARED;
1820 			xfs_ilock(ip, lockflags);
1821 		} else {
1822 			ASSERT(ip->i_pdquot);
1823 			pq = xfs_qm_dqhold(ip->i_pdquot);
1824 		}
1825 	}
1826 	trace_xfs_dquot_dqalloc(ip);
1827 
1828 	xfs_iunlock(ip, lockflags);
1829 	if (O_udqpp)
1830 		*O_udqpp = uq;
1831 	else
1832 		xfs_qm_dqrele(uq);
1833 	if (O_gdqpp)
1834 		*O_gdqpp = gq;
1835 	else
1836 		xfs_qm_dqrele(gq);
1837 	if (O_pdqpp)
1838 		*O_pdqpp = pq;
1839 	else
1840 		xfs_qm_dqrele(pq);
1841 	return 0;
1842 
1843 error_rele:
1844 	xfs_qm_dqrele(gq);
1845 	xfs_qm_dqrele(uq);
1846 	return error;
1847 }
1848 
1849 /*
1850  * Actually transfer ownership, and do dquot modifications.
1851  * These were already reserved.
1852  */
1853 struct xfs_dquot *
1854 xfs_qm_vop_chown(
1855 	struct xfs_trans	*tp,
1856 	struct xfs_inode	*ip,
1857 	struct xfs_dquot	**IO_olddq,
1858 	struct xfs_dquot	*newdq)
1859 {
1860 	struct xfs_dquot	*prevdq;
1861 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1862 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1863 
1864 
1865 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1866 	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
1867 
1868 	/* old dquot */
1869 	prevdq = *IO_olddq;
1870 	ASSERT(prevdq);
1871 	ASSERT(prevdq != newdq);
1872 
1873 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_nblocks));
1874 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1875 
1876 	/* the sparkling new dquot */
1877 	xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_nblocks);
1878 	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1879 
1880 	/*
1881 	 * Back when we made quota reservations for the chown, we reserved the
1882 	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
1883 	 * switched the dquots, decrease the new dquot's block reservation
1884 	 * (having already bumped up the real counter) so that we don't have
1885 	 * any reservation to give back when we commit.
1886 	 */
1887 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1888 			-ip->i_delayed_blks);
1889 
1890 	/*
1891 	 * Give the incore reservation for delalloc blocks back to the old
1892 	 * dquot.  We don't normally handle delalloc quota reservations
1893 	 * transactionally, so just lock the dquot and subtract from the
1894 	 * reservation.  Dirty the transaction because it's too late to turn
1895 	 * back now.
1896 	 */
1897 	tp->t_flags |= XFS_TRANS_DIRTY;
1898 	xfs_dqlock(prevdq);
1899 	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1900 	prevdq->q_blk.reserved -= ip->i_delayed_blks;
1901 	xfs_dqunlock(prevdq);
1902 
1903 	/*
1904 	 * Take an extra reference, because the inode is going to keep
1905 	 * this dquot pointer even after the trans_commit.
1906 	 */
1907 	*IO_olddq = xfs_qm_dqhold(newdq);
1908 
1909 	return prevdq;
1910 }
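/*
 * Worked example (editorial note, hypothetical numbers): suppose the chown'd
 * inode has i_nblocks == 100 and i_delayed_blks == 20, so the earlier chown
 * reservation charged 120 blocks against newdq.  The modifications above move
 * the 100 ondisk blocks (and one inode) from prevdq to newdq's real counters,
 * trim newdq's reservation by the 20 delalloc blocks so nothing extra is
 * returned at commit, and hand those 20 incore-reserved blocks back to
 * prevdq directly under its lock.
 */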
1911 
1912 int
1913 xfs_qm_vop_rename_dqattach(
1914 	struct xfs_inode	**i_tab)
1915 {
1916 	struct xfs_mount	*mp = i_tab[0]->i_mount;
1917 	int			i;
1918 
1919 	if (!XFS_IS_QUOTA_ON(mp))
1920 		return 0;
1921 
1922 	for (i = 0; (i < 4 && i_tab[i]); i++) {
1923 		struct xfs_inode	*ip = i_tab[i];
1924 		int			error;
1925 
1926 		/*
1927 		 * Watch out for duplicate entries in the table.
1928 		 */
1929 		if (i == 0 || ip != i_tab[i-1]) {
1930 			if (XFS_NOT_DQATTACHED(mp, ip)) {
1931 				error = xfs_qm_dqattach(ip);
1932 				if (error)
1933 					return error;
1934 			}
1935 		}
1936 	}
1937 	return 0;
1938 }
1939 
1940 void
1941 xfs_qm_vop_create_dqattach(
1942 	struct xfs_trans	*tp,
1943 	struct xfs_inode	*ip,
1944 	struct xfs_dquot	*udqp,
1945 	struct xfs_dquot	*gdqp,
1946 	struct xfs_dquot	*pdqp)
1947 {
1948 	struct xfs_mount	*mp = tp->t_mountp;
1949 
1950 	if (!XFS_IS_QUOTA_ON(mp))
1951 		return;
1952 
1953 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1954 
1955 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1956 		ASSERT(ip->i_udquot == NULL);
1957 		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1958 
1959 		ip->i_udquot = xfs_qm_dqhold(udqp);
1960 	}
1961 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1962 		ASSERT(ip->i_gdquot == NULL);
1963 		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1964 
1965 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1966 	}
1967 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1968 		ASSERT(ip->i_pdquot == NULL);
1969 		ASSERT(ip->i_projid == pdqp->q_id);
1970 
1971 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1972 	}
1973 
1974 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
1975 }
1976 
1977 /* Decide if this inode's dquot is near an enforcement boundary. */
1978 bool
1979 xfs_inode_near_dquot_enforcement(
1980 	struct xfs_inode	*ip,
1981 	xfs_dqtype_t		type)
1982 {
1983 	struct xfs_dquot	*dqp;
1984 	int64_t			freesp;
1985 
1986 	/* We only care for quotas that are enabled and enforced. */
1987 	dqp = xfs_inode_dquot(ip, type);
1988 	if (!dqp || !xfs_dquot_is_enforced(dqp))
1989 		return false;
1990 
1991 	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1992 	    xfs_dquot_res_over_limits(&dqp->q_rtb))
1993 		return true;
1994 
1995 	/* For space on the data device, check the various thresholds. */
1996 	if (!dqp->q_prealloc_hi_wmark)
1997 		return false;
1998 
1999 	if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
2000 		return false;
2001 
2002 	if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
2003 		return true;
2004 
2005 	freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
2006 	if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
2007 		return true;
2008 
2009 	return false;
2010 }
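/*
 * Worked example (editorial note, hypothetical numbers): say the limits give
 * q_prealloc_hi_wmark == 1000 blocks, q_prealloc_lo_wmark == 500, and
 * q_low_space[XFS_QLOWSP_5_PCNT] == 50.  With q_blk.reserved == 960 we pass
 * the low watermark, stay under the high one, and compute freesp == 40;
 * since 40 < 50 the inode is reported as near enforcement, which lets
 * callers throttle speculative preallocation before the limit is hit.
 */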
2011