1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 #include "xfs_ag.h"
27 #include "xfs_ialloc.h"
28 #include "xfs_log_priv.h"
29 #include "xfs_health.h"
30 #include "xfs_da_format.h"
31 #include "xfs_metafile.h"
32 #include "xfs_rtgroup.h"
33 
34 /*
35  * The global quota manager. There is only one of these for the entire
36  * system, _not_ one per file system. XQM keeps track of the overall
37  * quota functionality, including maintaining the freelist and radix
38  * trees of dquots.
39  */
40 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
41 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
42 
43 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
44 /*
45  * We use the batch lookup interface to iterate over the dquots as it
46  * currently is the only interface into the radix tree code that allows
47  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
48  * operations is fine because all callers run only during mount/umount
49  * or quotaoff.
50  */
51 #define XFS_DQ_LOOKUP_BATCH	32
52 
53 STATIC int
54 xfs_qm_dquot_walk(
55 	struct xfs_mount	*mp,
56 	xfs_dqtype_t		type,
57 	int			(*execute)(struct xfs_dquot *dqp, void *data),
58 	void			*data)
59 {
60 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
61 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
62 	uint32_t		next_index;
63 	int			last_error = 0;
64 	int			skipped;
65 	int			nr_found;
66 
67 restart:
68 	skipped = 0;
69 	next_index = 0;
70 	nr_found = 0;
71 
72 	while (1) {
73 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
74 		int		error;
75 		int		i;
76 
77 		mutex_lock(&qi->qi_tree_lock);
78 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
79 					next_index, XFS_DQ_LOOKUP_BATCH);
80 		if (!nr_found) {
81 			mutex_unlock(&qi->qi_tree_lock);
82 			break;
83 		}
84 
85 		for (i = 0; i < nr_found; i++) {
86 			struct xfs_dquot *dqp = batch[i];
87 
88 			next_index = dqp->q_id + 1;
89 
90 			error = execute(batch[i], data);
91 			if (error == -EAGAIN) {
92 				skipped++;
93 				continue;
94 			}
95 			if (error && last_error != -EFSCORRUPTED)
96 				last_error = error;
97 		}
98 
99 		mutex_unlock(&qi->qi_tree_lock);
100 
101 		/* Bail out if the filesystem is corrupted. */
102 		if (last_error == -EFSCORRUPTED) {
103 			skipped = 0;
104 			break;
105 		}
106 		/* we're done if id overflows back to zero */
107 		if (!next_index)
108 			break;
109 	}
110 
111 	if (skipped) {
112 		delay(1);
113 		goto restart;
114 	}
115 
116 	return last_error;
117 }
118 
119 
120 /*
121  * Purge a dquot from all tracking data structures and free it.
122  */
123 STATIC int
124 xfs_qm_dqpurge(
125 	struct xfs_dquot	*dqp,
126 	void			*data)
127 {
128 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
129 	int			error = -EAGAIN;
130 
131 	xfs_dqlock(dqp);
132 	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
133 		goto out_unlock;
134 
135 	dqp->q_flags |= XFS_DQFLAG_FREEING;
136 
137 	xfs_dqflock(dqp);
138 
139 	/*
140 	 * If we are turning this type of quota off, we don't care
141 	 * about the dirty metadata sitting in this dquot. OTOH, if
142 	 * we're unmounting, we do care, so we flush it and wait.
143 	 */
144 	if (XFS_DQ_IS_DIRTY(dqp)) {
145 		struct xfs_buf	*bp = NULL;
146 
147 		/*
148 		 * We don't care about getting disk errors here. We need
149 		 * to purge this dquot anyway, so we go ahead regardless.
150 		 */
151 		error = xfs_qm_dqflush(dqp, &bp);
152 		if (!error) {
153 			error = xfs_bwrite(bp);
154 			xfs_buf_relse(bp);
155 		} else if (error == -EAGAIN) {
156 			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
157 			goto out_unlock;
158 		}
159 		xfs_dqflock(dqp);
160 	}
161 
162 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
163 	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
164 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
165 
166 	xfs_dqfunlock(dqp);
167 	xfs_dqunlock(dqp);
168 
169 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
170 	qi->qi_dquots--;
171 
172 	/*
173 	 * We move dquots to the freelist as soon as their reference count
174 	 * hits zero, so this dquot really should be on the freelist here.
175 	 */
176 	ASSERT(!list_empty(&dqp->q_lru));
177 	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
178 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
179 
180 	xfs_qm_dqdestroy(dqp);
181 	return 0;
182 
183 out_unlock:
184 	xfs_dqunlock(dqp);
185 	return error;
186 }
187 
188 /*
189  * Purge the dquot cache.
190  */
191 static void
192 xfs_qm_dqpurge_all(
193 	struct xfs_mount	*mp)
194 {
195 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
196 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
197 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
198 }
199 
200 /*
201  * Just destroy the quotainfo structure.
202  */
203 void
204 xfs_qm_unmount(
205 	struct xfs_mount	*mp)
206 {
207 	if (mp->m_quotainfo) {
208 		xfs_qm_dqpurge_all(mp);
209 		xfs_qm_destroy_quotainfo(mp);
210 	}
211 }
212 
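/*
 * Detach dquots from the realtime bitmap and summary inodes so that no
 * dquot references remain on them at unmount time.
 */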
213 static void
214 xfs_qm_unmount_rt(
215 	struct xfs_mount	*mp)
216 {
217 	struct xfs_rtgroup	*rtg = xfs_rtgroup_grab(mp, 0);
218 
219 	if (!rtg)
220 		return;
221 	if (rtg->rtg_inodes[XFS_RTGI_BITMAP])
222 		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]);
223 	if (rtg->rtg_inodes[XFS_RTGI_SUMMARY])
224 		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
225 	xfs_rtgroup_rele(rtg);
226 }
227 
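/* Drop the inode references held on the quota inodes. */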
228 STATIC void
229 xfs_qm_destroy_quotainos(
230 	struct xfs_quotainfo	*qi)
231 {
232 	if (qi->qi_uquotaip) {
233 		xfs_irele(qi->qi_uquotaip);
234 		qi->qi_uquotaip = NULL; /* paranoia */
235 	}
236 	if (qi->qi_gquotaip) {
237 		xfs_irele(qi->qi_gquotaip);
238 		qi->qi_gquotaip = NULL;
239 	}
240 	if (qi->qi_pquotaip) {
241 		xfs_irele(qi->qi_pquotaip);
242 		qi->qi_pquotaip = NULL;
243 	}
244 }
245 
246 /*
247  * Called from the vfsops layer.
248  */
249 void
250 xfs_qm_unmount_quotas(
251 	xfs_mount_t	*mp)
252 {
253 	/*
254 	 * Release the dquots that the root inode, et al. might be holding,
255 	 * before we flush quotas and blow away the quotainfo structure.
256 	 */
257 	ASSERT(mp->m_rootip);
258 	xfs_qm_dqdetach(mp->m_rootip);
259 
260 	/*
261 	 * For pre-RTG file systems, the RT inodes have quotas attached;
262 	 * detach them now.
263 	 */
264 	if (!xfs_has_rtgroups(mp))
265 		xfs_qm_unmount_rt(mp);
266 
267 	/*
268 	 * Release the quota inodes.
269 	 */
270 	if (mp->m_quotainfo)
271 		xfs_qm_destroy_quotainos(mp->m_quotainfo);
272 }
273 
274 STATIC int
275 xfs_qm_dqattach_one(
276 	struct xfs_inode	*ip,
277 	xfs_dqtype_t		type,
278 	bool			doalloc,
279 	struct xfs_dquot	**IO_idqpp)
280 {
281 	struct xfs_dquot	*dqp;
282 	int			error;
283 
284 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
285 	error = 0;
286 
287 	/*
288 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
289 	 * or &i_gdquot. This makes the code look weird, but it keeps the logic
290 	 * a lot simpler.
291 	 */
292 	dqp = *IO_idqpp;
293 	if (dqp) {
294 		trace_xfs_dqattach_found(dqp);
295 		return 0;
296 	}
297 
298 	/*
299 	 * Find the dquot from somewhere. This bumps the reference count of the
300 	 * dquot and returns it locked.  This can return ENOENT if the dquot
301 	 * didn't exist on disk and we didn't ask it to allocate; ESRCH if
302 	 * quotas got turned off suddenly.
303 	 */
304 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
305 	if (error)
306 		return error;
307 
308 	trace_xfs_dqattach_get(dqp);
309 
310 	/*
311 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
312 	 * that the dquot returned is the one that should go in the inode.
313 	 */
314 	*IO_idqpp = dqp;
315 	xfs_dqunlock(dqp);
316 	return 0;
317 }
318 
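/*
 * Check whether an inode needs dquots attached: quota must be enabled,
 * the dquots must not already be attached, and quota inodes and metadir
 * inodes are never accounted to quotas.
 */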
319 static bool
320 xfs_qm_need_dqattach(
321 	struct xfs_inode	*ip)
322 {
323 	struct xfs_mount	*mp = ip->i_mount;
324 
325 	if (!XFS_IS_QUOTA_ON(mp))
326 		return false;
327 	if (!XFS_NOT_DQATTACHED(mp, ip))
328 		return false;
329 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
330 		return false;
331 	if (xfs_is_metadir_inode(ip))
332 		return false;
333 	return true;
334 }
335 
336 /*
337  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
338  * into account.
339  * If @doalloc is true, the dquot(s) will be allocated if needed.
340  * The inode may get unlocked and relocked in here, and the caller must
341  * deal with the consequences.
342  */
343 int
344 xfs_qm_dqattach_locked(
345 	xfs_inode_t	*ip,
346 	bool		doalloc)
347 {
348 	xfs_mount_t	*mp = ip->i_mount;
349 	int		error = 0;
350 
351 	if (!xfs_qm_need_dqattach(ip))
352 		return 0;
353 
354 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
355 	ASSERT(!xfs_is_metadir_inode(ip));
356 
357 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
358 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
359 				doalloc, &ip->i_udquot);
360 		if (error)
361 			goto done;
362 		ASSERT(ip->i_udquot);
363 	}
364 
365 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
366 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
367 				doalloc, &ip->i_gdquot);
368 		if (error)
369 			goto done;
370 		ASSERT(ip->i_gdquot);
371 	}
372 
373 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
374 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
375 				doalloc, &ip->i_pdquot);
376 		if (error)
377 			goto done;
378 		ASSERT(ip->i_pdquot);
379 	}
380 
381 done:
382 	/*
383 	 * Don't worry about the dquots that we may have attached before any
384 	 * error - they'll get detached later if that has not already been done.
385 	 */
386 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
387 	return error;
388 }
389 
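/* Take the ILOCK and attach the inode's dquots, without allocating them. */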
390 int
391 xfs_qm_dqattach(
392 	struct xfs_inode	*ip)
393 {
394 	int			error;
395 
396 	if (!xfs_qm_need_dqattach(ip))
397 		return 0;
398 
399 	xfs_ilock(ip, XFS_ILOCK_EXCL);
400 	error = xfs_qm_dqattach_locked(ip, false);
401 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
402 
403 	return error;
404 }
405 
406 /*
407  * Release dquots (and their references) if any.
408  * The inode should be locked EXCL except when this is called by
409  * xfs_ireclaim.
410  */
411 void
412 xfs_qm_dqdetach(
413 	xfs_inode_t	*ip)
414 {
415 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
416 		return;
417 
418 	trace_xfs_dquot_dqdetach(ip);
419 
420 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
421 	if (ip->i_udquot) {
422 		xfs_qm_dqrele(ip->i_udquot);
423 		ip->i_udquot = NULL;
424 	}
425 	if (ip->i_gdquot) {
426 		xfs_qm_dqrele(ip->i_gdquot);
427 		ip->i_gdquot = NULL;
428 	}
429 	if (ip->i_pdquot) {
430 		xfs_qm_dqrele(ip->i_pdquot);
431 		ip->i_pdquot = NULL;
432 	}
433 }
434 
435 struct xfs_qm_isolate {
436 	struct list_head	buffers;
437 	struct list_head	dispose;
438 };
439 
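/*
 * LRU isolation callback for dquot reclaim: skip busy or referenced
 * dquots, start writeback on dirty ones, and move clean unused dquots
 * to the dispose list for freeing.
 */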
440 static enum lru_status
441 xfs_qm_dquot_isolate(
442 	struct list_head	*item,
443 	struct list_lru_one	*lru,
444 	spinlock_t		*lru_lock,
445 	void			*arg)
446 		__releases(lru_lock) __acquires(lru_lock)
447 {
448 	struct xfs_dquot	*dqp = container_of(item,
449 						struct xfs_dquot, q_lru);
450 	struct xfs_qm_isolate	*isol = arg;
451 
452 	if (!xfs_dqlock_nowait(dqp))
453 		goto out_miss_busy;
454 
455 	/*
456 	 * If something else is freeing this dquot and hasn't yet removed it
457 	 * from the LRU, leave it for the freeing task to complete the freeing
458 	 * process rather than risk it being freed out from under us here.
459 	 */
460 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
461 		goto out_miss_unlock;
462 
463 	/*
464 	 * This dquot has acquired a reference in the meantime; remove it from
465 	 * the freelist and try again.
466 	 */
467 	if (dqp->q_nrefs) {
468 		xfs_dqunlock(dqp);
469 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
470 
471 		trace_xfs_dqreclaim_want(dqp);
472 		list_lru_isolate(lru, &dqp->q_lru);
473 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
474 		return LRU_REMOVED;
475 	}
476 
477 	/*
478 	 * If the dquot is dirty, flush it. If it's already being flushed, just
479 	 * skip it so there is time for the IO to complete before we try to
480 	 * reclaim it again on the next LRU pass.
481 	 */
482 	if (!xfs_dqflock_nowait(dqp))
483 		goto out_miss_unlock;
484 
485 	if (XFS_DQ_IS_DIRTY(dqp)) {
486 		struct xfs_buf	*bp = NULL;
487 		int		error;
488 
489 		trace_xfs_dqreclaim_dirty(dqp);
490 
491 		/* we have to drop the LRU lock to flush the dquot */
492 		spin_unlock(lru_lock);
493 
494 		error = xfs_qm_dqflush(dqp, &bp);
495 		if (error)
496 			goto out_unlock_dirty;
497 
498 		xfs_buf_delwri_queue(bp, &isol->buffers);
499 		xfs_buf_relse(bp);
500 		goto out_unlock_dirty;
501 	}
502 	xfs_dqfunlock(dqp);
503 
504 	/*
505 	 * Prevent lookups now that we are past the point of no return.
506 	 */
507 	dqp->q_flags |= XFS_DQFLAG_FREEING;
508 	xfs_dqunlock(dqp);
509 
510 	ASSERT(dqp->q_nrefs == 0);
511 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
512 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
513 	trace_xfs_dqreclaim_done(dqp);
514 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
515 	return LRU_REMOVED;
516 
517 out_miss_unlock:
518 	xfs_dqunlock(dqp);
519 out_miss_busy:
520 	trace_xfs_dqreclaim_busy(dqp);
521 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
522 	return LRU_SKIP;
523 
524 out_unlock_dirty:
525 	trace_xfs_dqreclaim_busy(dqp);
526 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
527 	xfs_dqunlock(dqp);
528 	spin_lock(lru_lock);
529 	return LRU_RETRY;
530 }
531 
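/*
 * Scan side of the dquot shrinker: isolate reclaimable dquots from the
 * LRU, submit any buffers queued for writeback, and free everything
 * that ended up on the dispose list.
 */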
532 static unsigned long
533 xfs_qm_shrink_scan(
534 	struct shrinker		*shrink,
535 	struct shrink_control	*sc)
536 {
537 	struct xfs_quotainfo	*qi = shrink->private_data;
538 	struct xfs_qm_isolate	isol;
539 	unsigned long		freed;
540 	int			error;
541 
542 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
543 		return 0;
544 
545 	INIT_LIST_HEAD(&isol.buffers);
546 	INIT_LIST_HEAD(&isol.dispose);
547 
548 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
549 				     xfs_qm_dquot_isolate, &isol);
550 
551 	error = xfs_buf_delwri_submit(&isol.buffers);
552 	if (error)
553 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
554 
555 	while (!list_empty(&isol.dispose)) {
556 		struct xfs_dquot	*dqp;
557 
558 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
559 		list_del_init(&dqp->q_lru);
560 		xfs_qm_dqfree_one(dqp);
561 	}
562 
563 	return freed;
564 }
565 
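/* Count side of the dquot shrinker: report how many dquots are on the LRU. */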
566 static unsigned long
567 xfs_qm_shrink_count(
568 	struct shrinker		*shrink,
569 	struct shrink_control	*sc)
570 {
571 	struct xfs_quotainfo	*qi = shrink->private_data;
572 
573 	return list_lru_shrink_count(&qi->qi_lru, sc);
574 }
575 
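/* Seed the default quota limits from the id-zero dquot of this type. */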
576 STATIC void
577 xfs_qm_set_defquota(
578 	struct xfs_mount	*mp,
579 	xfs_dqtype_t		type,
580 	struct xfs_quotainfo	*qinf)
581 {
582 	struct xfs_dquot	*dqp;
583 	struct xfs_def_quota	*defq;
584 	int			error;
585 
586 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
587 	if (error)
588 		return;
589 
590 	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
591 
592 	/*
593 	 * Timers and warnings have already been set; just set the
594 	 * default limits for this quota type.
595 	 */
596 	defq->blk.hard = dqp->q_blk.hardlimit;
597 	defq->blk.soft = dqp->q_blk.softlimit;
598 	defq->ino.hard = dqp->q_ino.hardlimit;
599 	defq->ino.soft = dqp->q_ino.softlimit;
600 	defq->rtb.hard = dqp->q_rtb.hardlimit;
601 	defq->rtb.soft = dqp->q_rtb.softlimit;
602 	xfs_qm_dqdestroy(dqp);
603 }
604 
605 /* Initialize quota time limits from the root dquot. */
606 static void
607 xfs_qm_init_timelimits(
608 	struct xfs_mount	*mp,
609 	xfs_dqtype_t		type)
610 {
611 	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
612 	struct xfs_def_quota	*defq;
613 	struct xfs_dquot	*dqp;
614 	int			error;
615 
616 	defq = xfs_get_defquota(qinf, type);
617 
618 	defq->blk.time = XFS_QM_BTIMELIMIT;
619 	defq->ino.time = XFS_QM_ITIMELIMIT;
620 	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
621 
622 	/*
623 	 * We try to get the limits from the superuser's limits fields.
624 	 * This is quite hacky, but it is standard quota practice.
625 	 *
626 	 * Since we may not have done a quotacheck by this point, just read
627 	 * the dquot without attaching it to any hashtables or lists.
628 	 */
629 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
630 	if (error)
631 		return;
632 
633 	/*
634 	 * The warnings and timers set the grace period given to
635 	 * a user or group before they can no longer perform any
636 	 * more writes. If a timer is zero, the default is used.
637 	 */
638 	if (dqp->q_blk.timer)
639 		defq->blk.time = dqp->q_blk.timer;
640 	if (dqp->q_ino.timer)
641 		defq->ino.time = dqp->q_ino.timer;
642 	if (dqp->q_rtb.timer)
643 		defq->rtb.time = dqp->q_rtb.timer;
644 
645 	xfs_qm_dqdestroy(dqp);
646 }
647 
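/* Load the quota directory and any quota inodes under it that already exist. */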
648 static int
649 xfs_qm_load_metadir_qinos(
650 	struct xfs_mount	*mp,
651 	struct xfs_quotainfo	*qi,
652 	struct xfs_inode	**dpp)
653 {
654 	struct xfs_trans	*tp;
655 	int			error;
656 
657 	error = xfs_trans_alloc_empty(mp, &tp);
658 	if (error)
659 		return error;
660 
661 	error = xfs_dqinode_load_parent(tp, dpp);
662 	if (error == -ENOENT) {
663 		/* no quota directory yet, but we'll create one later */
664 		error = 0;
665 		goto out_trans;
666 	}
667 	if (error)
668 		goto out_trans;
669 
670 	if (XFS_IS_UQUOTA_ON(mp)) {
671 		error = xfs_dqinode_load(tp, *dpp, XFS_DQTYPE_USER,
672 				&qi->qi_uquotaip);
673 		if (error && error != -ENOENT)
674 			goto out_trans;
675 	}
676 
677 	if (XFS_IS_GQUOTA_ON(mp)) {
678 		error = xfs_dqinode_load(tp, *dpp, XFS_DQTYPE_GROUP,
679 				&qi->qi_gquotaip);
680 		if (error && error != -ENOENT)
681 			goto out_trans;
682 	}
683 
684 	if (XFS_IS_PQUOTA_ON(mp)) {
685 		error = xfs_dqinode_load(tp, *dpp, XFS_DQTYPE_PROJ,
686 				&qi->qi_pquotaip);
687 		if (error && error != -ENOENT)
688 			goto out_trans;
689 	}
690 
691 	error = 0;
692 out_trans:
693 	xfs_trans_cancel(tp);
694 	return error;
695 }
696 
697 /* Create quota inodes in the metadata directory tree. */
698 STATIC int
699 xfs_qm_create_metadir_qinos(
700 	struct xfs_mount	*mp,
701 	struct xfs_quotainfo	*qi,
702 	struct xfs_inode	**dpp)
703 {
704 	int			error;
705 
706 	if (!*dpp) {
707 		error = xfs_dqinode_mkdir_parent(mp, dpp);
708 		if (error && error != -EEXIST)
709 			return error;
710 	}
711 
712 	if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
713 		error = xfs_dqinode_metadir_create(*dpp, XFS_DQTYPE_USER,
714 				&qi->qi_uquotaip);
715 		if (error)
716 			return error;
717 	}
718 
719 	if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
720 		error = xfs_dqinode_metadir_create(*dpp, XFS_DQTYPE_GROUP,
721 				&qi->qi_gquotaip);
722 		if (error)
723 			return error;
724 	}
725 
726 	if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
727 		error = xfs_dqinode_metadir_create(*dpp, XFS_DQTYPE_PROJ,
728 				&qi->qi_pquotaip);
729 		if (error)
730 			return error;
731 	}
732 
733 	return 0;
734 }
735 
736 /*
737  * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for
738  * creating quota files on a metadir filesystem.
739  */
740 STATIC int
741 xfs_qm_prep_metadir_sb(
742 	struct xfs_mount	*mp)
743 {
744 	struct xfs_trans	*tp;
745 	int			error;
746 
747 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
748 	if (error)
749 		return error;
750 
751 	spin_lock(&mp->m_sb_lock);
752 
753 	xfs_add_quota(mp);
754 
755 	/* qflags will get updated fully _after_ quotacheck */
756 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
757 
758 	spin_unlock(&mp->m_sb_lock);
759 	xfs_log_sb(tp);
760 
761 	return xfs_trans_commit(tp);
762 }
763 
764 /*
765  * Load existing quota inodes or create them.  Since this is a V5 filesystem,
766  * we don't have to deal with the grp/prjquota switcheroo thing from V4.
767  */
768 STATIC int
769 xfs_qm_init_metadir_qinos(
770 	struct xfs_mount	*mp)
771 {
772 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
773 	struct xfs_inode	*dp = NULL;
774 	int			error;
775 
776 	if (!xfs_has_quota(mp)) {
777 		error = xfs_qm_prep_metadir_sb(mp);
778 		if (error)
779 			return error;
780 	}
781 
782 	error = xfs_qm_load_metadir_qinos(mp, qi, &dp);
783 	if (error)
784 		goto out_err;
785 
786 	error = xfs_qm_create_metadir_qinos(mp, qi, &dp);
787 	if (error)
788 		goto out_err;
789 
790 	xfs_irele(dp);
791 	return 0;
792 out_err:
793 	xfs_qm_destroy_quotainos(mp->m_quotainfo);
794 	if (dp)
795 		xfs_irele(dp);
796 	return error;
797 }
798 
799 /*
800  * This initializes all the quota information that's kept in the
801  * mount structure.
802  */
803 STATIC int
804 xfs_qm_init_quotainfo(
805 	struct xfs_mount	*mp)
806 {
807 	struct xfs_quotainfo	*qinf;
808 	int			error;
809 
810 	ASSERT(XFS_IS_QUOTA_ON(mp));
811 
812 	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
813 					GFP_KERNEL | __GFP_NOFAIL);
814 
815 	error = list_lru_init(&qinf->qi_lru);
816 	if (error)
817 		goto out_free_qinf;
818 
819 	/*
820 	 * See if the quota inodes are set up, and if not, allocate them,
821 	 * and change the superblock accordingly.
822 	 */
823 	if (xfs_has_metadir(mp))
824 		error = xfs_qm_init_metadir_qinos(mp);
825 	else
826 		error = xfs_qm_init_quotainos(mp);
827 	if (error)
828 		goto out_free_lru;
829 
830 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
831 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
832 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
833 	mutex_init(&qinf->qi_tree_lock);
834 
835 	/* mutex used to serialize quotaoffs */
836 	mutex_init(&qinf->qi_quotaofflock);
837 
838 	/* Precalc some constants */
839 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
840 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
841 	if (xfs_has_bigtime(mp)) {
842 		qinf->qi_expiry_min =
843 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
844 		qinf->qi_expiry_max =
845 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
846 	} else {
847 		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
848 		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
849 	}
850 	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
851 			qinf->qi_expiry_max);
852 
853 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
854 
855 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
856 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
857 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
858 
859 	if (XFS_IS_UQUOTA_ON(mp))
860 		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
861 	if (XFS_IS_GQUOTA_ON(mp))
862 		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
863 	if (XFS_IS_PQUOTA_ON(mp))
864 		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
865 
866 	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
867 					   mp->m_super->s_id);
868 	if (!qinf->qi_shrinker) {
869 		error = -ENOMEM;
870 		goto out_free_inos;
871 	}
872 
873 	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
874 	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
875 	qinf->qi_shrinker->private_data = qinf;
876 
877 	shrinker_register(qinf->qi_shrinker);
878 
879 	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
880 	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
881 
882 	return 0;
883 
884 out_free_inos:
885 	mutex_destroy(&qinf->qi_quotaofflock);
886 	mutex_destroy(&qinf->qi_tree_lock);
887 	xfs_qm_destroy_quotainos(qinf);
888 out_free_lru:
889 	list_lru_destroy(&qinf->qi_lru);
890 out_free_qinf:
891 	kfree(qinf);
892 	mp->m_quotainfo = NULL;
893 	return error;
894 }
895 
896 /*
897  * Gets called when unmounting a filesystem or when all quotas get
898  * turned off.
899  * This releases the quota inodes, destroys the locks and frees the quotainfo.
900  */
901 void
902 xfs_qm_destroy_quotainfo(
903 	struct xfs_mount	*mp)
904 {
905 	struct xfs_quotainfo	*qi;
906 
907 	qi = mp->m_quotainfo;
908 	ASSERT(qi != NULL);
909 
910 	shrinker_free(qi->qi_shrinker);
911 	list_lru_destroy(&qi->qi_lru);
912 	xfs_qm_destroy_quotainos(qi);
913 	mutex_destroy(&qi->qi_tree_lock);
914 	mutex_destroy(&qi->qi_quotaofflock);
915 	kfree(qi);
916 	mp->m_quotainfo = NULL;
917 }
918 
919 static inline enum xfs_metafile_type
920 xfs_qm_metafile_type(
921 	unsigned int		flags)
922 {
923 	if (flags & XFS_QMOPT_UQUOTA)
924 		return XFS_METAFILE_USRQUOTA;
925 	else if (flags & XFS_QMOPT_GQUOTA)
926 		return XFS_METAFILE_GRPQUOTA;
927 	return XFS_METAFILE_PRJQUOTA;
928 }
929 
930 /*
931  * Create an inode and return with a reference already taken, but unlocked.
932  * This is how we create quota inodes.
933  */
934 STATIC int
935 xfs_qm_qino_alloc(
936 	struct xfs_mount	*mp,
937 	struct xfs_inode	**ipp,
938 	unsigned int		flags)
939 {
940 	struct xfs_trans	*tp;
941 	enum xfs_metafile_type	metafile_type = xfs_qm_metafile_type(flags);
942 	int			error;
943 	bool			need_alloc = true;
944 
945 	*ipp = NULL;
946 	/*
947 	 * With a superblock that doesn't have a separate pquotino, we
948 	 * share an inode between gquota and pquota. If the on-disk
949 	 * superblock has GQUOTA and the filesystem is now mounted
950 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
951 	 * vice-versa.
952 	 */
953 	if (!xfs_has_pquotino(mp) &&
954 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
955 		xfs_ino_t ino = NULLFSINO;
956 
957 		if ((flags & XFS_QMOPT_PQUOTA) &&
958 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
959 			ino = mp->m_sb.sb_gquotino;
960 			if (XFS_IS_CORRUPT(mp,
961 					   mp->m_sb.sb_pquotino != NULLFSINO)) {
962 				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
963 				return -EFSCORRUPTED;
964 			}
965 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
966 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
967 			ino = mp->m_sb.sb_pquotino;
968 			if (XFS_IS_CORRUPT(mp,
969 					   mp->m_sb.sb_gquotino != NULLFSINO)) {
970 				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
971 				return -EFSCORRUPTED;
972 			}
973 		}
974 		if (ino != NULLFSINO) {
975 			error = xfs_metafile_iget(mp, ino, metafile_type, ipp);
976 			if (error)
977 				return error;
978 
979 			mp->m_sb.sb_gquotino = NULLFSINO;
980 			mp->m_sb.sb_pquotino = NULLFSINO;
981 			need_alloc = false;
982 		}
983 	}
984 
985 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
986 			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
987 			0, 0, &tp);
988 	if (error)
989 		return error;
990 
991 	if (need_alloc) {
992 		struct xfs_icreate_args	args = {
993 			.mode		= S_IFREG,
994 			.flags		= XFS_ICREATE_UNLINKABLE,
995 		};
996 		xfs_ino_t	ino;
997 
998 		error = xfs_dialloc(&tp, &args, &ino);
999 		if (!error)
1000 			error = xfs_icreate(tp, ino, &args, ipp);
1001 		if (error) {
1002 			xfs_trans_cancel(tp);
1003 			return error;
1004 		}
1005 		if (xfs_has_metadir(mp))
1006 			xfs_metafile_set_iflag(tp, *ipp, metafile_type);
1007 	}
1008 
1009 	/*
1010 	 * Make the changes in the superblock, and log those too.
1011 	 * The sbfields arg may contain fields other than *QUOTINO;
1012 	 * VERSIONNUM for example.
1013 	 */
1014 	spin_lock(&mp->m_sb_lock);
1015 	if (flags & XFS_QMOPT_SBVERSION) {
1016 		ASSERT(!xfs_has_quota(mp));
1017 
1018 		xfs_add_quota(mp);
1019 		mp->m_sb.sb_uquotino = NULLFSINO;
1020 		mp->m_sb.sb_gquotino = NULLFSINO;
1021 		mp->m_sb.sb_pquotino = NULLFSINO;
1022 
1023 		/* qflags will get updated fully _after_ quotacheck */
1024 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
1025 	}
1026 	if (flags & XFS_QMOPT_UQUOTA)
1027 		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
1028 	else if (flags & XFS_QMOPT_GQUOTA)
1029 		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
1030 	else
1031 		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
1032 	spin_unlock(&mp->m_sb_lock);
1033 	xfs_log_sb(tp);
1034 
1035 	error = xfs_trans_commit(tp);
1036 	if (error) {
1037 		ASSERT(xfs_is_shutdown(mp));
1038 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
1039 	}
1040 	if (need_alloc) {
1041 		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
1042 		xfs_finish_inode_setup(*ipp);
1043 	}
1044 	return error;
1045 }
1046 
1047 
1048 STATIC void
1049 xfs_qm_reset_dqcounts(
1050 	struct xfs_mount	*mp,
1051 	struct xfs_buf		*bp,
1052 	xfs_dqid_t		id,
1053 	xfs_dqtype_t		type)
1054 {
1055 	struct xfs_dqblk	*dqb;
1056 	int			j;
1057 
1058 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
1059 
1060 	/*
1061 	 * Reset all counters and timers. They'll be
1062 	 * started afresh by xfs_qm_quotacheck.
1063 	 */
1064 #ifdef DEBUG
1065 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
1066 		sizeof(struct xfs_dqblk);
1067 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
1068 #endif
1069 	dqb = bp->b_addr;
1070 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
1071 		struct xfs_disk_dquot	*ddq;
1072 
1073 		ddq = (struct xfs_disk_dquot *)&dqb[j];
1074 
1075 		/*
1076 		 * Do a sanity check, and if needed, repair the dqblk. Don't
1077 		 * output any warnings because it's perfectly possible to
1078 		 * find uninitialised dquot blks. See comment in
1079 		 * xfs_dquot_verify.
1080 		 */
1081 		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
1082 		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
1083 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
1084 
1085 		/*
1086 		 * Reset the type in case we are reusing the group quota file
1087 		 * for project quotas or vice versa.
1088 		 */
1089 		ddq->d_type = type;
1090 		ddq->d_bcount = 0;
1091 		ddq->d_icount = 0;
1092 		ddq->d_rtbcount = 0;
1093 
1094 		/*
1095 		 * dquot id 0 stores the default grace period and the maximum
1096 		 * warning limit that were set by the administrator, so we
1097 		 * should not reset them.
1098 		 */
1099 		if (ddq->d_id != 0) {
1100 			ddq->d_btimer = 0;
1101 			ddq->d_itimer = 0;
1102 			ddq->d_rtbtimer = 0;
1103 			ddq->d_bwarns = 0;
1104 			ddq->d_iwarns = 0;
1105 			ddq->d_rtbwarns = 0;
1106 			if (xfs_has_bigtime(mp))
1107 				ddq->d_type |= XFS_DQTYPE_BIGTIME;
1108 		}
1109 
1110 		if (xfs_has_crc(mp)) {
1111 			xfs_update_cksum((char *)&dqb[j],
1112 					 sizeof(struct xfs_dqblk),
1113 					 XFS_DQUOT_CRC_OFF);
1114 		}
1115 	}
1116 }
1117 
1118 STATIC int
1119 xfs_qm_reset_dqcounts_all(
1120 	struct xfs_mount	*mp,
1121 	xfs_dqid_t		firstid,
1122 	xfs_fsblock_t		bno,
1123 	xfs_filblks_t		blkcnt,
1124 	xfs_dqtype_t		type,
1125 	struct list_head	*buffer_list)
1126 {
1127 	struct xfs_buf		*bp;
1128 	int			error = 0;
1129 
1130 	ASSERT(blkcnt > 0);
1131 
1132 	/*
1133 	 * The blkcnt arg can be a very big number, and might even be
1134 	 * larger than the log itself. So, we have to break it up into
1135 	 * manageable-sized transactions.
1136 	 * Note that we don't start a permanent transaction here; we might
1137 	 * not be able to get a log reservation for the whole thing up front,
1138 	 * and we don't really care to either, because we just discard
1139 	 * everything if we were to crash in the middle of this loop.
1140 	 */
1141 	while (blkcnt--) {
1142 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1143 			      XFS_FSB_TO_DADDR(mp, bno),
1144 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1145 			      &xfs_dquot_buf_ops);
1146 
1147 		/*
1148 		 * CRC and validation errors will return -EFSCORRUPTED here. If
1149 		 * this occurs, re-read without CRC validation so that we can
1150 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
1151 		 * will leave a trace in the log indicating corruption has
1152 		 * been detected.
1153 		 */
1154 		if (error == -EFSCORRUPTED) {
1155 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1156 				      XFS_FSB_TO_DADDR(mp, bno),
1157 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1158 				      NULL);
1159 		}
1160 
1161 		if (error)
1162 			break;
1163 
1164 		/*
1165 		 * A corrupt buffer might not have a verifier attached, so
1166 		 * make sure we have the correct one attached before writeback
1167 		 * occurs.
1168 		 */
1169 		bp->b_ops = &xfs_dquot_buf_ops;
1170 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1171 		xfs_buf_delwri_queue(bp, buffer_list);
1172 		xfs_buf_relse(bp);
1173 
1174 		/* go on to the next block. */
1175 		bno++;
1176 		firstid += mp->m_quotainfo->qi_dqperchunk;
1177 	}
1178 
1179 	return error;
1180 }
1181 
1182 /*
1183  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
1184  * counters for every chunk of dquots that we find.
1185  */
1186 STATIC int
1187 xfs_qm_reset_dqcounts_buf(
1188 	struct xfs_mount	*mp,
1189 	struct xfs_inode	*qip,
1190 	xfs_dqtype_t		type,
1191 	struct list_head	*buffer_list)
1192 {
1193 	struct xfs_bmbt_irec	*map;
1194 	int			i, nmaps;	/* number of map entries */
1195 	int			error;		/* return value */
1196 	xfs_fileoff_t		lblkno;
1197 	xfs_filblks_t		maxlblkcnt;
1198 	xfs_dqid_t		firstid;
1199 	xfs_fsblock_t		rablkno;
1200 	xfs_filblks_t		rablkcnt;
1201 
1202 	error = 0;
1203 	/*
1204 	 * This looks racy, but we can't keep an inode lock across a
1205 	 * trans_reserve. But, this gets called during quotacheck, and that
1206 	 * happens only at mount time, which is single threaded.
1207 	 */
1208 	if (qip->i_nblocks == 0)
1209 		return 0;
1210 
1211 	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
1212 			GFP_KERNEL | __GFP_NOFAIL);
1213 
1214 	lblkno = 0;
1215 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1216 	do {
1217 		uint		lock_mode;
1218 
1219 		nmaps = XFS_DQITER_MAP_SIZE;
1220 		/*
1221 		 * We aren't changing the inode itself. Just changing
1222 		 * some of its data. No new blocks are added here, and
1223 		 * the inode is never added to the transaction.
1224 		 */
1225 		lock_mode = xfs_ilock_data_map_shared(qip);
1226 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1227 				       map, &nmaps, 0);
1228 		xfs_iunlock(qip, lock_mode);
1229 		if (error)
1230 			break;
1231 
1232 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1233 		for (i = 0; i < nmaps; i++) {
1234 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1235 			ASSERT(map[i].br_blockcount);
1236 
1237 
1238 			lblkno += map[i].br_blockcount;
1239 
1240 			if (map[i].br_startblock == HOLESTARTBLOCK)
1241 				continue;
1242 
1243 			firstid = (xfs_dqid_t) map[i].br_startoff *
1244 				mp->m_quotainfo->qi_dqperchunk;
1245 			/*
1246 			 * Do a read-ahead on the next extent.
1247 			 */
1248 			if ((i+1 < nmaps) &&
1249 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1250 				rablkcnt =  map[i+1].br_blockcount;
1251 				rablkno = map[i+1].br_startblock;
1252 				while (rablkcnt--) {
1253 					xfs_buf_readahead(mp->m_ddev_targp,
1254 					       XFS_FSB_TO_DADDR(mp, rablkno),
1255 					       mp->m_quotainfo->qi_dqchunklen,
1256 					       &xfs_dquot_buf_ops);
1257 					rablkno++;
1258 				}
1259 			}
1260 			/*
1261 			 * Iterate through all the blocks in the extent and
1262 			 * reset the counters of all the dquots inside them.
1263 			 */
1264 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1265 						   map[i].br_startblock,
1266 						   map[i].br_blockcount,
1267 						   type, buffer_list);
1268 			if (error)
1269 				goto out;
1270 		}
1271 	} while (nmaps > 0);
1272 
1273 out:
1274 	kfree(map);
1275 	return error;
1276 }
1277 
1278 /*
1279  * Called by dqusage_adjust in doing a quotacheck.
1280  *
1281  * Given the inode and a dquot id, this updates both the incore dquot as well
1282  * as the buffer copy. This is so that once the quotacheck is done, we can
1283  * just log all the buffers, as opposed to logging numerous updates to
1284  * individual dquots.
1285  */
1286 STATIC int
1287 xfs_qm_quotacheck_dqadjust(
1288 	struct xfs_inode	*ip,
1289 	xfs_dqtype_t		type,
1290 	xfs_qcnt_t		nblks,
1291 	xfs_qcnt_t		rtblks)
1292 {
1293 	struct xfs_mount	*mp = ip->i_mount;
1294 	struct xfs_dquot	*dqp;
1295 	xfs_dqid_t		id;
1296 	int			error;
1297 
1298 	id = xfs_qm_id_for_quotatype(ip, type);
1299 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1300 	if (error) {
1301 		/*
1302 		 * Shouldn't be able to turn off quotas here.
1303 		 */
1304 		ASSERT(error != -ESRCH);
1305 		ASSERT(error != -ENOENT);
1306 		return error;
1307 	}
1308 
1309 	trace_xfs_dqadjust(dqp);
1310 
1311 	/*
1312 	 * Adjust the inode count and the block count to reflect this inode's
1313 	 * resource usage.
1314 	 */
1315 	dqp->q_ino.count++;
1316 	dqp->q_ino.reserved++;
1317 	if (nblks) {
1318 		dqp->q_blk.count += nblks;
1319 		dqp->q_blk.reserved += nblks;
1320 	}
1321 	if (rtblks) {
1322 		dqp->q_rtb.count += rtblks;
1323 		dqp->q_rtb.reserved += rtblks;
1324 	}
1325 
1326 	/*
1327 	 * Set default limits, adjust timers (since we changed usages).
1328 	 *
1329 	 * There are no timers for the default values set in the root dquot.
1330 	 */
1331 	if (dqp->q_id) {
1332 		xfs_qm_adjust_dqlimits(dqp);
1333 		xfs_qm_adjust_dqtimers(dqp);
1334 	}
1335 
1336 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1337 	xfs_qm_dqput(dqp);
1338 	return 0;
1339 }
1340 
1341 /*
1342  * Callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find its
1343  * dquots and update them to account for resources taken by that inode.
1344  */
1345 /* ARGSUSED */
1346 STATIC int
1347 xfs_qm_dqusage_adjust(
1348 	struct xfs_mount	*mp,
1349 	struct xfs_trans	*tp,
1350 	xfs_ino_t		ino,
1351 	void			*data)
1352 {
1353 	struct xfs_inode	*ip;
1354 	xfs_filblks_t		nblks, rtblks;
1355 	unsigned int		lock_mode;
1356 	int			error;
1357 
1358 	ASSERT(XFS_IS_QUOTA_ON(mp));
1359 
1360 	/*
1361 	 * rootino must have its resources accounted for; not so with the quota
1362 	 * inodes.
1363 	 */
1364 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1365 		return 0;
1366 
1367 	/*
1368 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1369 	 * at mount time and therefore nobody will be racing chown/chproj.
1370 	 */
1371 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1372 	if (error == -EINVAL || error == -ENOENT)
1373 		return 0;
1374 	if (error)
1375 		return error;
1376 
1377 	/*
1378 	 * Reload the incore unlinked list to avoid failure in inodegc.
1379 	 * Use an unlocked check here because unrecovered unlinked inodes
1380 	 * should be somewhat rare.
1381 	 */
1382 	if (xfs_inode_unlinked_incomplete(ip)) {
1383 		error = xfs_inode_reload_unlinked(ip);
1384 		if (error) {
1385 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1386 			goto error0;
1387 		}
1388 	}
1389 
1390 	/* Metadata directory files are not accounted to user-visible quotas. */
1391 	if (xfs_is_metadir_inode(ip))
1392 		goto error0;
1393 
1394 	ASSERT(ip->i_delayed_blks == 0);
1395 
1396 	lock_mode = xfs_ilock_data_map_shared(ip);
1397 	if (XFS_IS_REALTIME_INODE(ip)) {
1398 		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1399 		if (error) {
1400 			xfs_iunlock(ip, lock_mode);
1401 			goto error0;
1402 		}
1403 	}
1404 	xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
1405 	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1406 	xfs_iunlock(ip, lock_mode);
1407 
1408 	/*
1409 	 * Add the (disk blocks and inode) resources occupied by this
1410 	 * inode to its dquots. We do this adjustment in the incore dquot,
1411 	 * and also copy the changes to its buffer.
1412 	 * We don't care about putting these changes in a transaction
1413 	 * envelope because if we crash in the middle of a 'quotacheck'
1414 	 * we have to start from the beginning anyway.
1415 	 * Once we're done, we'll log all the dquot bufs.
1416 	 *
1417 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1418 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1419 	 */
1420 	if (XFS_IS_UQUOTA_ON(mp)) {
1421 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1422 				rtblks);
1423 		if (error)
1424 			goto error0;
1425 	}
1426 
1427 	if (XFS_IS_GQUOTA_ON(mp)) {
1428 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1429 				rtblks);
1430 		if (error)
1431 			goto error0;
1432 	}
1433 
1434 	if (XFS_IS_PQUOTA_ON(mp)) {
1435 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1436 				rtblks);
1437 		if (error)
1438 			goto error0;
1439 	}
1440 
1441 error0:
1442 	xfs_irele(ip);
1443 	return error;
1444 }
1445 
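/*
 * Flush one dirty dquot to its backing buffer during quotacheck and queue
 * that buffer on the caller's delwri list for a single batched submission.
 */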
1446 STATIC int
1447 xfs_qm_flush_one(
1448 	struct xfs_dquot	*dqp,
1449 	void			*data)
1450 {
1451 	struct xfs_mount	*mp = dqp->q_mount;
1452 	struct list_head	*buffer_list = data;
1453 	struct xfs_buf		*bp = NULL;
1454 	int			error = 0;
1455 
1456 	xfs_dqlock(dqp);
1457 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1458 		goto out_unlock;
1459 	if (!XFS_DQ_IS_DIRTY(dqp))
1460 		goto out_unlock;
1461 
1462 	/*
1463 	 * The only way the dquot is already flush locked by the time quotacheck
1464 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1465 	 * it for the final time. Quotacheck collects all dquot bufs in the
1466 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1467 	 * possibly queued it for I/O. The only way out is to push the buffer to
1468 	 * cycle the flush lock.
1469 	 */
1470 	if (!xfs_dqflock_nowait(dqp)) {
1471 		/* buf is pinned in-core by delwri list */
1472 		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1473 				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1474 		if (error)
1475 			goto out_unlock;
1476 
1477 		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1478 			error = -EAGAIN;
1479 			xfs_buf_relse(bp);
1480 			goto out_unlock;
1481 		}
1482 		xfs_buf_unlock(bp);
1483 
1484 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1485 		xfs_buf_rele(bp);
1486 
1487 		error = -EAGAIN;
1488 		goto out_unlock;
1489 	}
1490 
1491 	error = xfs_qm_dqflush(dqp, &bp);
1492 	if (error)
1493 		goto out_unlock;
1494 
1495 	xfs_buf_delwri_queue(bp, buffer_list);
1496 	xfs_buf_relse(bp);
1497 out_unlock:
1498 	xfs_dqunlock(dqp);
1499 	return error;
1500 }
1501 
1502 /*
1503  * Walk through all the filesystem inodes and construct a consistent view
1504  * of the disk quota world. If the quotacheck fails, disable quotas.
1505  */
1506 STATIC int
1507 xfs_qm_quotacheck(
1508 	xfs_mount_t	*mp)
1509 {
1510 	int			error, error2;
1511 	uint			flags;
1512 	LIST_HEAD		(buffer_list);
1513 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1514 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1515 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1516 
1517 	flags = 0;
1518 
1519 	ASSERT(uip || gip || pip);
1520 	ASSERT(XFS_IS_QUOTA_ON(mp));
1521 
1522 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1523 
1524 	/*
1525 	 * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1526 	 * their counters to zero. We need a clean slate.
1527 	 * We don't log our changes till later.
1528 	 */
1529 	if (uip) {
1530 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1531 					 &buffer_list);
1532 		if (error)
1533 			goto error_return;
1534 		flags |= XFS_UQUOTA_CHKD;
1535 	}
1536 
1537 	if (gip) {
1538 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1539 					 &buffer_list);
1540 		if (error)
1541 			goto error_return;
1542 		flags |= XFS_GQUOTA_CHKD;
1543 	}
1544 
1545 	if (pip) {
1546 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1547 					 &buffer_list);
1548 		if (error)
1549 			goto error_return;
1550 		flags |= XFS_PQUOTA_CHKD;
1551 	}
1552 
1553 	xfs_set_quotacheck_running(mp);
1554 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1555 			NULL);
1556 	xfs_clear_quotacheck_running(mp);
1557 
1558 	/*
1559 	 * On error, the inode walk may have partially populated the dquot
1560 	 * caches.  We must purge them before disabling quota and tearing down
1561 	 * the quotainfo, or else the dquots will leak.
1562 	 */
1563 	if (error)
1564 		goto error_purge;
1565 
1566 	/*
1567 	 * We've made all the changes that we need to make incore.  Flush them
1568 	 * down to disk buffers if everything was updated successfully.
1569 	 */
1570 	if (XFS_IS_UQUOTA_ON(mp)) {
1571 		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1572 					  &buffer_list);
1573 	}
1574 	if (XFS_IS_GQUOTA_ON(mp)) {
1575 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1576 					   &buffer_list);
1577 		if (!error)
1578 			error = error2;
1579 	}
1580 	if (XFS_IS_PQUOTA_ON(mp)) {
1581 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1582 					   &buffer_list);
1583 		if (!error)
1584 			error = error2;
1585 	}
1586 
1587 	error2 = xfs_buf_delwri_submit(&buffer_list);
1588 	if (!error)
1589 		error = error2;
1590 
1591 	/*
1592 	 * We can get this error if we couldn't do a dquot allocation inside
1593 	 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1594 	 * dirty dquots that might be cached, we just want to get rid of them
1595 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1596 	 * at this point (because we intentionally didn't in dqget_noattach).
1597 	 */
1598 	if (error)
1599 		goto error_purge;
1600 
1601 	/*
1602 	 * If one type of quota is off, then it will lose its
1603 	 * quotachecked status, since we won't be doing accounting for
1604 	 * that type anymore.
1605 	 */
1606 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1607 	mp->m_qflags |= flags;
1608 
1609 error_return:
1610 	xfs_buf_delwri_cancel(&buffer_list);
1611 
1612 	if (error) {
1613 		xfs_warn(mp,
1614 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1615 			error);
1616 		/*
1617 		 * We must turn off quotas.
1618 		 */
1619 		ASSERT(mp->m_quotainfo != NULL);
1620 		xfs_qm_destroy_quotainfo(mp);
1621 		if (xfs_mount_reset_sbqflags(mp)) {
1622 			xfs_warn(mp,
1623 				"Quotacheck: Failed to reset quota flags.");
1624 		}
1625 		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1626 	} else {
1627 		xfs_notice(mp, "Quotacheck: Done.");
1628 		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1629 	}
1630 
1631 	return error;
1632 
1633 error_purge:
1634 	/*
1635 	 * On error, we may have inodes queued for inactivation. This may try
1636 	 * to attach dquots to the inode before running cleanup operations on
1637 	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1638 	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
1639 	 * pending inodegc operations before we purge the dquots from memory,
1640 	 * ensuring that background inactivation is idle whilst we turn off
1641 	 * quotas.
1642 	 */
1643 	xfs_inodegc_flush(mp);
1644 	xfs_qm_dqpurge_all(mp);
1645 	goto error_return;
1646 
1647 }
1648 
1649 /*
1650  * This is called from xfs_mountfs to start quotas and initialize all
1651  * necessary data structures like quotainfo.  This is also responsible for
1652  * running a quotacheck as necessary.  We are guaranteed that the superblock
1653  * is consistently read in at this point.
1654  *
1655  * If we fail here, the mount will continue with quota turned off. We don't
1656  * need to indicate success or failure at all.
1657  */
1658 void
1659 xfs_qm_mount_quotas(
1660 	struct xfs_mount	*mp)
1661 {
1662 	int			error = 0;
1663 	uint			sbf;
1664 
1665 	/*
1666 	 * If quota on realtime volumes is not supported, disable quotas
1667 	 * immediately.  We only support rtquota if rtgroups are enabled to
1668 	 * avoid problems with older kernels.
1669 	 */
1670 	if (mp->m_sb.sb_rextents && !xfs_has_rtgroups(mp)) {
1671 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1672 		mp->m_qflags = 0;
1673 		goto write_changes;
1674 	}
1675 
1676 	ASSERT(XFS_IS_QUOTA_ON(mp));
1677 
1678 	/*
1679 	 * Allocate the quotainfo structure inside the mount struct,
1680 	 * create the quotainode(s), and change/rev the superblock if necessary.
1681 	 */
1682 	error = xfs_qm_init_quotainfo(mp);
1683 	if (error) {
1684 		/*
1685 		 * We must turn off quotas.
1686 		 */
1687 		ASSERT(mp->m_quotainfo == NULL);
1688 		mp->m_qflags = 0;
1689 		goto write_changes;
1690 	}
1691 	/*
1692 	 * If any of the quotas are not consistent, do a quotacheck.
1693 	 */
1694 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1695 		error = xfs_qm_quotacheck(mp);
1696 		if (error) {
1697 			/* Quotacheck failed and disabled quotas. */
1698 			return;
1699 		}
1700 	}
1701 	/*
1702 	 * If one type of quota is off, then it will lose its
1703 	 * quotachecked status, since we won't be doing accounting for
1704 	 * that type anymore.
1705 	 */
1706 	if (!XFS_IS_UQUOTA_ON(mp))
1707 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1708 	if (!XFS_IS_GQUOTA_ON(mp))
1709 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1710 	if (!XFS_IS_PQUOTA_ON(mp))
1711 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1712 
1713  write_changes:
1714 	/*
1715 	 * We actually don't have to acquire the m_sb_lock at all.
1716 	 * This can only be called from mount, and that's single threaded. XXX
1717 	 */
1718 	spin_lock(&mp->m_sb_lock);
1719 	sbf = mp->m_sb.sb_qflags;
1720 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1721 	spin_unlock(&mp->m_sb_lock);
1722 
1723 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1724 		if (xfs_sync_sb(mp, false)) {
1725 			/*
1726 			 * We could only have been turning quotas off.
1727 			 * We aren't in very good shape, actually, because
1728 			 * the incore structures are convinced that quotas are
1729 			 * off, but the on-disk superblock doesn't know that!
1730 			 */
1731 			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1732 			xfs_alert(mp, "%s: Superblock update failed!",
1733 				__func__);
1734 		}
1735 	}
1736 
1737 	if (error) {
1738 		xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error);
1739 		return;
1740 	}
1741 }
1742 
1743 /*
1744  * Load the inode for a given type of quota, assuming that the sb fields have
1745  * been sorted out.  This is not true when switching quota types on a V4
1746  * filesystem, so do not use this function for that.
1747  *
1748  * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
1749  * success; or a negative errno.
1750  */
1751 int
1752 xfs_qm_qino_load(
1753 	struct xfs_mount	*mp,
1754 	xfs_dqtype_t		type,
1755 	struct xfs_inode	**ipp)
1756 {
1757 	struct xfs_trans	*tp;
1758 	struct xfs_inode	*dp = NULL;
1759 	int			error;
1760 
1761 	error = xfs_trans_alloc_empty(mp, &tp);
1762 	if (error)
1763 		return error;
1764 
1765 	if (xfs_has_metadir(mp)) {
1766 		error = xfs_dqinode_load_parent(tp, &dp);
1767 		if (error)
1768 			goto out_cancel;
1769 	}
1770 
1771 	error = xfs_dqinode_load(tp, dp, type, ipp);
1772 	if (dp)
1773 		xfs_irele(dp);
1774 out_cancel:
1775 	xfs_trans_cancel(tp);
1776 	return error;
1777 }
1778 
1779 /*
1780  * This is called after the superblock has been read in and we're ready to
1781  * iget the quota inodes.
1782  */
1783 STATIC int
1784 xfs_qm_init_quotainos(
1785 	xfs_mount_t	*mp)
1786 {
1787 	struct xfs_inode	*uip = NULL;
1788 	struct xfs_inode	*gip = NULL;
1789 	struct xfs_inode	*pip = NULL;
1790 	int			error;
1791 	uint			flags = 0;
1792 
1793 	ASSERT(mp->m_quotainfo);
1794 
1795 	/*
1796 	 * Get the uquota, gquota and pquota inodes
1797 	 */
1798 	if (xfs_has_quota(mp)) {
1799 		if (XFS_IS_UQUOTA_ON(mp) &&
1800 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1801 			ASSERT(mp->m_sb.sb_uquotino > 0);
1802 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
1803 			if (error)
1804 				return error;
1805 		}
1806 		if (XFS_IS_GQUOTA_ON(mp) &&
1807 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1808 			ASSERT(mp->m_sb.sb_gquotino > 0);
1809 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
1810 			if (error)
1811 				goto error_rele;
1812 		}
1813 		if (XFS_IS_PQUOTA_ON(mp) &&
1814 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1815 			ASSERT(mp->m_sb.sb_pquotino > 0);
1816 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
1817 			if (error)
1818 				goto error_rele;
1819 		}
1820 	} else {
1821 		flags |= XFS_QMOPT_SBVERSION;
1822 	}
1823 
1824 	/*
1825 	 * Create the three inodes, if they don't exist already. The changes
1826 	 * made above will get added to a transaction and logged in one of
1827 	 * the qino_alloc calls below.  If the device is readonly,
1828 	 * temporarily switch to read-write to do this.
1829 	 */
1830 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1831 		error = xfs_qm_qino_alloc(mp, &uip,
1832 					      flags | XFS_QMOPT_UQUOTA);
1833 		if (error)
1834 			goto error_rele;
1835 
1836 		flags &= ~XFS_QMOPT_SBVERSION;
1837 	}
1838 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1839 		error = xfs_qm_qino_alloc(mp, &gip,
1840 					  flags | XFS_QMOPT_GQUOTA);
1841 		if (error)
1842 			goto error_rele;
1843 
1844 		flags &= ~XFS_QMOPT_SBVERSION;
1845 	}
1846 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1847 		error = xfs_qm_qino_alloc(mp, &pip,
1848 					  flags | XFS_QMOPT_PQUOTA);
1849 		if (error)
1850 			goto error_rele;
1851 	}
1852 
1853 	mp->m_quotainfo->qi_uquotaip = uip;
1854 	mp->m_quotainfo->qi_gquotaip = gip;
1855 	mp->m_quotainfo->qi_pquotaip = pip;
1856 
1857 	return 0;
1858 
1859 error_rele:
1860 	if (uip)
1861 		xfs_irele(uip);
1862 	if (gip)
1863 		xfs_irele(gip);
1864 	if (pip)
1865 		xfs_irele(pip);
1866 	return error;
1867 }
1868 
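/* Remove a dquot from the radix tree and destroy it. */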
1869 STATIC void
1870 xfs_qm_dqfree_one(
1871 	struct xfs_dquot	*dqp)
1872 {
1873 	struct xfs_mount	*mp = dqp->q_mount;
1874 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1875 
1876 	mutex_lock(&qi->qi_tree_lock);
1877 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1878 
1879 	qi->qi_dquots--;
1880 	mutex_unlock(&qi->qi_tree_lock);
1881 
1882 	xfs_qm_dqdestroy(dqp);
1883 }
1884 
1885 /* --------------- utility functions for vnodeops ---------------- */
1886 
1887 
1888 /*
1889  * Given an inode, a uid, gid and prid, make sure that we have
1890  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1891  * quotas by creating this file.
1892  * This also attaches dquot(s) to the given inode after locking it,
1893  * and returns the dquots corresponding to the uid and/or gid.
1894  *
1895  * in	: inode (unlocked)
1896  * out	: udquot, gdquot with references taken and unlocked
1897  */
1898 int
1899 xfs_qm_vop_dqalloc(
1900 	struct xfs_inode	*ip,
1901 	kuid_t			uid,
1902 	kgid_t			gid,
1903 	prid_t			prid,
1904 	uint			flags,
1905 	struct xfs_dquot	**O_udqpp,
1906 	struct xfs_dquot	**O_gdqpp,
1907 	struct xfs_dquot	**O_pdqpp)
1908 {
1909 	struct xfs_mount	*mp = ip->i_mount;
1910 	struct inode		*inode = VFS_I(ip);
1911 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1912 	struct xfs_dquot	*uq = NULL;
1913 	struct xfs_dquot	*gq = NULL;
1914 	struct xfs_dquot	*pq = NULL;
1915 	int			error;
1916 	uint			lockflags;
1917 
1918 	if (!XFS_IS_QUOTA_ON(mp))
1919 		return 0;
1920 
1921 	ASSERT(!xfs_is_metadir_inode(ip));
1922 
1923 	lockflags = XFS_ILOCK_EXCL;
1924 	xfs_ilock(ip, lockflags);
1925 
1926 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1927 		gid = inode->i_gid;
1928 
1929 	/*
1930 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1931 	 * if necessary. The dquot(s) will not be locked.
1932 	 */
1933 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1934 		error = xfs_qm_dqattach_locked(ip, true);
1935 		if (error) {
1936 			xfs_iunlock(ip, lockflags);
1937 			return error;
1938 		}
1939 	}
1940 
1941 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1942 		ASSERT(O_udqpp);
1943 		if (!uid_eq(inode->i_uid, uid)) {
1944 			/*
1945 			 * What we need is the dquot that has this uid, and
1946 			 * if we send the inode to dqget, the uid of the inode
1947 			 * takes priority over what's sent in the uid argument.
1948 			 * We must unlock the inode here before calling dqget if
1949 			 * we're not sending the inode, because otherwise
1950 			 * we'll deadlock by doing trans_reserve while
1951 			 * holding ilock.
1952 			 */
1953 			xfs_iunlock(ip, lockflags);
1954 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1955 					XFS_DQTYPE_USER, true, &uq);
1956 			if (error) {
1957 				ASSERT(error != -ENOENT);
1958 				return error;
1959 			}
1960 			/*
1961 			 * Get the ilock in the right order.
1962 			 */
1963 			xfs_dqunlock(uq);
1964 			lockflags = XFS_ILOCK_SHARED;
1965 			xfs_ilock(ip, lockflags);
1966 		} else {
1967 			/*
1968 			 * Take an extra reference, because we'll return
1969 			 * this to the caller.
1970 			 */
1971 			ASSERT(ip->i_udquot);
1972 			uq = xfs_qm_dqhold(ip->i_udquot);
1973 		}
1974 	}
1975 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1976 		ASSERT(O_gdqpp);
1977 		if (!gid_eq(inode->i_gid, gid)) {
1978 			xfs_iunlock(ip, lockflags);
1979 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1980 					XFS_DQTYPE_GROUP, true, &gq);
1981 			if (error) {
1982 				ASSERT(error != -ENOENT);
1983 				goto error_rele;
1984 			}
1985 			xfs_dqunlock(gq);
1986 			lockflags = XFS_ILOCK_SHARED;
1987 			xfs_ilock(ip, lockflags);
1988 		} else {
1989 			ASSERT(ip->i_gdquot);
1990 			gq = xfs_qm_dqhold(ip->i_gdquot);
1991 		}
1992 	}
1993 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1994 		ASSERT(O_pdqpp);
1995 		if (ip->i_projid != prid) {
1996 			xfs_iunlock(ip, lockflags);
1997 			error = xfs_qm_dqget(mp, prid,
1998 					XFS_DQTYPE_PROJ, true, &pq);
1999 			if (error) {
2000 				ASSERT(error != -ENOENT);
2001 				goto error_rele;
2002 			}
2003 			xfs_dqunlock(pq);
2004 			lockflags = XFS_ILOCK_SHARED;
2005 			xfs_ilock(ip, lockflags);
2006 		} else {
2007 			ASSERT(ip->i_pdquot);
2008 			pq = xfs_qm_dqhold(ip->i_pdquot);
2009 		}
2010 	}
2011 	trace_xfs_dquot_dqalloc(ip);
2012 
2013 	xfs_iunlock(ip, lockflags);
2014 	if (O_udqpp)
2015 		*O_udqpp = uq;
2016 	else
2017 		xfs_qm_dqrele(uq);
2018 	if (O_gdqpp)
2019 		*O_gdqpp = gq;
2020 	else
2021 		xfs_qm_dqrele(gq);
2022 	if (O_pdqpp)
2023 		*O_pdqpp = pq;
2024 	else
2025 		xfs_qm_dqrele(pq);
2026 	return 0;
2027 
2028 error_rele:
2029 	xfs_qm_dqrele(gq);
2030 	xfs_qm_dqrele(uq);
2031 	return error;
2032 }
2033 
2034 /*
2035  * Actually transfer ownership, and do dquot modifications.
2036  * These were already reserved.
2037  */
2038 struct xfs_dquot *
2039 xfs_qm_vop_chown(
2040 	struct xfs_trans	*tp,
2041 	struct xfs_inode	*ip,
2042 	struct xfs_dquot	**IO_olddq,
2043 	struct xfs_dquot	*newdq)
2044 {
2045 	struct xfs_dquot	*prevdq;
2046 	xfs_filblks_t		dblocks, rblocks;
2047 	bool			isrt = XFS_IS_REALTIME_INODE(ip);
2048 
2049 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2050 	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
2051 	ASSERT(!xfs_is_metadir_inode(ip));
2052 
2053 	/* old dquot */
2054 	prevdq = *IO_olddq;
2055 	ASSERT(prevdq);
2056 	ASSERT(prevdq != newdq);
2057 
2058 	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
2059 
2060 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
2061 			-(xfs_qcnt_t)dblocks);
2062 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
2063 			-(xfs_qcnt_t)rblocks);
2064 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
2065 
2066 	/* the sparkling new dquot */
2067 	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
2068 	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
2069 	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
2070 
2071 	/*
2072 	 * Back when we made quota reservations for the chown, we reserved the
2073 	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
2074 	 * switched the dquots, decrease the new dquot's block reservation
2075 	 * (having already bumped up the real counter) so that we don't have
2076 	 * any reservation to give back when we commit.
2077 	 */
2078 	xfs_trans_mod_dquot(tp, newdq,
2079 			isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
2080 			-ip->i_delayed_blks);
2081 
2082 	/*
2083 	 * Give the incore reservation for delalloc blocks back to the old
2084 	 * dquot.  We don't normally handle delalloc quota reservations
2085 	 * transactionally, so just lock the dquot and subtract from the
2086 	 * reservation.  Dirty the transaction because it's too late to turn
2087 	 * back now.
2088 	 */
2089 	tp->t_flags |= XFS_TRANS_DIRTY;
2090 	xfs_dqlock(prevdq);
2091 	if (isrt) {
2092 		ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
2093 		prevdq->q_rtb.reserved -= ip->i_delayed_blks;
2094 	} else {
2095 		ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
2096 		prevdq->q_blk.reserved -= ip->i_delayed_blks;
2097 	}
2098 	xfs_dqunlock(prevdq);
2099 
2100 	/*
2101 	 * Take an extra reference, because the inode is going to keep
2102 	 * this dquot pointer even after the trans_commit.
2103 	 */
2104 	*IO_olddq = xfs_qm_dqhold(newdq);
2105 
2106 	return prevdq;
2107 }
2108 
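/* Make sure the inodes involved in a rename have their dquots attached. */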
2109 int
2110 xfs_qm_vop_rename_dqattach(
2111 	struct xfs_inode	**i_tab)
2112 {
2113 	struct xfs_mount	*mp = i_tab[0]->i_mount;
2114 	int			i;
2115 
2116 	if (!XFS_IS_QUOTA_ON(mp))
2117 		return 0;
2118 
2119 	for (i = 0; (i < 4 && i_tab[i]); i++) {
2120 		struct xfs_inode	*ip = i_tab[i];
2121 		int			error;
2122 
2123 		/*
2124 		 * Watch out for duplicate entries in the table.
2125 		 */
2126 		if (i == 0 || ip != i_tab[i-1]) {
2127 			if (XFS_NOT_DQATTACHED(mp, ip)) {
2128 				error = xfs_qm_dqattach(ip);
2129 				if (error)
2130 					return error;
2131 			}
2132 		}
2133 	}
2134 	return 0;
2135 }
2136 
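/*
 * Attach the dquots reserved for a new inode at creation time and charge
 * the new inode to them.
 */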
2137 void
2138 xfs_qm_vop_create_dqattach(
2139 	struct xfs_trans	*tp,
2140 	struct xfs_inode	*ip,
2141 	struct xfs_dquot	*udqp,
2142 	struct xfs_dquot	*gdqp,
2143 	struct xfs_dquot	*pdqp)
2144 {
2145 	struct xfs_mount	*mp = tp->t_mountp;
2146 
2147 	if (!XFS_IS_QUOTA_ON(mp))
2148 		return;
2149 
2150 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2151 	ASSERT(!xfs_is_metadir_inode(ip));
2152 
2153 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2154 		ASSERT(ip->i_udquot == NULL);
2155 		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
2156 
2157 		ip->i_udquot = xfs_qm_dqhold(udqp);
2158 	}
2159 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2160 		ASSERT(ip->i_gdquot == NULL);
2161 		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
2162 
2163 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
2164 	}
2165 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2166 		ASSERT(ip->i_pdquot == NULL);
2167 		ASSERT(ip->i_projid == pdqp->q_id);
2168 
2169 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
2170 	}
2171 
2172 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
2173 }
2174 
2175 /* Decide if this inode's dquot is near an enforcement boundary. */
2176 bool
2177 xfs_inode_near_dquot_enforcement(
2178 	struct xfs_inode	*ip,
2179 	xfs_dqtype_t		type)
2180 {
2181 	struct xfs_dquot	*dqp;
2182 	struct xfs_dquot_res	*res;
2183 	struct xfs_dquot_pre	*pre;
2184 	int64_t			freesp;
2185 
2186 	/* We only care about quotas that are enabled and enforced. */
2187 	dqp = xfs_inode_dquot(ip, type);
2188 	if (!dqp || !xfs_dquot_is_enforced(dqp))
2189 		return false;
2190 
2191 	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
2192 	    xfs_dquot_res_over_limits(&dqp->q_blk) ||
2193 	    xfs_dquot_res_over_limits(&dqp->q_rtb))
2194 		return true;
2195 
2196 	if (XFS_IS_REALTIME_INODE(ip)) {
2197 		res = &dqp->q_rtb;
2198 		pre = &dqp->q_rtb_prealloc;
2199 	} else {
2200 		res = &dqp->q_blk;
2201 		pre = &dqp->q_blk_prealloc;
2202 	}
2203 
2204 	/* For space on the relevant device, check the preallocation thresholds. */
2205 	if (!pre->q_prealloc_hi_wmark)
2206 		return false;
2207 
2208 	if (res->reserved < pre->q_prealloc_lo_wmark)
2209 		return false;
2210 
2211 	if (res->reserved >= pre->q_prealloc_hi_wmark)
2212 		return true;
2213 
2214 	freesp = pre->q_prealloc_hi_wmark - res->reserved;
2215 	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT])
2216 		return true;
2217 
2218 	return false;
2219 }
2220