xref: /linux/fs/xfs/xfs_qm.c (revision 06bd48b6cd97ef3889b68c8e09014d81dbc463f1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 
27 /*
28  * The global quota manager. There is only one of these for the entire
29  * system, _not_ one per file system. XQM keeps track of the overall
30  * quota functionality, including maintaining the freelist and radix
31  * trees of dquots.
32  */
33 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
34 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
35 
36 STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
37 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
38 /*
39  * We use the batch lookup interface to iterate over the dquots as it
40  * currently is the only interface into the radix tree code that allows
41  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
42  * operations is fine as all callers run either during mount/umount
43  * or quotaoff.
44  */
45 #define XFS_DQ_LOOKUP_BATCH	32
46 
47 STATIC int
48 xfs_qm_dquot_walk(
49 	struct xfs_mount	*mp,
50 	int			type,
51 	int			(*execute)(struct xfs_dquot *dqp, void *data),
52 	void			*data)
53 {
54 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
55 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
56 	uint32_t		next_index;
57 	int			last_error = 0;
58 	int			skipped;
59 	int			nr_found;
60 
61 restart:
62 	skipped = 0;
63 	next_index = 0;
64 	nr_found = 0;
65 
66 	while (1) {
67 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
68 		int		error = 0;
69 		int		i;
70 
71 		mutex_lock(&qi->qi_tree_lock);
72 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
73 					next_index, XFS_DQ_LOOKUP_BATCH);
74 		if (!nr_found) {
75 			mutex_unlock(&qi->qi_tree_lock);
76 			break;
77 		}
78 
79 		for (i = 0; i < nr_found; i++) {
80 			struct xfs_dquot *dqp = batch[i];
81 
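			/*
			 * Advance the lookup cursor past this id so the next
			 * gang lookup resumes after it, even if the execute
			 * callback removes this dquot from the tree.
			 */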
82 			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
83 
84 			error = execute(batch[i], data);
85 			if (error == -EAGAIN) {
86 				skipped++;
87 				continue;
88 			}
89 			if (error && last_error != -EFSCORRUPTED)
90 				last_error = error;
91 		}
92 
93 		mutex_unlock(&qi->qi_tree_lock);
94 
95 		/* bail out if the filesystem is corrupted.  */
96 		if (last_error == -EFSCORRUPTED) {
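			/* discard any skips so we don't pointlessly restart the walk */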
97 			skipped = 0;
98 			break;
99 		}
100 		/* we're done if id overflows back to zero */
101 		if (!next_index)
102 			break;
103 	}
104 
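	/*
	 * Some dquots were busy (-EAGAIN); back off briefly and rescan the
	 * tree so they get another chance to be processed.
	 */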
105 	if (skipped) {
106 		delay(1);
107 		goto restart;
108 	}
109 
110 	return last_error;
111 }
112 
113 
114 /*
115  * Purge a dquot from all tracking data structures and free it.
116  */
117 STATIC int
118 xfs_qm_dqpurge(
119 	struct xfs_dquot	*dqp,
120 	void			*data)
121 {
122 	struct xfs_mount	*mp = dqp->q_mount;
123 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
124 
125 	xfs_dqlock(dqp);
126 	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
127 		xfs_dqunlock(dqp);
128 		return -EAGAIN;
129 	}
130 
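	/*
	 * Mark the dquot so that concurrent lookups see it is being freed
	 * and back off until it has been removed from the radix tree.
	 */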
131 	dqp->dq_flags |= XFS_DQ_FREEING;
132 
133 	xfs_dqflock(dqp);
134 
135 	/*
136 	 * If we are turning this quota type off, we don't care
137 	 * about the dirty metadata sitting in this dquot. OTOH, if
138 	 * we're unmounting, we do care, so we flush it and wait.
139 	 */
140 	if (XFS_DQ_IS_DIRTY(dqp)) {
141 		struct xfs_buf	*bp = NULL;
142 		int		error;
143 
144 		/*
145 		 * We don't care about getting disk errors here. We need
146 		 * to purge this dquot anyway, so we go ahead regardless.
147 		 */
148 		error = xfs_qm_dqflush(dqp, &bp);
149 		if (!error) {
150 			error = xfs_bwrite(bp);
151 			xfs_buf_relse(bp);
152 		}
153 		xfs_dqflock(dqp);
154 	}
155 
156 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
157 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
158 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
159 
160 	xfs_dqfunlock(dqp);
161 	xfs_dqunlock(dqp);
162 
163 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
164 			  be32_to_cpu(dqp->q_core.d_id));
165 	qi->qi_dquots--;
166 
167 	/*
168 	 * We move dquots to the freelist as soon as their reference count
169 	 * hits zero, so it really should be on the freelist here.
170 	 */
171 	ASSERT(!list_empty(&dqp->q_lru));
172 	list_lru_del(&qi->qi_lru, &dqp->q_lru);
173 	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
174 
175 	xfs_qm_dqdestroy(dqp);
176 	return 0;
177 }
178 
179 /*
180  * Purge the dquot cache.
181  */
182 void
183 xfs_qm_dqpurge_all(
184 	struct xfs_mount	*mp,
185 	uint			flags)
186 {
187 	if (flags & XFS_QMOPT_UQUOTA)
188 		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
189 	if (flags & XFS_QMOPT_GQUOTA)
190 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
191 	if (flags & XFS_QMOPT_PQUOTA)
192 		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
193 }
194 
195 /*
196  * Purge the dquot cache and destroy the quotainfo structure.
197  */
198 void
199 xfs_qm_unmount(
200 	struct xfs_mount	*mp)
201 {
202 	if (mp->m_quotainfo) {
203 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
204 		xfs_qm_destroy_quotainfo(mp);
205 	}
206 }
207 
208 /*
209  * Called from the vfsops layer.
210  */
211 void
212 xfs_qm_unmount_quotas(
213 	xfs_mount_t	*mp)
214 {
215 	/*
216 	 * Release the dquots that the root inode, et al might be holding,
217 	 * before we flush quotas and blow away the quotainfo structure.
218 	 */
219 	ASSERT(mp->m_rootip);
220 	xfs_qm_dqdetach(mp->m_rootip);
221 	if (mp->m_rbmip)
222 		xfs_qm_dqdetach(mp->m_rbmip);
223 	if (mp->m_rsumip)
224 		xfs_qm_dqdetach(mp->m_rsumip);
225 
226 	/*
227 	 * Release the quota inodes.
228 	 */
229 	if (mp->m_quotainfo) {
230 		if (mp->m_quotainfo->qi_uquotaip) {
231 			xfs_irele(mp->m_quotainfo->qi_uquotaip);
232 			mp->m_quotainfo->qi_uquotaip = NULL;
233 		}
234 		if (mp->m_quotainfo->qi_gquotaip) {
235 			xfs_irele(mp->m_quotainfo->qi_gquotaip);
236 			mp->m_quotainfo->qi_gquotaip = NULL;
237 		}
238 		if (mp->m_quotainfo->qi_pquotaip) {
239 			xfs_irele(mp->m_quotainfo->qi_pquotaip);
240 			mp->m_quotainfo->qi_pquotaip = NULL;
241 		}
242 	}
243 }
244 
245 STATIC int
246 xfs_qm_dqattach_one(
247 	struct xfs_inode	*ip,
248 	xfs_dqid_t		id,
249 	uint			type,
250 	bool			doalloc,
251 	struct xfs_dquot	**IO_idqpp)
252 {
253 	struct xfs_dquot	*dqp;
254 	int			error;
255 
256 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
257 	error = 0;
258 
259 	/*
260 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
261 	 * or &i_gdquot. This made the code look weird, but made the logic a lot
262 	 * simpler.
263 	 */
264 	dqp = *IO_idqpp;
265 	if (dqp) {
266 		trace_xfs_dqattach_found(dqp);
267 		return 0;
268 	}
269 
270 	/*
271 	 * Find the dquot from somewhere. This bumps the reference count of the
272 	 * dquot and returns it locked.  This can return ENOENT if the dquot didn't
273 	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
274 	 * turned off suddenly.
275 	 */
276 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
277 	if (error)
278 		return error;
279 
280 	trace_xfs_dqattach_get(dqp);
281 
282 	/*
283 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
284 	 * that the dquot returned is the one that should go in the inode.
285 	 */
286 	*IO_idqpp = dqp;
287 	xfs_dqunlock(dqp);
288 	return 0;
289 }
290 
291 static bool
292 xfs_qm_need_dqattach(
293 	struct xfs_inode	*ip)
294 {
295 	struct xfs_mount	*mp = ip->i_mount;
296 
297 	if (!XFS_IS_QUOTA_RUNNING(mp))
298 		return false;
299 	if (!XFS_IS_QUOTA_ON(mp))
300 		return false;
301 	if (!XFS_NOT_DQATTACHED(mp, ip))
302 		return false;
303 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
304 		return false;
305 	return true;
306 }
307 
308 /*
309  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
310  * into account.
311  * If @doalloc is true, the dquot(s) will be allocated if needed.
312  * Inode may get unlocked and relocked in here, and the caller must deal with
313  * the consequences.
314  */
315 int
316 xfs_qm_dqattach_locked(
317 	xfs_inode_t	*ip,
318 	bool		doalloc)
319 {
320 	xfs_mount_t	*mp = ip->i_mount;
321 	int		error = 0;
322 
323 	if (!xfs_qm_need_dqattach(ip))
324 		return 0;
325 
326 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
327 
328 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
329 		error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
330 				XFS_DQ_USER, doalloc, &ip->i_udquot);
331 		if (error)
332 			goto done;
333 		ASSERT(ip->i_udquot);
334 	}
335 
336 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
337 		error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
338 				XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
339 		if (error)
340 			goto done;
341 		ASSERT(ip->i_gdquot);
342 	}
343 
344 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
345 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
346 				doalloc, &ip->i_pdquot);
347 		if (error)
348 			goto done;
349 		ASSERT(ip->i_pdquot);
350 	}
351 
352 done:
353 	/*
354 	 * Don't worry about the dquots that we may have attached before any
355 	 * error - they'll get detached later if it has not already been done.
356 	 */
357 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
358 	return error;
359 }
360 
361 int
362 xfs_qm_dqattach(
363 	struct xfs_inode	*ip)
364 {
365 	int			error;
366 
367 	if (!xfs_qm_need_dqattach(ip))
368 		return 0;
369 
370 	xfs_ilock(ip, XFS_ILOCK_EXCL);
371 	error = xfs_qm_dqattach_locked(ip, false);
372 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
373 
374 	return error;
375 }
376 
377 /*
378  * Release dquots (and their references) if any.
379  * The inode should be locked EXCL except when this is called by
380  * xfs_ireclaim.
381  */
382 void
383 xfs_qm_dqdetach(
384 	xfs_inode_t	*ip)
385 {
386 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
387 		return;
388 
389 	trace_xfs_dquot_dqdetach(ip);
390 
391 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
392 	if (ip->i_udquot) {
393 		xfs_qm_dqrele(ip->i_udquot);
394 		ip->i_udquot = NULL;
395 	}
396 	if (ip->i_gdquot) {
397 		xfs_qm_dqrele(ip->i_gdquot);
398 		ip->i_gdquot = NULL;
399 	}
400 	if (ip->i_pdquot) {
401 		xfs_qm_dqrele(ip->i_pdquot);
402 		ip->i_pdquot = NULL;
403 	}
404 }
405 
406 struct xfs_qm_isolate {
407 	struct list_head	buffers;
408 	struct list_head	dispose;
409 };
410 
411 static enum lru_status
412 xfs_qm_dquot_isolate(
413 	struct list_head	*item,
414 	struct list_lru_one	*lru,
415 	spinlock_t		*lru_lock,
416 	void			*arg)
417 		__releases(lru_lock) __acquires(lru_lock)
418 {
419 	struct xfs_dquot	*dqp = container_of(item,
420 						struct xfs_dquot, q_lru);
421 	struct xfs_qm_isolate	*isol = arg;
422 
423 	if (!xfs_dqlock_nowait(dqp))
424 		goto out_miss_busy;
425 
426 	/*
427 	 * This dquot has acquired a reference in the meantime; remove it from
428 	 * the freelist and try again.
429 	 */
430 	if (dqp->q_nrefs) {
431 		xfs_dqunlock(dqp);
432 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
433 
434 		trace_xfs_dqreclaim_want(dqp);
435 		list_lru_isolate(lru, &dqp->q_lru);
436 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
437 		return LRU_REMOVED;
438 	}
439 
440 	/*
441 	 * If the dquot is dirty, flush it. If it's already being flushed, just
442 	 * skip it so there is time for the IO to complete before we try to
443 	 * reclaim it again on the next LRU pass.
444 	 */
445 	if (!xfs_dqflock_nowait(dqp)) {
446 		xfs_dqunlock(dqp);
447 		goto out_miss_busy;
448 	}
449 
450 	if (XFS_DQ_IS_DIRTY(dqp)) {
451 		struct xfs_buf	*bp = NULL;
452 		int		error;
453 
454 		trace_xfs_dqreclaim_dirty(dqp);
455 
456 		/* we have to drop the LRU lock to flush the dquot */
457 		spin_unlock(lru_lock);
458 
459 		error = xfs_qm_dqflush(dqp, &bp);
460 		if (error)
461 			goto out_unlock_dirty;
462 
463 		xfs_buf_delwri_queue(bp, &isol->buffers);
464 		xfs_buf_relse(bp);
465 		goto out_unlock_dirty;
466 	}
467 	xfs_dqfunlock(dqp);
468 
469 	/*
470 	 * Prevent lookups now that we are past the point of no return.
471 	 */
472 	dqp->dq_flags |= XFS_DQ_FREEING;
473 	xfs_dqunlock(dqp);
474 
475 	ASSERT(dqp->q_nrefs == 0);
476 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
477 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
478 	trace_xfs_dqreclaim_done(dqp);
479 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
480 	return LRU_REMOVED;
481 
482 out_miss_busy:
483 	trace_xfs_dqreclaim_busy(dqp);
484 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
485 	return LRU_SKIP;
486 
487 out_unlock_dirty:
488 	trace_xfs_dqreclaim_busy(dqp);
489 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
490 	xfs_dqunlock(dqp);
491 	spin_lock(lru_lock);
492 	return LRU_RETRY;
493 }
494 
495 static unsigned long
496 xfs_qm_shrink_scan(
497 	struct shrinker		*shrink,
498 	struct shrink_control	*sc)
499 {
500 	struct xfs_quotainfo	*qi = container_of(shrink,
501 					struct xfs_quotainfo, qi_shrinker);
502 	struct xfs_qm_isolate	isol;
503 	unsigned long		freed;
504 	int			error;
505 
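	/*
	 * Dquot reclaim can issue I/O and recurse into the filesystem, so
	 * only run for direct reclaim contexts that allow both.
	 */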
506 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
507 		return 0;
508 
509 	INIT_LIST_HEAD(&isol.buffers);
510 	INIT_LIST_HEAD(&isol.dispose);
511 
512 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
513 				     xfs_qm_dquot_isolate, &isol);
514 
515 	error = xfs_buf_delwri_submit(&isol.buffers);
516 	if (error)
517 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
518 
519 	while (!list_empty(&isol.dispose)) {
520 		struct xfs_dquot	*dqp;
521 
522 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
523 		list_del_init(&dqp->q_lru);
524 		xfs_qm_dqfree_one(dqp);
525 	}
526 
527 	return freed;
528 }
529 
530 static unsigned long
531 xfs_qm_shrink_count(
532 	struct shrinker		*shrink,
533 	struct shrink_control	*sc)
534 {
535 	struct xfs_quotainfo	*qi = container_of(shrink,
536 					struct xfs_quotainfo, qi_shrinker);
537 
538 	return list_lru_shrink_count(&qi->qi_lru, sc);
539 }
540 
541 STATIC void
542 xfs_qm_set_defquota(
543 	struct xfs_mount	*mp,
544 	uint			type,
545 	struct xfs_quotainfo	*qinf)
546 {
547 	struct xfs_dquot	*dqp;
548 	struct xfs_def_quota	*defq;
549 	struct xfs_disk_dquot	*ddqp;
550 	int			error;
551 
552 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
553 	if (error)
554 		return;
555 
556 	ddqp = &dqp->q_core;
557 	defq = xfs_get_defquota(dqp, qinf);
558 
559 	/*
560 	 * Timers and warnings have already been set, so just set the
561 	 * default limits for this quota type.
562 	 */
563 	defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
564 	defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
565 	defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
566 	defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
567 	defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
568 	defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
569 	xfs_qm_dqdestroy(dqp);
570 }
571 
572 /* Initialize quota time limits from the root dquot. */
573 static void
574 xfs_qm_init_timelimits(
575 	struct xfs_mount	*mp,
576 	struct xfs_quotainfo	*qinf)
577 {
578 	struct xfs_disk_dquot	*ddqp;
579 	struct xfs_dquot	*dqp;
580 	uint			type;
581 	int			error;
582 
583 	qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
584 	qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
585 	qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
586 	qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
587 	qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
588 	qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
589 
590 	/*
591 	 * We try to get the limits from the superuser's limits fields.
592 	 * This is quite hacky, but it is standard quota practice.
593 	 *
594 	 * Since we may not have done a quotacheck by this point, just read
595 	 * the dquot without attaching it to any hashtables or lists.
596 	 *
597 	 * Timers and warnings are globally set by the first timer found in
598 	 * user/group/proj quota types, otherwise a default value is used.
599 	 * This should be split into different fields per quota type.
600 	 */
601 	if (XFS_IS_UQUOTA_RUNNING(mp))
602 		type = XFS_DQ_USER;
603 	else if (XFS_IS_GQUOTA_RUNNING(mp))
604 		type = XFS_DQ_GROUP;
605 	else
606 		type = XFS_DQ_PROJ;
607 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
608 	if (error)
609 		return;
610 
611 	ddqp = &dqp->q_core;
612 	/*
613 	 * The warnings and timers set the grace period given to
614 	 * a user or group before he or she can no longer write.
615 	 * If it is zero, a default is used.
616 	 */
617 	if (ddqp->d_btimer)
618 		qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
619 	if (ddqp->d_itimer)
620 		qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
621 	if (ddqp->d_rtbtimer)
622 		qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
623 	if (ddqp->d_bwarns)
624 		qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
625 	if (ddqp->d_iwarns)
626 		qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
627 	if (ddqp->d_rtbwarns)
628 		qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
629 
630 	xfs_qm_dqdestroy(dqp);
631 }
632 
633 /*
634  * This initializes all the quota information that's kept in the
635  * mount structure
636  */
637 STATIC int
638 xfs_qm_init_quotainfo(
639 	struct xfs_mount	*mp)
640 {
641 	struct xfs_quotainfo	*qinf;
642 	int			error;
643 
644 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
645 
646 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
647 
648 	error = list_lru_init(&qinf->qi_lru);
649 	if (error)
650 		goto out_free_qinf;
651 
652 	/*
653 	 * See if quotainodes are setup, and if not, allocate them,
654 	 * and change the superblock accordingly.
655 	 */
656 	error = xfs_qm_init_quotainos(mp);
657 	if (error)
658 		goto out_free_lru;
659 
660 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
661 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
662 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
663 	mutex_init(&qinf->qi_tree_lock);
664 
665 	/* mutex used to serialize quotaoffs */
666 	mutex_init(&qinf->qi_quotaofflock);
667 
668 	/* Precalc some constants */
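	/*
	 * qi_dqchunklen is the size of a dquot cluster in 512-byte basic
	 * blocks; qi_dqperchunk is how many dquots fit in one such cluster.
	 */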
669 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
670 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
671 
672 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
673 
674 	xfs_qm_init_timelimits(mp, qinf);
675 
676 	if (XFS_IS_UQUOTA_RUNNING(mp))
677 		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
678 	if (XFS_IS_GQUOTA_RUNNING(mp))
679 		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
680 	if (XFS_IS_PQUOTA_RUNNING(mp))
681 		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
682 
683 	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
684 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
685 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
686 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
687 
688 	error = register_shrinker(&qinf->qi_shrinker);
689 	if (error)
690 		goto out_free_inos;
691 
692 	return 0;
693 
694 out_free_inos:
695 	mutex_destroy(&qinf->qi_quotaofflock);
696 	mutex_destroy(&qinf->qi_tree_lock);
697 	xfs_qm_destroy_quotainos(qinf);
698 out_free_lru:
699 	list_lru_destroy(&qinf->qi_lru);
700 out_free_qinf:
701 	kmem_free(qinf);
702 	mp->m_quotainfo = NULL;
703 	return error;
704 }
705 
706 /*
707  * Gets called when unmounting a filesystem or when all quotas get
708  * turned off.
709  * This purges the quota inodes, destroys locks and frees itself.
710  */
711 void
712 xfs_qm_destroy_quotainfo(
713 	struct xfs_mount	*mp)
714 {
715 	struct xfs_quotainfo	*qi;
716 
717 	qi = mp->m_quotainfo;
718 	ASSERT(qi != NULL);
719 
720 	unregister_shrinker(&qi->qi_shrinker);
721 	list_lru_destroy(&qi->qi_lru);
722 	xfs_qm_destroy_quotainos(qi);
723 	mutex_destroy(&qi->qi_tree_lock);
724 	mutex_destroy(&qi->qi_quotaofflock);
725 	kmem_free(qi);
726 	mp->m_quotainfo = NULL;
727 }
728 
729 /*
730  * Create an inode and return with a reference already taken, but unlocked.
731  * This is how we create quota inodes.
732  */
733 STATIC int
734 xfs_qm_qino_alloc(
735 	xfs_mount_t	*mp,
736 	xfs_inode_t	**ip,
737 	uint		flags)
738 {
739 	xfs_trans_t	*tp;
740 	int		error;
741 	bool		need_alloc = true;
742 
743 	*ip = NULL;
744 	/*
745 	 * With a superblock that doesn't have a separate pquotino, we
746 	 * share an inode between gquota and pquota. If the on-disk
747 	 * superblock has GQUOTA and the filesystem is now mounted
748 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
749 	 * vice-versa.
750 	 */
751 	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
752 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
753 		xfs_ino_t ino = NULLFSINO;
754 
755 		if ((flags & XFS_QMOPT_PQUOTA) &&
756 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
757 			ino = mp->m_sb.sb_gquotino;
758 			if (XFS_IS_CORRUPT(mp,
759 					   mp->m_sb.sb_pquotino != NULLFSINO))
760 				return -EFSCORRUPTED;
761 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
762 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
763 			ino = mp->m_sb.sb_pquotino;
764 			if (XFS_IS_CORRUPT(mp,
765 					   mp->m_sb.sb_gquotino != NULLFSINO))
766 				return -EFSCORRUPTED;
767 		}
768 		if (ino != NULLFSINO) {
769 			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
770 			if (error)
771 				return error;
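			/*
			 * Invalidate both on-disk quota inode pointers here;
			 * the field that matches @flags is re-set under the
			 * m_sb_lock and logged below.
			 */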
772 			mp->m_sb.sb_gquotino = NULLFSINO;
773 			mp->m_sb.sb_pquotino = NULLFSINO;
774 			need_alloc = false;
775 		}
776 	}
777 
778 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
779 			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
780 	if (error)
781 		return error;
782 
783 	if (need_alloc) {
784 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
785 		if (error) {
786 			xfs_trans_cancel(tp);
787 			return error;
788 		}
789 	}
790 
791 	/*
792 	 * Make the changes in the superblock, and log those too.
793 	 * sbfields arg may contain fields other than *QUOTINO;
794 	 * VERSIONNUM for example.
795 	 */
796 	spin_lock(&mp->m_sb_lock);
797 	if (flags & XFS_QMOPT_SBVERSION) {
798 		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
799 
800 		xfs_sb_version_addquota(&mp->m_sb);
801 		mp->m_sb.sb_uquotino = NULLFSINO;
802 		mp->m_sb.sb_gquotino = NULLFSINO;
803 		mp->m_sb.sb_pquotino = NULLFSINO;
804 
805 		/* qflags will get updated fully _after_ quotacheck */
806 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
807 	}
808 	if (flags & XFS_QMOPT_UQUOTA)
809 		mp->m_sb.sb_uquotino = (*ip)->i_ino;
810 	else if (flags & XFS_QMOPT_GQUOTA)
811 		mp->m_sb.sb_gquotino = (*ip)->i_ino;
812 	else
813 		mp->m_sb.sb_pquotino = (*ip)->i_ino;
814 	spin_unlock(&mp->m_sb_lock);
815 	xfs_log_sb(tp);
816 
817 	error = xfs_trans_commit(tp);
818 	if (error) {
819 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
820 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
821 	}
822 	if (need_alloc)
823 		xfs_finish_inode_setup(*ip);
824 	return error;
825 }
826 
827 
828 STATIC void
829 xfs_qm_reset_dqcounts(
830 	xfs_mount_t	*mp,
831 	xfs_buf_t	*bp,
832 	xfs_dqid_t	id,
833 	uint		type)
834 {
835 	struct xfs_dqblk	*dqb;
836 	int			j;
837 	xfs_failaddr_t		fa;
838 
839 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
840 
841 	/*
842 	 * Reset all counters and timers. They'll be
843 	 * started afresh by xfs_qm_quotacheck.
844 	 */
845 #ifdef DEBUG
846 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
847 		sizeof(xfs_dqblk_t);
848 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
849 #endif
850 	dqb = bp->b_addr;
851 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
852 		struct xfs_disk_dquot	*ddq;
853 
854 		ddq = (struct xfs_disk_dquot *)&dqb[j];
855 
856 		/*
857 		 * Do a sanity check, and if needed, repair the dqblk. Don't
858 		 * output any warnings because it's perfectly possible to
859 		 * find uninitialised dquot blks. See comment in
860 		 * xfs_dquot_verify.
861 		 */
862 		fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
863 		if (fa)
864 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
865 
866 		/*
867 		 * Reset type in case we are reusing group quota file for
868 		 * project quotas or vice versa
869 		 */
870 		ddq->d_flags = type;
871 		ddq->d_bcount = 0;
872 		ddq->d_icount = 0;
873 		ddq->d_rtbcount = 0;
874 
875 		/*
876 		 * dquot id 0 stores the default grace period and the maximum
877 		 * warning limit that were set by the administrator, so we
878 		 * should not reset them.
879 		 */
880 		if (ddq->d_id != 0) {
881 			ddq->d_btimer = 0;
882 			ddq->d_itimer = 0;
883 			ddq->d_rtbtimer = 0;
884 			ddq->d_bwarns = 0;
885 			ddq->d_iwarns = 0;
886 			ddq->d_rtbwarns = 0;
887 		}
888 
889 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
890 			xfs_update_cksum((char *)&dqb[j],
891 					 sizeof(struct xfs_dqblk),
892 					 XFS_DQUOT_CRC_OFF);
893 		}
894 	}
895 }
896 
897 STATIC int
898 xfs_qm_reset_dqcounts_all(
899 	struct xfs_mount	*mp,
900 	xfs_dqid_t		firstid,
901 	xfs_fsblock_t		bno,
902 	xfs_filblks_t		blkcnt,
903 	uint			flags,
904 	struct list_head	*buffer_list)
905 {
906 	struct xfs_buf		*bp;
907 	int			error;
908 	int			type;
909 
910 	ASSERT(blkcnt > 0);
911 	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
912 		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
913 	error = 0;
914 
915 	/*
916 	 * Blkcnt arg can be a very big number, and might even be
917 	 * larger than the log itself. So, we have to break it up into
918 	 * manageable-sized transactions.
919 	 * Note that we don't start a permanent transaction here; we might
920 	 * not be able to get a log reservation for the whole thing up front,
921 	 * and we don't really care to either, because we just discard
922 	 * everything if we were to crash in the middle of this loop.
923 	 */
924 	while (blkcnt--) {
925 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
926 			      XFS_FSB_TO_DADDR(mp, bno),
927 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
928 			      &xfs_dquot_buf_ops);
929 
930 		/*
931 		 * CRC and validation errors will return an EFSCORRUPTED here. If
932 		 * this occurs, re-read without CRC validation so that we can
933 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
934 		 * will leave a trace in the log indicating corruption has
935 		 * been detected.
936 		 */
937 		if (error == -EFSCORRUPTED) {
938 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
939 				      XFS_FSB_TO_DADDR(mp, bno),
940 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
941 				      NULL);
942 		}
943 
944 		if (error)
945 			break;
946 
947 		/*
948 		 * A corrupt buffer might not have a verifier attached, so
949 		 * make sure we have the correct one attached before writeback
950 		 * occurs.
951 		 */
952 		bp->b_ops = &xfs_dquot_buf_ops;
953 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
954 		xfs_buf_delwri_queue(bp, buffer_list);
955 		xfs_buf_relse(bp);
956 
957 		/* goto the next block. */
958 		bno++;
959 		firstid += mp->m_quotainfo->qi_dqperchunk;
960 	}
961 
962 	return error;
963 }
964 
965 /*
966  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
967  * counters for every chunk of dquots that we find.
968  */
969 STATIC int
970 xfs_qm_reset_dqcounts_buf(
971 	struct xfs_mount	*mp,
972 	struct xfs_inode	*qip,
973 	uint			flags,
974 	struct list_head	*buffer_list)
975 {
976 	struct xfs_bmbt_irec	*map;
977 	int			i, nmaps;	/* number of map entries */
978 	int			error;		/* return value */
979 	xfs_fileoff_t		lblkno;
980 	xfs_filblks_t		maxlblkcnt;
981 	xfs_dqid_t		firstid;
982 	xfs_fsblock_t		rablkno;
983 	xfs_filblks_t		rablkcnt;
984 
985 	error = 0;
986 	/*
987 	 * This looks racy, but we can't keep an inode lock across a
988 	 * trans_reserve. However, this gets called during quotacheck, and that
989 	 * happens only at mount time, which is single threaded.
990 	 */
991 	if (qip->i_d.di_nblocks == 0)
992 		return 0;
993 
994 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
995 
996 	lblkno = 0;
997 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
998 	do {
999 		uint		lock_mode;
1000 
1001 		nmaps = XFS_DQITER_MAP_SIZE;
1002 		/*
1003 		 * We aren't changing the inode itself. Just changing
1004 		 * some of its data. No new blocks are added here, and
1005 		 * the inode is never added to the transaction.
1006 		 */
1007 		lock_mode = xfs_ilock_data_map_shared(qip);
1008 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1009 				       map, &nmaps, 0);
1010 		xfs_iunlock(qip, lock_mode);
1011 		if (error)
1012 			break;
1013 
1014 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1015 		for (i = 0; i < nmaps; i++) {
1016 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1017 			ASSERT(map[i].br_blockcount);
1018 
1019 
1020 			lblkno += map[i].br_blockcount;
1021 
1022 			if (map[i].br_startblock == HOLESTARTBLOCK)
1023 				continue;
1024 
1025 			firstid = (xfs_dqid_t) map[i].br_startoff *
1026 				mp->m_quotainfo->qi_dqperchunk;
1027 			/*
1028 			 * Do a read-ahead on the next extent.
1029 			 */
1030 			if ((i+1 < nmaps) &&
1031 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1032 				rablkcnt =  map[i+1].br_blockcount;
1033 				rablkno = map[i+1].br_startblock;
1034 				while (rablkcnt--) {
1035 					xfs_buf_readahead(mp->m_ddev_targp,
1036 					       XFS_FSB_TO_DADDR(mp, rablkno),
1037 					       mp->m_quotainfo->qi_dqchunklen,
1038 					       &xfs_dquot_buf_ops);
1039 					rablkno++;
1040 				}
1041 			}
1042 			/*
1043 			 * Iterate thru all the blks in the extent and
1044 			 * reset the counters of all the dquots inside them.
1045 			 */
1046 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1047 						   map[i].br_startblock,
1048 						   map[i].br_blockcount,
1049 						   flags, buffer_list);
1050 			if (error)
1051 				goto out;
1052 		}
1053 	} while (nmaps > 0);
1054 
1055 out:
1056 	kmem_free(map);
1057 	return error;
1058 }
1059 
1060 /*
1061  * Called by dqusage_adjust in doing a quotacheck.
1062  *
1063  * Given the inode and a dquot id, this updates both the incore dquot as well
1064  * as the buffer copy. This is so that once the quotacheck is done, we can
1065  * just log all the buffers, as opposed to logging numerous updates to
1066  * individual dquots.
1067  */
1068 STATIC int
1069 xfs_qm_quotacheck_dqadjust(
1070 	struct xfs_inode	*ip,
1071 	uint			type,
1072 	xfs_qcnt_t		nblks,
1073 	xfs_qcnt_t		rtblks)
1074 {
1075 	struct xfs_mount	*mp = ip->i_mount;
1076 	struct xfs_dquot	*dqp;
1077 	xfs_dqid_t		id;
1078 	int			error;
1079 
1080 	id = xfs_qm_id_for_quotatype(ip, type);
1081 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1082 	if (error) {
1083 		/*
1084 		 * Shouldn't be able to turn off quotas here.
1085 		 */
1086 		ASSERT(error != -ESRCH);
1087 		ASSERT(error != -ENOENT);
1088 		return error;
1089 	}
1090 
1091 	trace_xfs_dqadjust(dqp);
1092 
1093 	/*
1094 	 * Adjust the inode count and the block count to reflect this inode's
1095 	 * resource usage.
1096 	 */
1097 	be64_add_cpu(&dqp->q_core.d_icount, 1);
1098 	dqp->q_res_icount++;
1099 	if (nblks) {
1100 		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1101 		dqp->q_res_bcount += nblks;
1102 	}
1103 	if (rtblks) {
1104 		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1105 		dqp->q_res_rtbcount += rtblks;
1106 	}
1107 
1108 	/*
1109 	 * Set default limits, adjust timers (since we changed usages)
1110 	 *
1111 	 * There are no timers for the default values set in the root dquot.
1112 	 */
1113 	if (dqp->q_core.d_id) {
1114 		xfs_qm_adjust_dqlimits(mp, dqp);
1115 		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1116 	}
1117 
1118 	dqp->dq_flags |= XFS_DQ_DIRTY;
1119 	xfs_qm_dqput(dqp);
1120 	return 0;
1121 }
1122 
1123 /*
1124  * Callback routine supplied to the inode walk. Given an inumber, find its
1125  * dquots and update them to account for resources taken by that inode.
1126  */
1127 /* ARGSUSED */
1128 STATIC int
1129 xfs_qm_dqusage_adjust(
1130 	struct xfs_mount	*mp,
1131 	struct xfs_trans	*tp,
1132 	xfs_ino_t		ino,
1133 	void			*data)
1134 {
1135 	struct xfs_inode	*ip;
1136 	xfs_qcnt_t		nblks;
1137 	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1138 	int			error;
1139 
1140 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1141 
1142 	/*
1143 	 * rootino must have its resources accounted for, not so with the quota
1144 	 * inodes.
1145 	 */
1146 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1147 		return 0;
1148 
1149 	/*
1150 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1151 	 * at mount time and therefore nobody will be racing chown/chproj.
1152 	 */
1153 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1154 	if (error == -EINVAL || error == -ENOENT)
1155 		return 0;
1156 	if (error)
1157 		return error;
1158 
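	/* Quotacheck runs at mount before any user I/O, so no delalloc blocks exist. */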
1159 	ASSERT(ip->i_delayed_blks == 0);
1160 
1161 	if (XFS_IS_REALTIME_INODE(ip)) {
1162 		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1163 
1164 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1165 			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1166 			if (error)
1167 				goto error0;
1168 		}
1169 
1170 		xfs_bmap_count_leaves(ifp, &rtblks);
1171 	}
1172 
1173 	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1174 
1175 	/*
1176 	 * Add the (disk blocks and inode) resources occupied by this
1177 	 * inode to its dquots. We do this adjustment in the incore dquot,
1178 	 * and also copy the changes to its buffer.
1179 	 * We don't care about putting these changes in a transaction
1180 	 * envelope because if we crash in the middle of a 'quotacheck'
1181 	 * we have to start from the beginning anyway.
1182 	 * Once we're done, we'll log all the dquot bufs.
1183 	 *
1184 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1185 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1186 	 */
1187 	if (XFS_IS_UQUOTA_ON(mp)) {
1188 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
1189 				rtblks);
1190 		if (error)
1191 			goto error0;
1192 	}
1193 
1194 	if (XFS_IS_GQUOTA_ON(mp)) {
1195 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
1196 				rtblks);
1197 		if (error)
1198 			goto error0;
1199 	}
1200 
1201 	if (XFS_IS_PQUOTA_ON(mp)) {
1202 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
1203 				rtblks);
1204 		if (error)
1205 			goto error0;
1206 	}
1207 
1208 error0:
1209 	xfs_irele(ip);
1210 	return error;
1211 }
1212 
1213 STATIC int
1214 xfs_qm_flush_one(
1215 	struct xfs_dquot	*dqp,
1216 	void			*data)
1217 {
1218 	struct xfs_mount	*mp = dqp->q_mount;
1219 	struct list_head	*buffer_list = data;
1220 	struct xfs_buf		*bp = NULL;
1221 	int			error = 0;
1222 
1223 	xfs_dqlock(dqp);
1224 	if (dqp->dq_flags & XFS_DQ_FREEING)
1225 		goto out_unlock;
1226 	if (!XFS_DQ_IS_DIRTY(dqp))
1227 		goto out_unlock;
1228 
1229 	/*
1230 	 * The only way the dquot is already flush locked by the time quotacheck
1231 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1232 	 * it for the final time. Quotacheck collects all dquot bufs in the
1233 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1234 	 * possibly queued it for I/O. The only way out is to push the buffer to
1235 	 * cycle the flush lock.
1236 	 */
1237 	if (!xfs_dqflock_nowait(dqp)) {
1238 		/* buf is pinned in-core by delwri list */
1239 		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1240 				mp->m_quotainfo->qi_dqchunklen, 0);
1241 		if (!bp) {
1242 			error = -EINVAL;
1243 			goto out_unlock;
1244 		}
1245 		xfs_buf_unlock(bp);
1246 
1247 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1248 		xfs_buf_rele(bp);
1249 
1250 		error = -EAGAIN;
1251 		goto out_unlock;
1252 	}
1253 
1254 	error = xfs_qm_dqflush(dqp, &bp);
1255 	if (error)
1256 		goto out_unlock;
1257 
1258 	xfs_buf_delwri_queue(bp, buffer_list);
1259 	xfs_buf_relse(bp);
1260 out_unlock:
1261 	xfs_dqunlock(dqp);
1262 	return error;
1263 }
1264 
1265 /*
1266  * Walk thru all the filesystem inodes and construct a consistent view
1267  * of the disk quota world. If the quotacheck fails, disable quotas.
1268  */
1269 STATIC int
1270 xfs_qm_quotacheck(
1271 	xfs_mount_t	*mp)
1272 {
1273 	int			error, error2;
1274 	uint			flags;
1275 	LIST_HEAD		(buffer_list);
1276 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1277 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1278 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1279 
1280 	flags = 0;
1281 
1282 	ASSERT(uip || gip || pip);
1283 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1284 
1285 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1286 
1287 	/*
1288 	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1289 	 * their counters to zero. We need a clean slate.
1290 	 * We don't log our changes till later.
1291 	 */
1292 	if (uip) {
1293 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
1294 					 &buffer_list);
1295 		if (error)
1296 			goto error_return;
1297 		flags |= XFS_UQUOTA_CHKD;
1298 	}
1299 
1300 	if (gip) {
1301 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
1302 					 &buffer_list);
1303 		if (error)
1304 			goto error_return;
1305 		flags |= XFS_GQUOTA_CHKD;
1306 	}
1307 
1308 	if (pip) {
1309 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
1310 					 &buffer_list);
1311 		if (error)
1312 			goto error_return;
1313 		flags |= XFS_PQUOTA_CHKD;
1314 	}
1315 
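	/*
	 * Walk every inode in the filesystem and accumulate its block and
	 * inode usage into the incore dquots.
	 */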
1316 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1317 			NULL);
1318 	if (error)
1319 		goto error_return;
1320 
1321 	/*
1322 	 * We've made all the changes that we need to make incore.  Flush them
1323 	 * down to disk buffers if everything was updated successfully.
1324 	 */
1325 	if (XFS_IS_UQUOTA_ON(mp)) {
1326 		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1327 					  &buffer_list);
1328 	}
1329 	if (XFS_IS_GQUOTA_ON(mp)) {
1330 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1331 					   &buffer_list);
1332 		if (!error)
1333 			error = error2;
1334 	}
1335 	if (XFS_IS_PQUOTA_ON(mp)) {
1336 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1337 					   &buffer_list);
1338 		if (!error)
1339 			error = error2;
1340 	}
1341 
1342 	error2 = xfs_buf_delwri_submit(&buffer_list);
1343 	if (!error)
1344 		error = error2;
1345 
1346 	/*
1347 	 * We can get this error if we couldn't do a dquot allocation inside
1348 	 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1349 	 * dirty dquots that might be cached, we just want to get rid of them
1350 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1351 	 * at this point (because we intentionally didn't in dqget_noattach).
1352 	 */
1353 	if (error) {
1354 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1355 		goto error_return;
1356 	}
1357 
1358 	/*
1359 	 * If one type of quota is off, then it will lose its
1360 	 * quotachecked status, since we won't be doing accounting for
1361 	 * that type anymore.
1362 	 */
1363 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1364 	mp->m_qflags |= flags;
1365 
1366  error_return:
1367 	xfs_buf_delwri_cancel(&buffer_list);
1368 
1369 	if (error) {
1370 		xfs_warn(mp,
1371 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1372 			error);
1373 		/*
1374 		 * We must turn off quotas.
1375 		 */
1376 		ASSERT(mp->m_quotainfo != NULL);
1377 		xfs_qm_destroy_quotainfo(mp);
1378 		if (xfs_mount_reset_sbqflags(mp)) {
1379 			xfs_warn(mp,
1380 				"Quotacheck: Failed to reset quota flags.");
1381 		}
1382 	} else
1383 		xfs_notice(mp, "Quotacheck: Done.");
1384 	return error;
1385 }
1386 
1387 /*
1388  * This is called from xfs_mountfs to start quotas and initialize all
1389  * necessary data structures like quotainfo.  This is also responsible for
1390  * running a quotacheck as necessary.  We are guaranteed that the superblock
1391  * is consistently read in at this point.
1392  *
1393  * If we fail here, the mount will continue with quota turned off. We don't
1394  * need to indicate success or failure at all.
1395  */
1396 void
1397 xfs_qm_mount_quotas(
1398 	struct xfs_mount	*mp)
1399 {
1400 	int			error = 0;
1401 	uint			sbf;
1402 
1403 	/*
1404 	 * Quota on realtime volumes is not supported, so disable quotas
1405 	 * immediately if the filesystem has a realtime section.
1406 	 */
1407 	if (mp->m_sb.sb_rextents) {
1408 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1409 		mp->m_qflags = 0;
1410 		goto write_changes;
1411 	}
1412 
1413 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1414 
1415 	/*
1416 	 * Allocate the quotainfo structure inside the mount struct, create
1417 	 * the quota inode(s), and change/rev the superblock if necessary.
1418 	 */
1419 	error = xfs_qm_init_quotainfo(mp);
1420 	if (error) {
1421 		/*
1422 		 * We must turn off quotas.
1423 		 */
1424 		ASSERT(mp->m_quotainfo == NULL);
1425 		mp->m_qflags = 0;
1426 		goto write_changes;
1427 	}
1428 	/*
1429 	 * If any of the quotas are not consistent, do a quotacheck.
1430 	 */
1431 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1432 		error = xfs_qm_quotacheck(mp);
1433 		if (error) {
1434 			/* Quotacheck failed and disabled quotas. */
1435 			return;
1436 		}
1437 	}
1438 	/*
1439 	 * If one type of quota is off, then it will lose its
1440 	 * quotachecked status, since we won't be doing accounting for
1441 	 * that type anymore.
1442 	 */
1443 	if (!XFS_IS_UQUOTA_ON(mp))
1444 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1445 	if (!XFS_IS_GQUOTA_ON(mp))
1446 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1447 	if (!XFS_IS_PQUOTA_ON(mp))
1448 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1449 
1450  write_changes:
1451 	/*
1452 	 * We actually don't have to acquire the m_sb_lock at all.
1453 	 * This can only be called from mount, and that's single threaded. XXX
1454 	 */
1455 	spin_lock(&mp->m_sb_lock);
1456 	sbf = mp->m_sb.sb_qflags;
1457 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1458 	spin_unlock(&mp->m_sb_lock);
1459 
1460 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1461 		if (xfs_sync_sb(mp, false)) {
1462 			/*
1463 			 * We could only have been turning quotas off.
1464 			 * We aren't in very good shape actually because
1465 			 * the incore structures are convinced that quotas are
1466 			 * off, but the on-disk superblock doesn't know that!
1467 			 */
1468 			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1469 			xfs_alert(mp, "%s: Superblock update failed!",
1470 				__func__);
1471 		}
1472 	}
1473 
1474 	if (error) {
1475 		xfs_warn(mp, "Failed to initialize disk quotas.");
1476 		return;
1477 	}
1478 }
1479 
1480 /*
1481  * This is called after the superblock has been read in and we're ready to
1482  * iget the quota inodes.
1483  */
1484 STATIC int
1485 xfs_qm_init_quotainos(
1486 	xfs_mount_t	*mp)
1487 {
1488 	struct xfs_inode	*uip = NULL;
1489 	struct xfs_inode	*gip = NULL;
1490 	struct xfs_inode	*pip = NULL;
1491 	int			error;
1492 	uint			flags = 0;
1493 
1494 	ASSERT(mp->m_quotainfo);
1495 
1496 	/*
1497 	 * Get the uquota, gquota and pquota inodes.
1498 	 */
1499 	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1500 		if (XFS_IS_UQUOTA_ON(mp) &&
1501 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1502 			ASSERT(mp->m_sb.sb_uquotino > 0);
1503 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1504 					     0, 0, &uip);
1505 			if (error)
1506 				return error;
1507 		}
1508 		if (XFS_IS_GQUOTA_ON(mp) &&
1509 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1510 			ASSERT(mp->m_sb.sb_gquotino > 0);
1511 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1512 					     0, 0, &gip);
1513 			if (error)
1514 				goto error_rele;
1515 		}
1516 		if (XFS_IS_PQUOTA_ON(mp) &&
1517 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1518 			ASSERT(mp->m_sb.sb_pquotino > 0);
1519 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1520 					     0, 0, &pip);
1521 			if (error)
1522 				goto error_rele;
1523 		}
1524 	} else {
1525 		flags |= XFS_QMOPT_SBVERSION;
1526 	}
1527 
1528 	/*
1529 	 * Create the three inodes, if they don't exist already. The changes
1530 	 * made above will get added to a transaction and logged in one of
1531 	 * the qino_alloc calls below.  If the device is readonly,
1532 	 * temporarily switch to read-write to do this.
1533 	 */
1534 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1535 		error = xfs_qm_qino_alloc(mp, &uip,
1536 					      flags | XFS_QMOPT_UQUOTA);
1537 		if (error)
1538 			goto error_rele;
1539 
1540 		flags &= ~XFS_QMOPT_SBVERSION;
1541 	}
1542 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1543 		error = xfs_qm_qino_alloc(mp, &gip,
1544 					  flags | XFS_QMOPT_GQUOTA);
1545 		if (error)
1546 			goto error_rele;
1547 
1548 		flags &= ~XFS_QMOPT_SBVERSION;
1549 	}
1550 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1551 		error = xfs_qm_qino_alloc(mp, &pip,
1552 					  flags | XFS_QMOPT_PQUOTA);
1553 		if (error)
1554 			goto error_rele;
1555 	}
1556 
1557 	mp->m_quotainfo->qi_uquotaip = uip;
1558 	mp->m_quotainfo->qi_gquotaip = gip;
1559 	mp->m_quotainfo->qi_pquotaip = pip;
1560 
1561 	return 0;
1562 
1563 error_rele:
1564 	if (uip)
1565 		xfs_irele(uip);
1566 	if (gip)
1567 		xfs_irele(gip);
1568 	if (pip)
1569 		xfs_irele(pip);
1570 	return error;
1571 }
1572 
1573 STATIC void
1574 xfs_qm_destroy_quotainos(
1575 	struct xfs_quotainfo	*qi)
1576 {
1577 	if (qi->qi_uquotaip) {
1578 		xfs_irele(qi->qi_uquotaip);
1579 		qi->qi_uquotaip = NULL; /* paranoia */
1580 	}
1581 	if (qi->qi_gquotaip) {
1582 		xfs_irele(qi->qi_gquotaip);
1583 		qi->qi_gquotaip = NULL;
1584 	}
1585 	if (qi->qi_pquotaip) {
1586 		xfs_irele(qi->qi_pquotaip);
1587 		qi->qi_pquotaip = NULL;
1588 	}
1589 }
1590 
1591 STATIC void
1592 xfs_qm_dqfree_one(
1593 	struct xfs_dquot	*dqp)
1594 {
1595 	struct xfs_mount	*mp = dqp->q_mount;
1596 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1597 
1598 	mutex_lock(&qi->qi_tree_lock);
1599 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1600 			  be32_to_cpu(dqp->q_core.d_id));
1601 
1602 	qi->qi_dquots--;
1603 	mutex_unlock(&qi->qi_tree_lock);
1604 
1605 	xfs_qm_dqdestroy(dqp);
1606 }
1607 
1608 /* --------------- utility functions for vnodeops ---------------- */
1609 
1610 
1611 /*
1612  * Given an inode, a uid, gid and prid, make sure that we have
1613  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1614  * quotas by creating this file.
1615  * This also attaches dquot(s) to the given inode after locking it,
1616  * and returns the dquots corresponding to the uid and/or gid.
1617  *
1618  * in	: inode (unlocked)
1619  * out	: udquot, gdquot with references taken and unlocked
1620  */
1621 int
1622 xfs_qm_vop_dqalloc(
1623 	struct xfs_inode	*ip,
1624 	kuid_t			uid,
1625 	kgid_t			gid,
1626 	prid_t			prid,
1627 	uint			flags,
1628 	struct xfs_dquot	**O_udqpp,
1629 	struct xfs_dquot	**O_gdqpp,
1630 	struct xfs_dquot	**O_pdqpp)
1631 {
1632 	struct xfs_mount	*mp = ip->i_mount;
1633 	struct inode		*inode = VFS_I(ip);
1634 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1635 	struct xfs_dquot	*uq = NULL;
1636 	struct xfs_dquot	*gq = NULL;
1637 	struct xfs_dquot	*pq = NULL;
1638 	int			error;
1639 	uint			lockflags;
1640 
1641 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1642 		return 0;
1643 
1644 	lockflags = XFS_ILOCK_EXCL;
1645 	xfs_ilock(ip, lockflags);
1646 
1647 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1648 		gid = inode->i_gid;
1649 
1650 	/*
1651 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1652 	 * if necessary. The dquot(s) will not be locked.
1653 	 */
1654 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1655 		error = xfs_qm_dqattach_locked(ip, true);
1656 		if (error) {
1657 			xfs_iunlock(ip, lockflags);
1658 			return error;
1659 		}
1660 	}
1661 
1662 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1663 		if (!uid_eq(inode->i_uid, uid)) {
1664 			/*
1665 			 * What we need is the dquot that has this uid, and
1666 			 * if we send the inode to dqget, the uid of the inode
1667 			 * takes priority over what's sent in the uid argument.
1668 			 * We must unlock inode here before calling dqget if
1669 			 * we're not sending the inode, because otherwise
1670 			 * we'll deadlock by doing trans_reserve while
1671 			 * holding ilock.
1672 			 */
1673 			xfs_iunlock(ip, lockflags);
1674 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1675 					XFS_DQ_USER, true, &uq);
1676 			if (error) {
1677 				ASSERT(error != -ENOENT);
1678 				return error;
1679 			}
1680 			/*
1681 			 * Get the ilock in the right order.
1682 			 */
1683 			xfs_dqunlock(uq);
1684 			lockflags = XFS_ILOCK_SHARED;
1685 			xfs_ilock(ip, lockflags);
1686 		} else {
1687 			/*
1688 			 * Take an extra reference, because we'll return
1689 			 * this to the caller.
1690 			 */
1691 			ASSERT(ip->i_udquot);
1692 			uq = xfs_qm_dqhold(ip->i_udquot);
1693 		}
1694 	}
1695 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1696 		if (!gid_eq(inode->i_gid, gid)) {
1697 			xfs_iunlock(ip, lockflags);
1698 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1699 					XFS_DQ_GROUP, true, &gq);
1700 			if (error) {
1701 				ASSERT(error != -ENOENT);
1702 				goto error_rele;
1703 			}
1704 			xfs_dqunlock(gq);
1705 			lockflags = XFS_ILOCK_SHARED;
1706 			xfs_ilock(ip, lockflags);
1707 		} else {
1708 			ASSERT(ip->i_gdquot);
1709 			gq = xfs_qm_dqhold(ip->i_gdquot);
1710 		}
1711 	}
1712 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1713 		if (ip->i_d.di_projid != prid) {
1714 			xfs_iunlock(ip, lockflags);
1715 			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
1716 					true, &pq);
1717 			if (error) {
1718 				ASSERT(error != -ENOENT);
1719 				goto error_rele;
1720 			}
1721 			xfs_dqunlock(pq);
1722 			lockflags = XFS_ILOCK_SHARED;
1723 			xfs_ilock(ip, lockflags);
1724 		} else {
1725 			ASSERT(ip->i_pdquot);
1726 			pq = xfs_qm_dqhold(ip->i_pdquot);
1727 		}
1728 	}
1729 	if (uq)
1730 		trace_xfs_dquot_dqalloc(ip);
1731 
1732 	xfs_iunlock(ip, lockflags);
1733 	if (O_udqpp)
1734 		*O_udqpp = uq;
1735 	else
1736 		xfs_qm_dqrele(uq);
1737 	if (O_gdqpp)
1738 		*O_gdqpp = gq;
1739 	else
1740 		xfs_qm_dqrele(gq);
1741 	if (O_pdqpp)
1742 		*O_pdqpp = pq;
1743 	else
1744 		xfs_qm_dqrele(pq);
1745 	return 0;
1746 
1747 error_rele:
1748 	xfs_qm_dqrele(gq);
1749 	xfs_qm_dqrele(uq);
1750 	return error;
1751 }
1752 
1753 /*
1754  * Actually transfer ownership, and do dquot modifications.
1755  * These were already reserved.
1756  */
1757 struct xfs_dquot *
1758 xfs_qm_vop_chown(
1759 	struct xfs_trans	*tp,
1760 	struct xfs_inode	*ip,
1761 	struct xfs_dquot	**IO_olddq,
1762 	struct xfs_dquot	*newdq)
1763 {
1764 	struct xfs_dquot	*prevdq;
1765 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1766 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1767 
1768 
1769 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1770 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1771 
1772 	/* old dquot */
1773 	prevdq = *IO_olddq;
1774 	ASSERT(prevdq);
1775 	ASSERT(prevdq != newdq);
1776 
1777 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1778 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1779 
1780 	/* the sparkling new dquot */
1781 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1782 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1783 
1784 	/*
1785 	 * Take an extra reference, because the inode is going to keep
1786 	 * this dquot pointer even after the trans_commit.
1787 	 */
1788 	*IO_olddq = xfs_qm_dqhold(newdq);
1789 
1790 	return prevdq;
1791 }
1792 
1793 /*
1794  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1795  */
1796 int
1797 xfs_qm_vop_chown_reserve(
1798 	struct xfs_trans	*tp,
1799 	struct xfs_inode	*ip,
1800 	struct xfs_dquot	*udqp,
1801 	struct xfs_dquot	*gdqp,
1802 	struct xfs_dquot	*pdqp,
1803 	uint			flags)
1804 {
1805 	struct xfs_mount	*mp = ip->i_mount;
1806 	uint64_t		delblks;
1807 	unsigned int		blkflags, prjflags = 0;
1808 	struct xfs_dquot	*udq_unres = NULL;
1809 	struct xfs_dquot	*gdq_unres = NULL;
1810 	struct xfs_dquot	*pdq_unres = NULL;
1811 	struct xfs_dquot	*udq_delblks = NULL;
1812 	struct xfs_dquot	*gdq_delblks = NULL;
1813 	struct xfs_dquot	*pdq_delblks = NULL;
1814 	int			error;
1815 
1816 
1817 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1818 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1819 
1820 	delblks = ip->i_delayed_blks;
1821 	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1822 			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1823 
1824 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1825 	    i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) {
1826 		udq_delblks = udqp;
1827 		/*
1828 		 * If there are delayed allocation blocks, then we have to
1829 		 * unreserve those from the old dquot, and add them to the
1830 		 * new dquot.
1831 		 */
1832 		if (delblks) {
1833 			ASSERT(ip->i_udquot);
1834 			udq_unres = ip->i_udquot;
1835 		}
1836 	}
1837 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1838 	    i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) {
1839 		gdq_delblks = gdqp;
1840 		if (delblks) {
1841 			ASSERT(ip->i_gdquot);
1842 			gdq_unres = ip->i_gdquot;
1843 		}
1844 	}
1845 
1846 	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1847 	    ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
1848 		prjflags = XFS_QMOPT_ENOSPC;
1849 		pdq_delblks = pdqp;
1850 		if (delblks) {
1851 			ASSERT(ip->i_pdquot);
1852 			pdq_unres = ip->i_pdquot;
1853 		}
1854 	}
1855 
1856 	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1857 				udq_delblks, gdq_delblks, pdq_delblks,
1858 				ip->i_d.di_nblocks, 1,
1859 				flags | blkflags | prjflags);
1860 	if (error)
1861 		return error;
1862 
1863 	/*
1864 	 * Do the delayed blks reservations/unreservations now. Since these
1865 	 * are done without the help of a transaction, if a reservation fails
1866 	 * its previous reservations won't be automatically undone by the
1867 	 * trans code, so we have to do it manually here.
1868 	 */
1869 	if (delblks) {
1870 		/*
1871 		 * Do the reservations first. Unreservation can't fail.
1872 		 */
1873 		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1874 		ASSERT(udq_unres || gdq_unres || pdq_unres);
1875 		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1876 			    udq_delblks, gdq_delblks, pdq_delblks,
1877 			    (xfs_qcnt_t)delblks, 0,
1878 			    flags | blkflags | prjflags);
1879 		if (error)
1880 			return error;
1881 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1882 				udq_unres, gdq_unres, pdq_unres,
1883 				-((xfs_qcnt_t)delblks), 0, blkflags);
1884 	}
1885 
1886 	return 0;
1887 }
1888 
1889 int
1890 xfs_qm_vop_rename_dqattach(
1891 	struct xfs_inode	**i_tab)
1892 {
1893 	struct xfs_mount	*mp = i_tab[0]->i_mount;
1894 	int			i;
1895 
1896 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1897 		return 0;
1898 
1899 	for (i = 0; (i < 4 && i_tab[i]); i++) {
1900 		struct xfs_inode	*ip = i_tab[i];
1901 		int			error;
1902 
1903 		/*
1904 		 * Watch out for duplicate entries in the table.
1905 		 */
1906 		if (i == 0 || ip != i_tab[i-1]) {
1907 			if (XFS_NOT_DQATTACHED(mp, ip)) {
1908 				error = xfs_qm_dqattach(ip);
1909 				if (error)
1910 					return error;
1911 			}
1912 		}
1913 	}
1914 	return 0;
1915 }
1916 
1917 void
1918 xfs_qm_vop_create_dqattach(
1919 	struct xfs_trans	*tp,
1920 	struct xfs_inode	*ip,
1921 	struct xfs_dquot	*udqp,
1922 	struct xfs_dquot	*gdqp,
1923 	struct xfs_dquot	*pdqp)
1924 {
1925 	struct xfs_mount	*mp = tp->t_mountp;
1926 
1927 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1928 		return;
1929 
1930 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1931 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1932 
1933 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1934 		ASSERT(ip->i_udquot == NULL);
1935 		ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));
1936 
1937 		ip->i_udquot = xfs_qm_dqhold(udqp);
1938 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1939 	}
1940 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1941 		ASSERT(ip->i_gdquot == NULL);
1942 		ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));
1943 
1944 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1945 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1946 	}
1947 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1948 		ASSERT(ip->i_pdquot == NULL);
1949 		ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));
1950 
1951 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1952 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1953 	}
1954 }
1955 
1956