/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);


STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers run only during mount/umount or
 * quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
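
/*
 * Note on the walk contract above: the execute() callback is invoked with
 * the tree lock held and the dquot unlocked.  Returning -EAGAIN marks the
 * dquot as skipped, which causes the whole walk to be retried after a
 * short delay; -EFSCORRUPTED aborts the walk immediately; any other error
 * is recorded and the walk continues.  For example, quotacheck flushes
 * every cached user dquot with:
 *
 *	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one, &buffer_list);
 */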


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that the root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot, &i_gdquot or &i_pdquot. This made the code look weird,
	 * but made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked.  This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
	 * if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC is set, the dquot(s) will be allocated if needed.
 * The inode may get unlocked and relocked in here, and the caller must deal
 * with the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
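
/*
 * Typical usage (see xfs_qm_vop_rename_dqattach() below): callers attach
 * dquots before making quota reservations against an inode, e.g.
 *
 *	if (XFS_NOT_DQATTACHED(mp, ip)) {
 *		error = xfs_qm_dqattach(ip, 0);
 *		if (error)
 *			return error;
 *	}
 *
 * Passing XFS_QMOPT_DQALLOC in flags additionally allocates the dquots
 * on disk if they do not exist yet.
 */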

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_del_init(&dqp->q_lru);
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}
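
/*
 * For reference, the lru_status codes returned above mean: LRU_REMOVED -
 * the dquot was taken off the LRU, either because it regained a reference
 * or because it was moved to the dispose list; LRU_SKIP - the dquot was
 * busy, leave it for a later scan; LRU_RETRY - the LRU lock was dropped
 * (to flush a dirty dquot), so the list walk must be restarted.
 */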

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;
	unsigned long		nr_to_scan = sc->nr_to_scan;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
					&nr_to_scan);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_count_node(&qi->qi_lru, sc->nid);
}
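
/*
 * The count/scan pair above forms the per-mount dquot shrinker; it is
 * registered with SHRINKER_NUMA_AWARE in xfs_qm_init_quotainfo() below,
 * so the VM calls it with a per-node (sc->nid) view of the LRU under
 * memory pressure.
 */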

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if the quota inodes are set up, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we go to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before writes are no longer allowed.
		 * If a value is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;

out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}
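
/*
 * A note on the fallback path above: if the id-0 dquot cannot be read
 * (e.g. on a freshly created quota inode), the warn and time limits fall
 * back to the XFS_QM_*LIMIT compile-time defaults, and the hard/soft
 * limits stay at zero from the kmem_zalloc(), which XFS treats as
 * "no limit".
 */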


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	*ip = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
		}
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	if (!*ip) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
								&committed);
		if (error) {
			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
					 XFS_TRANS_ABORT);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * The sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
				(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
				 XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
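
/*
 * Note that only the counters, timers and warning counts are cleared
 * above; the limits stored in each disk dquot are preserved.  Quotacheck
 * then rebuilds the counters from scratch via xfs_qm_dqusage_adjust()
 * below.
 */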

STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * The blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
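
/*
 * The buffers queued on @buffer_list above are not written here; the
 * caller (quotacheck) submits the whole delayed-write list in one go with
 * xfs_buf_delwri_submit() once every dquot buffer has been reset.
 */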

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       &xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done, we
 * can just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
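
/*
 * Note: xfs_qm_dqput() above drops the reference but leaves the dquot
 * dirty in the cache; the accumulated changes are pushed out to the
 * buffers later in one pass by xfs_qm_flush_one().
 */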

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return -EINVAL;
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
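
/*
 * xfs_qm_flush_one() is used as the execute callback for
 * xfs_qm_dquot_walk() during quotacheck; dquots already being freed or
 * already clean are skipped, everything else is flushed to its backing
 * buffer and queued for delayed write.
 */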

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}
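
/*
 * Quotacheck in short: reset every on-disk dquot, replay the usage of
 * every inode in the filesystem into the incore dquots via bulkstat, then
 * flush all dirty dquots out through a single delayed-write buffer list.
 * Any failure disables quota entirely rather than leaving half-checked
 * counters around.
 */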

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * Since quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on-disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	__int64_t		sbflags = 0;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota, gquota and pquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
			    XFS_SB_QFLAGS);
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  sbflags | XFS_SB_PQUOTINO,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return error;
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. The flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}


/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot, pdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock the inode here before calling dqget
			 * if we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else if (pq)
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	if (gq)
		xfs_qm_dqrele(gq);
	if (uq)
		xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
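
/*
 * The dquot returned by xfs_qm_vop_chown() is the previous owner's dquot
 * with the inode's reference still held; the caller is expected to
 * xfs_qm_dqrele() it once the transaction commits.
 */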

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by the
	 * trans code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}