xref: /linux/fs/xfs/xfs_qm.c (revision c98be0c96db00e9b6b02d31e0fa7590c54cdaaac)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_inode.h"
29 #include "xfs_ialloc.h"
30 #include "xfs_itable.h"
31 #include "xfs_quota.h"
32 #include "xfs_error.h"
33 #include "xfs_bmap.h"
34 #include "xfs_bmap_btree.h"
35 #include "xfs_trans.h"
36 #include "xfs_trans_space.h"
37 #include "xfs_qm.h"
38 #include "xfs_trace.h"
39 #include "xfs_icache.h"
40 #include "xfs_cksum.h"
41 #include "xfs_dinode.h"
42 
43 /*
44  * The global quota manager. There is only one of these for the entire
45  * system, _not_ one per file system. XQM keeps track of the overall
46  * quota functionality, including maintaining the freelist and hash
47  * tables of dquots.
48  */
49 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
50 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
51 
52 
53 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
54 /*
55  * We use the batch lookup interface to iterate over the dquots as it
56  * currently is the only interface into the radix tree code that allows
57  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
58  * operations is fine as all callers are used either during mount/umount
59  * or quotaoff.
60  */
61 #define XFS_DQ_LOOKUP_BATCH	32
62 
63 STATIC int
64 xfs_qm_dquot_walk(
65 	struct xfs_mount	*mp,
66 	int			type,
67 	int			(*execute)(struct xfs_dquot *dqp, void *data),
68 	void			*data)
69 {
70 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
71 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
72 	uint32_t		next_index;
73 	int			last_error = 0;
74 	int			skipped;
75 	int			nr_found;
76 
77 restart:
78 	skipped = 0;
79 	next_index = 0;
80 	nr_found = 0;
81 
82 	while (1) {
83 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
84 		int		error = 0;
85 		int		i;
86 
87 		mutex_lock(&qi->qi_tree_lock);
88 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
89 					next_index, XFS_DQ_LOOKUP_BATCH);
90 		if (!nr_found) {
91 			mutex_unlock(&qi->qi_tree_lock);
92 			break;
93 		}
94 
95 		for (i = 0; i < nr_found; i++) {
96 			struct xfs_dquot *dqp = batch[i];
97 
98 			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
99 
100 			error = execute(batch[i], data);
101 			if (error == EAGAIN) {
102 				skipped++;
103 				continue;
104 			}
105 			if (error && last_error != EFSCORRUPTED)
106 				last_error = error;
107 		}
108 
109 		mutex_unlock(&qi->qi_tree_lock);
110 
111 		/* bail out if the filesystem is corrupted.  */
112 		if (last_error == EFSCORRUPTED) {
113 			skipped = 0;
114 			break;
115 		}
116 	}
117 
118 	if (skipped) {
119 		delay(1);
120 		goto restart;
121 	}
122 
123 	return last_error;
124 }
125 
126 
127 /*
128  * Purge a dquot from all tracking data structures and free it.
129  */
130 STATIC int
131 xfs_qm_dqpurge(
132 	struct xfs_dquot	*dqp,
133 	void			*data)
134 {
135 	struct xfs_mount	*mp = dqp->q_mount;
136 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
137 
138 	xfs_dqlock(dqp);
139 	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
140 		xfs_dqunlock(dqp);
141 		return EAGAIN;
142 	}
143 
144 	dqp->dq_flags |= XFS_DQ_FREEING;
145 
146 	xfs_dqflock(dqp);
147 
148 	/*
149 	 * If we are turning this type of quotas off, we don't care
150 	 * about the dirty metadata sitting in this dquot. OTOH, if
151 	 * we're unmounting, we do care, so we flush it and wait.
152 	 */
153 	if (XFS_DQ_IS_DIRTY(dqp)) {
154 		struct xfs_buf	*bp = NULL;
155 		int		error;
156 
157 		/*
158 		 * We don't care about getting disk errors here. We need
159 		 * to purge this dquot anyway, so we go ahead regardless.
160 		 */
161 		error = xfs_qm_dqflush(dqp, &bp);
162 		if (error) {
163 			xfs_warn(mp, "%s: dquot %p flush failed",
164 				__func__, dqp);
165 		} else {
166 			error = xfs_bwrite(bp);
167 			xfs_buf_relse(bp);
168 		}
169 		xfs_dqflock(dqp);
170 	}
171 
172 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
173 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
174 	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
175 
176 	xfs_dqfunlock(dqp);
177 	xfs_dqunlock(dqp);
178 
179 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
180 			  be32_to_cpu(dqp->q_core.d_id));
181 	qi->qi_dquots--;
182 
183 	/*
184 	 * We move dquots to the freelist as soon as their reference count
185 	 * hits zero, so it really should be on the freelist here.
186 	 */
187 	ASSERT(!list_empty(&dqp->q_lru));
188 	list_lru_del(&qi->qi_lru, &dqp->q_lru);
189 	XFS_STATS_DEC(xs_qm_dquot_unused);
190 
191 	xfs_qm_dqdestroy(dqp);
192 	return 0;
193 }
194 
195 /*
196  * Release the group or project dquot pointers the user dquots maybe carrying
197  * around as a hint, and proceed to purge the user dquot cache if requested.
198 */
199 STATIC int
200 xfs_qm_dqpurge_hints(
201 	struct xfs_dquot	*dqp,
202 	void			*data)
203 {
204 	struct xfs_dquot	*gdqp = NULL;
205 	struct xfs_dquot	*pdqp = NULL;
206 	uint			flags = *((uint *)data);
207 
208 	xfs_dqlock(dqp);
209 	if (dqp->dq_flags & XFS_DQ_FREEING) {
210 		xfs_dqunlock(dqp);
211 		return EAGAIN;
212 	}
213 
214 	/* If this quota has a hint attached, prepare for releasing it now */
215 	gdqp = dqp->q_gdquot;
216 	if (gdqp)
217 		dqp->q_gdquot = NULL;
218 
219 	pdqp = dqp->q_pdquot;
220 	if (pdqp)
221 		dqp->q_pdquot = NULL;
222 
223 	xfs_dqunlock(dqp);
224 
225 	if (gdqp)
226 		xfs_qm_dqrele(gdqp);
227 	if (pdqp)
228 		xfs_qm_dqrele(pdqp);
229 
230 	if (flags & XFS_QMOPT_UQUOTA)
231 		return xfs_qm_dqpurge(dqp, NULL);
232 
233 	return 0;
234 }
235 
236 /*
237  * Purge the dquot cache.
238  */
239 void
240 xfs_qm_dqpurge_all(
241 	struct xfs_mount	*mp,
242 	uint			flags)
243 {
244 	/*
245 	 * We have to release group/project dquot hint(s) from the user dquot
246 	 * at first if they are there, otherwise we would run into an infinite
247 	 * loop while walking through radix tree to purge other type of dquots
248 	 * since their refcount is not zero if the user dquot refers to them
249 	 * as hint.
250 	 *
251 	 * Call the special xfs_qm_dqpurge_hints() will end up go through the
252 	 * general xfs_qm_dqpurge() against user dquot cache if requested.
253 	 */
254 	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
255 
256 	if (flags & XFS_QMOPT_GQUOTA)
257 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
258 	if (flags & XFS_QMOPT_PQUOTA)
259 		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
260 }
261 
262 /*
263  * Just destroy the quotainfo structure.
264  */
265 void
266 xfs_qm_unmount(
267 	struct xfs_mount	*mp)
268 {
269 	if (mp->m_quotainfo) {
270 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
271 		xfs_qm_destroy_quotainfo(mp);
272 	}
273 }
274 
275 
276 /*
277  * This is called from xfs_mountfs to start quotas and initialize all
278  * necessary data structures like quotainfo.  This is also responsible for
279  * running a quotacheck as necessary.  We are guaranteed that the superblock
280  * is consistently read in at this point.
281  *
282  * If we fail here, the mount will continue with quota turned off. We don't
283  * need to inidicate success or failure at all.
284  */
285 void
286 xfs_qm_mount_quotas(
287 	xfs_mount_t	*mp)
288 {
289 	int		error = 0;
290 	uint		sbf;
291 
292 	/*
293 	 * If quotas on realtime volumes is not supported, we disable
294 	 * quotas immediately.
295 	 */
296 	if (mp->m_sb.sb_rextents) {
297 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
298 		mp->m_qflags = 0;
299 		goto write_changes;
300 	}
301 
302 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
303 
304 	/*
305 	 * Allocate the quotainfo structure inside the mount struct, and
306 	 * create quotainode(s), and change/rev superblock if necessary.
307 	 */
308 	error = xfs_qm_init_quotainfo(mp);
309 	if (error) {
310 		/*
311 		 * We must turn off quotas.
312 		 */
313 		ASSERT(mp->m_quotainfo == NULL);
314 		mp->m_qflags = 0;
315 		goto write_changes;
316 	}
317 	/*
318 	 * If any of the quotas are not consistent, do a quotacheck.
319 	 */
320 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
321 		error = xfs_qm_quotacheck(mp);
322 		if (error) {
323 			/* Quotacheck failed and disabled quotas. */
324 			return;
325 		}
326 	}
327 	/*
328 	 * If one type of quotas is off, then it will lose its
329 	 * quotachecked status, since we won't be doing accounting for
330 	 * that type anymore.
331 	 */
332 	if (!XFS_IS_UQUOTA_ON(mp))
333 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
334 	if (!XFS_IS_GQUOTA_ON(mp))
335 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
336 	if (!XFS_IS_PQUOTA_ON(mp))
337 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
338 
339  write_changes:
340 	/*
341 	 * We actually don't have to acquire the m_sb_lock at all.
342 	 * This can only be called from mount, and that's single threaded. XXX
343 	 */
344 	spin_lock(&mp->m_sb_lock);
345 	sbf = mp->m_sb.sb_qflags;
346 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
347 	spin_unlock(&mp->m_sb_lock);
348 
349 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
350 		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
351 			/*
352 			 * We could only have been turning quotas off.
353 			 * We aren't in very good shape actually because
354 			 * the incore structures are convinced that quotas are
355 			 * off, but the on disk superblock doesn't know that !
356 			 */
357 			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
358 			xfs_alert(mp, "%s: Superblock update failed!",
359 				__func__);
360 		}
361 	}
362 
363 	if (error) {
364 		xfs_warn(mp, "Failed to initialize disk quotas.");
365 		return;
366 	}
367 }
368 
369 /*
370  * Called from the vfsops layer.
371  */
372 void
373 xfs_qm_unmount_quotas(
374 	xfs_mount_t	*mp)
375 {
376 	/*
377 	 * Release the dquots that root inode, et al might be holding,
378 	 * before we flush quotas and blow away the quotainfo structure.
379 	 */
380 	ASSERT(mp->m_rootip);
381 	xfs_qm_dqdetach(mp->m_rootip);
382 	if (mp->m_rbmip)
383 		xfs_qm_dqdetach(mp->m_rbmip);
384 	if (mp->m_rsumip)
385 		xfs_qm_dqdetach(mp->m_rsumip);
386 
387 	/*
388 	 * Release the quota inodes.
389 	 */
390 	if (mp->m_quotainfo) {
391 		if (mp->m_quotainfo->qi_uquotaip) {
392 			IRELE(mp->m_quotainfo->qi_uquotaip);
393 			mp->m_quotainfo->qi_uquotaip = NULL;
394 		}
395 		if (mp->m_quotainfo->qi_gquotaip) {
396 			IRELE(mp->m_quotainfo->qi_gquotaip);
397 			mp->m_quotainfo->qi_gquotaip = NULL;
398 		}
399 		if (mp->m_quotainfo->qi_pquotaip) {
400 			IRELE(mp->m_quotainfo->qi_pquotaip);
401 			mp->m_quotainfo->qi_pquotaip = NULL;
402 		}
403 	}
404 }
405 
406 STATIC int
407 xfs_qm_dqattach_one(
408 	xfs_inode_t	*ip,
409 	xfs_dqid_t	id,
410 	uint		type,
411 	uint		doalloc,
412 	xfs_dquot_t	*udqhint, /* hint */
413 	xfs_dquot_t	**IO_idqpp)
414 {
415 	xfs_dquot_t	*dqp;
416 	int		error;
417 
418 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
419 	error = 0;
420 
421 	/*
422 	 * See if we already have it in the inode itself. IO_idqpp is
423 	 * &i_udquot or &i_gdquot. This made the code look weird, but
424 	 * made the logic a lot simpler.
425 	 */
426 	dqp = *IO_idqpp;
427 	if (dqp) {
428 		trace_xfs_dqattach_found(dqp);
429 		return 0;
430 	}
431 
432 	/*
433 	 * udqhint is the i_udquot field in inode, and is non-NULL only
434 	 * when the type arg is group/project. Its purpose is to save a
435 	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
436 	 * the user dquot.
437 	 */
438 	if (udqhint) {
439 		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
440 		xfs_dqlock(udqhint);
441 
442 		/*
443 		 * No need to take dqlock to look at the id.
444 		 *
445 		 * The ID can't change until it gets reclaimed, and it won't
446 		 * be reclaimed as long as we have a ref from inode and we
447 		 * hold the ilock.
448 		 */
449 		if (type == XFS_DQ_GROUP)
450 			dqp = udqhint->q_gdquot;
451 		else
452 			dqp = udqhint->q_pdquot;
453 		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
454 			ASSERT(*IO_idqpp == NULL);
455 
456 			*IO_idqpp = xfs_qm_dqhold(dqp);
457 			xfs_dqunlock(udqhint);
458 			return 0;
459 		}
460 
461 		/*
462 		 * We can't hold a dquot lock when we call the dqget code.
463 		 * We'll deadlock in no time, because of (not conforming to)
464 		 * lock ordering - the inodelock comes before any dquot lock,
465 		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
466 		 */
467 		xfs_dqunlock(udqhint);
468 	}
469 
470 	/*
471 	 * Find the dquot from somewhere. This bumps the
472 	 * reference count of dquot and returns it locked.
473 	 * This can return ENOENT if dquot didn't exist on
474 	 * disk and we didn't ask it to allocate;
475 	 * ESRCH if quotas got turned off suddenly.
476 	 */
477 	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
478 			     doalloc | XFS_QMOPT_DOWARN, &dqp);
479 	if (error)
480 		return error;
481 
482 	trace_xfs_dqattach_get(dqp);
483 
484 	/*
485 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
486 	 * that the dquot returned is the one that should go in the inode.
487 	 */
488 	*IO_idqpp = dqp;
489 	xfs_dqunlock(dqp);
490 	return 0;
491 }
492 
493 
494 /*
495  * Given a udquot and group/project type, attach the group/project
496  * dquot pointer to the udquot as a hint for future lookups.
497  */
498 STATIC void
499 xfs_qm_dqattach_hint(
500 	struct xfs_inode	*ip,
501 	int			type)
502 {
503 	struct xfs_dquot **dqhintp;
504 	struct xfs_dquot *dqp;
505 	struct xfs_dquot *udq = ip->i_udquot;
506 
507 	ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
508 
509 	xfs_dqlock(udq);
510 
511 	if (type == XFS_DQ_GROUP) {
512 		dqp = ip->i_gdquot;
513 		dqhintp = &udq->q_gdquot;
514 	} else {
515 		dqp = ip->i_pdquot;
516 		dqhintp = &udq->q_pdquot;
517 	}
518 
519 	if (*dqhintp) {
520 		struct xfs_dquot *tmp;
521 
522 		if (*dqhintp == dqp)
523 			goto done;
524 
525 		tmp = *dqhintp;
526 		*dqhintp = NULL;
527 		xfs_qm_dqrele(tmp);
528 	}
529 
530 	*dqhintp = xfs_qm_dqhold(dqp);
531 done:
532 	xfs_dqunlock(udq);
533 }
534 
535 static bool
536 xfs_qm_need_dqattach(
537 	struct xfs_inode	*ip)
538 {
539 	struct xfs_mount	*mp = ip->i_mount;
540 
541 	if (!XFS_IS_QUOTA_RUNNING(mp))
542 		return false;
543 	if (!XFS_IS_QUOTA_ON(mp))
544 		return false;
545 	if (!XFS_NOT_DQATTACHED(mp, ip))
546 		return false;
547 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
548 		return false;
549 	return true;
550 }
551 
552 /*
553  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
554  * into account.
555  * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
556  * Inode may get unlocked and relocked in here, and the caller must deal with
557  * the consequences.
558  */
559 int
560 xfs_qm_dqattach_locked(
561 	xfs_inode_t	*ip,
562 	uint		flags)
563 {
564 	xfs_mount_t	*mp = ip->i_mount;
565 	uint		nquotas = 0;
566 	int		error = 0;
567 
568 	if (!xfs_qm_need_dqattach(ip))
569 		return 0;
570 
571 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
572 
573 	if (XFS_IS_UQUOTA_ON(mp)) {
574 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
575 						flags & XFS_QMOPT_DQALLOC,
576 						NULL, &ip->i_udquot);
577 		if (error)
578 			goto done;
579 		nquotas++;
580 	}
581 
582 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
583 	if (XFS_IS_GQUOTA_ON(mp)) {
584 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
585 						flags & XFS_QMOPT_DQALLOC,
586 						ip->i_udquot, &ip->i_gdquot);
587 		/*
588 		 * Don't worry about the udquot that we may have
589 		 * attached above. It'll get detached, if not already.
590 		 */
591 		if (error)
592 			goto done;
593 		nquotas++;
594 	}
595 
596 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
597 	if (XFS_IS_PQUOTA_ON(mp)) {
598 		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
599 						flags & XFS_QMOPT_DQALLOC,
600 						ip->i_udquot, &ip->i_pdquot);
601 		/*
602 		 * Don't worry about the udquot that we may have
603 		 * attached above. It'll get detached, if not already.
604 		 */
605 		if (error)
606 			goto done;
607 		nquotas++;
608 	}
609 
610 	/*
611 	 * Attach this group/project quota to the user quota as a hint.
612 	 * This WON'T, in general, result in a thrash.
613 	 */
614 	if (nquotas > 1 && ip->i_udquot) {
615 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
616 		ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
617 		ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));
618 
619 		/*
620 		 * We do not have i_udquot locked at this point, but this check
621 		 * is OK since we don't depend on the i_gdquot to be accurate
622 		 * 100% all the time. It is just a hint, and this will
623 		 * succeed in general.
624 		 */
625 		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
626 			xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);
627 
628 		if (ip->i_udquot->q_pdquot != ip->i_pdquot)
629 			xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
630 	}
631 
632  done:
633 #ifdef DEBUG
634 	if (!error) {
635 		if (XFS_IS_UQUOTA_ON(mp))
636 			ASSERT(ip->i_udquot);
637 		if (XFS_IS_GQUOTA_ON(mp))
638 			ASSERT(ip->i_gdquot);
639 		if (XFS_IS_PQUOTA_ON(mp))
640 			ASSERT(ip->i_pdquot);
641 	}
642 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
643 #endif
644 	return error;
645 }
646 
647 int
648 xfs_qm_dqattach(
649 	struct xfs_inode	*ip,
650 	uint			flags)
651 {
652 	int			error;
653 
654 	if (!xfs_qm_need_dqattach(ip))
655 		return 0;
656 
657 	xfs_ilock(ip, XFS_ILOCK_EXCL);
658 	error = xfs_qm_dqattach_locked(ip, flags);
659 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
660 
661 	return error;
662 }
663 
664 /*
665  * Release dquots (and their references) if any.
666  * The inode should be locked EXCL except when this's called by
667  * xfs_ireclaim.
668  */
669 void
670 xfs_qm_dqdetach(
671 	xfs_inode_t	*ip)
672 {
673 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
674 		return;
675 
676 	trace_xfs_dquot_dqdetach(ip);
677 
678 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
679 	if (ip->i_udquot) {
680 		xfs_qm_dqrele(ip->i_udquot);
681 		ip->i_udquot = NULL;
682 	}
683 	if (ip->i_gdquot) {
684 		xfs_qm_dqrele(ip->i_gdquot);
685 		ip->i_gdquot = NULL;
686 	}
687 	if (ip->i_pdquot) {
688 		xfs_qm_dqrele(ip->i_pdquot);
689 		ip->i_pdquot = NULL;
690 	}
691 }
692 
693 struct xfs_qm_isolate {
694 	struct list_head	buffers;
695 	struct list_head	dispose;
696 };
697 
698 static enum lru_status
699 xfs_qm_dquot_isolate(
700 	struct list_head	*item,
701 	spinlock_t		*lru_lock,
702 	void			*arg)
703 {
704 	struct xfs_dquot	*dqp = container_of(item,
705 						struct xfs_dquot, q_lru);
706 	struct xfs_qm_isolate	*isol = arg;
707 
708 	if (!xfs_dqlock_nowait(dqp))
709 		goto out_miss_busy;
710 
711 	/*
712 	 * This dquot has acquired a reference in the meantime remove it from
713 	 * the freelist and try again.
714 	 */
715 	if (dqp->q_nrefs) {
716 		xfs_dqunlock(dqp);
717 		XFS_STATS_INC(xs_qm_dqwants);
718 
719 		trace_xfs_dqreclaim_want(dqp);
720 		list_del_init(&dqp->q_lru);
721 		XFS_STATS_DEC(xs_qm_dquot_unused);
722 		return LRU_REMOVED;
723 	}
724 
725 	/*
726 	 * If the dquot is dirty, flush it. If it's already being flushed, just
727 	 * skip it so there is time for the IO to complete before we try to
728 	 * reclaim it again on the next LRU pass.
729 	 */
730 	if (!xfs_dqflock_nowait(dqp)) {
731 		xfs_dqunlock(dqp);
732 		goto out_miss_busy;
733 	}
734 
735 	if (XFS_DQ_IS_DIRTY(dqp)) {
736 		struct xfs_buf	*bp = NULL;
737 		int		error;
738 
739 		trace_xfs_dqreclaim_dirty(dqp);
740 
741 		/* we have to drop the LRU lock to flush the dquot */
742 		spin_unlock(lru_lock);
743 
744 		error = xfs_qm_dqflush(dqp, &bp);
745 		if (error) {
746 			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
747 				 __func__, dqp);
748 			goto out_unlock_dirty;
749 		}
750 
751 		xfs_buf_delwri_queue(bp, &isol->buffers);
752 		xfs_buf_relse(bp);
753 		goto out_unlock_dirty;
754 	}
755 	xfs_dqfunlock(dqp);
756 
757 	/*
758 	 * Prevent lookups now that we are past the point of no return.
759 	 */
760 	dqp->dq_flags |= XFS_DQ_FREEING;
761 	xfs_dqunlock(dqp);
762 
763 	ASSERT(dqp->q_nrefs == 0);
764 	list_move_tail(&dqp->q_lru, &isol->dispose);
765 	XFS_STATS_DEC(xs_qm_dquot_unused);
766 	trace_xfs_dqreclaim_done(dqp);
767 	XFS_STATS_INC(xs_qm_dqreclaims);
768 	return LRU_REMOVED;
769 
770 out_miss_busy:
771 	trace_xfs_dqreclaim_busy(dqp);
772 	XFS_STATS_INC(xs_qm_dqreclaim_misses);
773 	return LRU_SKIP;
774 
775 out_unlock_dirty:
776 	trace_xfs_dqreclaim_busy(dqp);
777 	XFS_STATS_INC(xs_qm_dqreclaim_misses);
778 	xfs_dqunlock(dqp);
779 	spin_lock(lru_lock);
780 	return LRU_RETRY;
781 }
782 
783 static unsigned long
784 xfs_qm_shrink_scan(
785 	struct shrinker		*shrink,
786 	struct shrink_control	*sc)
787 {
788 	struct xfs_quotainfo	*qi = container_of(shrink,
789 					struct xfs_quotainfo, qi_shrinker);
790 	struct xfs_qm_isolate	isol;
791 	unsigned long		freed;
792 	int			error;
793 	unsigned long		nr_to_scan = sc->nr_to_scan;
794 
795 	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
796 		return 0;
797 
798 	INIT_LIST_HEAD(&isol.buffers);
799 	INIT_LIST_HEAD(&isol.dispose);
800 
801 	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
802 					&nr_to_scan);
803 
804 	error = xfs_buf_delwri_submit(&isol.buffers);
805 	if (error)
806 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
807 
808 	while (!list_empty(&isol.dispose)) {
809 		struct xfs_dquot	*dqp;
810 
811 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
812 		list_del_init(&dqp->q_lru);
813 		xfs_qm_dqfree_one(dqp);
814 	}
815 
816 	return freed;
817 }
818 
819 static unsigned long
820 xfs_qm_shrink_count(
821 	struct shrinker		*shrink,
822 	struct shrink_control	*sc)
823 {
824 	struct xfs_quotainfo	*qi = container_of(shrink,
825 					struct xfs_quotainfo, qi_shrinker);
826 
827 	return list_lru_count_node(&qi->qi_lru, sc->nid);
828 }
829 
830 /*
831  * This initializes all the quota information that's kept in the
832  * mount structure
833  */
834 STATIC int
835 xfs_qm_init_quotainfo(
836 	xfs_mount_t	*mp)
837 {
838 	xfs_quotainfo_t *qinf;
839 	int		error;
840 	xfs_dquot_t	*dqp;
841 
842 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
843 
844 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
845 
846 	if ((error = list_lru_init(&qinf->qi_lru))) {
847 		kmem_free(qinf);
848 		mp->m_quotainfo = NULL;
849 		return error;
850 	}
851 
852 	/*
853 	 * See if quotainodes are setup, and if not, allocate them,
854 	 * and change the superblock accordingly.
855 	 */
856 	if ((error = xfs_qm_init_quotainos(mp))) {
857 		list_lru_destroy(&qinf->qi_lru);
858 		kmem_free(qinf);
859 		mp->m_quotainfo = NULL;
860 		return error;
861 	}
862 
863 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
864 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
865 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
866 	mutex_init(&qinf->qi_tree_lock);
867 
868 	/* mutex used to serialize quotaoffs */
869 	mutex_init(&qinf->qi_quotaofflock);
870 
871 	/* Precalc some constants */
872 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
873 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp,
874 							qinf->qi_dqchunklen);
875 
876 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
877 
878 	/*
879 	 * We try to get the limits from the superuser's limits fields.
880 	 * This is quite hacky, but it is standard quota practice.
881 	 *
882 	 * We look at the USR dquot with id == 0 first, but if user quotas
883 	 * are not enabled we goto the GRP dquot with id == 0.
884 	 * We don't really care to keep separate default limits for user
885 	 * and group quotas, at least not at this point.
886 	 *
887 	 * Since we may not have done a quotacheck by this point, just read
888 	 * the dquot without attaching it to any hashtables or lists.
889 	 */
890 	error = xfs_qm_dqread(mp, 0,
891 			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
892 			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
893 			  XFS_DQ_PROJ),
894 			XFS_QMOPT_DOWARN, &dqp);
895 	if (!error) {
896 		xfs_disk_dquot_t	*ddqp = &dqp->q_core;
897 
898 		/*
899 		 * The warnings and timers set the grace period given to
900 		 * a user or group before he or she can not perform any
901 		 * more writing. If it is zero, a default is used.
902 		 */
903 		qinf->qi_btimelimit = ddqp->d_btimer ?
904 			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
905 		qinf->qi_itimelimit = ddqp->d_itimer ?
906 			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
907 		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
908 			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
909 		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
910 			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
911 		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
912 			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
913 		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
914 			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
915 		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
916 		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
917 		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
918 		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
919 		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
920 		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
921 
922 		xfs_qm_dqdestroy(dqp);
923 	} else {
924 		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
925 		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
926 		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
927 		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
928 		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
929 		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
930 	}
931 
932 	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
933 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
934 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
935 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
936 	register_shrinker(&qinf->qi_shrinker);
937 	return 0;
938 }
939 
940 
941 /*
942  * Gets called when unmounting a filesystem or when all quotas get
943  * turned off.
944  * This purges the quota inodes, destroys locks and frees itself.
945  */
946 void
947 xfs_qm_destroy_quotainfo(
948 	xfs_mount_t	*mp)
949 {
950 	xfs_quotainfo_t *qi;
951 
952 	qi = mp->m_quotainfo;
953 	ASSERT(qi != NULL);
954 
955 	unregister_shrinker(&qi->qi_shrinker);
956 	list_lru_destroy(&qi->qi_lru);
957 
958 	if (qi->qi_uquotaip) {
959 		IRELE(qi->qi_uquotaip);
960 		qi->qi_uquotaip = NULL; /* paranoia */
961 	}
962 	if (qi->qi_gquotaip) {
963 		IRELE(qi->qi_gquotaip);
964 		qi->qi_gquotaip = NULL;
965 	}
966 	if (qi->qi_pquotaip) {
967 		IRELE(qi->qi_pquotaip);
968 		qi->qi_pquotaip = NULL;
969 	}
970 	mutex_destroy(&qi->qi_quotaofflock);
971 	kmem_free(qi);
972 	mp->m_quotainfo = NULL;
973 }
974 
975 /*
976  * Create an inode and return with a reference already taken, but unlocked
977  * This is how we create quota inodes
978  */
979 STATIC int
980 xfs_qm_qino_alloc(
981 	xfs_mount_t	*mp,
982 	xfs_inode_t	**ip,
983 	__int64_t	sbfields,
984 	uint		flags)
985 {
986 	xfs_trans_t	*tp;
987 	int		error;
988 	int		committed;
989 
990 	*ip = NULL;
991 	/*
992 	 * With superblock that doesn't have separate pquotino, we
993 	 * share an inode between gquota and pquota. If the on-disk
994 	 * superblock has GQUOTA and the filesystem is now mounted
995 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
996 	 * vice-versa.
997 	 */
998 	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
999 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
1000 		xfs_ino_t ino = NULLFSINO;
1001 
1002 		if ((flags & XFS_QMOPT_PQUOTA) &&
1003 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
1004 			ino = mp->m_sb.sb_gquotino;
1005 			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
1006 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
1007 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
1008 			ino = mp->m_sb.sb_pquotino;
1009 			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
1010 		}
1011 		if (ino != NULLFSINO) {
1012 			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
1013 			if (error)
1014 				return error;
1015 			mp->m_sb.sb_gquotino = NULLFSINO;
1016 			mp->m_sb.sb_pquotino = NULLFSINO;
1017 		}
1018 	}
1019 
1020 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
1021 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
1022 				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
1023 	if (error) {
1024 		xfs_trans_cancel(tp, 0);
1025 		return error;
1026 	}
1027 
1028 	if (!*ip) {
1029 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
1030 								&committed);
1031 		if (error) {
1032 			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
1033 					 XFS_TRANS_ABORT);
1034 			return error;
1035 		}
1036 	}
1037 
1038 	/*
1039 	 * Make the changes in the superblock, and log those too.
1040 	 * sbfields arg may contain fields other than *QUOTINO;
1041 	 * VERSIONNUM for example.
1042 	 */
1043 	spin_lock(&mp->m_sb_lock);
1044 	if (flags & XFS_QMOPT_SBVERSION) {
1045 		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
1046 		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1047 			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
1048 				(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1049 				 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1050 				 XFS_SB_QFLAGS));
1051 
1052 		xfs_sb_version_addquota(&mp->m_sb);
1053 		mp->m_sb.sb_uquotino = NULLFSINO;
1054 		mp->m_sb.sb_gquotino = NULLFSINO;
1055 		mp->m_sb.sb_pquotino = NULLFSINO;
1056 
1057 		/* qflags will get updated fully _after_ quotacheck */
1058 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
1059 	}
1060 	if (flags & XFS_QMOPT_UQUOTA)
1061 		mp->m_sb.sb_uquotino = (*ip)->i_ino;
1062 	else if (flags & XFS_QMOPT_GQUOTA)
1063 		mp->m_sb.sb_gquotino = (*ip)->i_ino;
1064 	else
1065 		mp->m_sb.sb_pquotino = (*ip)->i_ino;
1066 	spin_unlock(&mp->m_sb_lock);
1067 	xfs_mod_sb(tp, sbfields);
1068 
1069 	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
1070 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
1071 		return error;
1072 	}
1073 	return 0;
1074 }
1075 
1076 
1077 STATIC void
1078 xfs_qm_reset_dqcounts(
1079 	xfs_mount_t	*mp,
1080 	xfs_buf_t	*bp,
1081 	xfs_dqid_t	id,
1082 	uint		type)
1083 {
1084 	struct xfs_dqblk	*dqb;
1085 	int			j;
1086 
1087 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
1088 
1089 	/*
1090 	 * Reset all counters and timers. They'll be
1091 	 * started afresh by xfs_qm_quotacheck.
1092 	 */
1093 #ifdef DEBUG
1094 	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
1095 	do_div(j, sizeof(xfs_dqblk_t));
1096 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
1097 #endif
1098 	dqb = bp->b_addr;
1099 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
1100 		struct xfs_disk_dquot	*ddq;
1101 
1102 		ddq = (struct xfs_disk_dquot *)&dqb[j];
1103 
1104 		/*
1105 		 * Do a sanity check, and if needed, repair the dqblk. Don't
1106 		 * output any warnings because it's perfectly possible to
1107 		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
1108 		 */
1109 		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
1110 			    "xfs_quotacheck");
1111 		ddq->d_bcount = 0;
1112 		ddq->d_icount = 0;
1113 		ddq->d_rtbcount = 0;
1114 		ddq->d_btimer = 0;
1115 		ddq->d_itimer = 0;
1116 		ddq->d_rtbtimer = 0;
1117 		ddq->d_bwarns = 0;
1118 		ddq->d_iwarns = 0;
1119 		ddq->d_rtbwarns = 0;
1120 
1121 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
1122 			xfs_update_cksum((char *)&dqb[j],
1123 					 sizeof(struct xfs_dqblk),
1124 					 XFS_DQUOT_CRC_OFF);
1125 		}
1126 	}
1127 }
1128 
1129 STATIC int
1130 xfs_qm_dqiter_bufs(
1131 	struct xfs_mount	*mp,
1132 	xfs_dqid_t		firstid,
1133 	xfs_fsblock_t		bno,
1134 	xfs_filblks_t		blkcnt,
1135 	uint			flags,
1136 	struct list_head	*buffer_list)
1137 {
1138 	struct xfs_buf		*bp;
1139 	int			error;
1140 	int			type;
1141 
1142 	ASSERT(blkcnt > 0);
1143 	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
1144 		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
1145 	error = 0;
1146 
1147 	/*
1148 	 * Blkcnt arg can be a very big number, and might even be
1149 	 * larger than the log itself. So, we have to break it up into
1150 	 * manageable-sized transactions.
1151 	 * Note that we don't start a permanent transaction here; we might
1152 	 * not be able to get a log reservation for the whole thing up front,
1153 	 * and we don't really care to either, because we just discard
1154 	 * everything if we were to crash in the middle of this loop.
1155 	 */
1156 	while (blkcnt--) {
1157 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1158 			      XFS_FSB_TO_DADDR(mp, bno),
1159 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1160 			      &xfs_dquot_buf_ops);
1161 
1162 		/*
1163 		 * CRC and validation errors will return a EFSCORRUPTED here. If
1164 		 * this occurs, re-read without CRC validation so that we can
1165 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
1166 		 * will leave a trace in the log indicating corruption has
1167 		 * been detected.
1168 		 */
1169 		if (error == EFSCORRUPTED) {
1170 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1171 				      XFS_FSB_TO_DADDR(mp, bno),
1172 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1173 				      NULL);
1174 		}
1175 
1176 		if (error)
1177 			break;
1178 
1179 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1180 		xfs_buf_delwri_queue(bp, buffer_list);
1181 		xfs_buf_relse(bp);
1182 
1183 		/* goto the next block. */
1184 		bno++;
1185 		firstid += mp->m_quotainfo->qi_dqperchunk;
1186 	}
1187 
1188 	return error;
1189 }
1190 
1191 /*
1192  * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
1193  * caller supplied function for every chunk of dquots that we find.
1194  */
1195 STATIC int
1196 xfs_qm_dqiterate(
1197 	struct xfs_mount	*mp,
1198 	struct xfs_inode	*qip,
1199 	uint			flags,
1200 	struct list_head	*buffer_list)
1201 {
1202 	struct xfs_bmbt_irec	*map;
1203 	int			i, nmaps;	/* number of map entries */
1204 	int			error;		/* return value */
1205 	xfs_fileoff_t		lblkno;
1206 	xfs_filblks_t		maxlblkcnt;
1207 	xfs_dqid_t		firstid;
1208 	xfs_fsblock_t		rablkno;
1209 	xfs_filblks_t		rablkcnt;
1210 
1211 	error = 0;
1212 	/*
1213 	 * This looks racy, but we can't keep an inode lock across a
1214 	 * trans_reserve. But, this gets called during quotacheck, and that
1215 	 * happens only at mount time which is single threaded.
1216 	 */
1217 	if (qip->i_d.di_nblocks == 0)
1218 		return 0;
1219 
1220 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
1221 
1222 	lblkno = 0;
1223 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1224 	do {
1225 		uint		lock_mode;
1226 
1227 		nmaps = XFS_DQITER_MAP_SIZE;
1228 		/*
1229 		 * We aren't changing the inode itself. Just changing
1230 		 * some of its data. No new blocks are added here, and
1231 		 * the inode is never added to the transaction.
1232 		 */
1233 		lock_mode = xfs_ilock_data_map_shared(qip);
1234 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1235 				       map, &nmaps, 0);
1236 		xfs_iunlock(qip, lock_mode);
1237 		if (error)
1238 			break;
1239 
1240 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1241 		for (i = 0; i < nmaps; i++) {
1242 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1243 			ASSERT(map[i].br_blockcount);
1244 
1245 
1246 			lblkno += map[i].br_blockcount;
1247 
1248 			if (map[i].br_startblock == HOLESTARTBLOCK)
1249 				continue;
1250 
1251 			firstid = (xfs_dqid_t) map[i].br_startoff *
1252 				mp->m_quotainfo->qi_dqperchunk;
1253 			/*
1254 			 * Do a read-ahead on the next extent.
1255 			 */
1256 			if ((i+1 < nmaps) &&
1257 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1258 				rablkcnt =  map[i+1].br_blockcount;
1259 				rablkno = map[i+1].br_startblock;
1260 				while (rablkcnt--) {
1261 					xfs_buf_readahead(mp->m_ddev_targp,
1262 					       XFS_FSB_TO_DADDR(mp, rablkno),
1263 					       mp->m_quotainfo->qi_dqchunklen,
1264 					       NULL);
1265 					rablkno++;
1266 				}
1267 			}
1268 			/*
1269 			 * Iterate thru all the blks in the extent and
1270 			 * reset the counters of all the dquots inside them.
1271 			 */
1272 			error = xfs_qm_dqiter_bufs(mp, firstid,
1273 						   map[i].br_startblock,
1274 						   map[i].br_blockcount,
1275 						   flags, buffer_list);
1276 			if (error)
1277 				goto out;
1278 		}
1279 	} while (nmaps > 0);
1280 
1281 out:
1282 	kmem_free(map);
1283 	return error;
1284 }
1285 
1286 /*
1287  * Called by dqusage_adjust in doing a quotacheck.
1288  *
1289  * Given the inode, and a dquot id this updates both the incore dqout as well
1290  * as the buffer copy. This is so that once the quotacheck is done, we can
1291  * just log all the buffers, as opposed to logging numerous updates to
1292  * individual dquots.
1293  */
1294 STATIC int
1295 xfs_qm_quotacheck_dqadjust(
1296 	struct xfs_inode	*ip,
1297 	xfs_dqid_t		id,
1298 	uint			type,
1299 	xfs_qcnt_t		nblks,
1300 	xfs_qcnt_t		rtblks)
1301 {
1302 	struct xfs_mount	*mp = ip->i_mount;
1303 	struct xfs_dquot	*dqp;
1304 	int			error;
1305 
1306 	error = xfs_qm_dqget(mp, ip, id, type,
1307 			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1308 	if (error) {
1309 		/*
1310 		 * Shouldn't be able to turn off quotas here.
1311 		 */
1312 		ASSERT(error != ESRCH);
1313 		ASSERT(error != ENOENT);
1314 		return error;
1315 	}
1316 
1317 	trace_xfs_dqadjust(dqp);
1318 
1319 	/*
1320 	 * Adjust the inode count and the block count to reflect this inode's
1321 	 * resource usage.
1322 	 */
1323 	be64_add_cpu(&dqp->q_core.d_icount, 1);
1324 	dqp->q_res_icount++;
1325 	if (nblks) {
1326 		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1327 		dqp->q_res_bcount += nblks;
1328 	}
1329 	if (rtblks) {
1330 		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1331 		dqp->q_res_rtbcount += rtblks;
1332 	}
1333 
1334 	/*
1335 	 * Set default limits, adjust timers (since we changed usages)
1336 	 *
1337 	 * There are no timers for the default values set in the root dquot.
1338 	 */
1339 	if (dqp->q_core.d_id) {
1340 		xfs_qm_adjust_dqlimits(mp, dqp);
1341 		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1342 	}
1343 
1344 	dqp->dq_flags |= XFS_DQ_DIRTY;
1345 	xfs_qm_dqput(dqp);
1346 	return 0;
1347 }
1348 
1349 STATIC int
1350 xfs_qm_get_rtblks(
1351 	xfs_inode_t	*ip,
1352 	xfs_qcnt_t	*O_rtblks)
1353 {
1354 	xfs_filblks_t	rtblks;			/* total rt blks */
1355 	xfs_extnum_t	idx;			/* extent record index */
1356 	xfs_ifork_t	*ifp;			/* inode fork pointer */
1357 	xfs_extnum_t	nextents;		/* number of extent entries */
1358 	int		error;
1359 
1360 	ASSERT(XFS_IS_REALTIME_INODE(ip));
1361 	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1362 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1363 		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1364 			return error;
1365 	}
1366 	rtblks = 0;
1367 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1368 	for (idx = 0; idx < nextents; idx++)
1369 		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1370 	*O_rtblks = (xfs_qcnt_t)rtblks;
1371 	return 0;
1372 }
1373 
1374 /*
1375  * callback routine supplied to bulkstat(). Given an inumber, find its
1376  * dquots and update them to account for resources taken by that inode.
1377  */
1378 /* ARGSUSED */
1379 STATIC int
1380 xfs_qm_dqusage_adjust(
1381 	xfs_mount_t	*mp,		/* mount point for filesystem */
1382 	xfs_ino_t	ino,		/* inode number to get data for */
1383 	void		__user *buffer,	/* not used */
1384 	int		ubsize,		/* not used */
1385 	int		*ubused,	/* not used */
1386 	int		*res)		/* result code value */
1387 {
1388 	xfs_inode_t	*ip;
1389 	xfs_qcnt_t	nblks, rtblks = 0;
1390 	int		error;
1391 
1392 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1393 
1394 	/*
1395 	 * rootino must have its resources accounted for, not so with the quota
1396 	 * inodes.
1397 	 */
1398 	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
1399 		*res = BULKSTAT_RV_NOTHING;
1400 		return XFS_ERROR(EINVAL);
1401 	}
1402 
1403 	/*
1404 	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
1405 	 * interface expects the inode to be exclusively locked because that's
1406 	 * the case in all other instances. It's OK that we do this because
1407 	 * quotacheck is done only at mount time.
1408 	 */
1409 	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1410 	if (error) {
1411 		*res = BULKSTAT_RV_NOTHING;
1412 		return error;
1413 	}
1414 
1415 	ASSERT(ip->i_delayed_blks == 0);
1416 
1417 	if (XFS_IS_REALTIME_INODE(ip)) {
1418 		/*
1419 		 * Walk thru the extent list and count the realtime blocks.
1420 		 */
1421 		error = xfs_qm_get_rtblks(ip, &rtblks);
1422 		if (error)
1423 			goto error0;
1424 	}
1425 
1426 	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1427 
1428 	/*
1429 	 * Add the (disk blocks and inode) resources occupied by this
1430 	 * inode to its dquots. We do this adjustment in the incore dquot,
1431 	 * and also copy the changes to its buffer.
1432 	 * We don't care about putting these changes in a transaction
1433 	 * envelope because if we crash in the middle of a 'quotacheck'
1434 	 * we have to start from the beginning anyway.
1435 	 * Once we're done, we'll log all the dquot bufs.
1436 	 *
1437 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1438 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1439 	 */
1440 	if (XFS_IS_UQUOTA_ON(mp)) {
1441 		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1442 						   XFS_DQ_USER, nblks, rtblks);
1443 		if (error)
1444 			goto error0;
1445 	}
1446 
1447 	if (XFS_IS_GQUOTA_ON(mp)) {
1448 		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1449 						   XFS_DQ_GROUP, nblks, rtblks);
1450 		if (error)
1451 			goto error0;
1452 	}
1453 
1454 	if (XFS_IS_PQUOTA_ON(mp)) {
1455 		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1456 						   XFS_DQ_PROJ, nblks, rtblks);
1457 		if (error)
1458 			goto error0;
1459 	}
1460 
1461 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1462 	IRELE(ip);
1463 	*res = BULKSTAT_RV_DIDONE;
1464 	return 0;
1465 
1466 error0:
1467 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1468 	IRELE(ip);
1469 	*res = BULKSTAT_RV_GIVEUP;
1470 	return error;
1471 }
1472 
1473 STATIC int
1474 xfs_qm_flush_one(
1475 	struct xfs_dquot	*dqp,
1476 	void			*data)
1477 {
1478 	struct list_head	*buffer_list = data;
1479 	struct xfs_buf		*bp = NULL;
1480 	int			error = 0;
1481 
1482 	xfs_dqlock(dqp);
1483 	if (dqp->dq_flags & XFS_DQ_FREEING)
1484 		goto out_unlock;
1485 	if (!XFS_DQ_IS_DIRTY(dqp))
1486 		goto out_unlock;
1487 
1488 	xfs_dqflock(dqp);
1489 	error = xfs_qm_dqflush(dqp, &bp);
1490 	if (error)
1491 		goto out_unlock;
1492 
1493 	xfs_buf_delwri_queue(bp, buffer_list);
1494 	xfs_buf_relse(bp);
1495 out_unlock:
1496 	xfs_dqunlock(dqp);
1497 	return error;
1498 }
1499 
1500 /*
1501  * Walk thru all the filesystem inodes and construct a consistent view
1502  * of the disk quota world. If the quotacheck fails, disable quotas.
1503  */
1504 int
1505 xfs_qm_quotacheck(
1506 	xfs_mount_t	*mp)
1507 {
1508 	int			done, count, error, error2;
1509 	xfs_ino_t		lastino;
1510 	size_t			structsz;
1511 	uint			flags;
1512 	LIST_HEAD		(buffer_list);
1513 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1514 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1515 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1516 
1517 	count = INT_MAX;
1518 	structsz = 1;
1519 	lastino = 0;
1520 	flags = 0;
1521 
1522 	ASSERT(uip || gip || pip);
1523 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1524 
1525 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1526 
1527 	/*
1528 	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1529 	 * their counters to zero. We need a clean slate.
1530 	 * We don't log our changes till later.
1531 	 */
1532 	if (uip) {
1533 		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
1534 					 &buffer_list);
1535 		if (error)
1536 			goto error_return;
1537 		flags |= XFS_UQUOTA_CHKD;
1538 	}
1539 
1540 	if (gip) {
1541 		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
1542 					 &buffer_list);
1543 		if (error)
1544 			goto error_return;
1545 		flags |= XFS_GQUOTA_CHKD;
1546 	}
1547 
1548 	if (pip) {
1549 		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
1550 					 &buffer_list);
1551 		if (error)
1552 			goto error_return;
1553 		flags |= XFS_PQUOTA_CHKD;
1554 	}
1555 
1556 	do {
1557 		/*
1558 		 * Iterate thru all the inodes in the file system,
1559 		 * adjusting the corresponding dquot counters in core.
1560 		 */
1561 		error = xfs_bulkstat(mp, &lastino, &count,
1562 				     xfs_qm_dqusage_adjust,
1563 				     structsz, NULL, &done);
1564 		if (error)
1565 			break;
1566 
1567 	} while (!done);
1568 
1569 	/*
1570 	 * We've made all the changes that we need to make incore.  Flush them
1571 	 * down to disk buffers if everything was updated successfully.
1572 	 */
1573 	if (XFS_IS_UQUOTA_ON(mp)) {
1574 		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1575 					  &buffer_list);
1576 	}
1577 	if (XFS_IS_GQUOTA_ON(mp)) {
1578 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1579 					   &buffer_list);
1580 		if (!error)
1581 			error = error2;
1582 	}
1583 	if (XFS_IS_PQUOTA_ON(mp)) {
1584 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1585 					   &buffer_list);
1586 		if (!error)
1587 			error = error2;
1588 	}
1589 
1590 	error2 = xfs_buf_delwri_submit(&buffer_list);
1591 	if (!error)
1592 		error = error2;
1593 
1594 	/*
1595 	 * We can get this error if we couldn't do a dquot allocation inside
1596 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1597 	 * dirty dquots that might be cached, we just want to get rid of them
1598 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1599 	 * at this point (because we intentionally didn't in dqget_noattach).
1600 	 */
1601 	if (error) {
1602 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1603 		goto error_return;
1604 	}
1605 
1606 	/*
1607 	 * If one type of quotas is off, then it will lose its
1608 	 * quotachecked status, since we won't be doing accounting for
1609 	 * that type anymore.
1610 	 */
1611 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1612 	mp->m_qflags |= flags;
1613 
1614  error_return:
1615 	while (!list_empty(&buffer_list)) {
1616 		struct xfs_buf *bp =
1617 			list_first_entry(&buffer_list, struct xfs_buf, b_list);
1618 		list_del_init(&bp->b_list);
1619 		xfs_buf_relse(bp);
1620 	}
1621 
1622 	if (error) {
1623 		xfs_warn(mp,
1624 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1625 			error);
1626 		/*
1627 		 * We must turn off quotas.
1628 		 */
1629 		ASSERT(mp->m_quotainfo != NULL);
1630 		xfs_qm_destroy_quotainfo(mp);
1631 		if (xfs_mount_reset_sbqflags(mp)) {
1632 			xfs_warn(mp,
1633 				"Quotacheck: Failed to reset quota flags.");
1634 		}
1635 	} else
1636 		xfs_notice(mp, "Quotacheck: Done.");
1637 	return (error);
1638 }
1639 
1640 /*
1641  * This is called after the superblock has been read in and we're ready to
1642  * iget the quota inodes.
1643  */
1644 STATIC int
1645 xfs_qm_init_quotainos(
1646 	xfs_mount_t	*mp)
1647 {
1648 	struct xfs_inode	*uip = NULL;
1649 	struct xfs_inode	*gip = NULL;
1650 	struct xfs_inode	*pip = NULL;
1651 	int			error;
1652 	__int64_t		sbflags = 0;
1653 	uint			flags = 0;
1654 
1655 	ASSERT(mp->m_quotainfo);
1656 
1657 	/*
1658 	 * Get the uquota and gquota inodes
1659 	 */
1660 	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1661 		if (XFS_IS_UQUOTA_ON(mp) &&
1662 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1663 			ASSERT(mp->m_sb.sb_uquotino > 0);
1664 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1665 					     0, 0, &uip);
1666 			if (error)
1667 				return XFS_ERROR(error);
1668 		}
1669 		if (XFS_IS_GQUOTA_ON(mp) &&
1670 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1671 			ASSERT(mp->m_sb.sb_gquotino > 0);
1672 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1673 					     0, 0, &gip);
1674 			if (error)
1675 				goto error_rele;
1676 		}
1677 		if (XFS_IS_PQUOTA_ON(mp) &&
1678 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1679 			ASSERT(mp->m_sb.sb_pquotino > 0);
1680 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1681 					     0, 0, &pip);
1682 			if (error)
1683 				goto error_rele;
1684 		}
1685 	} else {
1686 		flags |= XFS_QMOPT_SBVERSION;
1687 		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1688 			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1689 			    XFS_SB_QFLAGS);
1690 	}
1691 
1692 	/*
1693 	 * Create the three inodes, if they don't exist already. The changes
1694 	 * made above will get added to a transaction and logged in one of
1695 	 * the qino_alloc calls below.  If the device is readonly,
1696 	 * temporarily switch to read-write to do this.
1697 	 */
1698 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1699 		error = xfs_qm_qino_alloc(mp, &uip,
1700 					      sbflags | XFS_SB_UQUOTINO,
1701 					      flags | XFS_QMOPT_UQUOTA);
1702 		if (error)
1703 			goto error_rele;
1704 
1705 		flags &= ~XFS_QMOPT_SBVERSION;
1706 	}
1707 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1708 		error = xfs_qm_qino_alloc(mp, &gip,
1709 					  sbflags | XFS_SB_GQUOTINO,
1710 					  flags | XFS_QMOPT_GQUOTA);
1711 		if (error)
1712 			goto error_rele;
1713 
1714 		flags &= ~XFS_QMOPT_SBVERSION;
1715 	}
1716 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1717 		error = xfs_qm_qino_alloc(mp, &pip,
1718 					  sbflags | XFS_SB_PQUOTINO,
1719 					  flags | XFS_QMOPT_PQUOTA);
1720 		if (error)
1721 			goto error_rele;
1722 	}
1723 
1724 	mp->m_quotainfo->qi_uquotaip = uip;
1725 	mp->m_quotainfo->qi_gquotaip = gip;
1726 	mp->m_quotainfo->qi_pquotaip = pip;
1727 
1728 	return 0;
1729 
1730 error_rele:
1731 	if (uip)
1732 		IRELE(uip);
1733 	if (gip)
1734 		IRELE(gip);
1735 	if (pip)
1736 		IRELE(pip);
1737 	return XFS_ERROR(error);
1738 }
1739 
1740 STATIC void
1741 xfs_qm_dqfree_one(
1742 	struct xfs_dquot	*dqp)
1743 {
1744 	struct xfs_mount	*mp = dqp->q_mount;
1745 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1746 
1747 	mutex_lock(&qi->qi_tree_lock);
1748 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1749 			  be32_to_cpu(dqp->q_core.d_id));
1750 
1751 	qi->qi_dquots--;
1752 	mutex_unlock(&qi->qi_tree_lock);
1753 
1754 	xfs_qm_dqdestroy(dqp);
1755 }
1756 
1757 /*
1758  * Start a transaction and write the incore superblock changes to
1759  * disk. flags parameter indicates which fields have changed.
1760  */
1761 int
1762 xfs_qm_write_sb_changes(
1763 	xfs_mount_t	*mp,
1764 	__int64_t	flags)
1765 {
1766 	xfs_trans_t	*tp;
1767 	int		error;
1768 
1769 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1770 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
1771 	if (error) {
1772 		xfs_trans_cancel(tp, 0);
1773 		return error;
1774 	}
1775 
1776 	xfs_mod_sb(tp, flags);
1777 	error = xfs_trans_commit(tp, 0);
1778 
1779 	return error;
1780 }
1781 
1782 
1783 /* --------------- utility functions for vnodeops ---------------- */
1784 
1785 
1786 /*
1787  * Given an inode, a uid, gid and prid make sure that we have
1788  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1789  * quotas by creating this file.
1790  * This also attaches dquot(s) to the given inode after locking it,
1791  * and returns the dquots corresponding to the uid and/or gid.
1792  *
1793  * in	: inode (unlocked)
1794  * out	: udquot, gdquot with references taken and unlocked
1795  */
1796 int
1797 xfs_qm_vop_dqalloc(
1798 	struct xfs_inode	*ip,
1799 	xfs_dqid_t		uid,
1800 	xfs_dqid_t		gid,
1801 	prid_t			prid,
1802 	uint			flags,
1803 	struct xfs_dquot	**O_udqpp,
1804 	struct xfs_dquot	**O_gdqpp,
1805 	struct xfs_dquot	**O_pdqpp)
1806 {
1807 	struct xfs_mount	*mp = ip->i_mount;
1808 	struct xfs_dquot	*uq = NULL;
1809 	struct xfs_dquot	*gq = NULL;
1810 	struct xfs_dquot	*pq = NULL;
1811 	int			error;
1812 	uint			lockflags;
1813 
1814 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1815 		return 0;
1816 
1817 	lockflags = XFS_ILOCK_EXCL;
1818 	xfs_ilock(ip, lockflags);
1819 
1820 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1821 		gid = ip->i_d.di_gid;
1822 
1823 	/*
1824 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1825 	 * if necessary. The dquot(s) will not be locked.
1826 	 */
1827 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1828 		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1829 		if (error) {
1830 			xfs_iunlock(ip, lockflags);
1831 			return error;
1832 		}
1833 	}
1834 
1835 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1836 		if (ip->i_d.di_uid != uid) {
1837 			/*
1838 			 * What we need is the dquot that has this uid, and
1839 			 * if we send the inode to dqget, the uid of the inode
1840 			 * takes priority over what's sent in the uid argument.
1841 			 * We must unlock inode here before calling dqget if
1842 			 * we're not sending the inode, because otherwise
1843 			 * we'll deadlock by doing trans_reserve while
1844 			 * holding ilock.
1845 			 */
1846 			xfs_iunlock(ip, lockflags);
1847 			error = xfs_qm_dqget(mp, NULL, uid,
1848 						 XFS_DQ_USER,
1849 						 XFS_QMOPT_DQALLOC |
1850 						 XFS_QMOPT_DOWARN,
1851 						 &uq);
1852 			if (error) {
1853 				ASSERT(error != ENOENT);
1854 				return error;
1855 			}
1856 			/*
1857 			 * Get the ilock in the right order.
1858 			 */
1859 			xfs_dqunlock(uq);
1860 			lockflags = XFS_ILOCK_SHARED;
1861 			xfs_ilock(ip, lockflags);
1862 		} else {
1863 			/*
1864 			 * Take an extra reference, because we'll return
1865 			 * this to caller
1866 			 */
1867 			ASSERT(ip->i_udquot);
1868 			uq = xfs_qm_dqhold(ip->i_udquot);
1869 		}
1870 	}
1871 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1872 		if (ip->i_d.di_gid != gid) {
1873 			xfs_iunlock(ip, lockflags);
1874 			error = xfs_qm_dqget(mp, NULL, gid,
1875 						 XFS_DQ_GROUP,
1876 						 XFS_QMOPT_DQALLOC |
1877 						 XFS_QMOPT_DOWARN,
1878 						 &gq);
1879 			if (error) {
1880 				ASSERT(error != ENOENT);
1881 				goto error_rele;
1882 			}
1883 			xfs_dqunlock(gq);
1884 			lockflags = XFS_ILOCK_SHARED;
1885 			xfs_ilock(ip, lockflags);
1886 		} else {
1887 			ASSERT(ip->i_gdquot);
1888 			gq = xfs_qm_dqhold(ip->i_gdquot);
1889 		}
1890 	}
1891 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1892 		if (xfs_get_projid(ip) != prid) {
1893 			xfs_iunlock(ip, lockflags);
1894 			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1895 						 XFS_DQ_PROJ,
1896 						 XFS_QMOPT_DQALLOC |
1897 						 XFS_QMOPT_DOWARN,
1898 						 &pq);
1899 			if (error) {
1900 				ASSERT(error != ENOENT);
1901 				goto error_rele;
1902 			}
1903 			xfs_dqunlock(pq);
1904 			lockflags = XFS_ILOCK_SHARED;
1905 			xfs_ilock(ip, lockflags);
1906 		} else {
1907 			ASSERT(ip->i_pdquot);
1908 			pq = xfs_qm_dqhold(ip->i_pdquot);
1909 		}
1910 	}
1911 	if (uq)
1912 		trace_xfs_dquot_dqalloc(ip);
1913 
1914 	xfs_iunlock(ip, lockflags);
1915 	if (O_udqpp)
1916 		*O_udqpp = uq;
1917 	else if (uq)
1918 		xfs_qm_dqrele(uq);
1919 	if (O_gdqpp)
1920 		*O_gdqpp = gq;
1921 	else if (gq)
1922 		xfs_qm_dqrele(gq);
1923 	if (O_pdqpp)
1924 		*O_pdqpp = pq;
1925 	else if (pq)
1926 		xfs_qm_dqrele(pq);
1927 	return 0;
1928 
1929 error_rele:
1930 	if (gq)
1931 		xfs_qm_dqrele(gq);
1932 	if (uq)
1933 		xfs_qm_dqrele(uq);
1934 	return error;
1935 }
1936 
1937 /*
1938  * Actually transfer ownership, and do dquot modifications.
1939  * These were already reserved.
1940  */
1941 xfs_dquot_t *
1942 xfs_qm_vop_chown(
1943 	xfs_trans_t	*tp,
1944 	xfs_inode_t	*ip,
1945 	xfs_dquot_t	**IO_olddq,
1946 	xfs_dquot_t	*newdq)
1947 {
1948 	xfs_dquot_t	*prevdq;
1949 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1950 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1951 
1952 
1953 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1954 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1955 
1956 	/* old dquot */
1957 	prevdq = *IO_olddq;
1958 	ASSERT(prevdq);
1959 	ASSERT(prevdq != newdq);
1960 
1961 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1962 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1963 
1964 	/* the sparkling new dquot */
1965 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1966 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1967 
1968 	/*
1969 	 * Take an extra reference, because the inode is going to keep
1970 	 * this dquot pointer even after the trans_commit.
1971 	 */
1972 	*IO_olddq = xfs_qm_dqhold(newdq);
1973 
1974 	return prevdq;
1975 }
1976 
1977 /*
1978  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1979  */
1980 int
1981 xfs_qm_vop_chown_reserve(
1982 	struct xfs_trans	*tp,
1983 	struct xfs_inode	*ip,
1984 	struct xfs_dquot	*udqp,
1985 	struct xfs_dquot	*gdqp,
1986 	struct xfs_dquot	*pdqp,
1987 	uint			flags)
1988 {
1989 	struct xfs_mount	*mp = ip->i_mount;
1990 	uint			delblks, blkflags, prjflags = 0;
1991 	struct xfs_dquot	*udq_unres = NULL;
1992 	struct xfs_dquot	*gdq_unres = NULL;
1993 	struct xfs_dquot	*pdq_unres = NULL;
1994 	struct xfs_dquot	*udq_delblks = NULL;
1995 	struct xfs_dquot	*gdq_delblks = NULL;
1996 	struct xfs_dquot	*pdq_delblks = NULL;
1997 	int			error;
1998 
1999 
2000 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2001 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2002 
2003 	delblks = ip->i_delayed_blks;
2004 	blkflags = XFS_IS_REALTIME_INODE(ip) ?
2005 			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
2006 
2007 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
2008 	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
2009 		udq_delblks = udqp;
2010 		/*
2011 		 * If there are delayed allocation blocks, then we have to
2012 		 * unreserve those from the old dquot, and add them to the
2013 		 * new dquot.
2014 		 */
2015 		if (delblks) {
2016 			ASSERT(ip->i_udquot);
2017 			udq_unres = ip->i_udquot;
2018 		}
2019 	}
2020 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
2021 	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
2022 		gdq_delblks = gdqp;
2023 		if (delblks) {
2024 			ASSERT(ip->i_gdquot);
2025 			gdq_unres = ip->i_gdquot;
2026 		}
2027 	}
2028 
2029 	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
2030 	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
2031 		prjflags = XFS_QMOPT_ENOSPC;
2032 		pdq_delblks = pdqp;
2033 		if (delblks) {
2034 			ASSERT(ip->i_pdquot);
2035 			pdq_unres = ip->i_pdquot;
2036 		}
2037 	}
2038 
2039 	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2040 				udq_delblks, gdq_delblks, pdq_delblks,
2041 				ip->i_d.di_nblocks, 1,
2042 				flags | blkflags | prjflags);
2043 	if (error)
2044 		return error;
2045 
2046 	/*
2047 	 * Do the delayed blks reservations/unreservations now. Since, these
2048 	 * are done without the help of a transaction, if a reservation fails
2049 	 * its previous reservations won't be automatically undone by trans
2050 	 * code. So, we have to do it manually here.
2051 	 */
2052 	if (delblks) {
2053 		/*
2054 		 * Do the reservations first. Unreservation can't fail.
2055 		 */
2056 		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
2057 		ASSERT(udq_unres || gdq_unres || pdq_unres);
2058 		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2059 			    udq_delblks, gdq_delblks, pdq_delblks,
2060 			    (xfs_qcnt_t)delblks, 0,
2061 			    flags | blkflags | prjflags);
2062 		if (error)
2063 			return error;
2064 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2065 				udq_unres, gdq_unres, pdq_unres,
2066 				-((xfs_qcnt_t)delblks), 0, blkflags);
2067 	}
2068 
2069 	return (0);
2070 }
2071 
2072 int
2073 xfs_qm_vop_rename_dqattach(
2074 	struct xfs_inode	**i_tab)
2075 {
2076 	struct xfs_mount	*mp = i_tab[0]->i_mount;
2077 	int			i;
2078 
2079 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2080 		return 0;
2081 
2082 	for (i = 0; (i < 4 && i_tab[i]); i++) {
2083 		struct xfs_inode	*ip = i_tab[i];
2084 		int			error;
2085 
2086 		/*
2087 		 * Watch out for duplicate entries in the table.
2088 		 */
2089 		if (i == 0 || ip != i_tab[i-1]) {
2090 			if (XFS_NOT_DQATTACHED(mp, ip)) {
2091 				error = xfs_qm_dqattach(ip, 0);
2092 				if (error)
2093 					return error;
2094 			}
2095 		}
2096 	}
2097 	return 0;
2098 }
2099 
2100 void
2101 xfs_qm_vop_create_dqattach(
2102 	struct xfs_trans	*tp,
2103 	struct xfs_inode	*ip,
2104 	struct xfs_dquot	*udqp,
2105 	struct xfs_dquot	*gdqp,
2106 	struct xfs_dquot	*pdqp)
2107 {
2108 	struct xfs_mount	*mp = tp->t_mountp;
2109 
2110 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2111 		return;
2112 
2113 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2114 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2115 
2116 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2117 		ASSERT(ip->i_udquot == NULL);
2118 		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2119 
2120 		ip->i_udquot = xfs_qm_dqhold(udqp);
2121 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2122 	}
2123 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2124 		ASSERT(ip->i_gdquot == NULL);
2125 		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
2126 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
2127 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2128 	}
2129 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2130 		ASSERT(ip->i_pdquot == NULL);
2131 		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
2132 
2133 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
2134 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
2135 	}
2136 }
2137 
2138