xref: /linux/fs/xfs/xfs_dquot.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_shared.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_inode.h"
29 #include "xfs_bmap.h"
30 #include "xfs_bmap_util.h"
31 #include "xfs_alloc.h"
32 #include "xfs_quota.h"
33 #include "xfs_error.h"
34 #include "xfs_trans.h"
35 #include "xfs_buf_item.h"
36 #include "xfs_trans_space.h"
37 #include "xfs_trans_priv.h"
38 #include "xfs_qm.h"
39 #include "xfs_cksum.h"
40 #include "xfs_trace.h"
41 #include "xfs_log.h"
42 #include "xfs_bmap_btree.h"
43 
44 /*
45  * Lock order:
46  *
47  * ip->i_lock
48  *   qi->qi_tree_lock
49  *     dquot->q_qlock (xfs_dqlock() and friends)
50  *       dquot->q_flush (xfs_dqflock() and friends)
51  *       qi->qi_lru_lock
52  *
53  * If two dquots need to be locked, user is locked before group/project;
54  * otherwise the dquot with the lower id is locked first.  See xfs_dqlock2().
55  */
56 
57 #ifdef DEBUG
58 xfs_buftarg_t *xfs_dqerror_target;
59 int xfs_do_dqerror;
60 int xfs_dqreq_num;
61 int xfs_dqerror_mod = 33;
62 #endif
63 
64 struct kmem_zone		*xfs_qm_dqtrxzone;
65 static struct kmem_zone		*xfs_qm_dqzone;
66 
67 static struct lock_class_key xfs_dquot_group_class;
68 static struct lock_class_key xfs_dquot_project_class;
69 
70 /*
71  * This is called to free all the memory associated with a dquot
72  */
73 void
74 xfs_qm_dqdestroy(
75 	xfs_dquot_t	*dqp)
76 {
77 	ASSERT(list_empty(&dqp->q_lru));
78 
79 	mutex_destroy(&dqp->q_qlock);
80 	kmem_zone_free(xfs_qm_dqzone, dqp);
81 
82 	XFS_STATS_DEC(xs_qm_dquot);
83 }
84 
85 /*
86  * If default limits are in force, push them into the dquot now.
87  * We overwrite the dquot limits only if they are zero and this
88  * is not the root dquot.
89  */
90 void
91 xfs_qm_adjust_dqlimits(
92 	struct xfs_mount	*mp,
93 	struct xfs_dquot	*dq)
94 {
95 	struct xfs_quotainfo	*q = mp->m_quotainfo;
96 	struct xfs_disk_dquot	*d = &dq->q_core;
97 	int			prealloc = 0;
98 
99 	ASSERT(d->d_id);
100 
101 	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
102 		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
103 		prealloc = 1;
104 	}
105 	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
106 		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
107 		prealloc = 1;
108 	}
109 	if (q->qi_isoftlimit && !d->d_ino_softlimit)
110 		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
111 	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
112 		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
113 	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
114 		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
115 	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
116 		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
117 
118 	if (prealloc)
119 		xfs_dquot_set_prealloc_limits(dq);
120 }
121 
122 /*
123  * Check the limits and timers of a dquot and start or reset timers
124  * if necessary.
125  * This gets called even when quota enforcement is OFF, which makes our
126  * life a little less complicated: we simply don't reject any quota
127  * reservations in that case.
128  * We also return 0 for the timer values in Q_GETQUOTA calls when
129  * enforcement is off.
130  * In contrast, warnings are a little different in that they don't
131  * 'automatically' get started when limits get exceeded.  They do
132  * get reset to zero, however, when we find the count to be under
133  * the soft limit (they are only ever set non-zero via userspace).
134  */
135 void
136 xfs_qm_adjust_dqtimers(
137 	xfs_mount_t		*mp,
138 	xfs_disk_dquot_t	*d)
139 {
140 	ASSERT(d->d_id);
141 
142 #ifdef DEBUG
143 	if (d->d_blk_hardlimit)
144 		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
145 		       be64_to_cpu(d->d_blk_hardlimit));
146 	if (d->d_ino_hardlimit)
147 		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
148 		       be64_to_cpu(d->d_ino_hardlimit));
149 	if (d->d_rtb_hardlimit)
150 		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
151 		       be64_to_cpu(d->d_rtb_hardlimit));
152 #endif
153 
154 	if (!d->d_btimer) {
155 		if ((d->d_blk_softlimit &&
156 		     (be64_to_cpu(d->d_bcount) >
157 		      be64_to_cpu(d->d_blk_softlimit))) ||
158 		    (d->d_blk_hardlimit &&
159 		     (be64_to_cpu(d->d_bcount) >
160 		      be64_to_cpu(d->d_blk_hardlimit)))) {
161 			d->d_btimer = cpu_to_be32(get_seconds() +
162 					mp->m_quotainfo->qi_btimelimit);
163 		} else {
164 			d->d_bwarns = 0;
165 		}
166 	} else {
167 		if ((!d->d_blk_softlimit ||
168 		     (be64_to_cpu(d->d_bcount) <=
169 		      be64_to_cpu(d->d_blk_softlimit))) &&
170 		    (!d->d_blk_hardlimit ||
171 		    (be64_to_cpu(d->d_bcount) <=
172 		     be64_to_cpu(d->d_blk_hardlimit)))) {
173 			d->d_btimer = 0;
174 		}
175 	}
176 
177 	if (!d->d_itimer) {
178 		if ((d->d_ino_softlimit &&
179 		     (be64_to_cpu(d->d_icount) >
180 		      be64_to_cpu(d->d_ino_softlimit))) ||
181 		    (d->d_ino_hardlimit &&
182 		     (be64_to_cpu(d->d_icount) >
183 		      be64_to_cpu(d->d_ino_hardlimit)))) {
184 			d->d_itimer = cpu_to_be32(get_seconds() +
185 					mp->m_quotainfo->qi_itimelimit);
186 		} else {
187 			d->d_iwarns = 0;
188 		}
189 	} else {
190 		if ((!d->d_ino_softlimit ||
191 		     (be64_to_cpu(d->d_icount) <=
192 		      be64_to_cpu(d->d_ino_softlimit)))  &&
193 		    (!d->d_ino_hardlimit ||
194 		     (be64_to_cpu(d->d_icount) <=
195 		      be64_to_cpu(d->d_ino_hardlimit)))) {
196 			d->d_itimer = 0;
197 		}
198 	}
199 
200 	if (!d->d_rtbtimer) {
201 		if ((d->d_rtb_softlimit &&
202 		     (be64_to_cpu(d->d_rtbcount) >
203 		      be64_to_cpu(d->d_rtb_softlimit))) ||
204 		    (d->d_rtb_hardlimit &&
205 		     (be64_to_cpu(d->d_rtbcount) >
206 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
207 			d->d_rtbtimer = cpu_to_be32(get_seconds() +
208 					mp->m_quotainfo->qi_rtbtimelimit);
209 		} else {
210 			d->d_rtbwarns = 0;
211 		}
212 	} else {
213 		if ((!d->d_rtb_softlimit ||
214 		     (be64_to_cpu(d->d_rtbcount) <=
215 		      be64_to_cpu(d->d_rtb_softlimit))) &&
216 		    (!d->d_rtb_hardlimit ||
217 		     (be64_to_cpu(d->d_rtbcount) <=
218 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
219 			d->d_rtbtimer = 0;
220 		}
221 	}
222 }
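/*
 * Illustrative sketch (hypothetical helper, not in the original file): the
 * timer logic above repeats one comparison pattern per resource.  Factored
 * out for the block count, it would look something like this.
 */
static inline int
xfs_qm_blks_over_limits(struct xfs_disk_dquot *d)
{
	__uint64_t	bcount = be64_to_cpu(d->d_bcount);

	return (d->d_blk_softlimit &&
		bcount > be64_to_cpu(d->d_blk_softlimit)) ||
	       (d->d_blk_hardlimit &&
		bcount > be64_to_cpu(d->d_blk_hardlimit));
}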
223 
224 /*
225  * Initialize a buffer full of dquots and log the whole thing.
226  */
227 STATIC void
228 xfs_qm_init_dquot_blk(
229 	xfs_trans_t	*tp,
230 	xfs_mount_t	*mp,
231 	xfs_dqid_t	id,
232 	uint		type,
233 	xfs_buf_t	*bp)
234 {
235 	struct xfs_quotainfo	*q = mp->m_quotainfo;
236 	xfs_dqblk_t	*d;
237 	int		curid, i;
238 
239 	ASSERT(tp);
240 	ASSERT(xfs_buf_islocked(bp));
241 
242 	d = bp->b_addr;
243 
244 	/*
245 	 * ID of the first dquot in the block - ids are zero based.
246 	 */
247 	curid = id - (id % q->qi_dqperchunk);
248 	ASSERT(curid >= 0);
249 	memset(d, 0, BBTOB(q->qi_dqchunklen));
250 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
251 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
252 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
253 		d->dd_diskdq.d_id = cpu_to_be32(curid);
254 		d->dd_diskdq.d_flags = type;
255 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
256 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
257 			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
258 					 XFS_DQUOT_CRC_OFF);
259 		}
260 	}
261 
262 	xfs_trans_dquot_buf(tp, bp,
263 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
264 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
265 			     XFS_BLF_GDQUOT_BUF)));
266 	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
267 }
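/*
 * Worked example for the curid computation above, with illustrative
 * numbers: if qi_dqperchunk == 30 and id == 173, then
 *
 *	curid = 173 - (173 % 30) = 173 - 23 = 150
 *
 * so the block is initialized with dquot ids 150..179.
 */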
268 
269 /*
270  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
271  * watermarks correspond to the soft and hard limits by default. If a soft limit
272  * is not specified, we use 95% of the hard limit.
273  */
274 void
275 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
276 {
277 	__uint64_t space;
278 
279 	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
280 	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
281 	if (!dqp->q_prealloc_lo_wmark) {
282 		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
283 		do_div(dqp->q_prealloc_lo_wmark, 100);
284 		dqp->q_prealloc_lo_wmark *= 95;
285 	}
286 
287 	space = dqp->q_prealloc_hi_wmark;
288 
289 	do_div(space, 100);
290 	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
291 	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
292 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
293 }
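/*
 * Worked example for the watermark math above, with illustrative numbers:
 * if d_blk_hardlimit == 1000 blocks and no soft limit is set, then
 *
 *	q_prealloc_hi_wmark = 1000
 *	q_prealloc_lo_wmark = 1000 / 100 * 95 = 950
 *	q_low_space[XFS_QLOWSP_1_PCNT] = 10
 *	q_low_space[XFS_QLOWSP_3_PCNT] = 30
 *	q_low_space[XFS_QLOWSP_5_PCNT] = 50
 */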
294 
295 /*
296  * Allocate a block and fill it with dquots.
297  * This is called when the bmapi finds a hole.
298  */
299 STATIC int
300 xfs_qm_dqalloc(
301 	xfs_trans_t	**tpp,
302 	xfs_mount_t	*mp,
303 	xfs_dquot_t	*dqp,
304 	xfs_inode_t	*quotip,
305 	xfs_fileoff_t	offset_fsb,
306 	xfs_buf_t	**O_bpp)
307 {
308 	xfs_fsblock_t	firstblock;
309 	xfs_bmap_free_t flist;
310 	xfs_bmbt_irec_t map;
311 	int		nmaps, error, committed;
312 	xfs_buf_t	*bp;
313 	xfs_trans_t	*tp = *tpp;
314 
315 	ASSERT(tp != NULL);
316 
317 	trace_xfs_dqalloc(dqp);
318 
319 	/*
320 	 * Initialize the bmap freelist prior to calling bmapi code.
321 	 */
322 	xfs_bmap_init(&flist, &firstblock);
323 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
324 	/*
325 	 * Return if this type of quota was turned off while we didn't
326 	 * hold the inode lock.
327 	 */
328 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
329 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
330 		return (ESRCH);
331 	}
332 
333 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
334 	nmaps = 1;
335 	error = xfs_bmapi_write(tp, quotip, offset_fsb,
336 				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
337 				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
338 				&map, &nmaps, &flist);
339 	if (error)
340 		goto error0;
341 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
342 	ASSERT(nmaps == 1);
343 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
344 	       (map.br_startblock != HOLESTARTBLOCK));
345 
346 	/*
347 	 * Keep track of the blkno to save a lookup later
348 	 */
349 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
350 
351 	/* now we can just get the buffer (there's nothing to read yet) */
352 	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
353 			       dqp->q_blkno,
354 			       mp->m_quotainfo->qi_dqchunklen,
355 			       0);
356 
357 	error = xfs_buf_geterror(bp);
358 	if (error)
359 		goto error1;
360 	bp->b_ops = &xfs_dquot_buf_ops;
361 
362 	/*
363 	 * Make a chunk of dquots out of this buffer and log
364 	 * the entire thing.
365 	 */
366 	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
367 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
368 
369 	/*
370 	 * xfs_bmap_finish() may commit the current transaction and
371 	 * start a second transaction if the freelist is not empty.
372 	 *
373 	 * Since we still want to modify this buffer, we need to
374 	 * ensure that the buffer is not released on commit of
375 	 * the first transaction and ensure the buffer is added to the
376 	 * second transaction.
377 	 *
378 	 * If there is only one transaction then don't stop the buffer
379 	 * from being released when it commits later on.
380 	 */
381 
382 	xfs_trans_bhold(tp, bp);
383 
384 	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
385 		goto error1;
386 	}
387 
388 	if (committed) {
389 		tp = *tpp;
390 		xfs_trans_bjoin(tp, bp);
391 	} else {
392 		xfs_trans_bhold_release(tp, bp);
393 	}
394 
395 	*O_bpp = bp;
396 	return 0;
397 
398 error1:
399 	xfs_bmap_cancel(&flist);
400 error0:
401 	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
402 
403 	return (error);
404 }
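/*
 * In outline, the buffer-hold pattern used above is:
 *
 *	xfs_trans_bhold(tp, bp);		   hold bp across a commit
 *	error = xfs_bmap_finish(tpp, &flist, &committed);
 *	if (committed)
 *		xfs_trans_bjoin(*tpp, bp);	   rejoin the new transaction
 *	else
 *		xfs_trans_bhold_release(tp, bp);   drop the extra hold
 */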
405 
406 STATIC int
407 xfs_qm_dqrepair(
408 	struct xfs_mount	*mp,
409 	struct xfs_trans	*tp,
410 	struct xfs_dquot	*dqp,
411 	xfs_dqid_t		firstid,
412 	struct xfs_buf		**bpp)
413 {
414 	int			error;
415 	struct xfs_disk_dquot	*ddq;
416 	struct xfs_dqblk	*d;
417 	int			i;
418 
419 	/*
420 	 * Read the buffer without verification so we get the corrupted
421 	 * buffer returned to us. make sure we verify it on write, though.
422 	 * buffer returned to us.  Make sure we verify it on write, though.
423 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
424 				   mp->m_quotainfo->qi_dqchunklen,
425 				   0, bpp, NULL);
426 
427 	if (error) {
428 		ASSERT(*bpp == NULL);
429 		return XFS_ERROR(error);
430 	}
431 	(*bpp)->b_ops = &xfs_dquot_buf_ops;
432 
433 	ASSERT(xfs_buf_islocked(*bpp));
434 	d = (struct xfs_dqblk *)(*bpp)->b_addr;
435 
436 	/* Do the actual repair of dquots in this buffer */
437 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
438 		ddq = &d[i].dd_diskdq;
439 		error = xfs_dqcheck(mp, ddq, firstid + i,
440 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
441 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
442 		if (error) {
443 			/* repair failed, we're screwed */
444 			xfs_trans_brelse(tp, *bpp);
445 			return XFS_ERROR(EIO);
446 		}
447 	}
448 
449 	return 0;
450 }
451 
452 /*
453  * Maps a dquot to the buffer containing its on-disk version.
454  * This returns a ptr to the buffer containing the on-disk dquot
455  * in the bpp param, and a ptr to the on-disk dquot within that buffer
456  */
457 STATIC int
458 xfs_qm_dqtobp(
459 	xfs_trans_t		**tpp,
460 	xfs_dquot_t		*dqp,
461 	xfs_disk_dquot_t	**O_ddpp,
462 	xfs_buf_t		**O_bpp,
463 	uint			flags)
464 {
465 	struct xfs_bmbt_irec	map;
466 	int			nmaps = 1, error;
467 	struct xfs_buf		*bp;
468 	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
469 	struct xfs_mount	*mp = dqp->q_mount;
470 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
471 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
472 
473 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
474 
475 	xfs_ilock(quotip, XFS_ILOCK_SHARED);
476 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
477 		/*
478 		 * Return if this type of quota was turned off while we
479 		 * didn't have the quota inode lock.
480 		 */
481 		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
482 		return ESRCH;
483 	}
484 
485 	/*
486 	 * Find the block map; no allocations yet
487 	 */
488 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
489 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
490 
491 	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
492 	if (error)
493 		return error;
494 
495 	ASSERT(nmaps == 1);
496 	ASSERT(map.br_blockcount == 1);
497 
498 	/*
499 	 * Offset of the dquot in the (fixed-size) dquot chunk.
500 	 */
501 	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
502 		sizeof(xfs_dqblk_t);
503 
504 	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
505 	if (map.br_startblock == HOLESTARTBLOCK) {
506 		/*
507 		 * We don't allocate unless we're asked to
508 		 */
509 		if (!(flags & XFS_QMOPT_DQALLOC))
510 			return ENOENT;
511 
512 		ASSERT(tp);
513 		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
514 					dqp->q_fileoffset, &bp);
515 		if (error)
516 			return error;
517 		tp = *tpp;
518 	} else {
519 		trace_xfs_dqtobp_read(dqp);
520 
521 		/*
522 		 * store the blkno etc so that we don't have to do the
523 		 * mapping all the time
524 		 */
525 		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
526 
527 		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
528 					   dqp->q_blkno,
529 					   mp->m_quotainfo->qi_dqchunklen,
530 					   0, &bp, &xfs_dquot_buf_ops);
531 
532 		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
533 			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
534 						mp->m_quotainfo->qi_dqperchunk;
535 			ASSERT(bp == NULL);
536 			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
537 		}
538 
539 		if (error) {
540 			ASSERT(bp == NULL);
541 			return XFS_ERROR(error);
542 		}
543 	}
544 
545 	ASSERT(xfs_buf_islocked(bp));
546 	*O_bpp = bp;
547 	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
548 
549 	return (0);
550 }
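/*
 * Worked example for the mapping math above, with illustrative numbers:
 * if qi_dqperchunk == 30 and id == 173, xfs_qm_dqtobp() computes
 *
 *	q_fileoffset = 173 / 30 = 5
 *	q_bufoffset  = (173 % 30) * sizeof(xfs_dqblk_t)
 *		     = 23 * sizeof(xfs_dqblk_t)
 *
 * i.e. the dquot lives in the sixth chunk of the quota inode, 23 dquot
 * slots into that chunk.
 */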
551 
552 
553 /*
554  * Read in the on-disk dquot using dqtobp(), then copy it to an incore
555  * version and release the buffer immediately.
556  *
557  * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
558  */
559 int
560 xfs_qm_dqread(
561 	struct xfs_mount	*mp,
562 	xfs_dqid_t		id,
563 	uint			type,
564 	uint			flags,
565 	struct xfs_dquot	**O_dqpp)
566 {
567 	struct xfs_dquot	*dqp;
568 	struct xfs_disk_dquot	*ddqp;
569 	struct xfs_buf		*bp;
570 	struct xfs_trans	*tp = NULL;
571 	int			error;
572 	int			cancelflags = 0;
573 
574 
575 	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
576 
577 	dqp->dq_flags = type;
578 	dqp->q_core.d_id = cpu_to_be32(id);
579 	dqp->q_mount = mp;
580 	INIT_LIST_HEAD(&dqp->q_lru);
581 	mutex_init(&dqp->q_qlock);
582 	init_waitqueue_head(&dqp->q_pinwait);
583 
584 	/*
585 	 * Because we want to use a counting completion, complete
586 	 * the flush completion once to allow a single access to
587 	 * the flush completion without blocking.
588 	 */
589 	init_completion(&dqp->q_flush);
590 	complete(&dqp->q_flush);
591 
592 	/*
593 	 * Make sure group quotas have a different lock class than user
594 	 * quotas.
595 	 */
596 	switch (type) {
597 	case XFS_DQ_USER:
598 		/* uses the default lock class */
599 		break;
600 	case XFS_DQ_GROUP:
601 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
602 		break;
603 	case XFS_DQ_PROJ:
604 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
605 		break;
606 	default:
607 		ASSERT(0);
608 		break;
609 	}
610 
611 	XFS_STATS_INC(xs_qm_dquot);
612 
613 	trace_xfs_dqread(dqp);
614 
615 	if (flags & XFS_QMOPT_DQALLOC) {
616 		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
617 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrsetm,
618 					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
619 		if (error)
620 			goto error1;
621 		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
622 	}
623 
624 	/*
625 	 * Get a pointer to the on-disk dquot and the buffer containing it.
626 	 * dqp already knows its own type (GROUP/USER).
627 	 */
628 	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
629 	if (error) {
630 		/*
631 		 * This can happen if quotas got turned off (ESRCH),
632 		 * or if the dquot didn't exist on disk and we ask to
633 		 * allocate (ENOENT).
634 		 */
635 		trace_xfs_dqread_fail(dqp);
636 		cancelflags |= XFS_TRANS_ABORT;
637 		goto error1;
638 	}
639 
640 	/* copy everything from disk dquot to the incore dquot */
641 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
642 	xfs_qm_dquot_logitem_init(dqp);
643 
644 	/*
645 	 * Reservation counters are defined as reservation plus current usage
646 	 * to avoid having to add every time.
647 	 */
648 	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
649 	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
650 	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
651 
652 	/* initialize the dquot speculative prealloc thresholds */
653 	xfs_dquot_set_prealloc_limits(dqp);
654 
655 	/* Mark the buf so that this will stay incore a little longer */
656 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
657 
658 	/*
659 	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
660 	 * so we need to release it with xfs_trans_brelse().
661 	 * The strategy here is identical to that of inodes; we lock
662 	 * the dquot in xfs_qm_dqget() before making it accessible to
663 	 * others. This is because dquots, like inodes, need a good level of
664 	 * concurrency, and we don't want to take locks on the entire buffers
665 	 * for dquot accesses.
666 	 * Note also that the dquot buffer may even be dirty at this point, if
667 	 * this particular dquot was repaired. We still aren't afraid to
668 	 * brelse it because we have the changes incore.
669 	 */
670 	ASSERT(xfs_buf_islocked(bp));
671 	xfs_trans_brelse(tp, bp);
672 
673 	if (tp) {
674 		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
675 		if (error)
676 			goto error0;
677 	}
678 
679 	*O_dqpp = dqp;
680 	return error;
681 
682 error1:
683 	if (tp)
684 		xfs_trans_cancel(tp, cancelflags);
685 error0:
686 	xfs_qm_dqdestroy(dqp);
687 	*O_dqpp = NULL;
688 	return error;
689 }
690 
691 /*
692  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
693  * locked dquot, doing an allocation (if requested) as needed.
694  * When both an inode and an id are given, the inode's id takes precedence.
695  * That is, if the id changes while we don't hold the ilock inside this
696  * function, the new dquot is returned, not necessarily the one requested
697  * in the id argument.
698  */
699 int
700 xfs_qm_dqget(
701 	xfs_mount_t	*mp,
702 	xfs_inode_t	*ip,	  /* locked inode (optional) */
703 	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
704 	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
705 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
706 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
707 {
708 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
709 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
710 	struct xfs_dquot	*dqp;
711 	int			error;
712 
713 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
714 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
715 	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
716 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
717 		return (ESRCH);
718 	}
719 
720 #ifdef DEBUG
721 	if (xfs_do_dqerror) {
722 		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
723 		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
724 			xfs_debug(mp, "Returning error in dqget");
725 			return (EIO);
726 		}
727 	}
728 
729 	ASSERT(type == XFS_DQ_USER ||
730 	       type == XFS_DQ_PROJ ||
731 	       type == XFS_DQ_GROUP);
732 	if (ip) {
733 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
734 		ASSERT(xfs_inode_dquot(ip, type) == NULL);
735 	}
736 #endif
737 
738 restart:
739 	mutex_lock(&qi->qi_tree_lock);
740 	dqp = radix_tree_lookup(tree, id);
741 	if (dqp) {
742 		xfs_dqlock(dqp);
743 		if (dqp->dq_flags & XFS_DQ_FREEING) {
744 			xfs_dqunlock(dqp);
745 			mutex_unlock(&qi->qi_tree_lock);
746 			trace_xfs_dqget_freeing(dqp);
747 			delay(1);
748 			goto restart;
749 		}
750 
751 		dqp->q_nrefs++;
752 		mutex_unlock(&qi->qi_tree_lock);
753 
754 		trace_xfs_dqget_hit(dqp);
755 		XFS_STATS_INC(xs_qm_dqcachehits);
756 		*O_dqpp = dqp;
757 		return 0;
758 	}
759 	mutex_unlock(&qi->qi_tree_lock);
760 	XFS_STATS_INC(xs_qm_dqcachemisses);
761 
762 	/*
763 	 * Dquot cache miss. We don't want to keep the inode lock across
764 	 * a (potential) disk read. Also we don't want to deal with the lock
765 	 * ordering between quotainode and this inode. OTOH, dropping the inode
766 	 * lock here means dealing with a chown that can happen before
767 	 * we re-acquire the lock.
768 	 */
769 	if (ip)
770 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
771 
772 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
773 
774 	if (ip)
775 		xfs_ilock(ip, XFS_ILOCK_EXCL);
776 
777 	if (error)
778 		return error;
779 
780 	if (ip) {
781 		/*
782 		 * A dquot could be attached to this inode by now, since
783 		 * we had dropped the ilock.
784 		 */
785 		if (xfs_this_quota_on(mp, type)) {
786 			struct xfs_dquot	*dqp1;
787 
788 			dqp1 = xfs_inode_dquot(ip, type);
789 			if (dqp1) {
790 				xfs_qm_dqdestroy(dqp);
791 				dqp = dqp1;
792 				xfs_dqlock(dqp);
793 				goto dqret;
794 			}
795 		} else {
796 			/* inode stays locked on return */
797 			xfs_qm_dqdestroy(dqp);
798 			return XFS_ERROR(ESRCH);
799 		}
800 	}
801 
802 	mutex_lock(&qi->qi_tree_lock);
803 	error = -radix_tree_insert(tree, id, dqp);
804 	if (unlikely(error)) {
805 		WARN_ON(error != EEXIST);
806 
807 		/*
808 		 * Duplicate found. Just throw away the new dquot and start
809 		 * over.
810 		 */
811 		mutex_unlock(&qi->qi_tree_lock);
812 		trace_xfs_dqget_dup(dqp);
813 		xfs_qm_dqdestroy(dqp);
814 		XFS_STATS_INC(xs_qm_dquot_dups);
815 		goto restart;
816 	}
817 
818 	/*
819 	 * We return a locked dquot to the caller, with a reference taken
820 	 */
821 	xfs_dqlock(dqp);
822 	dqp->q_nrefs = 1;
823 
824 	qi->qi_dquots++;
825 	mutex_unlock(&qi->qi_tree_lock);
826 
827  dqret:
828 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
829 	trace_xfs_dqget_miss(dqp);
830 	*O_dqpp = dqp;
831 	return (0);
832 }
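/*
 * Illustrative caller (hypothetical, not in the original file; assumes
 * user quotas are enabled): look up the user dquot for an id, allocating
 * it on disk if it does not exist yet.  xfs_qm_dqget() returns the dquot
 * locked with one reference held; xfs_qm_dqput() unlocks it and drops
 * that reference.
 */
static int __maybe_unused
xfs_qm_dqget_example(
	xfs_mount_t	*mp,
	xfs_dqid_t	id)
{
	xfs_dquot_t	*dqp;
	int		error;

	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER,
			     XFS_QMOPT_DQALLOC, &dqp);
	if (error)
		return error;

	/* ... examine or modify dqp->q_core while holding the lock ... */

	xfs_qm_dqput(dqp);
	return 0;
}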
833 
834 
835 STATIC void
836 xfs_qm_dqput_final(
837 	struct xfs_dquot	*dqp)
838 {
839 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
840 	struct xfs_dquot	*gdqp;
841 	struct xfs_dquot	*pdqp;
842 
843 	trace_xfs_dqput_free(dqp);
844 
845 	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
846 		XFS_STATS_INC(xs_qm_dquot_unused);
847 
848 	/*
849 	 * If we just added a udquot to the freelist, then we want to release
850 	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
851 	 * keep the gdquot/pdquot from getting reclaimed.
852 	 */
853 	gdqp = dqp->q_gdquot;
854 	if (gdqp) {
855 		xfs_dqlock(gdqp);
856 		dqp->q_gdquot = NULL;
857 	}
858 
859 	pdqp = dqp->q_pdquot;
860 	if (pdqp) {
861 		xfs_dqlock(pdqp);
862 		dqp->q_pdquot = NULL;
863 	}
864 	xfs_dqunlock(dqp);
865 
866 	/*
867 	 * If we had a group/project quota hint, release it now.
868 	 */
869 	if (gdqp)
870 		xfs_qm_dqput(gdqp);
871 	if (pdqp)
872 		xfs_qm_dqput(pdqp);
873 }
874 
875 /*
876  * Release a reference to the dquot (decrement ref-count) and unlock it.
877  *
878  * If there is a group quota attached to this dquot, carefully release that
879  * too without tripping over deadlocks'n'stuff.
880  */
881 void
882 xfs_qm_dqput(
883 	struct xfs_dquot	*dqp)
884 {
885 	ASSERT(dqp->q_nrefs > 0);
886 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
887 
888 	trace_xfs_dqput(dqp);
889 
890 	if (--dqp->q_nrefs > 0)
891 		xfs_dqunlock(dqp);
892 	else
893 		xfs_qm_dqput_final(dqp);
894 }
895 
896 /*
897  * Release a dquot; just dqput() it.  Dirty dquots are written back
898  * when they are reclaimed, not here.  The dquot must not be locked.
899  */
900 void
901 xfs_qm_dqrele(
902 	xfs_dquot_t	*dqp)
903 {
904 	if (!dqp)
905 		return;
906 
907 	trace_xfs_dqrele(dqp);
908 
909 	xfs_dqlock(dqp);
910 	/*
911 	 * We don't bother to flush the dquot here even if it is dirty;
912 	 * that would create stutters that we want to avoid.
913 	 * Instead we do a delayed write when we try to reclaim
914 	 * a dirty dquot.  Also, xfs_sync will take part of the burden.
915 	 */
916 	xfs_qm_dqput(dqp);
917 }
918 
919 /*
920  * This is the dquot flushing I/O completion routine.  It is called
921  * from interrupt level when the buffer containing the dquot is
922  * flushed to disk.  It is responsible for removing the dquot logitem
923  * from the AIL if it has not been re-logged, and unlocking the dquot's
924  * flush lock. This behavior is very similar to that of inodes.
925  */
926 STATIC void
927 xfs_qm_dqflush_done(
928 	struct xfs_buf		*bp,
929 	struct xfs_log_item	*lip)
930 {
931 	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
932 	xfs_dquot_t		*dqp = qip->qli_dquot;
933 	struct xfs_ail		*ailp = lip->li_ailp;
934 
935 	 * We only want to pull the item from the AIL if its
936 	 * location in the log has not changed since we started the flush,
937 	 * i.e. if the dquot's lsn is unchanged.
938 	 * First we check the lsn outside the AIL lock since that is
939 	 * cheaper, and then we recheck while holding the lock before
940 	 * removing the dquot from the AIL.
941 	 * holding the lock before removing the dquot from the AIL.
942 	 */
943 	if ((lip->li_flags & XFS_LI_IN_AIL) &&
944 	    lip->li_lsn == qip->qli_flush_lsn) {
945 
946 		/* xfs_trans_ail_delete() drops the AIL lock. */
947 		spin_lock(&ailp->xa_lock);
948 		if (lip->li_lsn == qip->qli_flush_lsn)
949 			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
950 		else
951 			spin_unlock(&ailp->xa_lock);
952 	}
953 
954 	/*
955 	 * Release the dq's flush lock since we're done with it.
956 	 */
957 	xfs_dqfunlock(dqp);
958 }
959 
960 /*
961  * Write a modified dquot to disk.
962  * The dquot must be locked and the flush lock held by the caller.
963  * The flush lock will not be unlocked until the dquot reaches the disk,
964  * but the dquot is free to be unlocked and modified by the caller
965  * in the interim. Dquot is still locked on return. This behavior is
966  * identical to that of inodes.
967  */
968 int
969 xfs_qm_dqflush(
970 	struct xfs_dquot	*dqp,
971 	struct xfs_buf		**bpp)
972 {
973 	struct xfs_mount	*mp = dqp->q_mount;
974 	struct xfs_buf		*bp;
975 	struct xfs_disk_dquot	*ddqp;
976 	int			error;
977 
978 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
979 	ASSERT(!completion_done(&dqp->q_flush));
980 
981 	trace_xfs_dqflush(dqp);
982 
983 	*bpp = NULL;
984 
985 	xfs_qm_dqunpin_wait(dqp);
986 
987 	/*
988 	 * This may have been unpinned because the filesystem is shutting
989 	 * down forcibly. If that's the case we must not write this dquot
990 	 * to disk, because the log record didn't make it to disk.
991 	 *
992 	 * We also have to remove the log item from the AIL in this case,
993 	 * as we wait for an empty AIL as part of the unmount process.
994 	 */
995 	if (XFS_FORCED_SHUTDOWN(mp)) {
996 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
997 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
998 
999 		spin_lock(&mp->m_ail->xa_lock);
1000 		if (lip->li_flags & XFS_LI_IN_AIL)
1001 			xfs_trans_ail_delete(mp->m_ail, lip,
1002 					     SHUTDOWN_CORRUPT_INCORE);
1003 		else
1004 			spin_unlock(&mp->m_ail->xa_lock);
1005 		error = XFS_ERROR(EIO);
1006 		goto out_unlock;
1007 	}
1008 
1009 	/*
1010 	 * Get the buffer containing the on-disk dquot
1011 	 */
1012 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1013 				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
1014 	if (error)
1015 		goto out_unlock;
1016 
1017 	/*
1018 	 * Calculate the location of the dquot inside the buffer.
1019 	 */
1020 	ddqp = bp->b_addr + dqp->q_bufoffset;
1021 
1022 	/*
1023 	 * A simple sanity check in case we got a corrupted dquot.
1024 	 */
1025 	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1026 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1027 	if (error) {
1028 		xfs_buf_relse(bp);
1029 		xfs_dqfunlock(dqp);
1030 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1031 		return XFS_ERROR(EIO);
1032 	}
1033 
1034 	/* This is the only portion of data that needs to persist */
1035 	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1036 
1037 	/*
1038 	 * Clear the dirty field and remember the flush lsn for later use.
1039 	 */
1040 	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1041 
1042 	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1043 					&dqp->q_logitem.qli_item.li_lsn);
1044 
1045 	/*
1046 	 * copy the lsn into the on-disk dquot now while we have the in memory
1047 	 * dquot here. This can't be done later in the write verifier as we
1048 	 * can't get access to the log item at that point in time.
1049 	 *
1050 	 * We also calculate the CRC here so that the on-disk dquot in the
1051 	 * buffer always has a valid CRC. This ensures there is no possibility
1052 	 * of a dquot without an up-to-date CRC getting to disk.
1053 	 */
1054 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1055 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1056 
1057 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1058 		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1059 				 XFS_DQUOT_CRC_OFF);
1060 	}
1061 
1062 	/*
1063 	 * Attach an iodone routine so that we can remove this dquot from the
1064 	 * AIL and release the flush lock once the dquot is synced to disk.
1065 	 */
1066 	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1067 				  &dqp->q_logitem.qli_item);
1068 
1069 	/*
1070 	 * If the buffer is pinned then push on the log so we won't
1071 	 * get stuck waiting in the write for too long.
1072 	 */
1073 	if (xfs_buf_ispinned(bp)) {
1074 		trace_xfs_dqflush_force(dqp);
1075 		xfs_log_force(mp, 0);
1076 	}
1077 
1078 	trace_xfs_dqflush_done(dqp);
1079 	*bpp = bp;
1080 	return 0;
1081 
1082 out_unlock:
1083 	xfs_dqfunlock(dqp);
1084 	return XFS_ERROR(EIO);
1085 }
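/*
 * Illustrative caller (hypothetical, not in the original file), modelled
 * on the flush pattern used by quotacheck: take the dquot lock and the
 * flush lock, flush, then queue the returned buffer for delayed write.
 */
static void __maybe_unused
xfs_qm_dqflush_example(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;

	xfs_dqlock(dqp);
	xfs_dqflock(dqp);		/* waits for the flush "lock" */

	if (!xfs_qm_dqflush(dqp, &bp)) {
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
	}
	xfs_dqunlock(dqp);
}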
1086 
1087 /*
1088  * Lock two xfs_dquot structures.
1089  *
1090  * To avoid deadlocks we always lock the quota structure with
1091  * the lower id first.
1092  */
1093 void
1094 xfs_dqlock2(
1095 	xfs_dquot_t	*d1,
1096 	xfs_dquot_t	*d2)
1097 {
1098 	if (d1 && d2) {
1099 		ASSERT(d1 != d2);
1100 		if (be32_to_cpu(d1->q_core.d_id) >
1101 		    be32_to_cpu(d2->q_core.d_id)) {
1102 			mutex_lock(&d2->q_qlock);
1103 			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1104 		} else {
1105 			mutex_lock(&d1->q_qlock);
1106 			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1107 		}
1108 	} else if (d1) {
1109 		mutex_lock(&d1->q_qlock);
1110 	} else if (d2) {
1111 		mutex_lock(&d2->q_qlock);
1112 	}
1113 }
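/*
 * Illustrative caller (hypothetical, not in the original file): when two
 * dquots must be locked together, take both locks through xfs_dqlock2()
 * so the id ordering above prevents ABBA deadlocks.
 */
static void __maybe_unused
xfs_dqlock2_example(
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp)
{
	xfs_dqlock2(udqp, gdqp);	/* locks taken in a stable order */

	/* ... move quota counts between the two dquots ... */

	xfs_dqunlock(gdqp);		/* unlock order doesn't matter */
	xfs_dqunlock(udqp);
}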
1114 
1115 int __init
1116 xfs_qm_init(void)
1117 {
1118 	xfs_qm_dqzone =
1119 		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1120 	if (!xfs_qm_dqzone)
1121 		goto out;
1122 
1123 	xfs_qm_dqtrxzone =
1124 		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1125 	if (!xfs_qm_dqtrxzone)
1126 		goto out_free_dqzone;
1127 
1128 	return 0;
1129 
1130 out_free_dqzone:
1131 	kmem_zone_destroy(xfs_qm_dqzone);
1132 out:
1133 	return -ENOMEM;
1134 }
1135 
1136 void
1137 xfs_qm_exit(void)
1138 {
1139 	kmem_zone_destroy(xfs_qm_dqtrxzone);
1140 	kmem_zone_destroy(xfs_qm_dqzone);
1141 }
1142