xref: /linux/fs/xfs/xfs_trans_dquot.c (revision 4949009eb8d40a441dcddcd96e101e77d31cf1b2)
1 /*
2  * Copyright (c) 2000-2002 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_mount.h"
25 #include "xfs_inode.h"
26 #include "xfs_error.h"
27 #include "xfs_trans.h"
28 #include "xfs_trans_priv.h"
29 #include "xfs_quota.h"
30 #include "xfs_qm.h"
31 
32 STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
33 
34 /*
35  * Add the locked dquot to the transaction.
36  * The dquot must be locked, and it cannot be associated with any
37  * transaction.
38  */
39 void
40 xfs_trans_dqjoin(
41 	xfs_trans_t	*tp,
42 	xfs_dquot_t	*dqp)
43 {
44 	ASSERT(dqp->q_transp != tp);
45 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
46 	ASSERT(dqp->q_logitem.qli_dquot == dqp);
47 
48 	/*
49 	 * Get a log_item_desc to point at the new item.
50 	 */
51 	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
52 
53 	/*
54 	 * Initialize q_transp so we can later determine if this dquot is
55 	 * associated with this transaction.
56 	 */
57 	dqp->q_transp = tp;
58 }
59 
60 
61 /*
62  * This is called to mark the dquot as needing
63  * to be logged when the transaction is committed.  The dquot must
64  * already be associated with the given transaction.
65  * Note that it marks the entire transaction as dirty. In the ordinary
66  * case, this gets called via xfs_trans_commit, after the transaction
67  * is already dirty. However, there's nothing to stop this from getting
68  * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
69  * flag.
70  */
71 void
72 xfs_trans_log_dquot(
73 	xfs_trans_t	*tp,
74 	xfs_dquot_t	*dqp)
75 {
76 	ASSERT(dqp->q_transp == tp);
77 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
78 
79 	tp->t_flags |= XFS_TRANS_DIRTY;
80 	dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
81 }
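
/*
 * Editorial sketch (not part of the original file): the usage pattern for
 * the two helpers above, as seen in callers such as xfs_qm_scall_setqlim(),
 * is to lock the dquot, join it, modify the incore copy and then log it:
 *
 *	xfs_dqlock(dqp);
 *	xfs_trans_dqjoin(tp, dqp);
 *	... modify dqp->q_core fields ...
 *	xfs_trans_log_dquot(tp, dqp);
 *
 * The caller does not unlock the dquot afterwards; that happens as part of
 * committing (or cancelling) the transaction.
 */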
82 
83 /*
84  * Carry forward whatever is left of the quota blk reservation to
85  * the new transaction.
86  */
87 void
88 xfs_trans_dup_dqinfo(
89 	xfs_trans_t	*otp,
90 	xfs_trans_t	*ntp)
91 {
92 	xfs_dqtrx_t	*oq, *nq;
93 	int		i,j;
94 	xfs_dqtrx_t	*oqa, *nqa;
95 
96 	if (!otp->t_dqinfo)
97 		return;
98 
99 	xfs_trans_alloc_dqinfo(ntp);
100 
101 	/*
102 	 * Because the quota blk reservation is carried forward,
103 	 * it is also necessary to carry forward the DQ_DIRTY flag.
104 	 */
105 	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
106 		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;
107 
108 	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
109 		oqa = otp->t_dqinfo->dqs[j];
110 		nqa = ntp->t_dqinfo->dqs[j];
111 		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
112 			if (oqa[i].qt_dquot == NULL)
113 				break;
114 			oq = &oqa[i];
115 			nq = &nqa[i];
116 
117 			nq->qt_dquot = oq->qt_dquot;
118 			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
119 			nq->qt_rtbcount_delta = 0;
120 
121 			/*
122 			 * Transfer whatever is left of the reservations.
123 			 */
124 			nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
125 			oq->qt_blk_res = oq->qt_blk_res_used;
126 
127 			nq->qt_rtblk_res = oq->qt_rtblk_res -
128 				oq->qt_rtblk_res_used;
129 			oq->qt_rtblk_res = oq->qt_rtblk_res_used;
130 
131 			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
132 			oq->qt_ino_res = oq->qt_ino_res_used;
133 
134 		}
135 	}
136 }
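
/*
 * Editorial note, worked example for the carry-forward above: if the old
 * transaction reserved qt_blk_res = 10 blocks and has used
 * qt_blk_res_used = 4 of them so far, the new transaction starts out with
 * a reservation of 10 - 4 = 6 blocks and zeroed deltas, while the old
 * transaction's reservation is trimmed back to the 4 blocks it actually
 * consumed.  The realtime block and inode reservations are carried
 * forward the same way.
 */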
137 
138 /*
139  * Wrapper around xfs_trans_mod_dquot() to account for user, group and project quotas.
140  */
141 void
142 xfs_trans_mod_dquot_byino(
143 	xfs_trans_t	*tp,
144 	xfs_inode_t	*ip,
145 	uint		field,
146 	long		delta)
147 {
148 	xfs_mount_t	*mp = tp->t_mountp;
149 
150 	if (!XFS_IS_QUOTA_RUNNING(mp) ||
151 	    !XFS_IS_QUOTA_ON(mp) ||
152 	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
153 		return;
154 
155 	if (tp->t_dqinfo == NULL)
156 		xfs_trans_alloc_dqinfo(tp);
157 
158 	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
159 		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
160 	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
161 		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
162 	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
163 		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
164 }
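
/*
 * Editorial sketch (hypothetical call, not from this file): a path that
 * just allocated "blocks" data blocks for an inode would charge them to
 * every dquot attached to that inode with:
 *
 *	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, (long)blocks);
 *
 * The field argument is one of the XFS_TRANS_DQ_* values handled by
 * xfs_trans_mod_dquot() below.
 */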
165 
166 STATIC struct xfs_dqtrx *
167 xfs_trans_get_dqtrx(
168 	struct xfs_trans	*tp,
169 	struct xfs_dquot	*dqp)
170 {
171 	int			i;
172 	struct xfs_dqtrx	*qa;
173 
174 	if (XFS_QM_ISUDQ(dqp))
175 		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
176 	else if (XFS_QM_ISGDQ(dqp))
177 		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
178 	else if (XFS_QM_ISPDQ(dqp))
179 		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
180 	else
181 		return NULL;
182 
183 	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
184 		if (qa[i].qt_dquot == NULL ||
185 		    qa[i].qt_dquot == dqp)
186 			return &qa[i];
187 	}
188 
189 	return NULL;
190 }
191 
192 /*
193  * Make the changes in the transaction structure.
194  * The moral equivalent to xfs_trans_mod_sb().
195  * We don't touch any fields in the dquot, so we don't care
196  * if it's locked or not (most of the time it won't be).
197  */
198 void
199 xfs_trans_mod_dquot(
200 	xfs_trans_t	*tp,
201 	xfs_dquot_t	*dqp,
202 	uint		field,
203 	long		delta)
204 {
205 	xfs_dqtrx_t	*qtrx;
206 
207 	ASSERT(tp);
208 	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
209 	qtrx = NULL;
210 
211 	if (tp->t_dqinfo == NULL)
212 		xfs_trans_alloc_dqinfo(tp);
213 	/*
214 	 * Find either the first free slot or the slot that belongs
215 	 * to this dquot.
216 	 */
217 	qtrx = xfs_trans_get_dqtrx(tp, dqp);
218 	ASSERT(qtrx);
219 	if (qtrx->qt_dquot == NULL)
220 		qtrx->qt_dquot = dqp;
221 
222 	switch (field) {
223 
224 		/*
225 		 * regular disk blk reservation
226 		 */
227 	      case XFS_TRANS_DQ_RES_BLKS:
228 		qtrx->qt_blk_res += (ulong)delta;
229 		break;
230 
231 		/*
232 		 * inode reservation
233 		 */
234 	      case XFS_TRANS_DQ_RES_INOS:
235 		qtrx->qt_ino_res += (ulong)delta;
236 		break;
237 
238 		/*
239 		 * disk blocks used.
240 		 */
241 	      case XFS_TRANS_DQ_BCOUNT:
242 		if (qtrx->qt_blk_res && delta > 0) {
243 			qtrx->qt_blk_res_used += (ulong)delta;
244 			ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
245 		}
246 		qtrx->qt_bcount_delta += delta;
247 		break;
248 
249 	      case XFS_TRANS_DQ_DELBCOUNT:
250 		qtrx->qt_delbcnt_delta += delta;
251 		break;
252 
253 		/*
254 		 * Inode Count
255 		 */
256 	      case XFS_TRANS_DQ_ICOUNT:
257 		if (qtrx->qt_ino_res && delta > 0) {
258 			qtrx->qt_ino_res_used += (ulong)delta;
259 			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
260 		}
261 		qtrx->qt_icount_delta += delta;
262 		break;
263 
264 		/*
265 		 * rtblk reservation
266 		 */
267 	      case XFS_TRANS_DQ_RES_RTBLKS:
268 		qtrx->qt_rtblk_res += (ulong)delta;
269 		break;
270 
271 		/*
272 		 * rtblk count
273 		 */
274 	      case XFS_TRANS_DQ_RTBCOUNT:
275 		if (qtrx->qt_rtblk_res && delta > 0) {
276 			qtrx->qt_rtblk_res_used += (ulong)delta;
277 			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
278 		}
279 		qtrx->qt_rtbcount_delta += delta;
280 		break;
281 
282 	      case XFS_TRANS_DQ_DELRTBCOUNT:
283 		qtrx->qt_delrtb_delta += delta;
284 		break;
285 
286 	      default:
287 		ASSERT(0);
288 	}
289 	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
290 }
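
/*
 * Editorial note on the bookkeeping above: the XFS_TRANS_DQ_RES_* fields
 * only grow the qt_*_res reservation counters, while XFS_TRANS_DQ_BCOUNT,
 * XFS_TRANS_DQ_ICOUNT and XFS_TRANS_DQ_RTBCOUNT record actual usage and,
 * when a reservation exists, also advance the matching qt_*_res_used
 * counter.  A successful 5 block reservation followed by the allocation
 * itself is therefore equivalent to (sketch):
 *
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 5);
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 5);
 *
 * leaving qt_blk_res = 5, qt_blk_res_used = 5 and qt_bcount_delta = 5 for
 * xfs_trans_apply_dquot_deltas() to reconcile at commit time.
 */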
291 
292 
293 /*
294  * Given an array of dqtrx structures, lock all the dquots associated and join
295  * them to the transaction, provided they have been modified.  We know that the
296  * highest number of dquots of one type - usr, grp or prj - involved in a
297  * transaction is 2, so we don't need to make this very generic.
298  */
299 STATIC void
300 xfs_trans_dqlockedjoin(
301 	xfs_trans_t	*tp,
302 	xfs_dqtrx_t	*q)
303 {
304 	ASSERT(q[0].qt_dquot != NULL);
305 	if (q[1].qt_dquot == NULL) {
306 		xfs_dqlock(q[0].qt_dquot);
307 		xfs_trans_dqjoin(tp, q[0].qt_dquot);
308 	} else {
309 		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
310 		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
311 		xfs_trans_dqjoin(tp, q[0].qt_dquot);
312 		xfs_trans_dqjoin(tp, q[1].qt_dquot);
313 	}
314 }
315 
316 
317 /*
318  * Called by xfs_trans_commit() and similar in spirit to
319  * xfs_trans_apply_sb_deltas().
320  * Go through all the dquots belonging to this transaction and modify the
321  * INCORE dquot to reflect the actual usages.
322  * Unreserve just the reservations done by this transaction.
323  * Each dquot is still left locked at exit.
324  */
325 void
326 xfs_trans_apply_dquot_deltas(
327 	struct xfs_trans	*tp)
328 {
329 	int			i, j;
330 	struct xfs_dquot	*dqp;
331 	struct xfs_dqtrx	*qtrx, *qa;
332 	struct xfs_disk_dquot	*d;
333 	long			totalbdelta;
334 	long			totalrtbdelta;
335 
336 	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
337 		return;
338 
339 	ASSERT(tp->t_dqinfo);
340 	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
341 		qa = tp->t_dqinfo->dqs[j];
342 		if (qa[0].qt_dquot == NULL)
343 			continue;
344 
345 		/*
346 		 * Lock all of the dquots and join them to the transaction.
347 		 */
348 		xfs_trans_dqlockedjoin(tp, qa);
349 
350 		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
351 			qtrx = &qa[i];
352 			/*
353 			 * The array of dquots is filled
354 			 * sequentially, not sparsely.
355 			 */
356 			if ((dqp = qtrx->qt_dquot) == NULL)
357 				break;
358 
359 			ASSERT(XFS_DQ_IS_LOCKED(dqp));
360 			ASSERT(dqp->q_transp == tp);
361 
362 			/*
363 			 * adjust the actual number of blocks used
364 			 */
365 			d = &dqp->q_core;
366 
367 			/*
368 			 * The issue here is - sometimes we don't make a blkquota
369 			 * reservation intentionally to be fair to users
370 			 * (when the amount is small). On the other hand,
371 			 * delayed allocs do make reservations, but that's
372 			 * outside of a transaction, so we have no
373 			 * idea how much was really reserved.
374 			 * So, here we've accumulated delayed allocation blks and
375 			 * non-delay blks. The assumption is that the
376 			 * delayed ones are always reserved (outside of a
377 			 * transaction), and the others may or may not have
378 			 * quota reservations.
379 			 */
380 			totalbdelta = qtrx->qt_bcount_delta +
381 				qtrx->qt_delbcnt_delta;
382 			totalrtbdelta = qtrx->qt_rtbcount_delta +
383 				qtrx->qt_delrtb_delta;
384 #ifdef DEBUG
385 			if (totalbdelta < 0)
386 				ASSERT(be64_to_cpu(d->d_bcount) >=
387 				       -totalbdelta);
388 
389 			if (totalrtbdelta < 0)
390 				ASSERT(be64_to_cpu(d->d_rtbcount) >=
391 				       -totalrtbdelta);
392 
393 			if (qtrx->qt_icount_delta < 0)
394 				ASSERT(be64_to_cpu(d->d_icount) >=
395 				       -qtrx->qt_icount_delta);
396 #endif
397 			if (totalbdelta)
398 				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
399 
400 			if (qtrx->qt_icount_delta)
401 				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);
402 
403 			if (totalrtbdelta)
404 				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);
405 
406 			/*
407 			 * Get any default limits in use.
408 			 * Start/reset the timer(s) if needed.
409 			 */
410 			if (d->d_id) {
411 				xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
412 				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
413 			}
414 
415 			dqp->dq_flags |= XFS_DQ_DIRTY;
416 			/*
417 			 * add this to the list of items to get logged
418 			 */
419 			xfs_trans_log_dquot(tp, dqp);
420 			/*
421 			 * Take off what's left of the original reservation.
422 			 * In case of delayed allocations, there's no
423 			 * reservation that a transaction structure knows of.
424 			 */
425 			if (qtrx->qt_blk_res != 0) {
426 				if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
427 					if (qtrx->qt_blk_res >
428 					    qtrx->qt_blk_res_used)
429 						dqp->q_res_bcount -= (xfs_qcnt_t)
430 							(qtrx->qt_blk_res -
431 							 qtrx->qt_blk_res_used);
432 					else
433 						dqp->q_res_bcount -= (xfs_qcnt_t)
434 							(qtrx->qt_blk_res_used -
435 							 qtrx->qt_blk_res);
436 				}
437 			} else {
438 				/*
439 				 * These blks were never reserved, either inside
440 				 * a transaction or outside one (in a delayed
441 				 * allocation). Also, this isn't always a
442 				 * negative number since we sometimes
443 				 * deliberately skip quota reservations.
444 				 */
445 				if (qtrx->qt_bcount_delta) {
446 					dqp->q_res_bcount +=
447 					      (xfs_qcnt_t)qtrx->qt_bcount_delta;
448 				}
449 			}
450 			/*
451 			 * Adjust the RT reservation.
452 			 */
453 			if (qtrx->qt_rtblk_res != 0) {
454 				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
455 					if (qtrx->qt_rtblk_res >
456 					    qtrx->qt_rtblk_res_used)
457 					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
458 						       (qtrx->qt_rtblk_res -
459 							qtrx->qt_rtblk_res_used);
460 					else
461 					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
462 						       (qtrx->qt_rtblk_res_used -
463 							qtrx->qt_rtblk_res);
464 				}
465 			} else {
466 				if (qtrx->qt_rtbcount_delta)
467 					dqp->q_res_rtbcount +=
468 					    (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
469 			}
470 
471 			/*
472 			 * Adjust the inode reservation.
473 			 */
474 			if (qtrx->qt_ino_res != 0) {
475 				ASSERT(qtrx->qt_ino_res >=
476 				       qtrx->qt_ino_res_used);
477 				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
478 					dqp->q_res_icount -= (xfs_qcnt_t)
479 						(qtrx->qt_ino_res -
480 						 qtrx->qt_ino_res_used);
481 			} else {
482 				if (qtrx->qt_icount_delta)
483 					dqp->q_res_icount +=
484 					    (xfs_qcnt_t)qtrx->qt_icount_delta;
485 			}
486 
487 			ASSERT(dqp->q_res_bcount >=
488 				be64_to_cpu(dqp->q_core.d_bcount));
489 			ASSERT(dqp->q_res_icount >=
490 				be64_to_cpu(dqp->q_core.d_icount));
491 			ASSERT(dqp->q_res_rtbcount >=
492 				be64_to_cpu(dqp->q_core.d_rtbcount));
493 		}
494 	}
495 }
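
/*
 * Editorial note, worked example for the reconciliation above: suppose a
 * transaction reserved qt_blk_res = 8 blocks but only allocated 5, so that
 * qt_blk_res_used = 5 and qt_bcount_delta = 5.  At commit time d_bcount is
 * increased by 5 and the unused 8 - 5 = 3 blocks are given back by
 * decrementing q_res_bcount by 3, which preserves the
 * q_res_bcount >= d_bcount invariant asserted at the end of the loop.
 */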
496 
497 /*
498  * Release the reservations, and adjust the dquots accordingly.
499  * This is called only when the transaction is being aborted. If by
500  * any chance we have done dquot modifications incore (i.e. deltas) already,
501  * we simply throw those away, since that's the expected behavior
502  * when a transaction is curtailed without a commit.
503  */
504 void
505 xfs_trans_unreserve_and_mod_dquots(
506 	xfs_trans_t		*tp)
507 {
508 	int			i, j;
509 	xfs_dquot_t		*dqp;
510 	xfs_dqtrx_t		*qtrx, *qa;
511 	bool                    locked;
512 
513 	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
514 		return;
515 
516 	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
517 		qa = tp->t_dqinfo->dqs[j];
518 
519 		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
520 			qtrx = &qa[i];
521 			/*
522 			 * We assume that the array of dquots is filled
523 			 * sequentially, not sparsely.
524 			 */
525 			if ((dqp = qtrx->qt_dquot) == NULL)
526 				break;
527 			/*
528 			 * Unreserve the original reservation. We don't care
529 			 * about the number of blocks used field, or deltas.
530 			 * Also we don't bother to zero the fields.
531 			 */
532 			locked = false;
533 			if (qtrx->qt_blk_res) {
534 				xfs_dqlock(dqp);
535 				locked = true;
536 				dqp->q_res_bcount -=
537 					(xfs_qcnt_t)qtrx->qt_blk_res;
538 			}
539 			if (qtrx->qt_ino_res) {
540 				if (!locked) {
541 					xfs_dqlock(dqp);
542 					locked = true;
543 				}
544 				dqp->q_res_icount -=
545 					(xfs_qcnt_t)qtrx->qt_ino_res;
546 			}
547 
548 			if (qtrx->qt_rtblk_res) {
549 				if (!locked) {
550 					xfs_dqlock(dqp);
551 					locked = true;
552 				}
553 				dqp->q_res_rtbcount -=
554 					(xfs_qcnt_t)qtrx->qt_rtblk_res;
555 			}
556 			if (locked)
557 				xfs_dqunlock(dqp);
558 
559 		}
560 	}
561 }
562 
563 STATIC void
564 xfs_quota_warn(
565 	struct xfs_mount	*mp,
566 	struct xfs_dquot	*dqp,
567 	int			type)
568 {
569 	/* no warnings for project quotas - we just return ENOSPC later */
570 	if (dqp->dq_flags & XFS_DQ_PROJ)
571 		return;
572 	quota_send_warning(make_kqid(&init_user_ns,
573 				     (dqp->dq_flags & XFS_DQ_USER) ?
574 				     USRQUOTA : GRPQUOTA,
575 				     be32_to_cpu(dqp->q_core.d_id)),
576 			   mp->m_super->s_dev, type);
577 }
578 
579 /*
580  * This reserves disk blocks and inodes against a dquot.
581  * Flags indicate if the dquot is to be locked here and also
582  * if the blk reservation is for RT or regular blocks.
583  * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
584  */
585 STATIC int
586 xfs_trans_dqresv(
587 	xfs_trans_t	*tp,
588 	xfs_mount_t	*mp,
589 	xfs_dquot_t	*dqp,
590 	long		nblks,
591 	long		ninos,
592 	uint		flags)
593 {
594 	xfs_qcnt_t	hardlimit;
595 	xfs_qcnt_t	softlimit;
596 	time_t		timer;
597 	xfs_qwarncnt_t	warns;
598 	xfs_qwarncnt_t	warnlimit;
599 	xfs_qcnt_t	total_count;
600 	xfs_qcnt_t	*resbcountp;
601 	xfs_quotainfo_t	*q = mp->m_quotainfo;
602 
603 
604 	xfs_dqlock(dqp);
605 
606 	if (flags & XFS_TRANS_DQ_RES_BLKS) {
607 		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
608 		if (!hardlimit)
609 			hardlimit = q->qi_bhardlimit;
610 		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
611 		if (!softlimit)
612 			softlimit = q->qi_bsoftlimit;
613 		timer = be32_to_cpu(dqp->q_core.d_btimer);
614 		warns = be16_to_cpu(dqp->q_core.d_bwarns);
615 		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
616 		resbcountp = &dqp->q_res_bcount;
617 	} else {
618 		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
619 		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
620 		if (!hardlimit)
621 			hardlimit = q->qi_rtbhardlimit;
622 		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
623 		if (!softlimit)
624 			softlimit = q->qi_rtbsoftlimit;
625 		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
626 		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
627 		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
628 		resbcountp = &dqp->q_res_rtbcount;
629 	}
630 
631 	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
632 	    dqp->q_core.d_id &&
633 	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
634 	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
635 	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
636 		if (nblks > 0) {
637 			/*
638 			 * dquot is locked already. See if we'd go over the
639 			 * hardlimit or exceed the timelimit if we allocate
640 			 * nblks.
641 			 */
642 			total_count = *resbcountp + nblks;
643 			if (hardlimit && total_count > hardlimit) {
644 				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
645 				goto error_return;
646 			}
647 			if (softlimit && total_count > softlimit) {
648 				if ((timer != 0 && get_seconds() > timer) ||
649 				    (warns != 0 && warns >= warnlimit)) {
650 					xfs_quota_warn(mp, dqp,
651 						       QUOTA_NL_BSOFTLONGWARN);
652 					goto error_return;
653 				}
654 
655 				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
656 			}
657 		}
658 		if (ninos > 0) {
659 			total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
660 			timer = be32_to_cpu(dqp->q_core.d_itimer);
661 			warns = be16_to_cpu(dqp->q_core.d_iwarns);
662 			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
663 			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
664 			if (!hardlimit)
665 				hardlimit = q->qi_ihardlimit;
666 			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
667 			if (!softlimit)
668 				softlimit = q->qi_isoftlimit;
669 
670 			if (hardlimit && total_count > hardlimit) {
671 				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
672 				goto error_return;
673 			}
674 			if (softlimit && total_count > softlimit) {
675 				if ((timer != 0 && get_seconds() > timer) ||
676 				     (warns != 0 && warns >= warnlimit)) {
677 					xfs_quota_warn(mp, dqp,
678 						       QUOTA_NL_ISOFTLONGWARN);
679 					goto error_return;
680 				}
681 				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
682 			}
683 		}
684 	}
685 
686 	/*
687 	 * Change the reservation, but not the actual usage.
688 	 * Note that q_res_bcount = q_core.d_bcount + resv
689 	 */
690 	(*resbcountp) += (xfs_qcnt_t)nblks;
691 	if (ninos != 0)
692 		dqp->q_res_icount += (xfs_qcnt_t)ninos;
693 
694 	/*
695 	 * Note the reservation amount in the trans struct too,
696 	 * so that the transaction knows how much was reserved by
697 	 * it against this particular dquot.
698 	 * We don't do this when we are reserving for a delayed allocation,
699 	 * because we don't have the luxury of a transaction envelope then.
700 	 */
701 	if (tp) {
702 		ASSERT(tp->t_dqinfo);
703 		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
704 		if (nblks != 0)
705 			xfs_trans_mod_dquot(tp, dqp,
706 					    flags & XFS_QMOPT_RESBLK_MASK,
707 					    nblks);
708 		if (ninos != 0)
709 			xfs_trans_mod_dquot(tp, dqp,
710 					    XFS_TRANS_DQ_RES_INOS,
711 					    ninos);
712 	}
713 	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
714 	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
715 	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
716 
717 	xfs_dqunlock(dqp);
718 	return 0;
719 
720 error_return:
721 	xfs_dqunlock(dqp);
722 	if (flags & XFS_QMOPT_ENOSPC)
723 		return -ENOSPC;
724 	return -EDQUOT;
725 }
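
/*
 * Editorial note, worked example for the enforcement above: with a block
 * soft limit of 100, a hard limit of 120 and q_res_bcount currently at 95,
 * a request for nblks = 10 passes the hard limit check (105 <= 120) but
 * trips the soft limit (105 > 100).  Unless the grace timer has already
 * expired or the warning limit has been reached, only QUOTA_NL_BSOFTWARN
 * is sent and the reservation succeeds, leaving q_res_bcount at 105.
 */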
726 
727 
728 /*
729  * Given dquot(s), make disk block and/or inode reservations against them.
730  * The fact that this does the reservation against user, group and
731  * project quotas is important, because this follows a all-or-nothing
732  * project quotas is important, because this follows an all-or-nothing
733  *
734  * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
735  *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
736  *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
737  *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
738  * dquots are unlocked on return, if they were not locked by caller.
739  */
740 int
741 xfs_trans_reserve_quota_bydquots(
742 	struct xfs_trans	*tp,
743 	struct xfs_mount	*mp,
744 	struct xfs_dquot	*udqp,
745 	struct xfs_dquot	*gdqp,
746 	struct xfs_dquot	*pdqp,
747 	long			nblks,
748 	long			ninos,
749 	uint			flags)
750 {
751 	int		error;
752 
753 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
754 		return 0;
755 
756 	if (tp && tp->t_dqinfo == NULL)
757 		xfs_trans_alloc_dqinfo(tp);
758 
759 	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
760 
761 	if (udqp) {
762 		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
763 					(flags & ~XFS_QMOPT_ENOSPC));
764 		if (error)
765 			return error;
766 	}
767 
768 	if (gdqp) {
769 		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
770 		if (error)
771 			goto unwind_usr;
772 	}
773 
774 	if (pdqp) {
775 		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
776 		if (error)
777 			goto unwind_grp;
778 	}
779 
780 	/*
781 	 * Didn't change anything critical, so, no need to log
782 	 */
783 	return 0;
784 
785 unwind_grp:
786 	flags |= XFS_QMOPT_FORCE_RES;
787 	if (gdqp)
788 		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
789 unwind_usr:
790 	flags |= XFS_QMOPT_FORCE_RES;
791 	if (udqp)
792 		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
793 	return error;
794 }
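
/*
 * Editorial note on the unwind path above: if the group or project
 * reservation fails, the reservations already granted are backed out by
 * calling xfs_trans_dqresv() again with negated counts and
 * XFS_QMOPT_FORCE_RES set, so the undo itself can never fail a quota
 * check.  This is what makes the user/group/project reservation
 * all-or-nothing.
 */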
795 
796 
797 /*
798  * Lock the dquot and change the reservation if we can.
799  * This doesn't change the actual usage, just the reservation.
800  * The inode passed in must already be locked.
801  */
802 int
803 xfs_trans_reserve_quota_nblks(
804 	struct xfs_trans	*tp,
805 	struct xfs_inode	*ip,
806 	long			nblks,
807 	long			ninos,
808 	uint			flags)
809 {
810 	struct xfs_mount	*mp = ip->i_mount;
811 
812 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
813 		return 0;
814 	if (XFS_IS_PQUOTA_ON(mp))
815 		flags |= XFS_QMOPT_ENOSPC;
816 
817 	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
818 
819 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
820 	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
821 				XFS_TRANS_DQ_RES_RTBLKS ||
822 	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
823 				XFS_TRANS_DQ_RES_BLKS);
824 
825 	/*
826 	 * Reserve nblks against these dquots, with trans as the mediator.
827 	 */
828 	return xfs_trans_reserve_quota_bydquots(tp, mp,
829 						ip->i_udquot, ip->i_gdquot,
830 						ip->i_pdquot,
831 						nblks, ninos, flags);
832 }
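
/*
 * Editorial sketch (hypothetical call, not from this file): reserving
 * "resblks" regular blocks and no inodes against an inode's dquots, the
 * way the allocation paths elsewhere in XFS do it, looks like:
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, (long)resblks, 0,
 *					      XFS_QMOPT_RES_REGBLKS);
 *
 * XFS_QMOPT_RES_REGBLKS names the same flag as XFS_TRANS_DQ_RES_BLKS; a
 * later call with a negated count releases the reservation again if the
 * operation is backed out before commit.
 */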
833 
834 /*
835  * This routine is called to allocate a quotaoff log item.
836  */
837 xfs_qoff_logitem_t *
838 xfs_trans_get_qoff_item(
839 	xfs_trans_t		*tp,
840 	xfs_qoff_logitem_t	*startqoff,
841 	uint			flags)
842 {
843 	xfs_qoff_logitem_t	*q;
844 
845 	ASSERT(tp != NULL);
846 
847 	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
848 	ASSERT(q != NULL);
849 
850 	/*
851 	 * Get a log_item_desc to point at the new item.
852 	 */
853 	xfs_trans_add_item(tp, &q->qql_item);
854 	return q;
855 }
856 
857 
858 /*
859  * This is called to mark the quotaoff logitem as needing
860  * to be logged when the transaction is committed.  The logitem must
861  * already be associated with the given transaction.
862  */
863 void
864 xfs_trans_log_quotaoff_item(
865 	xfs_trans_t		*tp,
866 	xfs_qoff_logitem_t	*qlp)
867 {
868 	tp->t_flags |= XFS_TRANS_DIRTY;
869 	qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
870 }
871 
872 STATIC void
873 xfs_trans_alloc_dqinfo(
874 	xfs_trans_t	*tp)
875 {
876 	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
877 }
878 
879 void
880 xfs_trans_free_dqinfo(
881 	xfs_trans_t	*tp)
882 {
883 	if (!tp->t_dqinfo)
884 		return;
885 	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
886 	tp->t_dqinfo = NULL;
887 }
888