1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 #include "xfs_ag.h"
27 #include "xfs_ialloc.h"
28 #include "xfs_log_priv.h"
29 #include "xfs_health.h"
30 #include "xfs_da_format.h"
31 #include "xfs_metafile.h"
32 #include "xfs_rtgroup.h"
33 
34 /*
35  * The global quota manager. There is only one of these for the entire
36  * system, _not_ one per file system. XQM keeps track of the overall
37  * quota functionality, including maintaining the LRU freelist and
38  * radix trees of dquots.
39  */
40 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
41 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
42 
43 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
44 /*
45  * We use the batch lookup interface to iterate over the dquots as it
46  * currently is the only interface into the radix tree code that allows
47  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
48  * operations is fine as all callers run only during mount/umount
49  * or quotaoff.
50  */
51 #define XFS_DQ_LOOKUP_BATCH	32
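/*
 * The batch size bounds how many dquots are processed per acquisition of
 * qi_tree_lock, trading lock hold time against the per-call overhead of
 * radix_tree_gang_lookup().
 */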
52 
53 STATIC int
54 xfs_qm_dquot_walk(
55 	struct xfs_mount	*mp,
56 	xfs_dqtype_t		type,
57 	int			(*execute)(struct xfs_dquot *dqp, void *data),
58 	void			*data)
59 {
60 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
61 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
62 	uint32_t		next_index;
63 	int			last_error = 0;
64 	int			skipped;
65 	int			nr_found;
66 
67 restart:
68 	skipped = 0;
69 	next_index = 0;
70 	nr_found = 0;
71 
72 	while (1) {
73 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
74 		int		error;
75 		int		i;
76 
77 		mutex_lock(&qi->qi_tree_lock);
78 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
79 					next_index, XFS_DQ_LOOKUP_BATCH);
80 		if (!nr_found) {
81 			mutex_unlock(&qi->qi_tree_lock);
82 			break;
83 		}
84 
85 		for (i = 0; i < nr_found; i++) {
86 			struct xfs_dquot *dqp = batch[i];
87 
88 			next_index = dqp->q_id + 1;
89 
90 			error = execute(batch[i], data);
91 			if (error == -EAGAIN) {
92 				skipped++;
93 				continue;
94 			}
95 			if (error && last_error != -EFSCORRUPTED)
96 				last_error = error;
97 		}
98 
99 		mutex_unlock(&qi->qi_tree_lock);
100 
101 		/* bail out if the filesystem is corrupted.  */
102 		if (last_error == -EFSCORRUPTED) {
103 			skipped = 0;
104 			break;
105 		}
106 		/* we're done if id overflows back to zero */
107 		if (!next_index)
108 			break;
109 	}
110 
111 	if (skipped) {
112 		delay(1);
113 		goto restart;
114 	}
115 
116 	return last_error;
117 }
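/*
 * Callback contract for xfs_qm_dquot_walk(), as implemented above: return 0
 * on success, -EAGAIN to have the dquot revisited after a short delay,
 * -EFSCORRUPTED to abort the walk, or any other error to record it and keep
 * walking.  A minimal sketch of a walker (count_one() is hypothetical, for
 * illustration only):
 *
 *	static int count_one(struct xfs_dquot *dqp, void *data)
 *	{
 *		(*(uint64_t *)data)++;
 *		return 0;
 *	}
 *
 *	uint64_t ndquots = 0;
 *	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, count_one, &ndquots);
 */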
118 
119 
120 /*
121  * Purge a dquot from all tracking data structures and free it.
122  */
123 STATIC int
124 xfs_qm_dqpurge(
125 	struct xfs_dquot	*dqp,
126 	void			*data)
127 {
128 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
129 	int			error = -EAGAIN;
130 
131 	xfs_dqlock(dqp);
132 	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
133 		goto out_unlock;
134 
135 	dqp->q_flags |= XFS_DQFLAG_FREEING;
136 
137 	xfs_dqflock(dqp);
138 
139 	/*
140 	 * If we are turning this type of quota off, we don't care
141 	 * about the dirty metadata sitting in this dquot. OTOH, if
142 	 * we're unmounting, we do care, so we flush it and wait.
143 	 */
144 	if (XFS_DQ_IS_DIRTY(dqp)) {
145 		struct xfs_buf	*bp = NULL;
146 
147 		/*
148 		 * We don't care about getting disk errors here. We need
149 		 * to purge this dquot anyway, so we go ahead regardless.
150 		 */
151 		error = xfs_dquot_use_attached_buf(dqp, &bp);
152 		if (error == -EAGAIN) {
153 			xfs_dqfunlock(dqp);
154 			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
155 			goto out_unlock;
156 		}
157 		if (!bp)
158 			goto out_funlock;
159 
160 		/*
161 		 * dqflush completes dqflock on error, and the bwrite ioend
162 		 * does it on success.
163 		 */
164 		error = xfs_qm_dqflush(dqp, bp);
165 		if (!error) {
166 			error = xfs_bwrite(bp);
167 			xfs_buf_relse(bp);
168 		}
169 		xfs_dqflock(dqp);
170 	}
171 	xfs_dquot_detach_buf(dqp);
172 
173 out_funlock:
174 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
175 	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
176 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
177 
178 	xfs_dqfunlock(dqp);
179 	xfs_dqunlock(dqp);
180 
181 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
182 	qi->qi_dquots--;
183 
184 	/*
185 	 * We move dquots to the freelist as soon as their reference count
186 	 * hits zero, so it really should be on the freelist here.
187 	 */
188 	ASSERT(!list_empty(&dqp->q_lru));
189 	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
190 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
191 
192 	xfs_qm_dqdestroy(dqp);
193 	return 0;
194 
195 out_unlock:
196 	xfs_dqunlock(dqp);
197 	return error;
198 }
199 
200 /*
201  * Purge the dquot cache.
202  */
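/*
 * Note that xfs_qm_dqpurge() returns -EAGAIN for dquots that are busy or
 * already being freed, and xfs_qm_dquot_walk() keeps restarting until no
 * entries were skipped, so each walk below returns only once that quota
 * type has been purged completely.
 */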
203 static void
204 xfs_qm_dqpurge_all(
205 	struct xfs_mount	*mp)
206 {
207 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
208 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
209 	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
210 }
211 
212 /*
213  * Just destroy the quotainfo structure.
214  */
215 void
216 xfs_qm_unmount(
217 	struct xfs_mount	*mp)
218 {
219 	if (mp->m_quotainfo) {
220 		xfs_qm_dqpurge_all(mp);
221 		xfs_qm_destroy_quotainfo(mp);
222 	}
223 }
224 
225 static void
226 xfs_qm_unmount_rt(
227 	struct xfs_mount	*mp)
228 {
229 	struct xfs_rtgroup	*rtg = xfs_rtgroup_grab(mp, 0);
230 
231 	if (!rtg)
232 		return;
233 	if (rtg_bitmap(rtg))
234 		xfs_qm_dqdetach(rtg_bitmap(rtg));
235 	if (rtg_summary(rtg))
236 		xfs_qm_dqdetach(rtg_summary(rtg));
237 	xfs_rtgroup_rele(rtg);
238 }
239 
240 STATIC void
241 xfs_qm_destroy_quotainos(
242 	struct xfs_quotainfo	*qi)
243 {
244 	if (qi->qi_uquotaip) {
245 		xfs_irele(qi->qi_uquotaip);
246 		qi->qi_uquotaip = NULL; /* paranoia */
247 	}
248 	if (qi->qi_gquotaip) {
249 		xfs_irele(qi->qi_gquotaip);
250 		qi->qi_gquotaip = NULL;
251 	}
252 	if (qi->qi_pquotaip) {
253 		xfs_irele(qi->qi_pquotaip);
254 		qi->qi_pquotaip = NULL;
255 	}
256 	if (qi->qi_dirip) {
257 		xfs_irele(qi->qi_dirip);
258 		qi->qi_dirip = NULL;
259 	}
260 }
261 
262 /*
263  * Called from the vfsops layer.
264  */
265 void
266 xfs_qm_unmount_quotas(
267 	xfs_mount_t	*mp)
268 {
269 	/*
270 	 * Release the dquots that the root inode, et al might be holding,
271 	 * before we flush quotas and blow away the quotainfo structure.
272 	 */
273 	ASSERT(mp->m_rootip);
274 	xfs_qm_dqdetach(mp->m_rootip);
275 
276 	/*
277 	 * For pre-RTG file systems, the RT inodes have quotas attached;
278 	 * detach them now.
279 	 */
280 	if (!xfs_has_rtgroups(mp))
281 		xfs_qm_unmount_rt(mp);
282 
283 	/*
284 	 * Release the quota inodes.
285 	 */
286 	if (mp->m_quotainfo)
287 		xfs_qm_destroy_quotainos(mp->m_quotainfo);
288 }
289 
290 STATIC int
291 xfs_qm_dqattach_one(
292 	struct xfs_inode	*ip,
293 	xfs_dqtype_t		type,
294 	bool			doalloc,
295 	struct xfs_dquot	**IO_idqpp)
296 {
297 	struct xfs_dquot	*dqp;
298 	int			error;
299 
300 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
301 	error = 0;
302 
303 	/*
304 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
305 	 * or &i_gdquot. This makes the code look weird, but it keeps the logic a lot
306 	 * simpler.
307 	 */
308 	dqp = *IO_idqpp;
309 	if (dqp) {
310 		trace_xfs_dqattach_found(dqp);
311 		return 0;
312 	}
313 
314 	/*
315 	 * Find the dquot from somewhere. This bumps the reference count of
316 	 * the dquot and returns it locked.  This can return ENOENT if the dquot didn't
317 	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
318 	 * turned off suddenly.
319 	 */
320 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
321 	if (error)
322 		return error;
323 
324 	trace_xfs_dqattach_get(dqp);
325 
326 	/*
327 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
328 	 * that the dquot returned is the one that should go in the inode.
329 	 */
330 	*IO_idqpp = dqp;
331 	xfs_dqunlock(dqp);
332 	return 0;
333 }
334 
335 static bool
336 xfs_qm_need_dqattach(
337 	struct xfs_inode	*ip)
338 {
339 	struct xfs_mount	*mp = ip->i_mount;
340 
341 	if (!XFS_IS_QUOTA_ON(mp))
342 		return false;
343 	if (!XFS_NOT_DQATTACHED(mp, ip))
344 		return false;
345 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
346 		return false;
347 	if (xfs_is_metadir_inode(ip))
348 		return false;
349 	return true;
350 }
351 
352 /*
353  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
354  * into account.
355  * If @doalloc is true, the dquot(s) will be allocated if needed.
356  * Inode may get unlocked and relocked in here, and the caller must deal with
357  * the consequences.
358  */
359 int
360 xfs_qm_dqattach_locked(
361 	xfs_inode_t	*ip,
362 	bool		doalloc)
363 {
364 	xfs_mount_t	*mp = ip->i_mount;
365 	int		error = 0;
366 
367 	if (!xfs_qm_need_dqattach(ip))
368 		return 0;
369 
370 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
371 	ASSERT(!xfs_is_metadir_inode(ip));
372 
373 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
374 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
375 				doalloc, &ip->i_udquot);
376 		if (error)
377 			goto done;
378 		ASSERT(ip->i_udquot);
379 	}
380 
381 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
382 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
383 				doalloc, &ip->i_gdquot);
384 		if (error)
385 			goto done;
386 		ASSERT(ip->i_gdquot);
387 	}
388 
389 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
390 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
391 				doalloc, &ip->i_pdquot);
392 		if (error)
393 			goto done;
394 		ASSERT(ip->i_pdquot);
395 	}
396 
397 done:
398 	/*
399 	 * Don't worry about the dquots that we may have attached before any
400 	 * error - they'll get detached later if it has not already been done.
401 	 */
402 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
403 	return error;
404 }
405 
406 int
407 xfs_qm_dqattach(
408 	struct xfs_inode	*ip)
409 {
410 	int			error;
411 
412 	if (!xfs_qm_need_dqattach(ip))
413 		return 0;
414 
415 	xfs_ilock(ip, XFS_ILOCK_EXCL);
416 	error = xfs_qm_dqattach_locked(ip, false);
417 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
418 
419 	return error;
420 }
421 
422 /*
423  * Release dquots (and their references) if any.
424  * The inode should be locked EXCL except when this is called by
425  * xfs_ireclaim.
426  */
427 void
428 xfs_qm_dqdetach(
429 	xfs_inode_t	*ip)
430 {
431 	if (xfs_is_metadir_inode(ip))
432 		return;
433 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
434 		return;
435 
436 	trace_xfs_dquot_dqdetach(ip);
437 
438 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
439 	if (ip->i_udquot) {
440 		xfs_qm_dqrele(ip->i_udquot);
441 		ip->i_udquot = NULL;
442 	}
443 	if (ip->i_gdquot) {
444 		xfs_qm_dqrele(ip->i_gdquot);
445 		ip->i_gdquot = NULL;
446 	}
447 	if (ip->i_pdquot) {
448 		xfs_qm_dqrele(ip->i_pdquot);
449 		ip->i_pdquot = NULL;
450 	}
451 }
452 
453 struct xfs_qm_isolate {
454 	struct list_head	buffers;
455 	struct list_head	dispose;
456 };
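/*
 * ->buffers collects dquot buffers that the isolate callback queues for
 * delwri submission; ->dispose collects clean, unreferenced dquots that
 * xfs_qm_shrink_scan() subsequently frees via xfs_qm_dqfree_one().
 */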
457 
458 static enum lru_status
459 xfs_qm_dquot_isolate(
460 	struct list_head	*item,
461 	struct list_lru_one	*lru,
462 	void			*arg)
463 		__releases(&lru->lock) __acquires(&lru->lock)
464 {
465 	struct xfs_dquot	*dqp = container_of(item,
466 						struct xfs_dquot, q_lru);
467 	struct xfs_qm_isolate	*isol = arg;
468 
469 	if (!xfs_dqlock_nowait(dqp))
470 		goto out_miss_busy;
471 
472 	/*
473 	 * If something else is freeing this dquot and hasn't yet removed it
474 	 * from the LRU, leave it for the freeing task to complete the freeing
475 	 * process rather than risk it being freed from under us here.
476 	 */
477 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
478 		goto out_miss_unlock;
479 
480 	/*
481 	 * This dquot has acquired a reference in the meantime; remove it from
482 	 * the freelist and try again.
483 	 */
484 	if (dqp->q_nrefs) {
485 		xfs_dqunlock(dqp);
486 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
487 
488 		trace_xfs_dqreclaim_want(dqp);
489 		list_lru_isolate(lru, &dqp->q_lru);
490 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
491 		return LRU_REMOVED;
492 	}
493 
494 	/*
495 	 * If the dquot is dirty, flush it. If it's already being flushed, just
496 	 * skip it so there is time for the IO to complete before we try to
497 	 * reclaim it again on the next LRU pass.
498 	 */
499 	if (!xfs_dqflock_nowait(dqp))
500 		goto out_miss_unlock;
501 
502 	if (XFS_DQ_IS_DIRTY(dqp)) {
503 		struct xfs_buf	*bp = NULL;
504 		int		error;
505 
506 		trace_xfs_dqreclaim_dirty(dqp);
507 
508 		/* we have to drop the LRU lock to flush the dquot */
509 		spin_unlock(&lru->lock);
510 
511 		error = xfs_dquot_use_attached_buf(dqp, &bp);
512 		if (!bp || error == -EAGAIN) {
513 			xfs_dqfunlock(dqp);
514 			goto out_unlock_dirty;
515 		}
516 
517 		/*
518 		 * dqflush completes dqflock on error, and the delwri ioend
519 		 * does it on success.
520 		 */
521 		error = xfs_qm_dqflush(dqp, bp);
522 		if (error)
523 			goto out_unlock_dirty;
524 
525 		xfs_buf_delwri_queue(bp, &isol->buffers);
526 		xfs_buf_relse(bp);
527 		goto out_unlock_dirty;
528 	}
529 
530 	xfs_dquot_detach_buf(dqp);
531 	xfs_dqfunlock(dqp);
532 
533 	/*
534 	 * Prevent lookups now that we are past the point of no return.
535 	 */
536 	dqp->q_flags |= XFS_DQFLAG_FREEING;
537 	xfs_dqunlock(dqp);
538 
539 	ASSERT(dqp->q_nrefs == 0);
540 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
541 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
542 	trace_xfs_dqreclaim_done(dqp);
543 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
544 	return LRU_REMOVED;
545 
546 out_miss_unlock:
547 	xfs_dqunlock(dqp);
548 out_miss_busy:
549 	trace_xfs_dqreclaim_busy(dqp);
550 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
551 	return LRU_SKIP;
552 
553 out_unlock_dirty:
554 	trace_xfs_dqreclaim_busy(dqp);
555 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
556 	xfs_dqunlock(dqp);
557 	return LRU_RETRY;
558 }
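/*
 * A note on the lru_status returns above: LRU_REMOVED means the dquot left
 * the LRU, either because it was rescued by a new reference or because it
 * was moved to the dispose list; LRU_SKIP leaves a busy dquot for a later
 * pass; and LRU_RETRY is used on the dirty path because the lru lock was
 * dropped there, which forces the walk to restart.
 */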
559 
560 static unsigned long
561 xfs_qm_shrink_scan(
562 	struct shrinker		*shrink,
563 	struct shrink_control	*sc)
564 {
565 	struct xfs_quotainfo	*qi = shrink->private_data;
566 	struct xfs_qm_isolate	isol;
567 	unsigned long		freed;
568 	int			error;
569 
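	/*
	 * Reclaim can flush dirty dquots and wait on buffer I/O, so only
	 * proceed when the allocation context allows filesystem recursion
	 * and direct reclaim, i.e. both __GFP_FS and __GFP_DIRECT_RECLAIM.
	 */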
570 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
571 		return 0;
572 
573 	INIT_LIST_HEAD(&isol.buffers);
574 	INIT_LIST_HEAD(&isol.dispose);
575 
576 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
577 				     xfs_qm_dquot_isolate, &isol);
578 
579 	error = xfs_buf_delwri_submit(&isol.buffers);
580 	if (error)
581 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
582 
583 	while (!list_empty(&isol.dispose)) {
584 		struct xfs_dquot	*dqp;
585 
586 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
587 		list_del_init(&dqp->q_lru);
588 		xfs_qm_dqfree_one(dqp);
589 	}
590 
591 	return freed;
592 }
593 
594 static unsigned long
595 xfs_qm_shrink_count(
596 	struct shrinker		*shrink,
597 	struct shrink_control	*sc)
598 {
599 	struct xfs_quotainfo	*qi = shrink->private_data;
600 
601 	return list_lru_shrink_count(&qi->qi_lru, sc);
602 }
603 
604 STATIC void
605 xfs_qm_set_defquota(
606 	struct xfs_mount	*mp,
607 	xfs_dqtype_t		type,
608 	struct xfs_quotainfo	*qinf)
609 {
610 	struct xfs_dquot	*dqp;
611 	struct xfs_def_quota	*defq;
612 	int			error;
613 
614 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
615 	if (error)
616 		return;
617 
618 	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
619 
620 	/*
621 	 * Timers and warnings have already been set; just set the
622 	 * default limits for this quota type
623 	 */
624 	defq->blk.hard = dqp->q_blk.hardlimit;
625 	defq->blk.soft = dqp->q_blk.softlimit;
626 	defq->ino.hard = dqp->q_ino.hardlimit;
627 	defq->ino.soft = dqp->q_ino.softlimit;
628 	defq->rtb.hard = dqp->q_rtb.hardlimit;
629 	defq->rtb.soft = dqp->q_rtb.softlimit;
630 	xfs_qm_dqdestroy(dqp);
631 }
632 
633 /* Initialize quota time limits from the root dquot. */
634 static void
635 xfs_qm_init_timelimits(
636 	struct xfs_mount	*mp,
637 	xfs_dqtype_t		type)
638 {
639 	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
640 	struct xfs_def_quota	*defq;
641 	struct xfs_dquot	*dqp;
642 	int			error;
643 
644 	defq = xfs_get_defquota(qinf, type);
645 
646 	defq->blk.time = XFS_QM_BTIMELIMIT;
647 	defq->ino.time = XFS_QM_ITIMELIMIT;
648 	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
649 
650 	/*
651 	 * We try to get the limits from the superuser's limits fields.
652 	 * This is quite hacky, but it is standard quota practice.
653 	 *
654 	 * Since we may not have done a quotacheck by this point, just read
655 	 * the dquot without attaching it to any hashtables or lists.
656 	 */
657 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
658 	if (error)
659 		return;
660 
661 	/*
662 	 * The warnings and timers set the grace period given to
663 	 * a user or group before no more writes are allowed. If it is
664 	 * zero, a default is used.
665 	 */
666 	if (dqp->q_blk.timer)
667 		defq->blk.time = dqp->q_blk.timer;
668 	if (dqp->q_ino.timer)
669 		defq->ino.time = dqp->q_ino.timer;
670 	if (dqp->q_rtb.timer)
671 		defq->rtb.time = dqp->q_rtb.timer;
672 
673 	xfs_qm_dqdestroy(dqp);
674 }
675 
676 static int
677 xfs_qm_load_metadir_qinos(
678 	struct xfs_mount	*mp,
679 	struct xfs_quotainfo	*qi)
680 {
681 	struct xfs_trans	*tp;
682 	int			error;
683 
684 	error = xfs_trans_alloc_empty(mp, &tp);
685 	if (error)
686 		return error;
687 
688 	error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);
689 	if (error == -ENOENT) {
690 		/* no quota directory yet, but we'll create one later */
691 		error = 0;
692 		goto out_trans;
693 	}
694 	if (error)
695 		goto out_trans;
696 
697 	if (XFS_IS_UQUOTA_ON(mp)) {
698 		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER,
699 				&qi->qi_uquotaip);
700 		if (error && error != -ENOENT)
701 			goto out_trans;
702 	}
703 
704 	if (XFS_IS_GQUOTA_ON(mp)) {
705 		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP,
706 				&qi->qi_gquotaip);
707 		if (error && error != -ENOENT)
708 			goto out_trans;
709 	}
710 
711 	if (XFS_IS_PQUOTA_ON(mp)) {
712 		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ,
713 				&qi->qi_pquotaip);
714 		if (error && error != -ENOENT)
715 			goto out_trans;
716 	}
717 
718 	error = 0;
719 out_trans:
720 	xfs_trans_cancel(tp);
721 	return error;
722 }
723 
724 /* Create quota inodes in the metadata directory tree. */
725 STATIC int
726 xfs_qm_create_metadir_qinos(
727 	struct xfs_mount	*mp,
728 	struct xfs_quotainfo	*qi)
729 {
730 	int			error;
731 
732 	if (!qi->qi_dirip) {
733 		error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip);
734 		if (error && error != -EEXIST)
735 			return error;
736 		/*
737 		 * If the /quotas dirent points to an inode that isn't
738 		 * loadable, qi_dirip will be NULL but mkdir_parent will return
739 		 * -EEXIST.  In this case the metadir is corrupt, so bail out.
740 		 */
741 		if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL))
742 			return -EFSCORRUPTED;
743 	}
744 
745 	if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
746 		error = xfs_dqinode_metadir_create(qi->qi_dirip,
747 				XFS_DQTYPE_USER, &qi->qi_uquotaip);
748 		if (error)
749 			return error;
750 	}
751 
752 	if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
753 		error = xfs_dqinode_metadir_create(qi->qi_dirip,
754 				XFS_DQTYPE_GROUP, &qi->qi_gquotaip);
755 		if (error)
756 			return error;
757 	}
758 
759 	if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
760 		error = xfs_dqinode_metadir_create(qi->qi_dirip,
761 				XFS_DQTYPE_PROJ, &qi->qi_pquotaip);
762 		if (error)
763 			return error;
764 	}
765 
766 	return 0;
767 }
768 
769 /*
770  * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for
771  * creating quota files on a metadir filesystem.
772  */
773 STATIC int
774 xfs_qm_prep_metadir_sb(
775 	struct xfs_mount	*mp)
776 {
777 	struct xfs_trans	*tp;
778 	int			error;
779 
780 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
781 	if (error)
782 		return error;
783 
784 	spin_lock(&mp->m_sb_lock);
785 
786 	xfs_add_quota(mp);
787 
788 	/* qflags will get updated fully _after_ quotacheck */
789 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
790 
791 	spin_unlock(&mp->m_sb_lock);
792 	xfs_log_sb(tp);
793 
794 	return xfs_trans_commit(tp);
795 }
796 
797 /*
798  * Load existing quota inodes or create them.  Since this is a V5 filesystem,
799  * we don't have to deal with the grp/prjquota switcheroo thing from V4.
800  */
801 STATIC int
802 xfs_qm_init_metadir_qinos(
803 	struct xfs_mount	*mp)
804 {
805 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
806 	int			error;
807 
808 	if (!xfs_has_quota(mp)) {
809 		error = xfs_qm_prep_metadir_sb(mp);
810 		if (error)
811 			return error;
812 	}
813 
814 	error = xfs_qm_load_metadir_qinos(mp, qi);
815 	if (error)
816 		goto out_err;
817 
818 	error = xfs_qm_create_metadir_qinos(mp, qi);
819 	if (error)
820 		goto out_err;
821 
822 	/* The only user of the quota dir inode is online fsck */
823 #if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
824 	xfs_irele(qi->qi_dirip);
825 	qi->qi_dirip = NULL;
826 #endif
827 	return 0;
828 out_err:
829 	xfs_qm_destroy_quotainos(mp->m_quotainfo);
830 	return error;
831 }
832 
833 /*
834  * This initializes all the quota information that's kept in the
835  * mount structure
836  */
837 STATIC int
838 xfs_qm_init_quotainfo(
839 	struct xfs_mount	*mp)
840 {
841 	struct xfs_quotainfo	*qinf;
842 	int			error;
843 
844 	ASSERT(XFS_IS_QUOTA_ON(mp));
845 
846 	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
847 					GFP_KERNEL | __GFP_NOFAIL);
848 
849 	error = list_lru_init(&qinf->qi_lru);
850 	if (error)
851 		goto out_free_qinf;
852 
853 	/*
854 	 * See if the quota inodes are set up, and if not, allocate them,
855 	 * and change the superblock accordingly.
856 	 */
857 	if (xfs_has_metadir(mp))
858 		error = xfs_qm_init_metadir_qinos(mp);
859 	else
860 		error = xfs_qm_init_quotainos(mp);
861 	if (error)
862 		goto out_free_lru;
863 
864 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
865 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
866 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
867 	mutex_init(&qinf->qi_tree_lock);
868 
869 	/* mutex used to serialize quotaoffs */
870 	mutex_init(&qinf->qi_quotaofflock);
871 
872 	/* Precalc some constants */
873 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
874 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
875 	if (xfs_has_bigtime(mp)) {
876 		qinf->qi_expiry_min =
877 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
878 		qinf->qi_expiry_max =
879 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
880 	} else {
881 		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
882 		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
883 	}
884 	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
885 			qinf->qi_expiry_max);
886 
887 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
888 
889 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
890 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
891 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
892 
893 	if (XFS_IS_UQUOTA_ON(mp))
894 		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
895 	if (XFS_IS_GQUOTA_ON(mp))
896 		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
897 	if (XFS_IS_PQUOTA_ON(mp))
898 		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
899 
900 	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
901 					   mp->m_super->s_id);
902 	if (!qinf->qi_shrinker) {
903 		error = -ENOMEM;
904 		goto out_free_inos;
905 	}
906 
907 	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
908 	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
909 	qinf->qi_shrinker->private_data = qinf;
910 
911 	shrinker_register(qinf->qi_shrinker);
912 
913 	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
914 	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
915 
916 	return 0;
917 
918 out_free_inos:
919 	mutex_destroy(&qinf->qi_quotaofflock);
920 	mutex_destroy(&qinf->qi_tree_lock);
921 	xfs_qm_destroy_quotainos(qinf);
922 out_free_lru:
923 	list_lru_destroy(&qinf->qi_lru);
924 out_free_qinf:
925 	kfree(qinf);
926 	mp->m_quotainfo = NULL;
927 	return error;
928 }
929 
930 /*
931  * Gets called when unmounting a filesystem or when all quotas get
932  * turned off.
933  * This purges the quota inodes, destroys locks and frees itself.
934  */
935 void
936 xfs_qm_destroy_quotainfo(
937 	struct xfs_mount	*mp)
938 {
939 	struct xfs_quotainfo	*qi;
940 
941 	qi = mp->m_quotainfo;
942 	ASSERT(qi != NULL);
943 
944 	shrinker_free(qi->qi_shrinker);
945 	list_lru_destroy(&qi->qi_lru);
946 	xfs_qm_destroy_quotainos(qi);
947 	mutex_destroy(&qi->qi_tree_lock);
948 	mutex_destroy(&qi->qi_quotaofflock);
949 	kfree(qi);
950 	mp->m_quotainfo = NULL;
951 }
952 
953 static inline enum xfs_metafile_type
954 xfs_qm_metafile_type(
955 	unsigned int		flags)
956 {
957 	if (flags & XFS_QMOPT_UQUOTA)
958 		return XFS_METAFILE_USRQUOTA;
959 	else if (flags & XFS_QMOPT_GQUOTA)
960 		return XFS_METAFILE_GRPQUOTA;
961 	return XFS_METAFILE_PRJQUOTA;
962 }
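/*
 * Callers of xfs_qm_metafile_type() are expected to pass exactly one of the
 * XFS_QMOPT_[UGP]QUOTA flags; project quota is the fallthrough when neither
 * the user nor the group flag is set.
 */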
963 
964 /*
965  * Create an inode and return with a reference already taken, but unlocked.
966  * This is how we create quota inodes.
967  */
968 STATIC int
969 xfs_qm_qino_alloc(
970 	struct xfs_mount	*mp,
971 	struct xfs_inode	**ipp,
972 	unsigned int		flags)
973 {
974 	struct xfs_trans	*tp;
975 	enum xfs_metafile_type	metafile_type = xfs_qm_metafile_type(flags);
976 	int			error;
977 	bool			need_alloc = true;
978 
979 	*ipp = NULL;
980 	/*
981 	 * With a superblock that doesn't have a separate pquotino, we
982 	 * share an inode between gquota and pquota. If the on-disk
983 	 * superblock has GQUOTA and the filesystem is now mounted
984 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
985 	 * vice-versa.
986 	 */
987 	if (!xfs_has_pquotino(mp) &&
988 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
989 		xfs_ino_t ino = NULLFSINO;
990 
991 		if ((flags & XFS_QMOPT_PQUOTA) &&
992 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
993 			ino = mp->m_sb.sb_gquotino;
994 			if (XFS_IS_CORRUPT(mp,
995 					   mp->m_sb.sb_pquotino != NULLFSINO)) {
996 				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
997 				return -EFSCORRUPTED;
998 			}
999 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
1000 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
1001 			ino = mp->m_sb.sb_pquotino;
1002 			if (XFS_IS_CORRUPT(mp,
1003 					   mp->m_sb.sb_gquotino != NULLFSINO)) {
1004 				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
1005 				return -EFSCORRUPTED;
1006 			}
1007 		}
1008 		if (ino != NULLFSINO) {
1009 			error = xfs_metafile_iget(mp, ino, metafile_type, ipp);
1010 			if (error)
1011 				return error;
1012 
1013 			mp->m_sb.sb_gquotino = NULLFSINO;
1014 			mp->m_sb.sb_pquotino = NULLFSINO;
1015 			need_alloc = false;
1016 		}
1017 	}
1018 
1019 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
1020 			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
1021 			0, 0, &tp);
1022 	if (error)
1023 		return error;
1024 
1025 	if (need_alloc) {
1026 		struct xfs_icreate_args	args = {
1027 			.mode		= S_IFREG,
1028 			.flags		= XFS_ICREATE_UNLINKABLE,
1029 		};
1030 		xfs_ino_t	ino;
1031 
1032 		error = xfs_dialloc(&tp, &args, &ino);
1033 		if (!error)
1034 			error = xfs_icreate(tp, ino, &args, ipp);
1035 		if (error) {
1036 			xfs_trans_cancel(tp);
1037 			return error;
1038 		}
1039 		if (xfs_has_metadir(mp))
1040 			xfs_metafile_set_iflag(tp, *ipp, metafile_type);
1041 	}
1042 
1043 	/*
1044 	 * Make the changes in the superblock, and log those too.
1045 	 * sbfields arg may contain fields other than *QUOTINO;
1046 	 * VERSIONNUM for example.
1047 	 */
1048 	spin_lock(&mp->m_sb_lock);
1049 	if (flags & XFS_QMOPT_SBVERSION) {
1050 		ASSERT(!xfs_has_quota(mp));
1051 
1052 		xfs_add_quota(mp);
1053 		mp->m_sb.sb_uquotino = NULLFSINO;
1054 		mp->m_sb.sb_gquotino = NULLFSINO;
1055 		mp->m_sb.sb_pquotino = NULLFSINO;
1056 
1057 		/* qflags will get updated fully _after_ quotacheck */
1058 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
1059 	}
1060 	if (flags & XFS_QMOPT_UQUOTA)
1061 		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
1062 	else if (flags & XFS_QMOPT_GQUOTA)
1063 		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
1064 	else
1065 		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
1066 	spin_unlock(&mp->m_sb_lock);
1067 	xfs_log_sb(tp);
1068 
1069 	error = xfs_trans_commit(tp);
1070 	if (error) {
1071 		ASSERT(xfs_is_shutdown(mp));
1072 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
1073 	}
1074 	if (need_alloc) {
1075 		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
1076 		xfs_finish_inode_setup(*ipp);
1077 	}
1078 	return error;
1079 }
1080 
1081 
1082 STATIC void
1083 xfs_qm_reset_dqcounts(
1084 	struct xfs_mount	*mp,
1085 	struct xfs_buf		*bp,
1086 	xfs_dqid_t		id,
1087 	xfs_dqtype_t		type)
1088 {
1089 	struct xfs_dqblk	*dqb;
1090 	int			j;
1091 
1092 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
1093 
1094 	/*
1095 	 * Reset all counters and timers. They'll be
1096 	 * started afresh by xfs_qm_quotacheck.
1097 	 */
1098 #ifdef DEBUG
1099 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
1100 		sizeof(struct xfs_dqblk);
1101 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
1102 #endif
1103 	dqb = bp->b_addr;
1104 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
1105 		struct xfs_disk_dquot	*ddq;
1106 
1107 		ddq = (struct xfs_disk_dquot *)&dqb[j];
1108 
1109 		/*
1110 		 * Do a sanity check, and if needed, repair the dqblk. Don't
1111 		 * output any warnings because it's perfectly possible to
1112 		 * find uninitialised dquot blks. See comment in
1113 		 * xfs_dquot_verify.
1114 		 */
1115 		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
1116 		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
1117 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
1118 
1119 		/*
1120 		 * Reset type in case we are reusing group quota file for
1121 		 * project quotas or vice versa
1122 		 */
1123 		ddq->d_type = type;
1124 		ddq->d_bcount = 0;
1125 		ddq->d_icount = 0;
1126 		ddq->d_rtbcount = 0;
1127 
1128 		/*
1129 		 * dquot id 0 stores the default grace period and the maximum
1130 		 * warning limit that were set by the administrator, so we
1131 		 * should not reset them.
1132 		 */
1133 		if (ddq->d_id != 0) {
1134 			ddq->d_btimer = 0;
1135 			ddq->d_itimer = 0;
1136 			ddq->d_rtbtimer = 0;
1137 			ddq->d_bwarns = 0;
1138 			ddq->d_iwarns = 0;
1139 			ddq->d_rtbwarns = 0;
1140 			if (xfs_has_bigtime(mp))
1141 				ddq->d_type |= XFS_DQTYPE_BIGTIME;
1142 		}
1143 
1144 		if (xfs_has_crc(mp)) {
1145 			xfs_update_cksum((char *)&dqb[j],
1146 					 sizeof(struct xfs_dqblk),
1147 					 XFS_DQUOT_CRC_OFF);
1148 		}
1149 	}
1150 }
1151 
1152 STATIC int
1153 xfs_qm_reset_dqcounts_all(
1154 	struct xfs_mount	*mp,
1155 	xfs_dqid_t		firstid,
1156 	xfs_fsblock_t		bno,
1157 	xfs_filblks_t		blkcnt,
1158 	xfs_dqtype_t		type,
1159 	struct list_head	*buffer_list)
1160 {
1161 	struct xfs_buf		*bp;
1162 	int			error = 0;
1163 
1164 	ASSERT(blkcnt > 0);
1165 
1166 	/*
1167 	 * Blkcnt arg can be a very big number, and might even be
1168 	 * larger than the log itself. So, we have to break it up into
1169 	 * manageable-sized transactions.
1170 	 * Note that we don't start a permanent transaction here; we might
1171 	 * not be able to get a log reservation for the whole thing up front,
1172 	 * and we don't really care to either, because we just discard
1173 	 * everything if we were to crash in the middle of this loop.
1174 	 */
1175 	while (blkcnt--) {
1176 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1177 			      XFS_FSB_TO_DADDR(mp, bno),
1178 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1179 			      &xfs_dquot_buf_ops);
1180 
1181 		/*
1182 		 * CRC and validation errors will return -EFSCORRUPTED here. If
1183 		 * this occurs, re-read without CRC validation so that we can
1184 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
1185 		 * will leave a trace in the log indicating corruption has
1186 		 * been detected.
1187 		 */
1188 		if (error == -EFSCORRUPTED) {
1189 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1190 				      XFS_FSB_TO_DADDR(mp, bno),
1191 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1192 				      NULL);
1193 		}
1194 
1195 		if (error)
1196 			break;
1197 
1198 		/*
1199 		 * A corrupt buffer might not have a verifier attached, so
1200 		 * make sure we have the correct one attached before writeback
1201 		 * occurs.
1202 		 */
1203 		bp->b_ops = &xfs_dquot_buf_ops;
1204 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1205 		xfs_buf_delwri_queue(bp, buffer_list);
1206 		xfs_buf_relse(bp);
1207 
1208 		/* go to the next block. */
1209 		bno++;
1210 		firstid += mp->m_quotainfo->qi_dqperchunk;
1211 	}
1212 
1213 	return error;
1214 }
1215 
1216 /*
1217  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
1218  * counters for every chunk of dquots that we find.
1219  */
1220 STATIC int
1221 xfs_qm_reset_dqcounts_buf(
1222 	struct xfs_mount	*mp,
1223 	struct xfs_inode	*qip,
1224 	xfs_dqtype_t		type,
1225 	struct list_head	*buffer_list)
1226 {
1227 	struct xfs_bmbt_irec	*map;
1228 	int			i, nmaps;	/* number of map entries */
1229 	int			error;		/* return value */
1230 	xfs_fileoff_t		lblkno;
1231 	xfs_filblks_t		maxlblkcnt;
1232 	xfs_dqid_t		firstid;
1233 	xfs_fsblock_t		rablkno;
1234 	xfs_filblks_t		rablkcnt;
1235 
1236 	error = 0;
1237 	/*
1238 	 * This looks racy, but we can't keep an inode lock across a
1239 	 * trans_reserve. But, this gets called during quotacheck, and that
1240 	 * happens only at mount time which is single threaded.
1241 	 */
1242 	if (qip->i_nblocks == 0)
1243 		return 0;
1244 
1245 	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
1246 			GFP_KERNEL | __GFP_NOFAIL);
1247 
1248 	lblkno = 0;
1249 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1250 	do {
1251 		uint		lock_mode;
1252 
1253 		nmaps = XFS_DQITER_MAP_SIZE;
1254 		/*
1255 		 * We aren't changing the inode itself. Just changing
1256 		 * some of its data. No new blocks are added here, and
1257 		 * the inode is never added to the transaction.
1258 		 */
1259 		lock_mode = xfs_ilock_data_map_shared(qip);
1260 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1261 				       map, &nmaps, 0);
1262 		xfs_iunlock(qip, lock_mode);
1263 		if (error)
1264 			break;
1265 
1266 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1267 		for (i = 0; i < nmaps; i++) {
1268 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1269 			ASSERT(map[i].br_blockcount);
1270 
1271 
1272 			lblkno += map[i].br_blockcount;
1273 
1274 			if (map[i].br_startblock == HOLESTARTBLOCK)
1275 				continue;
1276 
1277 			firstid = (xfs_dqid_t) map[i].br_startoff *
1278 				mp->m_quotainfo->qi_dqperchunk;
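			/*
			 * Each block of the quota file holds qi_dqperchunk
			 * dquots, so the chunk at br_startoff N covers dquot
			 * ids [N * qi_dqperchunk, (N + 1) * qi_dqperchunk).
			 */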
1279 			/*
1280 			 * Do a read-ahead on the next extent.
1281 			 */
1282 			if ((i+1 < nmaps) &&
1283 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1284 				rablkcnt =  map[i+1].br_blockcount;
1285 				rablkno = map[i+1].br_startblock;
1286 				while (rablkcnt--) {
1287 					xfs_buf_readahead(mp->m_ddev_targp,
1288 					       XFS_FSB_TO_DADDR(mp, rablkno),
1289 					       mp->m_quotainfo->qi_dqchunklen,
1290 					       &xfs_dquot_buf_ops);
1291 					rablkno++;
1292 				}
1293 			}
1294 			/*
1295 			 * Iterate thru all the blks in the extent and
1296 			 * reset the counters of all the dquots inside them.
1297 			 */
1298 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1299 						   map[i].br_startblock,
1300 						   map[i].br_blockcount,
1301 						   type, buffer_list);
1302 			if (error)
1303 				goto out;
1304 		}
1305 	} while (nmaps > 0);
1306 
1307 out:
1308 	kfree(map);
1309 	return error;
1310 }
1311 
1312 /*
1313  * Called by dqusage_adjust in doing a quotacheck.
1314  *
1315  * Given the inode and a dquot id, this updates both the incore dquot as well
1316  * as the buffer copy. This is so that once the quotacheck is done, we can
1317  * just log all the buffers, as opposed to logging numerous updates to
1318  * individual dquots.
1319  */
1320 STATIC int
1321 xfs_qm_quotacheck_dqadjust(
1322 	struct xfs_inode	*ip,
1323 	xfs_dqtype_t		type,
1324 	xfs_qcnt_t		nblks,
1325 	xfs_qcnt_t		rtblks)
1326 {
1327 	struct xfs_mount	*mp = ip->i_mount;
1328 	struct xfs_dquot	*dqp;
1329 	xfs_dqid_t		id;
1330 	int			error;
1331 
1332 	id = xfs_qm_id_for_quotatype(ip, type);
1333 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1334 	if (error) {
1335 		/*
1336 		 * Shouldn't be able to turn off quotas here.
1337 		 */
1338 		ASSERT(error != -ESRCH);
1339 		ASSERT(error != -ENOENT);
1340 		return error;
1341 	}
1342 
1343 	error = xfs_dquot_attach_buf(NULL, dqp);
1344 	if (error)
1345 		return error;
1346 
1347 	trace_xfs_dqadjust(dqp);
1348 
1349 	/*
1350 	 * Adjust the inode count and the block count to reflect this inode's
1351 	 * resource usage.
1352 	 */
1353 	dqp->q_ino.count++;
1354 	dqp->q_ino.reserved++;
1355 	if (nblks) {
1356 		dqp->q_blk.count += nblks;
1357 		dqp->q_blk.reserved += nblks;
1358 	}
1359 	if (rtblks) {
1360 		dqp->q_rtb.count += rtblks;
1361 		dqp->q_rtb.reserved += rtblks;
1362 	}
1363 
1364 	/*
1365 	 * Set default limits, adjust timers (since we changed usages)
1366 	 *
1367 	 * There are no timers for the default values set in the root dquot.
1368 	 */
1369 	if (dqp->q_id) {
1370 		xfs_qm_adjust_dqlimits(dqp);
1371 		xfs_qm_adjust_dqtimers(dqp);
1372 	}
1373 
1374 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1375 	xfs_qm_dqput(dqp);
1376 	return 0;
1377 }
1378 
1379 /*
1380  * Callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find its
1381  * dquots and update them to account for resources taken by that inode.
1382  */
1383 /* ARGSUSED */
1384 STATIC int
1385 xfs_qm_dqusage_adjust(
1386 	struct xfs_mount	*mp,
1387 	struct xfs_trans	*tp,
1388 	xfs_ino_t		ino,
1389 	void			*data)
1390 {
1391 	struct xfs_inode	*ip;
1392 	xfs_filblks_t		nblks, rtblks;
1393 	unsigned int		lock_mode;
1394 	int			error;
1395 
1396 	ASSERT(XFS_IS_QUOTA_ON(mp));
1397 
1398 	/*
1399 	 * rootino must have its resources accounted for, not so with the quota
1400 	 * inodes.
1401 	 */
1402 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1403 		return 0;
1404 
1405 	/*
1406 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1407 	 * at mount time and therefore nobody will be racing chown/chproj.
1408 	 */
1409 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1410 	if (error == -EINVAL || error == -ENOENT)
1411 		return 0;
1412 	if (error)
1413 		return error;
1414 
1415 	/*
1416 	 * Reload the incore unlinked list to avoid failure in inodegc.
1417 	 * Use an unlocked check here because unrecovered unlinked inodes
1418 	 * should be somewhat rare.
1419 	 */
1420 	if (xfs_inode_unlinked_incomplete(ip)) {
1421 		error = xfs_inode_reload_unlinked(ip);
1422 		if (error) {
1423 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1424 			goto error0;
1425 		}
1426 	}
1427 
1428 	/* Metadata directory files are not accounted to user-visible quotas. */
1429 	if (xfs_is_metadir_inode(ip))
1430 		goto error0;
1431 
1432 	ASSERT(ip->i_delayed_blks == 0);
1433 
1434 	lock_mode = xfs_ilock_data_map_shared(ip);
1435 	if (XFS_IS_REALTIME_INODE(ip)) {
1436 		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1437 		if (error) {
1438 			xfs_iunlock(ip, lock_mode);
1439 			goto error0;
1440 		}
1441 	}
1442 	xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
1443 	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1444 	xfs_iunlock(ip, lock_mode);
1445 
1446 	/*
1447 	 * Add the (disk blocks and inode) resources occupied by this
1448 	 * inode to its dquots. We do this adjustment in the incore dquot,
1449 	 * and also copy the changes to its buffer.
1450 	 * We don't care about putting these changes in a transaction
1451 	 * envelope because if we crash in the middle of a 'quotacheck'
1452 	 * we have to start from the beginning anyway.
1453 	 * Once we're done, we'll log all the dquot bufs.
1454 	 *
1455 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1456 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1457 	 */
1458 	if (XFS_IS_UQUOTA_ON(mp)) {
1459 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1460 				rtblks);
1461 		if (error)
1462 			goto error0;
1463 	}
1464 
1465 	if (XFS_IS_GQUOTA_ON(mp)) {
1466 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1467 				rtblks);
1468 		if (error)
1469 			goto error0;
1470 	}
1471 
1472 	if (XFS_IS_PQUOTA_ON(mp)) {
1473 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1474 				rtblks);
1475 		if (error)
1476 			goto error0;
1477 	}
1478 
1479 error0:
1480 	xfs_irele(ip);
1481 	return error;
1482 }
1483 
1484 STATIC int
1485 xfs_qm_flush_one(
1486 	struct xfs_dquot	*dqp,
1487 	void			*data)
1488 {
1489 	struct xfs_mount	*mp = dqp->q_mount;
1490 	struct list_head	*buffer_list = data;
1491 	struct xfs_buf		*bp = NULL;
1492 	int			error = 0;
1493 
1494 	xfs_dqlock(dqp);
1495 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1496 		goto out_unlock;
1497 	if (!XFS_DQ_IS_DIRTY(dqp))
1498 		goto out_unlock;
1499 
1500 	/*
1501 	 * The only way the dquot is already flush locked by the time quotacheck
1502 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1503 	 * it for the final time. Quotacheck collects all dquot bufs in the
1504 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1505 	 * possibly queued it for I/O. The only way out is to push the buffer to
1506 	 * cycle the flush lock.
1507 	 */
1508 	if (!xfs_dqflock_nowait(dqp)) {
1509 		/* buf is pinned in-core by delwri list */
1510 		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1511 				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1512 		if (error)
1513 			goto out_unlock;
1514 
1515 		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1516 			error = -EAGAIN;
1517 			xfs_buf_relse(bp);
1518 			goto out_unlock;
1519 		}
1520 		xfs_buf_unlock(bp);
1521 
1522 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1523 		xfs_buf_rele(bp);
1524 
1525 		error = -EAGAIN;
1526 		goto out_unlock;
1527 	}
1528 
1529 	error = xfs_dquot_use_attached_buf(dqp, &bp);
1530 	if (error)
1531 		goto out_unlock;
1532 	if (!bp) {
1533 		error = -EFSCORRUPTED;
1534 		goto out_unlock;
1535 	}
1536 
1537 	error = xfs_qm_dqflush(dqp, bp);
1538 	if (!error)
1539 		xfs_buf_delwri_queue(bp, buffer_list);
1540 	xfs_buf_relse(bp);
1541 out_unlock:
1542 	xfs_dqunlock(dqp);
1543 	return error;
1544 }
1545 
1546 /*
1547  * Walk thru all the filesystem inodes and construct a consistent view
1548  * of the disk quota world. If the quotacheck fails, disable quotas.
1549  */
1550 STATIC int
1551 xfs_qm_quotacheck(
1552 	xfs_mount_t	*mp)
1553 {
1554 	int			error, error2;
1555 	uint			flags;
1556 	LIST_HEAD		(buffer_list);
1557 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1558 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1559 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1560 
1561 	flags = 0;
1562 
1563 	ASSERT(uip || gip || pip);
1564 	ASSERT(XFS_IS_QUOTA_ON(mp));
1565 
1566 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1567 
1568 	/*
1569 	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1570 	 * their counters to zero. We need a clean slate.
1571 	 * We don't log our changes till later.
1572 	 */
1573 	if (uip) {
1574 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1575 					 &buffer_list);
1576 		if (error)
1577 			goto error_return;
1578 		flags |= XFS_UQUOTA_CHKD;
1579 	}
1580 
1581 	if (gip) {
1582 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1583 					 &buffer_list);
1584 		if (error)
1585 			goto error_return;
1586 		flags |= XFS_GQUOTA_CHKD;
1587 	}
1588 
1589 	if (pip) {
1590 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1591 					 &buffer_list);
1592 		if (error)
1593 			goto error_return;
1594 		flags |= XFS_PQUOTA_CHKD;
1595 	}
1596 
1597 	xfs_set_quotacheck_running(mp);
1598 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1599 			NULL);
1600 	xfs_clear_quotacheck_running(mp);
1601 
1602 	/*
1603 	 * On error, the inode walk may have partially populated the dquot
1604 	 * caches.  We must purge them before disabling quota and tearing down
1605 	 * the quotainfo, or else the dquots will leak.
1606 	 */
1607 	if (error)
1608 		goto error_purge;
1609 
1610 	/*
1611 	 * We've made all the changes that we need to make incore.  Flush them
1612 	 * down to disk buffers if everything was updated successfully.
1613 	 */
1614 	if (XFS_IS_UQUOTA_ON(mp)) {
1615 		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1616 					  &buffer_list);
1617 	}
1618 	if (XFS_IS_GQUOTA_ON(mp)) {
1619 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1620 					   &buffer_list);
1621 		if (!error)
1622 			error = error2;
1623 	}
1624 	if (XFS_IS_PQUOTA_ON(mp)) {
1625 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1626 					   &buffer_list);
1627 		if (!error)
1628 			error = error2;
1629 	}
1630 
1631 	error2 = xfs_buf_delwri_submit(&buffer_list);
1632 	if (!error)
1633 		error = error2;
1634 
1635 	/*
1636 	 * We can get this error if we couldn't do a dquot allocation inside
1637 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1638 	 * dirty dquots that might be cached, we just want to get rid of them
1639 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1640 	 * at this point (because we intentionally didn't in dqget_noattach).
1641 	 */
1642 	if (error)
1643 		goto error_purge;
1644 
1645 	/*
1646 	 * If one type of quota is off, then it will lose its
1647 	 * quotachecked status, since we won't be doing accounting for
1648 	 * that type anymore.
1649 	 */
1650 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1651 	mp->m_qflags |= flags;
1652 
1653 error_return:
1654 	xfs_buf_delwri_cancel(&buffer_list);
1655 
1656 	if (error) {
1657 		xfs_warn(mp,
1658 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1659 			error);
1660 		/*
1661 		 * We must turn off quotas.
1662 		 */
1663 		ASSERT(mp->m_quotainfo != NULL);
1664 		xfs_qm_destroy_quotainfo(mp);
1665 		if (xfs_mount_reset_sbqflags(mp)) {
1666 			xfs_warn(mp,
1667 				"Quotacheck: Failed to reset quota flags.");
1668 		}
1669 		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1670 	} else {
1671 		xfs_notice(mp, "Quotacheck: Done.");
1672 		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1673 	}
1674 
1675 	return error;
1676 
1677 error_purge:
1678 	/*
1679 	 * On error, we may have inodes queued for inactivation. This may try
1680 	 * to attach dquots to the inode before running cleanup operations on
1681 	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1682 	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
1683 	 * pending inodegc operations before we purge the dquots from memory,
1684 	 * ensuring that background inactivation is idle whilst we turn off
1685 	 * quotas.
1686 	 */
1687 	xfs_inodegc_flush(mp);
1688 	xfs_qm_dqpurge_all(mp);
1689 	goto error_return;
1690 
1691 }
1692 
1693 /*
1694  * This is called from xfs_mountfs to start quotas and initialize all
1695  * necessary data structures like quotainfo.  This is also responsible for
1696  * running a quotacheck as necessary.  We are guaranteed that the superblock
1697  * is consistently read in at this point.
1698  *
1699  * If we fail here, the mount will continue with quota turned off. We don't
1700  * need to indicate success or failure at all.
1701  */
1702 void
1703 xfs_qm_mount_quotas(
1704 	struct xfs_mount	*mp)
1705 {
1706 	int			error = 0;
1707 	uint			sbf;
1708 
1709 	/*
1710 	 * If quotas on realtime volumes are not supported, disable quotas
1711 	 * immediately.  We only support rtquota if rtgroups are enabled to
1712 	 * avoid problems with older kernels.
1713 	 */
1714 	if (mp->m_sb.sb_rextents && !xfs_has_rtgroups(mp)) {
1715 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1716 		mp->m_qflags = 0;
1717 		goto write_changes;
1718 	}
1719 
1720 	ASSERT(XFS_IS_QUOTA_ON(mp));
1721 
1722 	/*
1723 	 * Allocate the quotainfo structure inside the mount struct, and
1724 	 * create quotainode(s), and change/rev superblock if necessary.
1725 	 */
1726 	error = xfs_qm_init_quotainfo(mp);
1727 	if (error) {
1728 		/*
1729 		 * We must turn off quotas.
1730 		 */
1731 		ASSERT(mp->m_quotainfo == NULL);
1732 		mp->m_qflags = 0;
1733 		goto write_changes;
1734 	}
1735 	/*
1736 	 * If any of the quotas are not consistent, do a quotacheck.
1737 	 */
1738 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1739 		error = xfs_qm_quotacheck(mp);
1740 		if (error) {
1741 			/* Quotacheck failed and disabled quotas. */
1742 			return;
1743 		}
1744 	}
1745 	/*
1746 	 * If one type of quota is off, then it will lose its
1747 	 * quotachecked status, since we won't be doing accounting for
1748 	 * that type anymore.
1749 	 */
1750 	if (!XFS_IS_UQUOTA_ON(mp))
1751 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1752 	if (!XFS_IS_GQUOTA_ON(mp))
1753 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1754 	if (!XFS_IS_PQUOTA_ON(mp))
1755 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1756 
1757  write_changes:
1758 	/*
1759 	 * We actually don't have to acquire the m_sb_lock at all.
1760 	 * This can only be called from mount, and that's single threaded. XXX
1761 	 */
1762 	spin_lock(&mp->m_sb_lock);
1763 	sbf = mp->m_sb.sb_qflags;
1764 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1765 	spin_unlock(&mp->m_sb_lock);
1766 
1767 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1768 		if (xfs_sync_sb(mp, false)) {
1769 			/*
1770 			 * We could only have been turning quotas off.
1771 			 * We aren't in very good shape actually because
1772 			 * the incore structures are convinced that quotas are
1773 			 * off, but the on-disk superblock doesn't know that!
1774 			 */
1775 			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1776 			xfs_alert(mp, "%s: Superblock update failed!",
1777 				__func__);
1778 		}
1779 	}
1780 
1781 	if (error) {
1782 		xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error);
1783 		return;
1784 	}
1785 }
1786 
1787 /*
1788  * Load the inode for a given type of quota, assuming that the sb fields have
1789  * been sorted out.  This is not true when switching quota types on a V4
1790  * filesystem, so do not use this function for that.
1791  *
1792  * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
1793  * success; or a negative errno.
1794  */
1795 int
1796 xfs_qm_qino_load(
1797 	struct xfs_mount	*mp,
1798 	xfs_dqtype_t		type,
1799 	struct xfs_inode	**ipp)
1800 {
1801 	struct xfs_trans	*tp;
1802 	struct xfs_inode	*dp = NULL;
1803 	int			error;
1804 
1805 	error = xfs_trans_alloc_empty(mp, &tp);
1806 	if (error)
1807 		return error;
1808 
1809 	if (xfs_has_metadir(mp)) {
1810 		error = xfs_dqinode_load_parent(tp, &dp);
1811 		if (error)
1812 			goto out_cancel;
1813 	}
1814 
1815 	error = xfs_dqinode_load(tp, dp, type, ipp);
1816 	if (dp)
1817 		xfs_irele(dp);
1818 out_cancel:
1819 	xfs_trans_cancel(tp);
1820 	return error;
1821 }
1822 
1823 /*
1824  * This is called after the superblock has been read in and we're ready to
1825  * iget the quota inodes.
1826  */
1827 STATIC int
1828 xfs_qm_init_quotainos(
1829 	xfs_mount_t	*mp)
1830 {
1831 	struct xfs_inode	*uip = NULL;
1832 	struct xfs_inode	*gip = NULL;
1833 	struct xfs_inode	*pip = NULL;
1834 	int			error;
1835 	uint			flags = 0;
1836 
1837 	ASSERT(mp->m_quotainfo);
1838 
1839 	/*
1840 	 * Get the uquota, gquota and pquota inodes
1841 	 */
1842 	if (xfs_has_quota(mp)) {
1843 		if (XFS_IS_UQUOTA_ON(mp) &&
1844 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1845 			ASSERT(mp->m_sb.sb_uquotino > 0);
1846 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
1847 			if (error)
1848 				return error;
1849 		}
1850 		if (XFS_IS_GQUOTA_ON(mp) &&
1851 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1852 			ASSERT(mp->m_sb.sb_gquotino > 0);
1853 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
1854 			if (error)
1855 				goto error_rele;
1856 		}
1857 		if (XFS_IS_PQUOTA_ON(mp) &&
1858 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1859 			ASSERT(mp->m_sb.sb_pquotino > 0);
1860 			error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
1861 			if (error)
1862 				goto error_rele;
1863 		}
1864 	} else {
1865 		flags |= XFS_QMOPT_SBVERSION;
1866 	}
1867 
1868 	/*
1869 	 * Create the three inodes, if they don't exist already. The changes
1870 	 * made above will get added to a transaction and logged in one of
1871 	 * the qino_alloc calls below.  If the device is readonly,
1872 	 * temporarily switch to read-write to do this.
1873 	 */
1874 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1875 		error = xfs_qm_qino_alloc(mp, &uip,
1876 					      flags | XFS_QMOPT_UQUOTA);
1877 		if (error)
1878 			goto error_rele;
1879 
1880 		flags &= ~XFS_QMOPT_SBVERSION;
1881 	}
1882 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1883 		error = xfs_qm_qino_alloc(mp, &gip,
1884 					  flags | XFS_QMOPT_GQUOTA);
1885 		if (error)
1886 			goto error_rele;
1887 
1888 		flags &= ~XFS_QMOPT_SBVERSION;
1889 	}
1890 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1891 		error = xfs_qm_qino_alloc(mp, &pip,
1892 					  flags | XFS_QMOPT_PQUOTA);
1893 		if (error)
1894 			goto error_rele;
1895 	}
1896 
1897 	mp->m_quotainfo->qi_uquotaip = uip;
1898 	mp->m_quotainfo->qi_gquotaip = gip;
1899 	mp->m_quotainfo->qi_pquotaip = pip;
1900 
1901 	return 0;
1902 
1903 error_rele:
1904 	if (uip)
1905 		xfs_irele(uip);
1906 	if (gip)
1907 		xfs_irele(gip);
1908 	if (pip)
1909 		xfs_irele(pip);
1910 	return error;
1911 }
1912 
1913 STATIC void
1914 xfs_qm_dqfree_one(
1915 	struct xfs_dquot	*dqp)
1916 {
1917 	struct xfs_mount	*mp = dqp->q_mount;
1918 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1919 
1920 	mutex_lock(&qi->qi_tree_lock);
1921 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1922 
1923 	qi->qi_dquots--;
1924 	mutex_unlock(&qi->qi_tree_lock);
1925 
1926 	xfs_qm_dqdestroy(dqp);
1927 }
1928 
1929 /* --------------- utility functions for vnodeops ---------------- */
1930 
1931 
1932 /*
1933  * Given an inode, a uid, gid and prid make sure that we have
1934  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1935  * quotas by creating this file.
1936  * This also attaches dquot(s) to the given inode after locking it,
1937  * and returns the dquots corresponding to the uid, gid and/or prid.
1938  *
1939  * in	: inode (unlocked)
1940  * out	: udquot, gdquot, pdquot with references taken and unlocked
1941  */
1942 int
1943 xfs_qm_vop_dqalloc(
1944 	struct xfs_inode	*ip,
1945 	kuid_t			uid,
1946 	kgid_t			gid,
1947 	prid_t			prid,
1948 	uint			flags,
1949 	struct xfs_dquot	**O_udqpp,
1950 	struct xfs_dquot	**O_gdqpp,
1951 	struct xfs_dquot	**O_pdqpp)
1952 {
1953 	struct xfs_mount	*mp = ip->i_mount;
1954 	struct inode		*inode = VFS_I(ip);
1955 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1956 	struct xfs_dquot	*uq = NULL;
1957 	struct xfs_dquot	*gq = NULL;
1958 	struct xfs_dquot	*pq = NULL;
1959 	int			error;
1960 	uint			lockflags;
1961 
1962 	if (!XFS_IS_QUOTA_ON(mp))
1963 		return 0;
1964 
1965 	ASSERT(!xfs_is_metadir_inode(ip));
1966 
1967 	lockflags = XFS_ILOCK_EXCL;
1968 	xfs_ilock(ip, lockflags);
1969 
1970 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1971 		gid = inode->i_gid;
1972 
1973 	/*
1974 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1975 	 * if necessary. The dquot(s) will not be locked.
1976 	 */
1977 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1978 		error = xfs_qm_dqattach_locked(ip, true);
1979 		if (error) {
1980 			xfs_iunlock(ip, lockflags);
1981 			return error;
1982 		}
1983 	}
1984 
1985 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1986 		ASSERT(O_udqpp);
1987 		if (!uid_eq(inode->i_uid, uid)) {
1988 			/*
1989 			 * What we need is the dquot that has this uid, not
1990 			 * the one the inode currently carries, so we cannot
1991 			 * simply let dqget look the id up from the inode.
1992 			 * We must unlock the inode before calling dqget,
1993 			 * because dqget may have to allocate the dquot and
1994 			 * reserve a transaction; doing trans_reserve while
1995 			 * holding the ilock would deadlock.
1996 			 */
1997 			xfs_iunlock(ip, lockflags);
1998 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1999 					XFS_DQTYPE_USER, true, &uq);
2000 			if (error) {
2001 				ASSERT(error != -ENOENT);
2002 				return error;
2003 			}
2004 			/*
2005 			 * Get the ilock in the right order.
2006 			 */
2007 			xfs_dqunlock(uq);
2008 			lockflags = XFS_ILOCK_SHARED;
2009 			xfs_ilock(ip, lockflags);
2010 		} else {
2011 			/*
2012 			 * Take an extra reference, because we'll return
2013 			 * this to the caller.
2014 			 */
2015 			ASSERT(ip->i_udquot);
2016 			uq = xfs_qm_dqhold(ip->i_udquot);
2017 		}
2018 	}
2019 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
2020 		ASSERT(O_gdqpp);
2021 		if (!gid_eq(inode->i_gid, gid)) {
2022 			xfs_iunlock(ip, lockflags);
2023 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
2024 					XFS_DQTYPE_GROUP, true, &gq);
2025 			if (error) {
2026 				ASSERT(error != -ENOENT);
2027 				goto error_rele;
2028 			}
2029 			xfs_dqunlock(gq);
2030 			lockflags = XFS_ILOCK_SHARED;
2031 			xfs_ilock(ip, lockflags);
2032 		} else {
2033 			ASSERT(ip->i_gdquot);
2034 			gq = xfs_qm_dqhold(ip->i_gdquot);
2035 		}
2036 	}
2037 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
2038 		ASSERT(O_pdqpp);
2039 		if (ip->i_projid != prid) {
2040 			xfs_iunlock(ip, lockflags);
2041 			error = xfs_qm_dqget(mp, prid,
2042 					XFS_DQTYPE_PROJ, true, &pq);
2043 			if (error) {
2044 				ASSERT(error != -ENOENT);
2045 				goto error_rele;
2046 			}
2047 			xfs_dqunlock(pq);
2048 			lockflags = XFS_ILOCK_SHARED;
2049 			xfs_ilock(ip, lockflags);
2050 		} else {
2051 			ASSERT(ip->i_pdquot);
2052 			pq = xfs_qm_dqhold(ip->i_pdquot);
2053 		}
2054 	}
2055 	trace_xfs_dquot_dqalloc(ip);
2056 
2057 	xfs_iunlock(ip, lockflags);
2058 	if (O_udqpp)
2059 		*O_udqpp = uq;
2060 	else
2061 		xfs_qm_dqrele(uq);
2062 	if (O_gdqpp)
2063 		*O_gdqpp = gq;
2064 	else
2065 		xfs_qm_dqrele(gq);
2066 	if (O_pdqpp)
2067 		*O_pdqpp = pq;
2068 	else
2069 		xfs_qm_dqrele(pq);
2070 	return 0;
2071 
2072 error_rele:
2073 	xfs_qm_dqrele(gq);
2074 	xfs_qm_dqrele(uq);
2075 	return error;
2076 }
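/*
 * A minimal sketch of a hypothetical create-style caller of
 * xfs_qm_vop_dqalloc().  The enclosing function, and the assumption that
 * the caller already knows the uid/gid/prid of the file to be created,
 * are illustrative only; real callers derive the ids from the creating
 * task's credentials.
 */
static int example_get_create_dquots(struct xfs_inode *dp, kuid_t uid,
		kgid_t gid, prid_t prid)
{
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	int			error;

	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
			XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_PQUOTA,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/* ... reserve quota, build the transaction, create the inode ... */

	/* drop the references taken above once they are no longer needed */
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);
	return 0;
}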
2077 
2078 /*
2079  * Actually transfer ownership, and do dquot modifications.
2080  * These were already reserved.
2081  */
2082 struct xfs_dquot *
2083 xfs_qm_vop_chown(
2084 	struct xfs_trans	*tp,
2085 	struct xfs_inode	*ip,
2086 	struct xfs_dquot	**IO_olddq,
2087 	struct xfs_dquot	*newdq)
2088 {
2089 	struct xfs_dquot	*prevdq;
2090 	xfs_filblks_t		dblocks, rblocks;
2091 	bool			isrt = XFS_IS_REALTIME_INODE(ip);
2092 
2093 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2094 	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
2095 	ASSERT(!xfs_is_metadir_inode(ip));
2096 
2097 	/* old dquot */
2098 	prevdq = *IO_olddq;
2099 	ASSERT(prevdq);
2100 	ASSERT(prevdq != newdq);
2101 
2102 	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
2103 
2104 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
2105 			-(xfs_qcnt_t)dblocks);
2106 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
2107 			-(xfs_qcnt_t)rblocks);
2108 	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
2109 
2110 	/* the sparkling new dquot */
2111 	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
2112 	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
2113 	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
2114 
2115 	/*
2116 	 * Back when we made quota reservations for the chown, we reserved the
2117 	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
2118 	 * switched the dquots, decrease the new dquot's block reservation
2119 	 * (having already bumped up the real counter) so that we don't have
2120 	 * any reservation to give back when we commit.
2121 	 */
2122 	xfs_trans_mod_dquot(tp, newdq,
2123 			isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
2124 			-ip->i_delayed_blks);
2125 
2126 	/*
2127 	 * Give the incore reservation for delalloc blocks back to the old
2128 	 * dquot.  We don't normally handle delalloc quota reservations
2129 	 * transactionally, so just lock the dquot and subtract from the
2130 	 * reservation.  Dirty the transaction because it's too late to turn
2131 	 * back now.
2132 	 */
2133 	tp->t_flags |= XFS_TRANS_DIRTY;
2134 	xfs_dqlock(prevdq);
2135 	if (isrt) {
2136 		ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
2137 		prevdq->q_rtb.reserved -= ip->i_delayed_blks;
2138 	} else {
2139 		ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
2140 		prevdq->q_blk.reserved -= ip->i_delayed_blks;
2141 	}
2142 	xfs_dqunlock(prevdq);
2143 
2144 	/*
2145 	 * Take an extra reference, because the inode is going to keep
2146 	 * this dquot pointer even after the trans_commit.
2147 	 */
2148 	*IO_olddq = xfs_qm_dqhold(newdq);
2149 
2150 	return prevdq;
2151 }
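/*
 * A minimal sketch of how a chown-style caller might swap the user dquot
 * with xfs_qm_vop_chown().  The transaction setup, the ILOCK_EXCL on the
 * inode, and the "udqp" reference (obtained earlier, e.g. via
 * xfs_qm_vop_dqalloc()) are assumed; only the hand-off pattern is shown.
 */
static int example_chown_user_dquot(struct xfs_trans *tp,
		struct xfs_inode *ip, struct xfs_dquot *udqp)
{
	struct xfs_dquot	*olddquot;
	int			error;

	/* swap the inode's user dquot; accounting moves to the new dquot */
	olddquot = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);

	error = xfs_trans_commit(tp);

	/* the previous dquot reference is only dropped after the commit */
	xfs_qm_dqrele(olddquot);
	return error;
}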
2152 
2153 int
2154 xfs_qm_vop_rename_dqattach(
2155 	struct xfs_inode	**i_tab)
2156 {
2157 	struct xfs_mount	*mp = i_tab[0]->i_mount;
2158 	int			i;
2159 
2160 	if (!XFS_IS_QUOTA_ON(mp))
2161 		return 0;
2162 
2163 	for (i = 0; (i < 4 && i_tab[i]); i++) {
2164 		struct xfs_inode	*ip = i_tab[i];
2165 		int			error;
2166 
2167 		/*
2168 		 * Watch out for duplicate entries in the table.
2169 		 */
2170 		if (i == 0 || ip != i_tab[i-1]) {
2171 			if (XFS_NOT_DQATTACHED(mp, ip)) {
2172 				error = xfs_qm_dqattach(ip);
2173 				if (error)
2174 					return error;
2175 			}
2176 		}
2177 	}
2178 	return 0;
2179 }
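/*
 * A minimal sketch of a hypothetical rename-style caller: gather the up to
 * four inodes involved, leaving unused trailing slots NULL, and make sure
 * their dquots are attached before reserving the rename transaction.  The
 * inode variable names are illustrative assumptions.
 */
static int example_rename_dqattach(struct xfs_inode *src_dp,
		struct xfs_inode *target_dp, struct xfs_inode *src_ip,
		struct xfs_inode *target_ip)
{
	struct xfs_inode	*i_tab[4] = {
		src_dp, target_dp, src_ip, target_ip,
	};

	return xfs_qm_vop_rename_dqattach(i_tab);
}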
2180 
2181 void
2182 xfs_qm_vop_create_dqattach(
2183 	struct xfs_trans	*tp,
2184 	struct xfs_inode	*ip,
2185 	struct xfs_dquot	*udqp,
2186 	struct xfs_dquot	*gdqp,
2187 	struct xfs_dquot	*pdqp)
2188 {
2189 	struct xfs_mount	*mp = tp->t_mountp;
2190 
2191 	if (!XFS_IS_QUOTA_ON(mp))
2192 		return;
2193 
2194 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2195 	ASSERT(!xfs_is_metadir_inode(ip));
2196 
2197 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2198 		ASSERT(ip->i_udquot == NULL);
2199 		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
2200 
2201 		ip->i_udquot = xfs_qm_dqhold(udqp);
2202 	}
2203 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2204 		ASSERT(ip->i_gdquot == NULL);
2205 		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
2206 
2207 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
2208 	}
2209 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2210 		ASSERT(ip->i_pdquot == NULL);
2211 		ASSERT(ip->i_projid == pdqp->q_id);
2212 
2213 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
2214 	}
2215 
2216 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
2217 }
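/*
 * A minimal sketch of the tail of a hypothetical create path: the new inode
 * "ip" is assumed to be freshly allocated, still ILOCK_EXCL'd and joined to
 * transaction "tp", with dquot references obtained earlier via
 * xfs_qm_vop_dqalloc().  Attach the dquots, commit, then drop the caller's
 * references; the inode keeps its own.
 */
static int example_create_commit(struct xfs_trans *tp, struct xfs_inode *ip,
		struct xfs_dquot *udqp, struct xfs_dquot *gdqp,
		struct xfs_dquot *pdqp)
{
	int	error;

	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);
	return error;
}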
2218 
2219 /* Decide if this inode's dquot is near an enforcement boundary. */
2220 bool
2221 xfs_inode_near_dquot_enforcement(
2222 	struct xfs_inode	*ip,
2223 	xfs_dqtype_t		type)
2224 {
2225 	struct xfs_dquot	*dqp;
2226 	struct xfs_dquot_res	*res;
2227 	struct xfs_dquot_pre	*pre;
2228 	int64_t			freesp;
2229 
2230 	/* We only care about quotas that are enabled and enforced. */
2231 	dqp = xfs_inode_dquot(ip, type);
2232 	if (!dqp || !xfs_dquot_is_enforced(dqp))
2233 		return false;
2234 
2235 	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
2236 	    xfs_dquot_res_over_limits(&dqp->q_blk) ||
2237 	    xfs_dquot_res_over_limits(&dqp->q_rtb))
2238 		return true;
2239 
2240 	if (XFS_IS_REALTIME_INODE(ip)) {
2241 		res = &dqp->q_rtb;
2242 		pre = &dqp->q_rtb_prealloc;
2243 	} else {
2244 		res = &dqp->q_blk;
2245 		pre = &dqp->q_blk_prealloc;
2246 	}
2247 
2248 	/* Check the chosen block resource against its preallocation watermarks. */
2249 	if (!pre->q_prealloc_hi_wmark)
2250 		return false;
2251 
2252 	if (res->reserved < pre->q_prealloc_lo_wmark)
2253 		return false;
2254 
2255 	if (res->reserved >= pre->q_prealloc_hi_wmark)
2256 		return true;
2257 
2258 	freesp = pre->q_prealloc_hi_wmark - res->reserved;
2259 	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT])
2260 		return true;
2261 
2262 	return false;
2263 }
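/*
 * Worked example with illustrative numbers (not taken from any real
 * configuration): suppose the chosen block resource has
 * q_prealloc_hi_wmark = 1000, q_prealloc_lo_wmark = 950 and
 * q_low_space[XFS_QLOWSP_5_PCNT] = 50.  A dquot with 940 reserved blocks
 * is below the low watermark, so the checks above return false; one with
 * 1020 reserved blocks is at or above the high watermark and returns true;
 * one with 960 reserved blocks falls in between, and since
 * freesp = 1000 - 960 = 40 < 50 it is also treated as near enforcement.
 */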
2264