xref: /linux/fs/gfs2/quota.c (revision 1f24458a1071f006e3f7449c08ae0f12af493923)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
5  */
6 
7 /*
8  * Quota change tags are associated with each transaction that allocates or
9  * deallocates space.  Those changes are accumulated locally to each node (in a
10  * per-node file) and then are periodically synced to the quota file.  This
11  * avoids the bottleneck of constantly touching the quota file, but introduces
12  * fuzziness in the current usage value of IDs that are being used on different
13  * nodes in the cluster simultaneously.  So, it is possible for a user on
14  * multiple nodes to overrun their quota, but that overrun is controllable.
15  * Since quota tags are part of transactions, there is no need to run a quota
16  * check program after node crashes or anything like that.
17  *
18  * There are a couple of knobs that let the administrator manage the quota
19  * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
20  * sitting on one node before being synced to the quota file.  (The default is
21  * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
22  * of quota file syncs increases as the user moves closer to their limit.  The
23  * more frequent the syncs, the more accurate the quota enforcement, but that
24  * means that there is more contention between the nodes for the quota file.
25  * The default value is one.  This sets the maximum theoretical quota overrun
26  * (with an infinite number of nodes, each with infinite bandwidth) to twice the
27  * user's limit.  (In practice, the maximum overrun should be much less.)  A "quota_scale"
28  * number greater than one makes quota syncs more frequent and reduces the
29  * maximum overrun.  Numbers less than one (but greater than zero) make quota
30  * syncs less frequent.
31  *
32  * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
33  * the quota file, so that it is not constantly read.
34  */
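
/*
 * In practice there are two sync triggers: gfs2_quotad() periodically
 * calls gfs2_quota_sync() (the "quota_quantum" interval), and
 * gfs2_quota_unlock() uses need_sync() to force an earlier sync for
 * IDs that are getting close to their limit.
 */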
35 
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/mm.h>
41 #include <linux/spinlock.h>
42 #include <linux/completion.h>
43 #include <linux/buffer_head.h>
44 #include <linux/sort.h>
45 #include <linux/fs.h>
46 #include <linux/bio.h>
47 #include <linux/gfs2_ondisk.h>
48 #include <linux/kthread.h>
49 #include <linux/freezer.h>
50 #include <linux/quota.h>
51 #include <linux/dqblk_xfs.h>
52 #include <linux/lockref.h>
53 #include <linux/list_lru.h>
54 #include <linux/rcupdate.h>
55 #include <linux/rculist_bl.h>
56 #include <linux/bit_spinlock.h>
57 #include <linux/jhash.h>
58 #include <linux/vmalloc.h>
59 
60 #include "gfs2.h"
61 #include "incore.h"
62 #include "bmap.h"
63 #include "glock.h"
64 #include "glops.h"
65 #include "log.h"
66 #include "meta_io.h"
67 #include "quota.h"
68 #include "rgrp.h"
69 #include "super.h"
70 #include "trans.h"
71 #include "inode.h"
72 #include "util.h"
73 
74 #define GFS2_QD_HASH_SHIFT      12
75 #define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
76 #define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
77 
78 #define QC_CHANGE 0
79 #define QC_SYNC 1
80 
81 /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
82 /*                     -> sd_bitmap_lock                              */
83 static DEFINE_SPINLOCK(qd_lock);
84 struct list_lru gfs2_qd_lru;
85 
86 static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
87 
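/*
 * The hash table is global, so the superblock pointer is folded into
 * the hash along with the quota ID to keep entries from different
 * filesystems apart.
 */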
88 static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
89 				 const struct kqid qid)
90 {
91 	unsigned int h;
92 
93 	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
94 	h = jhash(&qid, sizeof(struct kqid), h);
95 
96 	return h & GFS2_QD_HASH_MASK;
97 }
98 
99 static inline void spin_lock_bucket(unsigned int hash)
100 {
101 	hlist_bl_lock(&qd_hash_table[hash]);
102 }
103 
104 static inline void spin_unlock_bucket(unsigned int hash)
105 {
106 	hlist_bl_unlock(&qd_hash_table[hash]);
107 }
108 
109 static void gfs2_qd_dealloc(struct rcu_head *rcu)
110 {
111 	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
112 	struct gfs2_sbd *sdp = qd->qd_sbd;
113 
114 	kmem_cache_free(gfs2_quotad_cachep, qd);
115 	if (atomic_dec_and_test(&sdp->sd_quota_count))
116 		wake_up(&sdp->sd_kill_wait);
117 }
118 
119 static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
120 {
121 	struct gfs2_sbd *sdp = qd->qd_sbd;
122 
123 	spin_lock(&qd_lock);
124 	list_del(&qd->qd_list);
125 	spin_unlock(&qd_lock);
126 
127 	spin_lock_bucket(qd->qd_hash);
128 	hlist_bl_del_rcu(&qd->qd_hlist);
129 	spin_unlock_bucket(qd->qd_hash);
130 
131 	if (!gfs2_withdrawn(sdp)) {
132 		gfs2_assert_warn(sdp, !qd->qd_change);
133 		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
134 		gfs2_assert_warn(sdp, !qd->qd_bh_count);
135 	}
136 
137 	gfs2_glock_put(qd->qd_gl);
138 	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
139 }
140 
141 static void gfs2_qd_list_dispose(struct list_head *list)
142 {
143 	struct gfs2_quota_data *qd;
144 
145 	while (!list_empty(list)) {
146 		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
147 		list_del(&qd->qd_lru);
148 
149 		gfs2_qd_dispose(qd);
150 	}
151 }
152 
153 
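/*
 * Called by the shrinker with the lru lock held.  Per the lock ordering
 * above, qd->qd_lockref.lock nests outside the lru lock, so only a
 * trylock is safe here; contended entries are skipped and revisited on
 * a later scan.
 */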
154 static enum lru_status gfs2_qd_isolate(struct list_head *item,
155 		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
156 {
157 	struct list_head *dispose = arg;
158 	struct gfs2_quota_data *qd =
159 		list_entry(item, struct gfs2_quota_data, qd_lru);
160 	enum lru_status status;
161 
162 	if (!spin_trylock(&qd->qd_lockref.lock))
163 		return LRU_SKIP;
164 
165 	status = LRU_SKIP;
166 	if (qd->qd_lockref.count == 0) {
167 		lockref_mark_dead(&qd->qd_lockref);
168 		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
169 		status = LRU_REMOVED;
170 	}
171 
172 	spin_unlock(&qd->qd_lockref.lock);
173 	return status;
174 }
175 
176 static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
177 					 struct shrink_control *sc)
178 {
179 	LIST_HEAD(dispose);
180 	unsigned long freed;
181 
182 	if (!(sc->gfp_mask & __GFP_FS))
183 		return SHRINK_STOP;
184 
185 	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
186 				     gfs2_qd_isolate, &dispose);
187 
188 	gfs2_qd_list_dispose(&dispose);
189 
190 	return freed;
191 }
192 
193 static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
194 					  struct shrink_control *sc)
195 {
196 	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
197 }
198 
199 static struct shrinker *gfs2_qd_shrinker;
200 
201 int __init gfs2_qd_shrinker_init(void)
202 {
203 	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
204 	if (!gfs2_qd_shrinker)
205 		return -ENOMEM;
206 
207 	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
208 	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
209 
210 	shrinker_register(gfs2_qd_shrinker);
211 
212 	return 0;
213 }
214 
215 void gfs2_qd_shrinker_exit(void)
216 {
217 	shrinker_free(gfs2_qd_shrinker);
218 }
219 
220 static u64 qd2index(struct gfs2_quota_data *qd)
221 {
222 	struct kqid qid = qd->qd_id;
223 	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
224 		((qid.type == USRQUOTA) ? 0 : 1);
225 }
226 
227 static u64 qd2offset(struct gfs2_quota_data *qd)
228 {
229 	return qd2index(qd) * sizeof(struct gfs2_quota);
230 }
231 
232 static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
233 {
234 	struct gfs2_quota_data *qd;
235 	int error;
236 
237 	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
238 	if (!qd)
239 		return NULL;
240 
241 	qd->qd_sbd = sdp;
242 	qd->qd_lockref.count = 0;
243 	spin_lock_init(&qd->qd_lockref.lock);
244 	qd->qd_id = qid;
245 	qd->qd_slot = -1;
246 	INIT_LIST_HEAD(&qd->qd_lru);
247 	qd->qd_hash = hash;
248 
249 	error = gfs2_glock_get(sdp, qd2index(qd),
250 			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
251 	if (error)
252 		goto fail;
253 
254 	return qd;
255 
256 fail:
257 	kmem_cache_free(gfs2_quotad_cachep, qd);
258 	return NULL;
259 }
260 
261 static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
262 						     const struct gfs2_sbd *sdp,
263 						     struct kqid qid)
264 {
265 	struct gfs2_quota_data *qd;
266 	struct hlist_bl_node *h;
267 
268 	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
269 		if (!qid_eq(qd->qd_id, qid))
270 			continue;
271 		if (qd->qd_sbd != sdp)
272 			continue;
273 		if (lockref_get_not_dead(&qd->qd_lockref)) {
274 			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
275 			return qd;
276 		}
277 	}
278 
279 	return NULL;
280 }
281 
282 
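/*
 * Find or create the quota data for @qid.  The fast path is a lockless
 * RCU hash lookup; on a miss a new entry is allocated and the bucket is
 * rechecked under the locks, so a racing insert wins and the duplicate
 * is freed.
 */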
283 static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
284 		  struct gfs2_quota_data **qdp)
285 {
286 	struct gfs2_quota_data *qd, *new_qd;
287 	unsigned int hash = gfs2_qd_hash(sdp, qid);
288 
289 	rcu_read_lock();
290 	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
291 	rcu_read_unlock();
292 
293 	if (qd)
294 		return 0;
295 
296 	new_qd = qd_alloc(hash, sdp, qid);
297 	if (!new_qd)
298 		return -ENOMEM;
299 
300 	spin_lock(&qd_lock);
301 	spin_lock_bucket(hash);
302 	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
303 	if (qd == NULL) {
304 		new_qd->qd_lockref.count++;
305 		*qdp = new_qd;
306 		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
307 		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
308 		atomic_inc(&sdp->sd_quota_count);
309 	}
310 	spin_unlock_bucket(hash);
311 	spin_unlock(&qd_lock);
312 
313 	if (qd) {
314 		gfs2_glock_put(new_qd->qd_gl);
315 		kmem_cache_free(gfs2_quotad_cachep, new_qd);
316 	}
317 
318 	return 0;
319 }
320 
321 
322 static void qd_hold(struct gfs2_quota_data *qd)
323 {
324 	struct gfs2_sbd *sdp = qd->qd_sbd;
325 	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
326 	lockref_get(&qd->qd_lockref);
327 }
328 
329 static void qd_put(struct gfs2_quota_data *qd)
330 {
331 	struct gfs2_sbd *sdp;
332 
333 	if (lockref_put_or_lock(&qd->qd_lockref))
334 		return;
335 
336 	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
337 	sdp = qd->qd_sbd;
338 	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
339 		lockref_mark_dead(&qd->qd_lockref);
340 		spin_unlock(&qd->qd_lockref.lock);
341 
342 		gfs2_qd_dispose(qd);
343 		return;
344 	}
345 
346 	qd->qd_lockref.count = 0;
347 	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
348 	spin_unlock(&qd->qd_lockref.lock);
349 }
350 
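/*
 * Each active quota data owns a slot in this node's quota_change file,
 * tracked by the sd_quota_bitmap.  slot_get()/slot_hold()/slot_put()
 * keep a reference count so the slot is only released when its last
 * user is done.
 */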
351 static int slot_get(struct gfs2_quota_data *qd)
352 {
353 	struct gfs2_sbd *sdp = qd->qd_sbd;
354 	unsigned int bit;
355 	int error = 0;
356 
357 	spin_lock(&sdp->sd_bitmap_lock);
358 	if (qd->qd_slot_ref == 0) {
359 		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
360 					  sdp->sd_quota_slots);
361 		if (bit >= sdp->sd_quota_slots) {
362 			error = -ENOSPC;
363 			goto out;
364 		}
365 		set_bit(bit, sdp->sd_quota_bitmap);
366 		qd->qd_slot = bit;
367 	}
368 	qd->qd_slot_ref++;
369 out:
370 	spin_unlock(&sdp->sd_bitmap_lock);
371 	return error;
372 }
373 
374 static void slot_hold(struct gfs2_quota_data *qd)
375 {
376 	struct gfs2_sbd *sdp = qd->qd_sbd;
377 
378 	spin_lock(&sdp->sd_bitmap_lock);
379 	gfs2_assert(sdp, qd->qd_slot_ref);
380 	qd->qd_slot_ref++;
381 	spin_unlock(&sdp->sd_bitmap_lock);
382 }
383 
384 static void slot_put(struct gfs2_quota_data *qd)
385 {
386 	struct gfs2_sbd *sdp = qd->qd_sbd;
387 
388 	spin_lock(&sdp->sd_bitmap_lock);
389 	gfs2_assert(sdp, qd->qd_slot_ref);
390 	if (!--qd->qd_slot_ref) {
391 		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
392 		qd->qd_slot = -1;
393 	}
394 	spin_unlock(&sdp->sd_bitmap_lock);
395 }
396 
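/*
 * Map this entry's slot to a block of the quota_change file and read
 * the corresponding buffer, caching a pointer to the on-disk
 * gfs2_quota_change record.  The buffer is held until the matching
 * bh_put().
 */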
397 static int bh_get(struct gfs2_quota_data *qd)
398 {
399 	struct gfs2_sbd *sdp = qd->qd_sbd;
400 	struct inode *inode = sdp->sd_qc_inode;
401 	struct gfs2_inode *ip = GFS2_I(inode);
402 	unsigned int block, offset;
403 	struct buffer_head *bh;
404 	struct iomap iomap = { };
405 	int error;
406 
407 	mutex_lock(&sdp->sd_quota_mutex);
408 
409 	if (qd->qd_bh_count++) {
410 		mutex_unlock(&sdp->sd_quota_mutex);
411 		return 0;
412 	}
413 
414 	block = qd->qd_slot / sdp->sd_qc_per_block;
415 	offset = qd->qd_slot % sdp->sd_qc_per_block;
416 
417 	error = gfs2_iomap_get(inode,
418 			       (loff_t)block << inode->i_blkbits,
419 			       i_blocksize(inode), &iomap);
420 	if (error)
421 		goto fail;
422 	error = -ENOENT;
423 	if (iomap.type != IOMAP_MAPPED)
424 		goto fail;
425 
426 	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
427 			       DIO_WAIT, 0, &bh);
428 	if (error)
429 		goto fail;
430 	error = -EIO;
431 	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
432 		goto fail_brelse;
433 
434 	qd->qd_bh = bh;
435 	qd->qd_bh_qc = (struct gfs2_quota_change *)
436 		(bh->b_data + sizeof(struct gfs2_meta_header) +
437 		 offset * sizeof(struct gfs2_quota_change));
438 
439 	mutex_unlock(&sdp->sd_quota_mutex);
440 
441 	return 0;
442 
443 fail_brelse:
444 	brelse(bh);
445 fail:
446 	qd->qd_bh_count--;
447 	mutex_unlock(&sdp->sd_quota_mutex);
448 	return error;
449 }
450 
451 static void bh_put(struct gfs2_quota_data *qd)
452 {
453 	struct gfs2_sbd *sdp = qd->qd_sbd;
454 
455 	mutex_lock(&sdp->sd_quota_mutex);
456 	gfs2_assert(sdp, qd->qd_bh_count);
457 	if (!--qd->qd_bh_count) {
458 		brelse(qd->qd_bh);
459 		qd->qd_bh = NULL;
460 		qd->qd_bh_qc = NULL;
461 	}
462 	mutex_unlock(&sdp->sd_quota_mutex);
463 }
464 
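/*
 * Decide (called under the qd_lock spinlock) whether @qd has local
 * changes that still need syncing.  On success, a reference and the
 * QDF_LOCKED flag are taken, and the pending change is snapshotted
 * into qd_change_sync for the sync path.
 */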
465 static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
466 			 u64 *sync_gen)
467 {
468 	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
469 	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
470 	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
471 		return 0;
472 
473 	if (!lockref_get_not_dead(&qd->qd_lockref))
474 		return 0;
475 
476 	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
477 	set_bit(QDF_LOCKED, &qd->qd_flags);
478 	qd->qd_change_sync = qd->qd_change;
479 	slot_hold(qd);
480 	return 1;
481 }
482 
483 static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
484 {
485 	int error;
486 
487 	error = bh_get(qd);
488 	if (!error)
489 		return 0;
490 
491 	clear_bit(QDF_LOCKED, &qd->qd_flags);
492 	slot_put(qd);
493 	qd_put(qd);
494 	return error;
495 }
496 
497 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
498 {
499 	struct gfs2_quota_data *qd = NULL, *iter;
500 	int error;
501 
502 	*qdp = NULL;
503 
504 	if (sb_rdonly(sdp->sd_vfs))
505 		return 0;
506 
507 	spin_lock(&qd_lock);
508 
509 	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
510 		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
511 			qd = iter;
512 			break;
513 		}
514 	}
515 
516 	spin_unlock(&qd_lock);
517 
518 	if (qd) {
519 		error = qd_bh_get_or_undo(sdp, qd);
520 		if (error)
521 			return error;
522 		*qdp = qd;
523 	}
524 
525 	return 0;
526 }
527 
528 static void qdsb_put(struct gfs2_quota_data *qd)
529 {
530 	bh_put(qd);
531 	slot_put(qd);
532 	qd_put(qd);
533 }
534 
535 static void qd_unlock(struct gfs2_quota_data *qd)
536 {
537 	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
538 	clear_bit(QDF_LOCKED, &qd->qd_flags);
539 	qdsb_put(qd);
540 }
541 
542 static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
543 		    struct gfs2_quota_data **qdp)
544 {
545 	int error;
546 
547 	error = qd_get(sdp, qid, qdp);
548 	if (error)
549 		return error;
550 
551 	error = slot_get(*qdp);
552 	if (error)
553 		goto fail;
554 
555 	error = bh_get(*qdp);
556 	if (error)
557 		goto fail_slot;
558 
559 	return 0;
560 
561 fail_slot:
562 	slot_put(*qdp);
563 fail:
564 	qd_put(*qdp);
565 	return error;
566 }
567 
568 /**
569  * gfs2_qa_get - make sure we have a quota allocation data structure,
570  *               if necessary
571  * @ip: the inode for this reservation
572  */
573 int gfs2_qa_get(struct gfs2_inode *ip)
574 {
575 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
576 	struct inode *inode = &ip->i_inode;
577 
578 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
579 		return 0;
580 
581 	spin_lock(&inode->i_lock);
582 	if (ip->i_qadata == NULL) {
583 		struct gfs2_qadata *tmp;
584 
585 		spin_unlock(&inode->i_lock);
586 		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
587 		if (!tmp)
588 			return -ENOMEM;
589 
590 		spin_lock(&inode->i_lock);
591 		if (ip->i_qadata == NULL)
592 			ip->i_qadata = tmp;
593 		else
594 			kmem_cache_free(gfs2_qadata_cachep, tmp);
595 	}
596 	ip->i_qadata->qa_ref++;
597 	spin_unlock(&inode->i_lock);
598 	return 0;
599 }
600 
601 void gfs2_qa_put(struct gfs2_inode *ip)
602 {
603 	struct inode *inode = &ip->i_inode;
604 
605 	spin_lock(&inode->i_lock);
606 	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
607 		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
608 		ip->i_qadata = NULL;
609 	}
610 	spin_unlock(&inode->i_lock);
611 }
612 
613 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
614 {
615 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
616 	struct gfs2_quota_data **qd;
617 	int error;
618 
619 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
620 		return 0;
621 
622 	error = gfs2_qa_get(ip);
623 	if (error)
624 		return error;
625 
626 	qd = ip->i_qadata->qa_qd;
627 
628 	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
629 	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
630 		error = -EIO;
631 		gfs2_qa_put(ip);
632 		goto out;
633 	}
634 
635 	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
636 	if (error)
637 		goto out_unhold;
638 	ip->i_qadata->qa_qd_num++;
639 	qd++;
640 
641 	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
642 	if (error)
643 		goto out_unhold;
644 	ip->i_qadata->qa_qd_num++;
645 	qd++;
646 
647 	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
648 	    !uid_eq(uid, ip->i_inode.i_uid)) {
649 		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
650 		if (error)
651 			goto out_unhold;
652 		ip->i_qadata->qa_qd_num++;
653 		qd++;
654 	}
655 
656 	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
657 	    !gid_eq(gid, ip->i_inode.i_gid)) {
658 		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
659 		if (error)
660 			goto out_unhold;
661 		ip->i_qadata->qa_qd_num++;
662 		qd++;
663 	}
664 
665 out_unhold:
666 	if (error)
667 		gfs2_quota_unhold(ip);
668 out:
669 	return error;
670 }
671 
672 void gfs2_quota_unhold(struct gfs2_inode *ip)
673 {
674 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
675 	u32 x;
676 
677 	if (ip->i_qadata == NULL)
678 		return;
679 
680 	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
681 
682 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
683 		qdsb_put(ip->i_qadata->qa_qd[x]);
684 		ip->i_qadata->qa_qd[x] = NULL;
685 	}
686 	ip->i_qadata->qa_qd_num = 0;
687 	gfs2_qa_put(ip);
688 }
689 
690 static int sort_qd(const void *a, const void *b)
691 {
692 	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
693 	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
694 
695 	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
696 		return -1;
697 	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
698 		return 1;
699 	return 0;
700 }
701 
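/*
 * Apply @change to this ID's record in the quota_change file.  With
 * QC_CHANGE, a newly changed entry takes a qd and slot reference so it
 * stays around until synced; with QC_SYNC, those references are
 * dropped again once the change has been folded into the quota file.
 */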
702 static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
703 {
704 	struct gfs2_sbd *sdp = qd->qd_sbd;
705 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
706 	struct gfs2_quota_change *qc = qd->qd_bh_qc;
707 	s64 x;
708 
709 	mutex_lock(&sdp->sd_quota_mutex);
710 	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
711 
712 	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
713 		qc->qc_change = 0;
714 		qc->qc_flags = 0;
715 		if (qd->qd_id.type == USRQUOTA)
716 			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
717 		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
718 	}
719 
720 	x = be64_to_cpu(qc->qc_change) + change;
721 	qc->qc_change = cpu_to_be64(x);
722 
723 	spin_lock(&qd_lock);
724 	qd->qd_change = x;
725 	spin_unlock(&qd_lock);
726 
727 	if (qc_type == QC_CHANGE) {
728 		if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
729 			qd_hold(qd);
730 			slot_hold(qd);
731 		}
732 	} else {
733 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
734 		clear_bit(QDF_CHANGE, &qd->qd_flags);
735 		qc->qc_flags = 0;
736 		qc->qc_id = 0;
737 		slot_put(qd);
738 		qd_put(qd);
739 	}
740 
741 	if (change < 0) /* Reset quiet flag if we freed some blocks */
742 		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
743 	mutex_unlock(&sdp->sd_quota_mutex);
744 }
745 
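/*
 * Write @bytes of @buf to the quota file page at @index, starting at
 * offset @off.  The loop below walks the folio's buffer heads, mapping
 * (and, for newly allocated blocks, zeroing) each block the write
 * touches before the data is copied into the folio.
 */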
746 static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
747 				  unsigned off, void *buf, unsigned bytes)
748 {
749 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
750 	struct inode *inode = &ip->i_inode;
751 	struct address_space *mapping = inode->i_mapping;
752 	struct folio *folio;
753 	struct buffer_head *bh;
754 	u64 blk;
755 	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
756 	unsigned to_write = bytes, pg_off = off;
757 
758 	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
759 	boff = off % bsize;
760 
761 	folio = filemap_grab_folio(mapping, index);
762 	if (IS_ERR(folio))
763 		return PTR_ERR(folio);
764 	bh = folio_buffers(folio);
765 	if (!bh)
766 		bh = create_empty_buffers(folio, bsize, 0);
767 
768 	for (;;) {
769 		/* Find the beginning block within the folio */
770 		if (pg_off >= ((bnum * bsize) + bsize)) {
771 			bh = bh->b_this_page;
772 			bnum++;
773 			blk++;
774 			continue;
775 		}
776 		if (!buffer_mapped(bh)) {
777 			gfs2_block_map(inode, blk, bh, 1);
778 			if (!buffer_mapped(bh))
779 				goto unlock_out;
780 			/* If it's a newly allocated disk block, zero it */
781 			if (buffer_new(bh))
782 				folio_zero_range(folio, bnum * bsize,
783 						bh->b_size);
784 		}
785 		if (folio_test_uptodate(folio))
786 			set_buffer_uptodate(bh);
787 		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
788 			goto unlock_out;
789 		gfs2_trans_add_data(ip->i_gl, bh);
790 
791 		/* If we need to write to the next block as well */
792 		if (to_write > (bsize - boff)) {
793 			pg_off += (bsize - boff);
794 			to_write -= (bsize - boff);
795 			boff = pg_off % bsize;
796 			continue;
797 		}
798 		break;
799 	}
800 
801 	/* Write to the folio, now that we have set up the buffer(s) */
802 	memcpy_to_folio(folio, off, buf, bytes);
803 	flush_dcache_folio(folio);
804 	folio_unlock(folio);
805 	folio_put(folio);
806 
807 	return 0;
808 
809 unlock_out:
810 	folio_unlock(folio);
811 	folio_put(folio);
812 	return -EIO;
813 }
814 
815 static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
816 				 loff_t loc)
817 {
818 	unsigned long pg_beg;
819 	unsigned pg_off, nbytes, overflow = 0;
820 	int error;
821 	void *ptr;
822 
823 	nbytes = sizeof(struct gfs2_quota);
824 
825 	pg_beg = loc >> PAGE_SHIFT;
826 	pg_off = offset_in_page(loc);
827 
828 	/* If the quota straddles a page boundary, split the write in two */
829 	if ((pg_off + nbytes) > PAGE_SIZE)
830 		overflow = (pg_off + nbytes) - PAGE_SIZE;
831 
832 	ptr = qp;
833 	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
834 				       nbytes - overflow);
835 	/* If there's an overflow, write the remaining bytes to the next page */
836 	if (!error && overflow)
837 		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
838 					       ptr + nbytes - overflow,
839 					       overflow);
840 	return error;
841 }
842 
843 /**
844  * gfs2_adjust_quota - adjust record of current block usage
845  * @sdp: The superblock
846  * @loc: Offset of the entry in the quota file
847  * @change: The amount of usage change to record
848  * @qd: The quota data
849  * @fdq: The updated limits to record
850  *
851  * This function was mostly borrowed from gfs2_block_truncate_page which was
852  * in turn mostly borrowed from ext3
853  *
854  * Returns: 0 or -ve on error
855  */
856 
857 static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
858 			     s64 change, struct gfs2_quota_data *qd,
859 			     struct qc_dqblk *fdq)
860 {
861 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
862 	struct inode *inode = &ip->i_inode;
863 	struct gfs2_quota q;
864 	int err;
865 	u64 size;
866 
867 	if (gfs2_is_stuffed(ip)) {
868 		err = gfs2_unstuff_dinode(ip);
869 		if (err)
870 			return err;
871 	}
872 
873 	memset(&q, 0, sizeof(struct gfs2_quota));
874 	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
875 	if (err < 0)
876 		return err;
877 
878 	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
879 	be64_add_cpu(&q.qu_value, change);
880 	if (((s64)be64_to_cpu(q.qu_value)) < 0)
881 		q.qu_value = 0; /* Never go negative on quota usage */
882 	qd->qd_qb.qb_value = q.qu_value;
883 	if (fdq) {
884 		if (fdq->d_fieldmask & QC_SPC_SOFT) {
885 			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
886 			qd->qd_qb.qb_warn = q.qu_warn;
887 		}
888 		if (fdq->d_fieldmask & QC_SPC_HARD) {
889 			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
890 			qd->qd_qb.qb_limit = q.qu_limit;
891 		}
892 		if (fdq->d_fieldmask & QC_SPACE) {
893 			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
894 			qd->qd_qb.qb_value = q.qu_value;
895 		}
896 	}
897 
898 	err = gfs2_write_disk_quota(sdp, &q, loc);
899 	if (!err) {
900 		size = loc + sizeof(struct gfs2_quota);
901 		if (size > inode->i_size)
902 			i_size_write(inode, size);
903 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
904 		mark_inode_dirty(inode);
905 		set_bit(QDF_REFRESH, &qd->qd_flags);
906 	}
907 
908 	return err;
909 }
910 
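/**
 * do_sync - sync a batch of quota changes to the quota file
 * @num_qd: the number of quota data entries in @qda
 * @qda: the quota data entries to sync
 *
 * The entries are sorted first so that their glocks are always acquired
 * in a consistent order, avoiding deadlocks between concurrent syncs.
 */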
911 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
912 {
913 	struct gfs2_sbd *sdp = (*qda)->qd_sbd;
914 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
915 	struct gfs2_alloc_parms ap = { .aflags = 0, };
916 	unsigned int data_blocks, ind_blocks;
917 	struct gfs2_holder *ghs, i_gh;
918 	unsigned int qx, x;
919 	struct gfs2_quota_data *qd;
920 	unsigned reserved;
921 	loff_t offset;
922 	unsigned int nalloc = 0, blocks;
923 	int error;
924 
925 	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
926 			      &data_blocks, &ind_blocks);
927 
928 	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
929 	if (!ghs)
930 		return -ENOMEM;
931 
932 	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
933 	inode_lock(&ip->i_inode);
934 	for (qx = 0; qx < num_qd; qx++) {
935 		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
936 					   GL_NOCACHE, &ghs[qx]);
937 		if (error)
938 			goto out_dq;
939 	}
940 
941 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
942 	if (error)
943 		goto out_dq;
944 
945 	for (x = 0; x < num_qd; x++) {
946 		offset = qd2offset(qda[x]);
947 		if (gfs2_write_alloc_required(ip, offset,
948 					      sizeof(struct gfs2_quota)))
949 			nalloc++;
950 	}
951 
952 	/*
953 	 * 1 blk for unstuffing inode if stuffed. We add this extra
954 	 * block to the reservation unconditionally. If the inode
955 	 * doesn't need unstuffing, the block will be released to the
956 	 * rgrp since it won't be allocated during the transaction.
957 	 * The +3 at the end accounts for the unstuffing block, the inode
958 	 * size update block, and another block in case the quota straddles
959 	 * a page boundary and two blocks need to be updated instead of one.
960 	 */
961 	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
962 
963 	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
964 	ap.target = reserved;
965 	error = gfs2_inplace_reserve(ip, &ap);
966 	if (error)
967 		goto out_alloc;
968 
969 	if (nalloc)
970 		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
971 
972 	error = gfs2_trans_begin(sdp, blocks, 0);
973 	if (error)
974 		goto out_ipres;
975 
976 	for (x = 0; x < num_qd; x++) {
977 		qd = qda[x];
978 		offset = qd2offset(qd);
979 		error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
980 							NULL);
981 		if (error)
982 			goto out_end_trans;
983 
984 		do_qc(qd, -qd->qd_change_sync, QC_SYNC);
985 		set_bit(QDF_REFRESH, &qd->qd_flags);
986 	}
987 
988 out_end_trans:
989 	gfs2_trans_end(sdp);
990 out_ipres:
991 	gfs2_inplace_release(ip);
992 out_alloc:
993 	gfs2_glock_dq_uninit(&i_gh);
994 out_dq:
995 	while (qx--)
996 		gfs2_glock_dq_uninit(&ghs[qx]);
997 	inode_unlock(&ip->i_inode);
998 	kfree(ghs);
999 	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
1000 		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
1001 	if (!error) {
1002 		for (x = 0; x < num_qd; x++)
1003 			qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;
1004 	}
1005 	return error;
1006 }
1007 
1008 static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
1009 {
1010 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1011 	struct gfs2_quota q;
1012 	struct gfs2_quota_lvb *qlvb;
1013 	loff_t pos;
1014 	int error;
1015 
1016 	memset(&q, 0, sizeof(struct gfs2_quota));
1017 	pos = qd2offset(qd);
1018 	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
1019 	if (error < 0)
1020 		return error;
1021 
1022 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1023 	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
1024 	qlvb->__pad = 0;
1025 	qlvb->qb_limit = q.qu_limit;
1026 	qlvb->qb_warn = q.qu_warn;
1027 	qlvb->qb_value = q.qu_value;
1028 	qd->qd_qb = *qlvb;
1029 
1030 	return 0;
1031 }
1032 
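/**
 * do_glock - acquire the glock for a quota data entry
 * @qd: the quota data
 * @force_refresh: re-read the quota from disk even if the LVB looks valid
 * @q_gh: the holder to fill in
 *
 * Normally a shared glock is enough, since the LVB carries a cached
 * copy of the quota.  If the LVB is stale or a refresh is forced, the
 * glock is retaken exclusively, the quota file is re-read into the LVB
 * by update_qd(), and the lookup restarts with the fresh copy.
 */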
1033 static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1034 		    struct gfs2_holder *q_gh)
1035 {
1036 	struct gfs2_sbd *sdp = qd->qd_sbd;
1037 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1038 	struct gfs2_holder i_gh;
1039 	int error;
1040 
1041 	gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
1042 restart:
1043 	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1044 	if (error)
1045 		return error;
1046 
1047 	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1048 		force_refresh = FORCE;
1049 
1050 	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1051 
1052 	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1053 		gfs2_glock_dq_uninit(q_gh);
1054 		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1055 					   GL_NOCACHE, q_gh);
1056 		if (error)
1057 			return error;
1058 
1059 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1060 		if (error)
1061 			goto fail;
1062 
1063 		error = update_qd(sdp, qd);
1064 		if (error)
1065 			goto fail_gunlock;
1066 
1067 		gfs2_glock_dq_uninit(&i_gh);
1068 		gfs2_glock_dq_uninit(q_gh);
1069 		force_refresh = 0;
1070 		goto restart;
1071 	}
1072 
1073 	return 0;
1074 
1075 fail_gunlock:
1076 	gfs2_glock_dq_uninit(&i_gh);
1077 fail:
1078 	gfs2_glock_dq_uninit(q_gh);
1079 	return error;
1080 }
1081 
1082 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1083 {
1084 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1085 	struct gfs2_quota_data *qd;
1086 	u32 x;
1087 	int error;
1088 
1089 	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
1090 	    sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
1091 		return 0;
1092 
1093 	error = gfs2_quota_hold(ip, uid, gid);
1094 	if (error)
1095 		return error;
1096 
1097 	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1098 	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1099 
1100 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1101 		qd = ip->i_qadata->qa_qd[x];
1102 		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1103 		if (error)
1104 			break;
1105 	}
1106 
1107 	if (!error)
1108 		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1109 	else {
1110 		while (x--)
1111 			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1112 		gfs2_quota_unhold(ip);
1113 	}
1114 
1115 	return error;
1116 }
1117 
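/**
 * need_sync - check if a quota data entry should be synced early
 * @qd: the quota data
 *
 * Roughly, a sync is forced once
 *
 *	qb_value + qd_change * gfs2_jindex_size(sdp) * num / den >= qb_limit
 *
 * where num/den is the "quota_scale" tunable and gfs2_jindex_size() is
 * the number of journals: the worst case assumes that every node has
 * about as much unsynced local change as this one.
 */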
1118 static bool need_sync(struct gfs2_quota_data *qd)
1119 {
1120 	struct gfs2_sbd *sdp = qd->qd_sbd;
1121 	struct gfs2_tune *gt = &sdp->sd_tune;
1122 	s64 value;
1123 	unsigned int num, den;
1124 
1125 	if (!qd->qd_qb.qb_limit)
1126 		return false;
1127 
1128 	spin_lock(&qd_lock);
1129 	value = qd->qd_change;
1130 	spin_unlock(&qd_lock);
1131 
1132 	spin_lock(&gt->gt_spin);
1133 	num = gt->gt_quota_scale_num;
1134 	den = gt->gt_quota_scale_den;
1135 	spin_unlock(&gt->gt_spin);
1136 
1137 	if (value <= 0)
1138 		return false;
1139 	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1140 		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1141 		return false;
1142 	else {
1143 		value *= gfs2_jindex_size(sdp) * num;
1144 		value = div_s64(value, den);
1145 		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1146 		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1147 			return false;
1148 	}
1149 
1150 	return true;
1151 }
1152 
1153 void gfs2_quota_unlock(struct gfs2_inode *ip)
1154 {
1155 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1156 	struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
1157 	unsigned int count = 0;
1158 	u32 x;
1159 	int found;
1160 
1161 	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1162 		return;
1163 
1164 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1165 		struct gfs2_quota_data *qd;
1166 		bool sync;
1167 
1168 		qd = ip->i_qadata->qa_qd[x];
1169 		sync = need_sync(qd);
1170 
1171 		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1172 		if (!sync)
1173 			continue;
1174 
1175 		spin_lock(&qd_lock);
1176 		found = qd_check_sync(sdp, qd, NULL);
1177 		spin_unlock(&qd_lock);
1178 
1179 		if (!found)
1180 			continue;
1181 
1182 		if (!qd_bh_get_or_undo(sdp, qd))
1183 			qda[count++] = qd;
1184 	}
1185 
1186 	if (count) {
1187 		do_sync(count, qda);
1188 		for (x = 0; x < count; x++)
1189 			qd_unlock(qda[x]);
1190 	}
1191 
1192 	gfs2_quota_unhold(ip);
1193 }
1194 
1195 #define MAX_LINE 256
1196 
1197 static int print_message(struct gfs2_quota_data *qd, char *type)
1198 {
1199 	struct gfs2_sbd *sdp = qd->qd_sbd;
1200 
1201 	if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
1202 		fs_info(sdp, "quota %s for %s %u\n",
1203 			type,
1204 			(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1205 			from_kqid(&init_user_ns, qd->qd_id));
1206 
1207 	return 0;
1208 }
1209 
1210 /**
1211  * gfs2_quota_check - check if allocating new blocks will exceed quota
1212  * @ip:  The inode for which this check is being performed
1213  * @uid: The uid to check against
1214  * @gid: The gid to check against
1215  * @ap:  The allocation parameters. ap->target contains the requested
1216  *       blocks. ap->min_target, if set, contains the minimum blks
1217  *       requested.
1218  *
1219  * Returns: 0 on success.
1220  *                  min_req = ap->min_target ? ap->min_target : ap->target;
1221  *                  quota must allow at least min_req blks for success and
1222  *                  ap->allowed is set to the number of blocks allowed
1223  *
1224  *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1225  *                  of blocks available.
1226  */
1227 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1228 		     struct gfs2_alloc_parms *ap)
1229 {
1230 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1231 	struct gfs2_quota_data *qd;
1232 	s64 value, warn, limit;
1233 	u32 x;
1234 	int error = 0;
1235 
1236 	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1237 	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1238 		return 0;
1239 
1240 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1241 		qd = ip->i_qadata->qa_qd[x];
1242 
1243 		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1244 		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1245 			continue;
1246 
1247 		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1248 		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1249 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1250 		spin_lock(&qd_lock);
1251 		value += qd->qd_change;
1252 		spin_unlock(&qd_lock);
1253 
1254 		if (limit > 0 && (limit - value) < ap->allowed)
1255 			ap->allowed = limit - value;
1256 		/* If we can't meet the target */
1257 		if (limit && limit < (value + (s64)ap->target)) {
1258 			/* If no min_target specified or we don't meet
1259 			 * min_target, return -EDQUOT */
1260 			if (!ap->min_target || ap->min_target > ap->allowed) {
1261 				if (!test_and_set_bit(QDF_QMSG_QUIET,
1262 						      &qd->qd_flags)) {
1263 					print_message(qd, "exceeded");
1264 					quota_send_warning(qd->qd_id,
1265 							   sdp->sd_vfs->s_dev,
1266 							   QUOTA_NL_BHARDWARN);
1267 				}
1268 				error = -EDQUOT;
1269 				break;
1270 			}
1271 		} else if (warn && warn < value &&
1272 			   time_after_eq(jiffies, qd->qd_last_warn +
1273 					 gfs2_tune_get(sdp, gt_quota_warn_period)
1274 					 * HZ)) {
1275 			quota_send_warning(qd->qd_id,
1276 					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1277 			error = print_message(qd, "warning");
1278 			qd->qd_last_warn = jiffies;
1279 		}
1280 	}
1281 	return error;
1282 }
1283 
1284 void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1285 		       kuid_t uid, kgid_t gid)
1286 {
1287 	struct gfs2_quota_data *qd;
1288 	u32 x;
1289 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1290 
1291 	if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
1292 	    sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) ||
1293 	    gfs2_assert_warn(sdp, change))
1294 		return;
1295 	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1296 		return;
1297 
1298 	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1299 				 ip->i_qadata->qa_ref > 0))
1300 		return;
1301 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1302 		qd = ip->i_qadata->qa_qd[x];
1303 
1304 		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1305 		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1306 			do_qc(qd, change, QC_CHANGE);
1307 		}
1308 	}
1309 }
1310 
1311 static bool qd_changed(struct gfs2_sbd *sdp)
1312 {
1313 	struct gfs2_quota_data *qd;
1314 	bool changed = false;
1315 
1316 	spin_lock(&qd_lock);
1317 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1318 		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
1319 		    !test_bit(QDF_CHANGE, &qd->qd_flags))
1320 			continue;
1321 
1322 		changed = true;
1323 		break;
1324 	}
1325 	spin_unlock(&qd_lock);
1326 	return changed;
1327 }
1328 
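/**
 * gfs2_quota_sync - sync all locally changed quota entries to the quota file
 * @sb: the superblock
 * @type: quota type (unused; all types are synced)
 *
 * Entries are fished out of sd_quota_list in batches of at most max_qd,
 * so that the array of glock holders allocated by do_sync() fits within
 * a single page.
 */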
1329 int gfs2_quota_sync(struct super_block *sb, int type)
1330 {
1331 	struct gfs2_sbd *sdp = sb->s_fs_info;
1332 	struct gfs2_quota_data **qda;
1333 	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1334 	unsigned int num_qd;
1335 	unsigned int x;
1336 	int error = 0;
1337 
1338 	if (!qd_changed(sdp))
1339 		return 0;
1340 
1341 	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1342 	if (!qda)
1343 		return -ENOMEM;
1344 
1345 	mutex_lock(&sdp->sd_quota_sync_mutex);
1346 	sdp->sd_quota_sync_gen++;
1347 
1348 	do {
1349 		num_qd = 0;
1350 
1351 		for (;;) {
1352 			error = qd_fish(sdp, qda + num_qd);
1353 			if (error || !qda[num_qd])
1354 				break;
1355 			if (++num_qd == max_qd)
1356 				break;
1357 		}
1358 
1359 		if (num_qd) {
1360 			if (!error)
1361 				error = do_sync(num_qd, qda);
1362 
1363 			for (x = 0; x < num_qd; x++)
1364 				qd_unlock(qda[x]);
1365 		}
1366 	} while (!error && num_qd == max_qd);
1367 
1368 	mutex_unlock(&sdp->sd_quota_sync_mutex);
1369 	kfree(qda);
1370 
1371 	return error;
1372 }
1373 
1374 int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1375 {
1376 	struct gfs2_quota_data *qd;
1377 	struct gfs2_holder q_gh;
1378 	int error;
1379 
1380 	error = qd_get(sdp, qid, &qd);
1381 	if (error)
1382 		return error;
1383 
1384 	error = do_glock(qd, FORCE, &q_gh);
1385 	if (!error)
1386 		gfs2_glock_dq_uninit(&q_gh);
1387 
1388 	qd_put(qd);
1389 	return error;
1390 }
1391 
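/**
 * gfs2_quota_init - recover pending quota changes at mount time
 * @sdp: the filesystem
 *
 * Scan the quota_change file and recreate the in-core quota data for
 * every slot with a nonzero pending change, so that changes left over
 * from a previous mount are eventually synced to the quota file.
 */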
1392 int gfs2_quota_init(struct gfs2_sbd *sdp)
1393 {
1394 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1395 	u64 size = i_size_read(sdp->sd_qc_inode);
1396 	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1397 	unsigned int x, slot = 0;
1398 	unsigned int found = 0;
1399 	unsigned int hash;
1400 	unsigned int bm_size;
1401 	u64 dblock;
1402 	u32 extlen = 0;
1403 	int error;
1404 
1405 	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1406 		return -EIO;
1407 
1408 	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1409 	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1410 	bm_size *= sizeof(unsigned long);
1411 	error = -ENOMEM;
1412 	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1413 	if (sdp->sd_quota_bitmap == NULL)
1414 		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1415 						 __GFP_ZERO);
1416 	if (!sdp->sd_quota_bitmap)
1417 		return error;
1418 
1419 	for (x = 0; x < blocks; x++) {
1420 		struct buffer_head *bh;
1421 		const struct gfs2_quota_change *qc;
1422 		unsigned int y;
1423 
1424 		if (!extlen) {
1425 			extlen = 32;
1426 			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1427 			if (error)
1428 				goto fail;
1429 		}
1430 		error = -EIO;
1431 		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1432 		if (!bh)
1433 			goto fail;
1434 		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1435 			brelse(bh);
1436 			goto fail;
1437 		}
1438 
1439 		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1440 		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1441 		     y++, slot++) {
1442 			struct gfs2_quota_data *qd;
1443 			s64 qc_change = be64_to_cpu(qc->qc_change);
1444 			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1445 			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1446 						USRQUOTA : GRPQUOTA;
1447 			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1448 						      be32_to_cpu(qc->qc_id));
1449 			qc++;
1450 			if (!qc_change)
1451 				continue;
1452 
1453 			hash = gfs2_qd_hash(sdp, qc_id);
1454 			qd = qd_alloc(hash, sdp, qc_id);
1455 			if (qd == NULL) {
1456 				brelse(bh);
1457 				goto fail;
1458 			}
1459 
1460 			set_bit(QDF_CHANGE, &qd->qd_flags);
1461 			qd->qd_change = qc_change;
1462 			qd->qd_slot = slot;
1463 			qd->qd_slot_ref = 1;
1464 
1465 			spin_lock(&qd_lock);
1466 			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1467 			list_add(&qd->qd_list, &sdp->sd_quota_list);
1468 			atomic_inc(&sdp->sd_quota_count);
1469 			spin_unlock(&qd_lock);
1470 
1471 			spin_lock_bucket(hash);
1472 			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1473 			spin_unlock_bucket(hash);
1474 
1475 			found++;
1476 		}
1477 
1478 		brelse(bh);
1479 		dblock++;
1480 		extlen--;
1481 	}
1482 
1483 	if (found)
1484 		fs_info(sdp, "found %u quota changes\n", found);
1485 
1486 	return 0;
1487 
1488 fail:
1489 	gfs2_quota_cleanup(sdp);
1490 	return error;
1491 }
1492 
1493 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1494 {
1495 	struct gfs2_quota_data *qd;
1496 	LIST_HEAD(dispose);
1497 	int count;
1498 
1499 	BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
1500 
1501 	spin_lock(&qd_lock);
1502 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1503 		spin_lock(&qd->qd_lockref.lock);
1504 		if (qd->qd_lockref.count != 0) {
1505 			spin_unlock(&qd->qd_lockref.lock);
1506 			continue;
1507 		}
1508 		lockref_mark_dead(&qd->qd_lockref);
1509 		spin_unlock(&qd->qd_lockref.lock);
1510 
1511 		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1512 		list_add(&qd->qd_lru, &dispose);
1513 	}
1514 	spin_unlock(&qd_lock);
1515 
1516 	gfs2_qd_list_dispose(&dispose);
1517 
1518 	wait_event_timeout(sdp->sd_kill_wait,
1519 		(count = atomic_read(&sdp->sd_quota_count)) == 0,
1520 		HZ * 60);
1521 
1522 	if (count != 0)
1523 		fs_err(sdp, "%d left-over quota data objects\n", count);
1524 
1525 	kvfree(sdp->sd_quota_bitmap);
1526 	sdp->sd_quota_bitmap = NULL;
1527 }
1528 
1529 static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1530 {
1531 	if (error == 0 || error == -EROFS)
1532 		return;
1533 	if (!gfs2_withdrawn(sdp)) {
1534 		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1535 			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1536 		wake_up(&sdp->sd_logd_waitq);
1537 	}
1538 }
1539 
1540 static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1541 			       int (*fxn)(struct super_block *sb, int type),
1542 			       unsigned long t, unsigned long *timeo,
1543 			       unsigned int *new_timeo)
1544 {
1545 	if (t >= *timeo) {
1546 		int error = fxn(sdp->sd_vfs, 0);
1547 		quotad_error(sdp, msg, error);
1548 		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1549 	} else {
1550 		*timeo -= t;
1551 	}
1552 }
1553 
1554 void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
1555 {
1556 	if (!sdp->sd_statfs_force_sync) {
1557 		sdp->sd_statfs_force_sync = 1;
1558 		wake_up(&sdp->sd_quota_wait);
1559 	}
1560 }
1560 
1561 
1562 /**
1563  * gfs2_quotad - Write cached quota changes into the quota file
1564  * @data: Pointer to GFS2 superblock
1565  *
1566  */
1567 
1568 int gfs2_quotad(void *data)
1569 {
1570 	struct gfs2_sbd *sdp = data;
1571 	struct gfs2_tune *tune = &sdp->sd_tune;
1572 	unsigned long statfs_timeo = 0;
1573 	unsigned long quotad_timeo = 0;
1574 	unsigned long t = 0;
1575 
1576 	while (!kthread_should_stop()) {
1577 		if (gfs2_withdrawn(sdp))
1578 			break;
1579 
1580 		/* Update the master statfs file */
1581 		if (sdp->sd_statfs_force_sync) {
1582 			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1583 			quotad_error(sdp, "statfs", error);
1584 			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1585 		} else {
1586 			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1587 					   &statfs_timeo,
1588 					   &tune->gt_statfs_quantum);
1589 		}
1590 
1591 		/* Update quota file */
1592 		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1593 				   &quotad_timeo, &tune->gt_quota_quantum);
1594 
1595 		try_to_freeze();
1596 
1597 		t = min(quotad_timeo, statfs_timeo);
1598 
1599 		t = wait_event_interruptible_timeout(sdp->sd_quota_wait,
1600 				sdp->sd_statfs_force_sync ||
1601 				gfs2_withdrawn(sdp) ||
1602 				kthread_should_stop(),
1603 				t);
1604 
1605 		if (sdp->sd_statfs_force_sync)
1606 			t = 0;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1613 {
1614 	struct gfs2_sbd *sdp = sb->s_fs_info;
1615 
1616 	memset(state, 0, sizeof(*state));
1617 
1618 	switch (sdp->sd_args.ar_quota) {
1619 	case GFS2_QUOTA_QUIET:
1620 		fallthrough;
1621 	case GFS2_QUOTA_ON:
1622 		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1623 		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1624 		fallthrough;
1625 	case GFS2_QUOTA_ACCOUNT:
1626 		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1627 						  QCI_SYSFILE;
1628 		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1629 						  QCI_SYSFILE;
1630 		break;
1631 	case GFS2_QUOTA_OFF:
1632 		break;
1633 	}
1634 	if (sdp->sd_quota_inode) {
1635 		state->s_state[USRQUOTA].ino =
1636 					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1637 		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1638 	}
1639 	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1640 	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1641 	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1642 	return 0;
1643 }
1644 
1645 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1646 			  struct qc_dqblk *fdq)
1647 {
1648 	struct gfs2_sbd *sdp = sb->s_fs_info;
1649 	struct gfs2_quota_lvb *qlvb;
1650 	struct gfs2_quota_data *qd;
1651 	struct gfs2_holder q_gh;
1652 	int error;
1653 
1654 	memset(fdq, 0, sizeof(*fdq));
1655 
1656 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1657 		return -ESRCH; /* Crazy XFS error code */
1658 
1659 	if ((qid.type != USRQUOTA) &&
1660 	    (qid.type != GRPQUOTA))
1661 		return -EINVAL;
1662 
1663 	error = qd_get(sdp, qid, &qd);
1664 	if (error)
1665 		return error;
1666 	error = do_glock(qd, FORCE, &q_gh);
1667 	if (error)
1668 		goto out;
1669 
1670 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1671 	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1672 	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1673 	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1674 
1675 	gfs2_glock_dq_uninit(&q_gh);
1676 out:
1677 	qd_put(qd);
1678 	return error;
1679 }
1680 
1681 /* GFS2 only supports a subset of the XFS fields */
1682 #define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1683 
1684 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1685 			  struct qc_dqblk *fdq)
1686 {
1687 	struct gfs2_sbd *sdp = sb->s_fs_info;
1688 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1689 	struct gfs2_quota_data *qd;
1690 	struct gfs2_holder q_gh, i_gh;
1691 	unsigned int data_blocks, ind_blocks;
1692 	unsigned int blocks = 0;
1693 	int alloc_required;
1694 	loff_t offset;
1695 	int error;
1696 
1697 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1698 		return -ESRCH; /* Crazy XFS error code */
1699 
1700 	if ((qid.type != USRQUOTA) &&
1701 	    (qid.type != GRPQUOTA))
1702 		return -EINVAL;
1703 
1704 	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1705 		return -EINVAL;
1706 
1707 	error = qd_get(sdp, qid, &qd);
1708 	if (error)
1709 		return error;
1710 
1711 	error = gfs2_qa_get(ip);
1712 	if (error)
1713 		goto out_put;
1714 
1715 	inode_lock(&ip->i_inode);
1716 	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1717 	if (error)
1718 		goto out_unlockput;
1719 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1720 	if (error)
1721 		goto out_q;
1722 
1723 	/* Check for existing entry, if none then alloc new blocks */
1724 	error = update_qd(sdp, qd);
1725 	if (error)
1726 		goto out_i;
1727 
1728 	/* If nothing has changed, this is a no-op */
1729 	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1730 	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1731 		fdq->d_fieldmask ^= QC_SPC_SOFT;
1732 
1733 	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1734 	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1735 		fdq->d_fieldmask ^= QC_SPC_HARD;
1736 
1737 	if ((fdq->d_fieldmask & QC_SPACE) &&
1738 	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1739 		fdq->d_fieldmask ^= QC_SPACE;
1740 
1741 	if (fdq->d_fieldmask == 0)
1742 		goto out_i;
1743 
1744 	offset = qd2offset(qd);
1745 	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1746 	if (gfs2_is_stuffed(ip))
1747 		alloc_required = 1;
1748 	if (alloc_required) {
1749 		struct gfs2_alloc_parms ap = { .aflags = 0, };
1750 		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1751 				       &data_blocks, &ind_blocks);
1752 		blocks = 1 + data_blocks + ind_blocks;
1753 		ap.target = blocks;
1754 		error = gfs2_inplace_reserve(ip, &ap);
1755 		if (error)
1756 			goto out_i;
1757 		blocks += gfs2_rg_blocks(ip, blocks);
1758 	}
1759 
1760 	/* Some quotas span block boundaries and can update two blocks,
1761 	 * adding an extra block to the transaction to handle such quotas */
1762 	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1763 	if (error)
1764 		goto out_release;
1765 
1766 	/* Apply changes */
1767 	error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
1768 	if (!error)
1769 		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1770 
1771 	gfs2_trans_end(sdp);
1772 out_release:
1773 	if (alloc_required)
1774 		gfs2_inplace_release(ip);
1775 out_i:
1776 	gfs2_glock_dq_uninit(&i_gh);
1777 out_q:
1778 	gfs2_glock_dq_uninit(&q_gh);
1779 out_unlockput:
1780 	gfs2_qa_put(ip);
1781 	inode_unlock(&ip->i_inode);
1782 out_put:
1783 	qd_put(qd);
1784 	return error;
1785 }
1786 
1787 const struct quotactl_ops gfs2_quotactl_ops = {
1788 	.quota_sync     = gfs2_quota_sync,
1789 	.get_state	= gfs2_quota_get_state,
1790 	.get_dqblk	= gfs2_get_dqblk,
1791 	.set_dqblk	= gfs2_set_dqblk,
1792 };
1793 
1794 void __init gfs2_quota_hash_init(void)
1795 {
1796 	unsigned i;
1797 
1798 	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1799 		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1800 }
1801