// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but greater
 * than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
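
/*
 * Illustrative example, assuming the default quota_scale of one: need_sync()
 * below scales a node's local unsynced change by the number of journals
 * (nodes) in the cluster before comparing against the hard limit. On a
 * four-node cluster with a 1000-block limit and 900 blocks already recorded
 * in the quota file, a node triggers a sync once its local change reaches
 * 25 blocks, since 900 + 25 * 4 >= 1000.
 */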

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

#define QC_CHANGE 0
#define QC_SYNC 1

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_name.ln_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}

static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
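
/*
 * Illustrative example: user and group quotas are interleaved in the quota
 * file by the index scheme above. User ID 5 maps to index 10 and group ID 5
 * to index 11, so their records sit at offsets 10 * sizeof(struct gfs2_quota)
 * and 11 * sizeof(struct gfs2_quota) respectively.
 */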

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}

static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}

static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct inode *inode = sdp->sd_qc_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	struct iomap iomap = { };
	int error;

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	error = gfs2_iomap_get(inode,
			       (loff_t)block << inode->i_blkbits,
			       i_blocksize(inode), &iomap);
	if (error)
		goto fail;
	error = -ENOENT;
	if (iomap.type != IOMAP_MAPPED)
		goto fail;

	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
			       DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *iter;
	int error;

	*qdp = NULL;

	if (sb_rdonly(sdp->sd_vfs))
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
			qd = iter;
			break;
		}
	}

	spin_unlock(&qd_lock);

	if (qd) {
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

/**
 * gfs2_qa_get - make sure we have a quota allocation data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	spin_lock(&inode->i_lock);
	if (ip->i_qadata == NULL) {
		struct gfs2_qadata *tmp;

		spin_unlock(&inode->i_lock);
		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!tmp)
			return -ENOMEM;

		spin_lock(&inode->i_lock);
		if (ip->i_qadata == NULL)
			ip->i_qadata = tmp;
		else
			kmem_cache_free(gfs2_qadata_cachep, tmp);
	}
	ip->i_qadata->qa_ref++;
	spin_unlock(&inode->i_lock);
	return 0;
}

void gfs2_qa_put(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;

	spin_lock(&inode->i_lock);
	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	spin_unlock(&inode->i_lock);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
		error = -EIO;
		gfs2_qa_put(ip);
		goto out;
	}

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out_unhold:
	if (error)
		gfs2_quota_unhold(ip);
out:
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
	gfs2_qa_put(ip);
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (qc_type == QC_CHANGE) {
		if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
			qd_hold(qd);
			slot_hold(qd);
		}
	} else {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	}

	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
	mutex_unlock(&sdp->sd_quota_mutex);
}
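
/*
 * Summary of the two qc_type values (added for clarity): a QC_CHANGE call
 * that sets QDF_CHANGE for the first time takes a qd reference and a slot
 * reference, pinning the entry until its accumulated change is written back;
 * the matching QC_SYNC call made from do_sync() clears QDF_CHANGE and drops
 * both references again.
 */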

static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct buffer_head *bh;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;
	int done = 0;

	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	bh = page_buffers(page);
	while (!done) {
		/* Find the beginning block within the page */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				zero_user(page, bnum * bsize, bh->b_size);
		}
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
			goto unlock_out;
		if (gfs2_is_jdata(ip))
			gfs2_trans_add_data(ip->i_gl, bh);
		else
			gfs2_ordered_add_inode(ip);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		done = 1;
	}

	/* Write to the page, now that we have set up the buffer(s) */
	memcpy_to_page(page, off, buf, bytes);
	flush_dcache_page(page);
	unlock_page(page);
	put_page(page);

	return 0;

unlock_out:
	unlock_page(page);
	put_page(page);
	return -EIO;
}

static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int pg_oflow = 0, error;
	void *ptr;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_SHIFT;
	pg_off = offset_in_page(loc);

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_SIZE) {
		pg_oflow = 1;
		overflow = (pg_off + nbytes) - PAGE_SIZE;
	}

	ptr = qp;
	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && pg_oflow)
		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}
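
/*
 * Worked example (illustrative; the record size used here is hypothetical):
 * with 4 KiB pages, a quota record written at file offset 4088 has
 * pg_off = 4088. If sizeof(struct gfs2_quota) were 32, then
 * pg_off + nbytes = 4120 > 4096, so overflow = 24: the first 8 bytes land
 * at the end of one page and the remaining 24 bytes at offset 0 of the
 * next page.
 */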

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	err = gfs2_write_disk_quota(ip, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode->i_mtime = inode->i_atime = current_time(inode);
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}
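
/*
 * Unit note (added for clarity): the qc_dqblk limits above arrive in bytes
 * and are stored on disk in filesystem blocks, hence the right shift by
 * sb_bsize_shift. With 4 KiB blocks (sb_bsize_shift == 12), a soft limit of
 * 1 GiB is recorded as (1 << 30) >> 12 == 262144 blocks.
 */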

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs) {
		error = -ENOMEM;
		goto out;
	}

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	inode_lock(&ip->i_inode);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out_dq;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_dq;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* The +3 at the end is for the unstuffing block, the inode size
	 * update block, and one more block in case a quota straddles a page
	 * boundary and two blocks need to be updated instead of one */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync, QC_SYNC);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out_dq:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	inode_unlock(&ip->i_inode);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
out:
	gfs2_qa_put(ip);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	u32 x;
	int error = 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
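
/*
 * In other words (summary added for clarity), need_sync() projects
 *
 *	projected = qb_value + qd_change * n_journals * (num / den)
 *
 * and requests a sync once the projection reaches qb_limit. This is the
 * quota_scale knob described at the top of this file: num/den greater than
 * one syncs earlier (less overrun), less than one syncs later.
 */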

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	u32 x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		return;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_qadata->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}
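
/*
 * Worked example (illustrative): with a hard limit of 1000 blocks, 980
 * blocks in use (including the unsynced local change), and an ap->target of
 * 50, ap->allowed is capped at 20. The check fails with -EDQUOT unless
 * ap->min_target is 20 or less, in which case it returns 0 and the caller
 * can fall back to allocating at most ap->allowed blocks.
 */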

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
	    gfs2_assert_warn(sdp, change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
				 ip->i_qadata->qa_ref > 0))
		return;
	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change, QC_CHANGE);
		}
	}
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			extlen = 32;
			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
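
/*
 * Sizing example (illustrative): the slot bitmap above is rounded up to
 * whole unsigned longs. On a 64-bit machine, 10000 quota-change slots need
 * DIV_ROUND_UP(10000, 64) = 157 longs, i.e. a 1256-byte bitmap; kzalloc()
 * is tried first, with a fallback to __vmalloc() for larger quota_change
 * files.
 */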

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_last_entry(head, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!gfs2_withdrawn(sdp)) {
		if (!cmpxchg(&sdp->sd_log_error, 0, error))
			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
		wake_up(&sdp->sd_logd_waitq);
	}
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp))
			goto bypass;
		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		try_to_freeze();

bypass:
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		if (!sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		fallthrough;
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_qa_get(ip);
	if (error)
		goto out_put;

	inode_lock(&ip->i_inode);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
	if (!error)
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	gfs2_qa_put(ip);
	inode_unlock(&ip->i_inode);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}