/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */
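/*
 * Illustrative example (not part of the original comments): with the
 * default quota_scale of 1, need_sync() below treats an unsynced local
 * change as if every journal (node) in the cluster had made the same
 * change.  On a 4-node cluster, a user with 900 blocks of synced usage
 * and a 1000-block limit who allocates 30 blocks locally is projected at
 * 900 + 30 * 4 = 1020 >= 1000, so that ID is synced to the quota file
 * early.
 */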
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	(1 << GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_name.ln_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}

static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}
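/*
 * Shrinker callbacks: when the VM applies memory pressure, walk the quota
 * LRU, isolate entries whose reference count has dropped to zero (see
 * gfs2_qd_isolate() above) and dispose of them.  Scanning is refused
 * without __GFP_FS to avoid recursing into the filesystem during reclaim.
 */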
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}

static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
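/*
 * slot_get - reserve this ID's slot in the per-node quota change file.
 * The first hold allocates a free bit in sd_quota_bitmap; subsequent
 * holds just bump qd_slot_count.  Returns -ENOSPC if all slots are in
 * use.
 */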
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}
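/*
 * qd_fish - find one quota data object in need of a sync.  Scans
 * sd_quota_list for an entry with pending changes that has not yet been
 * synced in the current sync generation, grabs a reference plus its
 * change-file buffer, and returns it through *qdp (NULL if none is
 * found).
 */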
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
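/*
 * do_qc - record a local usage change in this node's quota change file.
 * The first nonzero change for an ID initialises its on-disk
 * gfs2_quota_change entry and takes qd/slot references (QDF_CHANGE);
 * when the accumulated change reaches zero again, the entry and the
 * references are released.
 */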
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;
	int done = 0;

	blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	bh = page_buffers(page);
	while (!done) {
		/* Find the beginning block within the page */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				zero_user(page, bnum * bsize, bh->b_size);
		}
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		if (!buffer_uptodate(bh)) {
			ll_rw_block(READ | REQ_META, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				goto unlock_out;
		}
		gfs2_trans_add_data(ip->i_gl, bh);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		done = 1;
	}

	/* Write to the page, now that we have setup the buffer(s) */
	kaddr = kmap_atomic(page);
	memcpy(kaddr + off, buf, bytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return -EIO;
}
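/*
 * gfs2_write_disk_quota - write a struct gfs2_quota to the quota file at
 * offset @loc.  Because a quota record may straddle a page boundary, the
 * write is split into at most two gfs2_write_buf_to_page() calls.
 */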
static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int pg_oflow = 0, error;
	void *ptr;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_CACHE_SHIFT;
	pg_off = loc % PAGE_CACHE_SIZE;

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_CACHE_SIZE) {
		pg_oflow = 1;
		overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE;
	}

	ptr = qp;
	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && pg_oflow)
		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	err = gfs2_write_disk_quota(ip, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode->i_mtime = inode->i_atime = CURRENT_TIME;
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}
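/*
 * do_sync - write a batch of quota changes through to the quota file.
 * The quota glocks are taken exclusively in qd_id order (via sort_qd())
 * so that concurrent syncs on different nodes acquire them in the same
 * order, then each qd_change_sync is applied to the quota file and
 * backed out of the per-node quota change file via do_qc().
 */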
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
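/*
 * do_glock - take a shared glock on a quota data object, refreshing the
 * cached LVB copy of its limits/usage first if it is stale or if
 * @force_refresh is set.  A refresh upgrades to an exclusive, uncached
 * lock, re-reads the on-disk record via update_qd(), and then retries
 * the shared lock.
 */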
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip: The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap: The allocation parameters. ap->target contains the requested
 *      blocks. ap->min_target, if set, contains the minimum blks
 *      requested.
 *
 * Returns: 0 on success.
 *          min_req = ap->min_target ? ap->min_target : ap->target;
 *          quota must allow at least min_req blks for success and
 *          ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *          of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	unsigned int x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
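/*
 * gfs2_quota_init - build the in-core quota state at mount time by
 * scanning this node's quota change file.  Every nonzero change found
 * gets a gfs2_quota_data with QDF_CHANGE set and its slot marked in
 * sd_quota_bitmap, so changes that were pending when the node went down
 * are synced later rather than needing a separate quota-check program.
 */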
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO, PAGE_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}
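/*
 * gfs2_wake_up_statfs - ask gfs2_quotad() to sync the master statfs file
 * on its next pass instead of waiting out the remaining statfs quantum.
 */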
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
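/*
 * gfs2_set_dqblk - the quotactl ->set_dqblk hook.  Only the XFS-style
 * space limits in GFS2_FIELDMASK above may be set; fields that already
 * match the on-disk values are masked out, so an unchanged request
 * becomes a no-op.
 */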
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
	if (!error)
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}