/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;

	if (!(sc->gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}
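/*
 * Layout note (illustrative, not normative): user and group quota records
 * are interleaved in the quota file, so the record for an ID lives at
 * (2 * id + !user) * sizeof(struct gfs2_quota). Assuming a 64-byte quota
 * structure purely for the sake of the arithmetic, UID 0 would sit at
 * offset 0, GID 0 at offset 64, UID 1 at offset 128, and so on.
 * qd2offset() below is the authoritative mapping.
 */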
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}
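/*
 * Worked example for the bitmap scan below (a 4096-byte PAGE_SIZE is
 * assumed purely for illustration): each bitmap chunk covers
 * 8 * 4096 = 32768 slots, so a free bit found at chunk c = 1, byte o = 2,
 * bit b = 5 is slot number 1 * 32768 + 2 * 8 + 5 = 32789. slot_get()
 * performs exactly this first-fit scan over sd_quota_bitmap.
 */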
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}
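/*
 * Sketch of the slot-to-disk mapping used by bh_get() (the figures are an
 * assumption for illustration): with 4096-byte blocks and a 16-byte
 * struct gfs2_quota_change, sd_qc_per_block is roughly
 * (4096 - sizeof(struct gfs2_meta_header)) / 16, i.e. about 250 entries
 * per block. Assuming exactly 250, slot 500 would map to block
 * 500 / 250 = 2 of the per-node quota-change file, entry 500 % 250 = 0
 * within that block.
 */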
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
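/*
 * Explanatory note (a reading of the code, not normative): a single
 * operation can touch at most four quota IDs - the inode's current owner
 * and group, plus a new uid and/or gid when ownership is changing - which
 * is why gfs2_quota_hold() below acquires up to four quota data entries
 * in exactly that order.
 */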
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
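/*
 * For illustration: do_qc() above accumulates signed deltas, so a
 * transaction that allocates 10 blocks followed by one that frees 4
 * leaves qc_change == +6 for that ID. If a later change brings the total
 * back to zero, the slot and reference taken when the tag first went
 * nonzero are dropped again.
 */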
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = qp->qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
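/*
 * Reservation arithmetic in do_sync(), worked through for illustration
 * (the figures are assumed, not derived from any particular on-disk
 * layout): syncing num_qd = 2 IDs with data_blocks = 1 each asks for
 * 2 * 1 + RES_DINODE + 2 + 3 journal blocks before allocation overhead.
 * The trailing +3 covers a possible unstuffing block, an inode size
 * update, and a quota record that straddles a page boundary, per the
 * comments inside do_sync() below.
 */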
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	error = gfs2_inplace_reserve(ip, reserved);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
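/*
 * Explanatory note on the LVB protocol in do_glock() below (a reading of
 * the code, not normative documentation): the quota glock is first taken
 * shared and the cached LVB is trusted if its magic matches. On a forced
 * refresh or a stale LVB it is retaken exclusive, the record is reread
 * from the quota file via update_qd() (which rewrites the LVB), and the
 * function drops back and retries, so other nodes inherit the freshly
 * cached values.
 */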
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(make_kqid(&init_user_ns,
						     test_bit(QDF_USER, &qd->qd_flags) ?
						     USRQUOTA : GRPQUOTA,
						     qd->qd_id),
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			quota_send_warning(make_kqid(&init_user_ns,
						     test_bit(QDF_USER, &qd->qd_flags) ?
						     USRQUOTA : GRPQUOTA,
						     qd->qd_id),
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
	return gfs2_quota_sync(sb, type);
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
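/*
 * Sizing example for gfs2_quota_init() above (figures assumed for
 * illustration): a 1 MiB quota-change file of 4096-byte blocks holds 256
 * blocks; at roughly 250 quota-change entries per block that is about
 * 64000 slots, which DIV_ROUND_UP(64000, 8 * 4096) packs into two
 * one-page bitmap chunks.
 */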
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
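/*
 * Timeout bookkeeping in quotad_check_timeo() above, by example (values
 * assumed): with a quota_quantum of 60 seconds and a wakeup after
 * t = 10 seconds, *timeo drops from 60 * HZ to 50 * HZ and no sync runs;
 * once the accumulated sleep time reaches the quantum, fxn() fires and
 * *timeo is re-armed from the current tunable.
 */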
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}
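/*
 * Unit-conversion note for the dqblk handlers below (the 4 KiB figure is
 * an assumed example): GFS2 stores limits in filesystem blocks while the
 * XFS-style quotactl API speaks in 512-byte basic blocks, so values are
 * shifted by sd_fsb2bb_shift on the way in and out. With 4096-byte
 * filesystem blocks that shift is 3, e.g. a 1000-block hard limit is
 * reported as 8000 basic blocks.
 */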
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;
	int type;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (qid.type == USRQUOTA)
		type = QUOTA_USER;
	else if (qid.type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid(&init_user_ns, qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;
	int type;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch (qid.type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != FS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != FS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != from_kqid(&init_user_ns, qid))
		return -EINVAL;

	error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, blocks);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};