// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As a consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

/*
 * There are five quota SMP locks:
 * * dq_list_lock protects all lists with quotas and quota formats.
 * * dquot->dq_dqb_lock protects data from dq_dqb
 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
 *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
 *   dquot_transfer() can stabilize the amount it transfers
 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
 *   pointers in the inode
 * * dq_state_lock protects modifications of quota state (on quotaon and
 *   quotaoff) and readers who care about latest values take it as well.
 *
 * The spinlock ordering is hence:
 *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (e.g. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock.
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Reading a pointer needs srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 * inode and before dropping dquot references to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about the S_NOQUOTA inode flag (marking that
 * inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
 * have to do all pointer modifications before dropping dq_data_lock. This makes
 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 * then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
 * memory (or space for it is being allocated) on the first dqget(), when it is
 * being written out, and when it is being released on the last dqput(). The
 * allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */
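
/*
 * Illustrative sketch (editorial, not used anywhere): a helper obeying
 * the spinlock ordering above would nest the locks outermost-first, e.g.:
 *
 *	spin_lock(&dq_data_lock);
 *	spin_lock(&inode->i_lock);
 *	spin_lock(&dquot->dq_dqb_lock);
 *	(update dq_dqb consistently with i_blocks/i_bytes)
 *	spin_unlock(&dquot->dq_dqb_lock);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&dq_data_lock);
 *
 * Any subset taken in this relative order is fine; taking the locks in
 * the opposite order risks deadlock.
 */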

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

void register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(register_quota_format);

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
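
/*
 * Illustrative sketch (editorial): a quota format module registers itself
 * on load and unregisters on unload, roughly:
 *
 *	static struct quota_format_type my_fmt = {	(hypothetical name)
 *		.qf_fmt_id = QFMT_VFS_V1,
 *		.qf_ops    = &my_format_ops,
 *		.qf_owner  = THIS_MODULE,
 *	};
 *
 *	register_quota_format(&my_fmt);		on module init
 *	unregister_quota_format(&my_fmt);	on module exit
 *
 * find_quota_format() then resolves a qf_fmt_id back to such a structure,
 * loading the module on demand via module_names[].
 */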

/*
 * Dquot List Management:
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed at the end of inuse_list when first created, and this
 * list is used for the invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot is dropped, the dquot is added to
 * releasing_dquots. We'll then queue a work item which will call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list. Each cleaned up dquot is moved to the free_dquots list.
 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
 * struct.
 *
 * Unused and cleaned up dquots are in the free_dquots list and this list is
 * searched whenever we need an available dquot. Dquots are removed from the
 * list as soon as they are used again and dqstats.free_dquots gives the number
 * of dquots on the list. When a dquot is invalidated it's completely released
 * from memory.
 *
 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
 * dirty, and this list is searched when writing dirty dquots back to the
 * quota file. Note that some filesystems do dirty dquot tracking on their
 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb >> L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash + hashent, dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;

	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */
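
/*
 * Editorial note: wait_on_dquot() below serializes us behind any
 * dquot_acquire()/dquot_release() in progress: taking and immediately
 * dropping dq_lock simply waits for the current holder to finish.
 */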

static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);

/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		if (!err && ret < 0)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
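
/*
 * Illustrative sketch (editorial): dquot_acquire(), dquot_commit() and
 * dquot_release() below are the generic implementations that a filesystem
 * storing quotas in regular quota files can wire into its
 * dquot_operations, roughly:
 *
 *	static const struct dquot_operations my_dquot_ops = {
 *		.write_dquot	= dquot_commit,
 *		.acquire_dquot	= dquot_acquire,
 *		.release_dquot	= dquot_release,
 *		.mark_dirty	= dquot_mark_dquot_dirty,
 *		...
 *	};
 */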

/*
 * Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);

/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* A dquot can be inactive only if there was an error during read/init
	 * => we'd better not write it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could
			 * have been reclaimed by prune_dqcache()). Hence we
			 * must restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but dquot didn't
		 * get fully cleaned up yet. Restart the scan which flushes the
		 * work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);

static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}

/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	flush_delayed_work(&quota_release_work);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);

/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is neither very clever nor fast, but currently I don't know of
	 * any other simple way of getting quota data to disk, and it must get
	 * there for userspace to see it... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
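
/*
 * Editorial note: the two callbacks above implement the dquot cache
 * shrinker. Only dquots already on free_dquots (i.e. fully released and
 * clean) are reclaimed, so the scan never needs to write anything back.
 */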

/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
	WARN_ON_ONCE(!list_empty(&dquot->dq_free));
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
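
/*
 * Illustrative sketch (editorial): the usual reference pattern around
 * dqget()/dqput() for callers outside this file, assuming a known uid:
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));
 *
 *	if (!IS_ERR(dquot)) {
 *		(inspect dquot->dq_dqb under dquot->dq_dqb_lock)
 *		dqput(dquot);
 *	}
 */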

struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);
	spin_lock_init(&dquot->dq_dqb_lock);

	return dquot;
}

/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us? */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);

static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}

static int dqinit_needed(struct inode *inode, int type)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;

	dquots = i_dquot(inode);
	if (type != -1)
		return !dquots[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!dquots[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}
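
/*
 * Editorial note: add_dquot_ref() above is the quotaon-time counterpart
 * of remove_dquot_ref() below; together they attach and detach dquot
 * pointers for every inode on the superblock's s_inodes list.
 */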

static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}

static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}

/* Print a warning to the user who exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}

static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
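
/*
 * Worked example (editorial, hypothetical numbers): with
 * dqb_isoftlimit = 1000, dqb_ihardlimit = 1100 and dqb_curinodes = 995,
 * dquot_add_inodes() with inodes = 10 stays under the hard limit, starts
 * the grace period (dqb_itime = now + dqi_igrace) and queues a
 * QUOTA_NL_ISOFTWARN warning. A later request for 100 more inodes would
 * cross the hard limit and fail with -EDQUOT, unless the caller passes
 * the ignore_hardlimit() test.
 */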

static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed the softlimit,
			 * so exceeding it will always be reported.
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here...
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t tspace;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);

bool dquot_initialize_needed(struct inode *inode)
{
	struct dquot __rcu **dquots;
	int i;

	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
	for (i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
			return true;
	return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);
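
/*
 * Illustrative sketch (editorial): filesystems call dquot_initialize()
 * from their creation and ownership-changing paths before charging
 * anything against quota, e.g.:
 *
 *	err = dquot_initialize(inode);
 *	if (err)
 *		return err;
 *	err = dquot_alloc_inode(inode);
 */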

/*
 * Release all quotas referenced by inode.
 *
 * This function is only called on inode free or when converting a file
 * to a quota file. In both cases there are no other users of the i_dquot,
 * so we needn't call synchronize_srcu() after clearing i_dquot.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
			lockdep_is_held(&dq_data_lock));
		rcu_assign_pointer(dquots[cnt], NULL);
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);

/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode *inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

static qsize_t __inode_get_rsv_space(struct inode *inode)
{
	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	return *inode_reserved_space(inode);
}

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = __inode_get_rsv_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
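
/*
 * Illustrative sketch (editorial): the reserved-space machinery above
 * backs delayed allocation, where quota is reserved at write time and
 * claimed when blocks are actually allocated, roughly (wrappers from
 * quotaops.h):
 *
 *	dquot_reserve_block(inode, nr);		at buffered write time
 *	...
 *	dquot_claim_block(inode, nr);		when the blocks are mapped
 */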

/*
 * This function updates the i_blocks+i_bytes fields and the quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot __rcu **dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		if (reserve) {
			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
		} else {
			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
		}
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				spin_lock(&dquot->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquot, number);
				else
					dquot_decr_space(dquot, number);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_flush_warn;
	ret = mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				/* Back out changes we already did */
				spin_lock(&dquot->dq_dqb_lock);
				dquot_decr_inodes(dquot, 1);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		ret = mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
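
/*
 * Illustrative sketch (editorial): filesystems normally reach
 * __dquot_alloc_space() through the quotaops.h wrappers; e.g.
 * dquot_alloc_block(inode, nr) roughly expands to
 *
 *	__dquot_alloc_space(inode, nr << inode->i_blkbits,
 *			    DQUOT_SPACE_WARN);
 *
 * followed by marking the inode dirty.
 */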

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);

/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Return allocated quotas back to reserved ones */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
spin_unlock(&dquot->dq_dqb_lock); 1919 } 1920 if (reserve) 1921 *inode_reserved_space(inode) -= number; 1922 else 1923 __inode_sub_bytes(inode, number); 1924 spin_unlock(&inode->i_lock); 1925 1926 if (reserve) 1927 goto out_unlock; 1928 mark_all_dquot_dirty(dquots); 1929 out_unlock: 1930 srcu_read_unlock(&dquot_srcu, index); 1931 flush_warnings(warn); 1932 } 1933 EXPORT_SYMBOL(__dquot_free_space); 1934 1935 /* 1936 * This operation can block, but only after everything is updated 1937 */ 1938 void dquot_free_inode(struct inode *inode) 1939 { 1940 unsigned int cnt; 1941 struct dquot_warn warn[MAXQUOTAS]; 1942 struct dquot __rcu * const *dquots; 1943 struct dquot *dquot; 1944 int index; 1945 1946 if (!inode_quota_active(inode)) 1947 return; 1948 1949 dquots = i_dquot(inode); 1950 index = srcu_read_lock(&dquot_srcu); 1951 spin_lock(&inode->i_lock); 1952 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1953 int wtype; 1954 warn[cnt].w_type = QUOTA_NL_NOWARN; 1955 dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1956 if (!dquot) 1957 continue; 1958 spin_lock(&dquot->dq_dqb_lock); 1959 wtype = info_idq_free(dquot, 1); 1960 if (wtype != QUOTA_NL_NOWARN) 1961 prepare_warning(&warn[cnt], dquot, wtype); 1962 dquot_decr_inodes(dquot, 1); 1963 spin_unlock(&dquot->dq_dqb_lock); 1964 } 1965 spin_unlock(&inode->i_lock); 1966 mark_all_dquot_dirty(dquots); 1967 srcu_read_unlock(&dquot_srcu, index); 1968 flush_warnings(warn); 1969 } 1970 EXPORT_SYMBOL(dquot_free_inode); 1971 1972 /* 1973 * Transfer the number of inodes and blocks from one diskquota to another. 1974 * On success, dquot references in transfer_to are consumed and references 1975 * to original dquots that need to be released are placed there. On failure, 1976 * references are kept untouched. 1977 * 1978 * This operation can block, but only after everything is updated. 1979 * A transaction must be started when entering this function. 1980 * 1981 * We are holding references on transfer_from & transfer_to, no need to 1982 * protect them by srcu_read_lock(). 1983 */ 1984 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) 1985 { 1986 qsize_t cur_space; 1987 qsize_t rsv_space = 0; 1988 qsize_t inode_usage = 1; 1989 struct dquot __rcu **dquots; 1990 struct dquot *transfer_from[MAXQUOTAS] = {}; 1991 int cnt, index, ret = 0, err; 1992 char is_valid[MAXQUOTAS] = {}; 1993 struct dquot_warn warn_to[MAXQUOTAS]; 1994 struct dquot_warn warn_from_inodes[MAXQUOTAS]; 1995 struct dquot_warn warn_from_space[MAXQUOTAS]; 1996 1997 if (IS_NOQUOTA(inode)) 1998 return 0; 1999 2000 if (inode->i_sb->dq_op->get_inode_usage) { 2001 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage); 2002 if (ret) 2003 return ret; 2004 } 2005 2006 /* Initialize the arrays */ 2007 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2008 warn_to[cnt].w_type = QUOTA_NL_NOWARN; 2009 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; 2010 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; 2011 } 2012 2013 spin_lock(&dq_data_lock); 2014 spin_lock(&inode->i_lock); 2015 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ 2016 spin_unlock(&inode->i_lock); 2017 spin_unlock(&dq_data_lock); 2018 return 0; 2019 } 2020 cur_space = __inode_get_bytes(inode); 2021 rsv_space = __inode_get_rsv_space(inode); 2022 dquots = i_dquot(inode); 2023 /* 2024 * Build the transfer_from list, check limits, and update usage in 2025 * the target structures. 2026 */ 2027 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2028 /* 2029 * Skip changes for same uid or gid or for turned off quota-type.
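 * A NULL entry in transfer_to[] means there is nothing to transfer for
 * that quota type (the id is unchanged, or no dquot could be obtained;
 * see dquot_transfer() below). Inactive types are skipped via the
 * sb_has_quota_active() check.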
2030 */ 2031 if (!transfer_to[cnt]) 2032 continue; 2033 /* Avoid races with quotaoff() */ 2034 if (!sb_has_quota_active(inode->i_sb, cnt)) 2035 continue; 2036 is_valid[cnt] = 1; 2037 transfer_from[cnt] = srcu_dereference_check(dquots[cnt], 2038 &dquot_srcu, lockdep_is_held(&dq_data_lock)); 2039 ret = dquot_add_inodes(transfer_to[cnt], inode_usage, 2040 &warn_to[cnt]); 2041 if (ret) 2042 goto over_quota; 2043 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 2044 DQUOT_SPACE_WARN, &warn_to[cnt]); 2045 if (ret) { 2046 spin_lock(&transfer_to[cnt]->dq_dqb_lock); 2047 dquot_decr_inodes(transfer_to[cnt], inode_usage); 2048 spin_unlock(&transfer_to[cnt]->dq_dqb_lock); 2049 goto over_quota; 2050 } 2051 } 2052 2053 /* Decrease usage for source structures and update quota pointers */ 2054 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2055 if (!is_valid[cnt]) 2056 continue; 2057 /* Due to IO error we might not have transfer_from[] structure */ 2058 if (transfer_from[cnt]) { 2059 int wtype; 2060 2061 spin_lock(&transfer_from[cnt]->dq_dqb_lock); 2062 wtype = info_idq_free(transfer_from[cnt], inode_usage); 2063 if (wtype != QUOTA_NL_NOWARN) 2064 prepare_warning(&warn_from_inodes[cnt], 2065 transfer_from[cnt], wtype); 2066 wtype = info_bdq_free(transfer_from[cnt], 2067 cur_space + rsv_space); 2068 if (wtype != QUOTA_NL_NOWARN) 2069 prepare_warning(&warn_from_space[cnt], 2070 transfer_from[cnt], wtype); 2071 dquot_decr_inodes(transfer_from[cnt], inode_usage); 2072 dquot_decr_space(transfer_from[cnt], cur_space); 2073 dquot_free_reserved_space(transfer_from[cnt], 2074 rsv_space); 2075 spin_unlock(&transfer_from[cnt]->dq_dqb_lock); 2076 } 2077 rcu_assign_pointer(dquots[cnt], transfer_to[cnt]); 2078 } 2079 spin_unlock(&inode->i_lock); 2080 spin_unlock(&dq_data_lock); 2081 2082 /* 2083 * These arrays are local and we hold dquot references so we don't need 2084 * the srcu protection but still take dquot_srcu to avoid warning in 2085 * mark_all_dquot_dirty(). 
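 * (mark_all_dquot_dirty() fetches the pointers with srcu_dereference(),
 * which would complain if dquot_srcu were not read-locked).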
2086 */ 2087 index = srcu_read_lock(&dquot_srcu); 2088 err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from); 2089 if (err < 0) 2090 ret = err; 2091 err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to); 2092 if (err < 0) 2093 ret = err; 2094 srcu_read_unlock(&dquot_srcu, index); 2095 2096 flush_warnings(warn_to); 2097 flush_warnings(warn_from_inodes); 2098 flush_warnings(warn_from_space); 2099 /* Pass back references to put */ 2100 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 2101 if (is_valid[cnt]) 2102 transfer_to[cnt] = transfer_from[cnt]; 2103 return ret; 2104 over_quota: 2105 /* Back out changes we already did */ 2106 for (cnt--; cnt >= 0; cnt--) { 2107 if (!is_valid[cnt]) 2108 continue; 2109 spin_lock(&transfer_to[cnt]->dq_dqb_lock); 2110 dquot_decr_inodes(transfer_to[cnt], inode_usage); 2111 dquot_decr_space(transfer_to[cnt], cur_space); 2112 dquot_free_reserved_space(transfer_to[cnt], rsv_space); 2113 spin_unlock(&transfer_to[cnt]->dq_dqb_lock); 2114 } 2115 spin_unlock(&inode->i_lock); 2116 spin_unlock(&dq_data_lock); 2117 flush_warnings(warn_to); 2118 return ret; 2119 } 2120 EXPORT_SYMBOL(__dquot_transfer); 2121 2122 /* Wrapper for transferring ownership of an inode for uid/gid only 2123 * Called from FSXXX_setattr() 2124 */ 2125 int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode, 2126 struct iattr *iattr) 2127 { 2128 struct dquot *transfer_to[MAXQUOTAS] = {}; 2129 struct dquot *dquot; 2130 struct super_block *sb = inode->i_sb; 2131 int ret; 2132 2133 if (!inode_quota_active(inode)) 2134 return 0; 2135 2136 if (i_uid_needs_update(idmap, iattr, inode)) { 2137 kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode), 2138 iattr->ia_vfsuid); 2139 2140 dquot = dqget(sb, make_kqid_uid(kuid)); 2141 if (IS_ERR(dquot)) { 2142 if (PTR_ERR(dquot) != -ESRCH) { 2143 ret = PTR_ERR(dquot); 2144 goto out_put; 2145 } 2146 dquot = NULL; 2147 } 2148 transfer_to[USRQUOTA] = dquot; 2149 } 2150 if (i_gid_needs_update(idmap, iattr, inode)) { 2151 kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode), 2152 iattr->ia_vfsgid); 2153 2154 dquot = dqget(sb, make_kqid_gid(kgid)); 2155 if (IS_ERR(dquot)) { 2156 if (PTR_ERR(dquot) != -ESRCH) { 2157 ret = PTR_ERR(dquot); 2158 goto out_put; 2159 } 2160 dquot = NULL; 2161 } 2162 transfer_to[GRPQUOTA] = dquot; 2163 } 2164 ret = __dquot_transfer(inode, transfer_to); 2165 out_put: 2166 dqput_all(transfer_to); 2167 return ret; 2168 } 2169 EXPORT_SYMBOL(dquot_transfer); 2170 2171 /* 2172 * Write info of quota file to disk 2173 */ 2174 int dquot_commit_info(struct super_block *sb, int type) 2175 { 2176 struct quota_info *dqopt = sb_dqopt(sb); 2177 2178 return dqopt->ops[type]->write_file_info(sb, type); 2179 } 2180 EXPORT_SYMBOL(dquot_commit_info); 2181 2182 int dquot_get_next_id(struct super_block *sb, struct kqid *qid) 2183 { 2184 struct quota_info *dqopt = sb_dqopt(sb); 2185 2186 if (!sb_has_quota_active(sb, qid->type)) 2187 return -ESRCH; 2188 if (!dqopt->ops[qid->type]->get_next_id) 2189 return -ENOSYS; 2190 return dqopt->ops[qid->type]->get_next_id(sb, qid); 2191 } 2192 EXPORT_SYMBOL(dquot_get_next_id); 2193 2194 /* 2195 * Definitions of diskquota operations. 
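 *
 * A filesystem can point sb->dq_op at this table directly, or copy it and
 * override individual operations. As a rough sketch (the myfs_* names
 * below are hypothetical, not part of this file), a filesystem that
 * journals its quota updates would typically wrap ->write_dquot around
 * the generic commit:
 *
 *	static int myfs_write_dquot(struct dquot *dquot)
 *	{
 *		int ret;
 *
 *		// open a journal transaction here ...
 *		ret = dquot_commit(dquot);
 *		// ... then close the transaction, folding in any error
 *		return ret;
 *	}
 *
 *	static const struct dquot_operations myfs_quota_operations = {
 *		.write_dquot	= myfs_write_dquot,
 *		.acquire_dquot	= dquot_acquire,
 *		.release_dquot	= dquot_release,
 *		.mark_dirty	= dquot_mark_dquot_dirty,
 *		.write_info	= dquot_commit_info,
 *		.alloc_dquot	= dquot_alloc,
 *		.destroy_dquot	= dquot_destroy,
 *		.get_next_id	= dquot_get_next_id,
 *	};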
2196 */ 2197 const struct dquot_operations dquot_operations = { 2198 .write_dquot = dquot_commit, 2199 .acquire_dquot = dquot_acquire, 2200 .release_dquot = dquot_release, 2201 .mark_dirty = dquot_mark_dquot_dirty, 2202 .write_info = dquot_commit_info, 2203 .alloc_dquot = dquot_alloc, 2204 .destroy_dquot = dquot_destroy, 2205 .get_next_id = dquot_get_next_id, 2206 }; 2207 EXPORT_SYMBOL(dquot_operations); 2208 2209 /* 2210 * Generic helper for ->open on filesystems supporting disk quotas. 2211 */ 2212 int dquot_file_open(struct inode *inode, struct file *file) 2213 { 2214 int error; 2215 2216 error = generic_file_open(inode, file); 2217 if (!error && (file->f_mode & FMODE_WRITE)) 2218 error = dquot_initialize(inode); 2219 return error; 2220 } 2221 EXPORT_SYMBOL(dquot_file_open); 2222 2223 static void vfs_cleanup_quota_inode(struct super_block *sb, int type) 2224 { 2225 struct quota_info *dqopt = sb_dqopt(sb); 2226 struct inode *inode = dqopt->files[type]; 2227 2228 if (!inode) 2229 return; 2230 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2231 inode_lock(inode); 2232 inode->i_flags &= ~S_NOQUOTA; 2233 inode_unlock(inode); 2234 } 2235 dqopt->files[type] = NULL; 2236 iput(inode); 2237 } 2238 2239 /* 2240 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) 2241 */ 2242 int dquot_disable(struct super_block *sb, int type, unsigned int flags) 2243 { 2244 int cnt; 2245 struct quota_info *dqopt = sb_dqopt(sb); 2246 2247 rwsem_assert_held_write(&sb->s_umount); 2248 2249 /* Cannot turn off usage accounting without turning off limits, or 2250 * suspend quotas and simultaneously turn quotas off. */ 2251 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) 2252 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | 2253 DQUOT_USAGE_ENABLED))) 2254 return -EINVAL; 2255 2256 /* 2257 * Skip everything if there's nothing to do. We have to do this because 2258 * sometimes we are called when fill_super() failed and calling 2259 * sync_fs() in such cases does no good. 2260 */ 2261 if (!sb_any_quota_loaded(sb)) 2262 return 0; 2263 2264 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2265 if (type != -1 && cnt != type) 2266 continue; 2267 if (!sb_has_quota_loaded(sb, cnt)) 2268 continue; 2269 2270 if (flags & DQUOT_SUSPENDED) { 2271 spin_lock(&dq_state_lock); 2272 dqopt->flags |= 2273 dquot_state_flag(DQUOT_SUSPENDED, cnt); 2274 spin_unlock(&dq_state_lock); 2275 } else { 2276 spin_lock(&dq_state_lock); 2277 dqopt->flags &= ~dquot_state_flag(flags, cnt); 2278 /* Turning off suspended quotas? */ 2279 if (!sb_has_quota_loaded(sb, cnt) && 2280 sb_has_quota_suspended(sb, cnt)) { 2281 dqopt->flags &= ~dquot_state_flag( 2282 DQUOT_SUSPENDED, cnt); 2283 spin_unlock(&dq_state_lock); 2284 vfs_cleanup_quota_inode(sb, cnt); 2285 continue; 2286 } 2287 spin_unlock(&dq_state_lock); 2288 } 2289 2290 /* We still have to keep quota loaded? */ 2291 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) 2292 continue; 2293 2294 /* Note: these are blocking operations */ 2295 drop_dquot_ref(sb, cnt); 2296 invalidate_dquots(sb, cnt); 2297 /* 2298 * Now all dquots should be invalidated, all writes done so we 2299 * should be only users of the info. No locks needed. 
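 * (invalidate_dquots() above waited for all remaining users to drop
 * their references).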
2300 */ 2301 if (info_dirty(&dqopt->info[cnt])) 2302 sb->dq_op->write_info(sb, cnt); 2303 if (dqopt->ops[cnt]->free_file_info) 2304 dqopt->ops[cnt]->free_file_info(sb, cnt); 2305 put_quota_format(dqopt->info[cnt].dqi_format); 2306 dqopt->info[cnt].dqi_flags = 0; 2307 dqopt->info[cnt].dqi_igrace = 0; 2308 dqopt->info[cnt].dqi_bgrace = 0; 2309 dqopt->ops[cnt] = NULL; 2310 } 2311 2312 /* Skip syncing and setting flags if quota files are hidden */ 2313 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) 2314 goto put_inodes; 2315 2316 /* Sync the superblock so that buffers with quota data are written to 2317 * disk (and so userspace sees correct data afterwards). */ 2318 if (sb->s_op->sync_fs) 2319 sb->s_op->sync_fs(sb, 1); 2320 sync_blockdev(sb->s_bdev); 2321 /* Now the quota files are just ordinary files and we can set the 2322 * inode flags back. Moreover we discard the pagecache so that 2323 * userspace sees the writes we did bypassing the pagecache. We 2324 * must also discard the blockdev buffers so that we see the 2325 * changes done by userspace on the next quotaon() */ 2326 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 2327 if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) { 2328 inode_lock(dqopt->files[cnt]); 2329 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0); 2330 inode_unlock(dqopt->files[cnt]); 2331 } 2332 if (sb->s_bdev) 2333 invalidate_bdev(sb->s_bdev); 2334 put_inodes: 2335 /* We are done when suspending quotas */ 2336 if (flags & DQUOT_SUSPENDED) 2337 return 0; 2338 2339 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 2340 if (!sb_has_quota_loaded(sb, cnt)) 2341 vfs_cleanup_quota_inode(sb, cnt); 2342 return 0; 2343 } 2344 EXPORT_SYMBOL(dquot_disable); 2345 2346 int dquot_quota_off(struct super_block *sb, int type) 2347 { 2348 return dquot_disable(sb, type, 2349 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 2350 } 2351 EXPORT_SYMBOL(dquot_quota_off); 2352 2353 /* 2354 * Turn quotas on for a device 2355 */ 2356 2357 static int vfs_setup_quota_inode(struct inode *inode, int type) 2358 { 2359 struct super_block *sb = inode->i_sb; 2360 struct quota_info *dqopt = sb_dqopt(sb); 2361 2362 if (is_bad_inode(inode)) 2363 return -EUCLEAN; 2364 if (!S_ISREG(inode->i_mode)) 2365 return -EACCES; 2366 if (IS_RDONLY(inode)) 2367 return -EROFS; 2368 if (sb_has_quota_loaded(sb, type)) 2369 return -EBUSY; 2370 2371 /* 2372 * Quota files should never be encrypted. They should be thought of as 2373 * filesystem metadata, not user data. New-style internal quota files 2374 * cannot be encrypted by users anyway, but old-style external quota 2375 * files could potentially be incorrectly created in an encrypted 2376 * directory, hence this explicit check. Some reasons why encrypted 2377 * quota files don't work include: (1) some filesystems that support 2378 * encryption don't handle it in their quota_read and quota_write, and 2379 * (2) cleaning up encrypted quota files at unmount would need special 2380 * consideration, as quota files are cleaned up later than user files. 2381 */ 2382 if (IS_ENCRYPTED(inode)) 2383 return -EINVAL; 2384 2385 dqopt->files[type] = igrab(inode); 2386 if (!dqopt->files[type]) 2387 return -EIO; 2388 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2389 /* We don't want quota and atime on quota files (deadlocks 2390 * possible). Also nobody should write to the file - we use 2391 * special IO operations which ignore the immutable bit.
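 * (reads and writes go through sb->s_op->quota_read and ->quota_write,
 * which bypass the pagecache).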
*/ 2392 inode_lock(inode); 2393 inode->i_flags |= S_NOQUOTA; 2394 inode_unlock(inode); 2395 /* 2396 * When S_NOQUOTA is set, remove dquot references as no more 2397 * references can be added 2398 */ 2399 __dquot_drop(inode); 2400 } 2401 return 0; 2402 } 2403 2404 int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, 2405 unsigned int flags) 2406 { 2407 struct quota_format_type *fmt; 2408 struct quota_info *dqopt = sb_dqopt(sb); 2409 int error; 2410 2411 lockdep_assert_held_write(&sb->s_umount); 2412 2413 /* Just unsuspend quotas? */ 2414 if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED)) 2415 return -EINVAL; 2416 2417 fmt = find_quota_format(format_id); 2418 if (!fmt) 2419 return -ESRCH; 2420 if (!sb->dq_op || !sb->s_qcop || 2421 (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) { 2422 error = -EINVAL; 2423 goto out_fmt; 2424 } 2425 /* Filesystems outside of init_user_ns not yet supported */ 2426 if (sb->s_user_ns != &init_user_ns) { 2427 error = -EINVAL; 2428 goto out_fmt; 2429 } 2430 /* Usage always has to be set... */ 2431 if (!(flags & DQUOT_USAGE_ENABLED)) { 2432 error = -EINVAL; 2433 goto out_fmt; 2434 } 2435 if (sb_has_quota_loaded(sb, type)) { 2436 error = -EBUSY; 2437 goto out_fmt; 2438 } 2439 2440 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2441 /* As we bypass the pagecache we must now flush all the 2442 * dirty data and invalidate caches so that kernel sees 2443 * changes from userspace. It is not enough to just flush 2444 * the quota file since if blocksize < pagesize, invalidation 2445 * of the cache could fail because of other unrelated dirty 2446 * data */ 2447 sync_filesystem(sb); 2448 invalidate_bdev(sb->s_bdev); 2449 } 2450 2451 error = -EINVAL; 2452 if (!fmt->qf_ops->check_quota_file(sb, type)) 2453 goto out_fmt; 2454 2455 dqopt->ops[type] = fmt->qf_ops; 2456 dqopt->info[type].dqi_format = fmt; 2457 dqopt->info[type].dqi_fmt_id = format_id; 2458 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); 2459 error = dqopt->ops[type]->read_file_info(sb, type); 2460 if (error < 0) 2461 goto out_fmt; 2462 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) { 2463 spin_lock(&dq_data_lock); 2464 dqopt->info[type].dqi_flags |= DQF_SYS_FILE; 2465 spin_unlock(&dq_data_lock); 2466 } 2467 spin_lock(&dq_state_lock); 2468 dqopt->flags |= dquot_state_flag(flags, type); 2469 spin_unlock(&dq_state_lock); 2470 2471 error = add_dquot_ref(sb, type); 2472 if (error) 2473 dquot_disable(sb, type, 2474 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 2475 2476 return error; 2477 out_fmt: 2478 put_quota_format(fmt); 2479 2480 return error; 2481 } 2482 EXPORT_SYMBOL(dquot_load_quota_sb); 2483 2484 /* 2485 * More powerful function for turning on quotas on given quota inode allowing 2486 * setting of individual quota flags 2487 */ 2488 int dquot_load_quota_inode(struct inode *inode, int type, int format_id, 2489 unsigned int flags) 2490 { 2491 int err; 2492 2493 err = vfs_setup_quota_inode(inode, type); 2494 if (err < 0) 2495 return err; 2496 err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags); 2497 if (err < 0) 2498 vfs_cleanup_quota_inode(inode->i_sb, type); 2499 return err; 2500 } 2501 EXPORT_SYMBOL(dquot_load_quota_inode); 2502 2503 /* Reenable quotas on remount RW */ 2504 int dquot_resume(struct super_block *sb, int type) 2505 { 2506 struct quota_info *dqopt = sb_dqopt(sb); 2507 int ret = 0, cnt; 2508 unsigned int flags; 2509 2510 rwsem_assert_held_write(&sb->s_umount); 2511 2512 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2513 if (type != -1 && cnt != type) 2514 continue; 2515 if 
(!sb_has_quota_suspended(sb, cnt)) 2516 continue; 2517 2518 spin_lock(&dq_state_lock); 2519 flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | 2520 DQUOT_LIMITS_ENABLED, 2521 cnt); 2522 dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt); 2523 spin_unlock(&dq_state_lock); 2524 2525 flags = dquot_generic_flag(flags, cnt); 2526 ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id, 2527 flags); 2528 if (ret < 0) 2529 vfs_cleanup_quota_inode(sb, cnt); 2530 } 2531 2532 return ret; 2533 } 2534 EXPORT_SYMBOL(dquot_resume); 2535 2536 int dquot_quota_on(struct super_block *sb, int type, int format_id, 2537 const struct path *path) 2538 { 2539 int error = security_quota_on(path->dentry); 2540 if (error) 2541 return error; 2542 /* Quota file not on the same filesystem? */ 2543 if (path->dentry->d_sb != sb) 2544 error = -EXDEV; 2545 else 2546 error = dquot_load_quota_inode(d_inode(path->dentry), type, 2547 format_id, DQUOT_USAGE_ENABLED | 2548 DQUOT_LIMITS_ENABLED); 2549 return error; 2550 } 2551 EXPORT_SYMBOL(dquot_quota_on); 2552 2553 /* 2554 * This function is used when filesystem needs to initialize quotas 2555 * during mount time. 2556 */ 2557 int dquot_quota_on_mount(struct super_block *sb, char *qf_name, 2558 int format_id, int type) 2559 { 2560 struct dentry *dentry; 2561 int error; 2562 2563 dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name)); 2564 if (IS_ERR(dentry)) 2565 return PTR_ERR(dentry); 2566 2567 error = security_quota_on(dentry); 2568 if (!error) 2569 error = dquot_load_quota_inode(d_inode(dentry), type, format_id, 2570 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 2571 2572 dput(dentry); 2573 return error; 2574 } 2575 EXPORT_SYMBOL(dquot_quota_on_mount); 2576 2577 static int dquot_quota_enable(struct super_block *sb, unsigned int flags) 2578 { 2579 int ret; 2580 int type; 2581 struct quota_info *dqopt = sb_dqopt(sb); 2582 2583 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) 2584 return -ENOSYS; 2585 /* Accounting cannot be turned on while fs is mounted */ 2586 flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT); 2587 if (!flags) 2588 return -EINVAL; 2589 for (type = 0; type < MAXQUOTAS; type++) { 2590 if (!(flags & qtype_enforce_flag(type))) 2591 continue; 2592 /* Can't enforce without accounting */ 2593 if (!sb_has_quota_usage_enabled(sb, type)) { 2594 ret = -EINVAL; 2595 goto out_err; 2596 } 2597 if (sb_has_quota_limits_enabled(sb, type)) { 2598 /* compatible with XFS */ 2599 ret = -EEXIST; 2600 goto out_err; 2601 } 2602 spin_lock(&dq_state_lock); 2603 dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type); 2604 spin_unlock(&dq_state_lock); 2605 } 2606 return 0; 2607 out_err: 2608 /* Backout enforcement enablement we already did */ 2609 for (type--; type >= 0; type--) { 2610 if (flags & qtype_enforce_flag(type)) 2611 dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); 2612 } 2613 return ret; 2614 } 2615 2616 static int dquot_quota_disable(struct super_block *sb, unsigned int flags) 2617 { 2618 int ret; 2619 int type; 2620 struct quota_info *dqopt = sb_dqopt(sb); 2621 2622 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) 2623 return -ENOSYS; 2624 /* 2625 * We don't support turning off accounting via quotactl. In principle 2626 * quota infrastructure can do this but filesystems don't expect 2627 * userspace to be able to do it. 
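 * Such requests are therefore rejected with -EOPNOTSUPP below.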
2628 */ 2629 if (flags & 2630 (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT)) 2631 return -EOPNOTSUPP; 2632 2633 /* Filter out limits not enabled */ 2634 for (type = 0; type < MAXQUOTAS; type++) 2635 if (!sb_has_quota_limits_enabled(sb, type)) 2636 flags &= ~qtype_enforce_flag(type); 2637 /* Nothing left? */ 2638 if (!flags) 2639 return -EEXIST; 2640 for (type = 0; type < MAXQUOTAS; type++) { 2641 if (flags & qtype_enforce_flag(type)) { 2642 ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); 2643 if (ret < 0) 2644 goto out_err; 2645 } 2646 } 2647 return 0; 2648 out_err: 2649 /* Backout enforcement disabling we already did */ 2650 for (type--; type >= 0; type--) { 2651 if (flags & qtype_enforce_flag(type)) { 2652 spin_lock(&dq_state_lock); 2653 dqopt->flags |= 2654 dquot_state_flag(DQUOT_LIMITS_ENABLED, type); 2655 spin_unlock(&dq_state_lock); 2656 } 2657 } 2658 return ret; 2659 } 2660 2661 /* Generic routine for getting common part of quota structure */ 2662 static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) 2663 { 2664 struct mem_dqblk *dm = &dquot->dq_dqb; 2665 2666 memset(di, 0, sizeof(*di)); 2667 spin_lock(&dquot->dq_dqb_lock); 2668 di->d_spc_hardlimit = dm->dqb_bhardlimit; 2669 di->d_spc_softlimit = dm->dqb_bsoftlimit; 2670 di->d_ino_hardlimit = dm->dqb_ihardlimit; 2671 di->d_ino_softlimit = dm->dqb_isoftlimit; 2672 di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; 2673 di->d_ino_count = dm->dqb_curinodes; 2674 di->d_spc_timer = dm->dqb_btime; 2675 di->d_ino_timer = dm->dqb_itime; 2676 spin_unlock(&dquot->dq_dqb_lock); 2677 } 2678 2679 int dquot_get_dqblk(struct super_block *sb, struct kqid qid, 2680 struct qc_dqblk *di) 2681 { 2682 struct dquot *dquot; 2683 2684 dquot = dqget(sb, qid); 2685 if (IS_ERR(dquot)) 2686 return PTR_ERR(dquot); 2687 do_get_dqblk(dquot, di); 2688 dqput(dquot); 2689 2690 return 0; 2691 } 2692 EXPORT_SYMBOL(dquot_get_dqblk); 2693 2694 int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid, 2695 struct qc_dqblk *di) 2696 { 2697 struct dquot *dquot; 2698 int err; 2699 2700 if (!sb->dq_op->get_next_id) 2701 return -ENOSYS; 2702 err = sb->dq_op->get_next_id(sb, qid); 2703 if (err < 0) 2704 return err; 2705 dquot = dqget(sb, *qid); 2706 if (IS_ERR(dquot)) 2707 return PTR_ERR(dquot); 2708 do_get_dqblk(dquot, di); 2709 dqput(dquot); 2710 2711 return 0; 2712 } 2713 EXPORT_SYMBOL(dquot_get_next_dqblk); 2714 2715 #define VFS_QC_MASK \ 2716 (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \ 2717 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \ 2718 QC_SPC_TIMER | QC_INO_TIMER) 2719 2720 /* Generic routine for setting common part of quota structure */ 2721 static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) 2722 { 2723 struct mem_dqblk *dm = &dquot->dq_dqb; 2724 int check_blim = 0, check_ilim = 0; 2725 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; 2726 int ret; 2727 2728 if (di->d_fieldmask & ~VFS_QC_MASK) 2729 return -EINVAL; 2730 2731 if (((di->d_fieldmask & QC_SPC_SOFT) && 2732 di->d_spc_softlimit > dqi->dqi_max_spc_limit) || 2733 ((di->d_fieldmask & QC_SPC_HARD) && 2734 di->d_spc_hardlimit > dqi->dqi_max_spc_limit) || 2735 ((di->d_fieldmask & QC_INO_SOFT) && 2736 (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) || 2737 ((di->d_fieldmask & QC_INO_HARD) && 2738 (di->d_ino_hardlimit > dqi->dqi_max_ino_limit))) 2739 return -ERANGE; 2740 2741 spin_lock(&dquot->dq_dqb_lock); 2742 if (di->d_fieldmask & QC_SPACE) { 2743 dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; 2744 check_blim = 1; 2745 
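/*
 * The DQ_LASTSET_B + QIF_*_B bits below record which fields were
 * last set from userspace; filesystems syncing dquots can use them
 * to write back only what changed.
 */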
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); 2746 } 2747 2748 if (di->d_fieldmask & QC_SPC_SOFT) 2749 dm->dqb_bsoftlimit = di->d_spc_softlimit; 2750 if (di->d_fieldmask & QC_SPC_HARD) 2751 dm->dqb_bhardlimit = di->d_spc_hardlimit; 2752 if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) { 2753 check_blim = 1; 2754 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); 2755 } 2756 2757 if (di->d_fieldmask & QC_INO_COUNT) { 2758 dm->dqb_curinodes = di->d_ino_count; 2759 check_ilim = 1; 2760 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); 2761 } 2762 2763 if (di->d_fieldmask & QC_INO_SOFT) 2764 dm->dqb_isoftlimit = di->d_ino_softlimit; 2765 if (di->d_fieldmask & QC_INO_HARD) 2766 dm->dqb_ihardlimit = di->d_ino_hardlimit; 2767 if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) { 2768 check_ilim = 1; 2769 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); 2770 } 2771 2772 if (di->d_fieldmask & QC_SPC_TIMER) { 2773 dm->dqb_btime = di->d_spc_timer; 2774 check_blim = 1; 2775 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); 2776 } 2777 2778 if (di->d_fieldmask & QC_INO_TIMER) { 2779 dm->dqb_itime = di->d_ino_timer; 2780 check_ilim = 1; 2781 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); 2782 } 2783 2784 if (check_blim) { 2785 if (!dm->dqb_bsoftlimit || 2786 dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) { 2787 dm->dqb_btime = 0; 2788 clear_bit(DQ_BLKS_B, &dquot->dq_flags); 2789 } else if (!(di->d_fieldmask & QC_SPC_TIMER)) 2790 /* Set grace only if user hasn't provided his own... */ 2791 dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace; 2792 } 2793 if (check_ilim) { 2794 if (!dm->dqb_isoftlimit || 2795 dm->dqb_curinodes <= dm->dqb_isoftlimit) { 2796 dm->dqb_itime = 0; 2797 clear_bit(DQ_INODES_B, &dquot->dq_flags); 2798 } else if (!(di->d_fieldmask & QC_INO_TIMER)) 2799 /* Set grace only if user hasn't provided his own... 
*/ 2800 dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace; 2801 } 2802 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || 2803 dm->dqb_isoftlimit) 2804 clear_bit(DQ_FAKE_B, &dquot->dq_flags); 2805 else 2806 set_bit(DQ_FAKE_B, &dquot->dq_flags); 2807 spin_unlock(&dquot->dq_dqb_lock); 2808 ret = mark_dquot_dirty(dquot); 2809 if (ret < 0) 2810 return ret; 2811 return 0; 2812 } 2813 2814 int dquot_set_dqblk(struct super_block *sb, struct kqid qid, 2815 struct qc_dqblk *di) 2816 { 2817 struct dquot *dquot; 2818 int rc; 2819 2820 dquot = dqget(sb, qid); 2821 if (IS_ERR(dquot)) { 2822 rc = PTR_ERR(dquot); 2823 goto out; 2824 } 2825 rc = do_set_dqblk(dquot, di); 2826 dqput(dquot); 2827 out: 2828 return rc; 2829 } 2830 EXPORT_SYMBOL(dquot_set_dqblk); 2831 2832 /* Generic routine for getting common part of quota file information */ 2833 int dquot_get_state(struct super_block *sb, struct qc_state *state) 2834 { 2835 struct mem_dqinfo *mi; 2836 struct qc_type_state *tstate; 2837 struct quota_info *dqopt = sb_dqopt(sb); 2838 int type; 2839 2840 memset(state, 0, sizeof(*state)); 2841 for (type = 0; type < MAXQUOTAS; type++) { 2842 if (!sb_has_quota_active(sb, type)) 2843 continue; 2844 tstate = state->s_state + type; 2845 mi = sb_dqopt(sb)->info + type; 2846 tstate->flags = QCI_ACCT_ENABLED; 2847 spin_lock(&dq_data_lock); 2848 if (mi->dqi_flags & DQF_SYS_FILE) 2849 tstate->flags |= QCI_SYSFILE; 2850 if (mi->dqi_flags & DQF_ROOT_SQUASH) 2851 tstate->flags |= QCI_ROOT_SQUASH; 2852 if (sb_has_quota_limits_enabled(sb, type)) 2853 tstate->flags |= QCI_LIMITS_ENFORCED; 2854 tstate->spc_timelimit = mi->dqi_bgrace; 2855 tstate->ino_timelimit = mi->dqi_igrace; 2856 if (dqopt->files[type]) { 2857 tstate->ino = dqopt->files[type]->i_ino; 2858 tstate->blocks = dqopt->files[type]->i_blocks; 2859 } 2860 tstate->nextents = 1; /* We don't know... 
*/ 2861 spin_unlock(&dq_data_lock); 2862 } 2863 return 0; 2864 } 2865 EXPORT_SYMBOL(dquot_get_state); 2866 2867 /* Generic routine for setting common part of quota file information */ 2868 int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii) 2869 { 2870 struct mem_dqinfo *mi; 2871 2872 if ((ii->i_fieldmask & QC_WARNS_MASK) || 2873 (ii->i_fieldmask & QC_RT_SPC_TIMER)) 2874 return -EINVAL; 2875 if (!sb_has_quota_active(sb, type)) 2876 return -ESRCH; 2877 mi = sb_dqopt(sb)->info + type; 2878 if (ii->i_fieldmask & QC_FLAGS) { 2879 if ((ii->i_flags & QCI_ROOT_SQUASH && 2880 mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) 2881 return -EINVAL; 2882 } 2883 spin_lock(&dq_data_lock); 2884 if (ii->i_fieldmask & QC_SPC_TIMER) 2885 mi->dqi_bgrace = ii->i_spc_timelimit; 2886 if (ii->i_fieldmask & QC_INO_TIMER) 2887 mi->dqi_igrace = ii->i_ino_timelimit; 2888 if (ii->i_fieldmask & QC_FLAGS) { 2889 if (ii->i_flags & QCI_ROOT_SQUASH) 2890 mi->dqi_flags |= DQF_ROOT_SQUASH; 2891 else 2892 mi->dqi_flags &= ~DQF_ROOT_SQUASH; 2893 } 2894 spin_unlock(&dq_data_lock); 2895 mark_info_dirty(sb, type); 2896 /* Force write to disk */ 2897 return sb->dq_op->write_info(sb, type); 2898 } 2899 EXPORT_SYMBOL(dquot_set_dqinfo); 2900 2901 const struct quotactl_ops dquot_quotactl_sysfile_ops = { 2902 .quota_enable = dquot_quota_enable, 2903 .quota_disable = dquot_quota_disable, 2904 .quota_sync = dquot_quota_sync, 2905 .get_state = dquot_get_state, 2906 .set_info = dquot_set_dqinfo, 2907 .get_dqblk = dquot_get_dqblk, 2908 .get_nextdqblk = dquot_get_next_dqblk, 2909 .set_dqblk = dquot_set_dqblk 2910 }; 2911 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); 2912 2913 static int do_proc_dqstats(const struct ctl_table *table, int write, 2914 void *buffer, size_t *lenp, loff_t *ppos) 2915 { 2916 unsigned int type = (unsigned long *)table->data - dqstats.stat; 2917 s64 value = percpu_counter_sum(&dqstats.counter[type]); 2918 2919 /* Filter negative values for non-monotonic counters */ 2920 if (value < 0 && (type == DQST_ALLOC_DQUOTS || 2921 type == DQST_FREE_DQUOTS)) 2922 value = 0; 2923 2924 /* Update global table */ 2925 dqstats.stat[type] = value; 2926 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 2927 } 2928 2929 static struct ctl_table fs_dqstats_table[] = { 2930 { 2931 .procname = "lookups", 2932 .data = &dqstats.stat[DQST_LOOKUPS], 2933 .maxlen = sizeof(unsigned long), 2934 .mode = 0444, 2935 .proc_handler = do_proc_dqstats, 2936 }, 2937 { 2938 .procname = "drops", 2939 .data = &dqstats.stat[DQST_DROPS], 2940 .maxlen = sizeof(unsigned long), 2941 .mode = 0444, 2942 .proc_handler = do_proc_dqstats, 2943 }, 2944 { 2945 .procname = "reads", 2946 .data = &dqstats.stat[DQST_READS], 2947 .maxlen = sizeof(unsigned long), 2948 .mode = 0444, 2949 .proc_handler = do_proc_dqstats, 2950 }, 2951 { 2952 .procname = "writes", 2953 .data = &dqstats.stat[DQST_WRITES], 2954 .maxlen = sizeof(unsigned long), 2955 .mode = 0444, 2956 .proc_handler = do_proc_dqstats, 2957 }, 2958 { 2959 .procname = "cache_hits", 2960 .data = &dqstats.stat[DQST_CACHE_HITS], 2961 .maxlen = sizeof(unsigned long), 2962 .mode = 0444, 2963 .proc_handler = do_proc_dqstats, 2964 }, 2965 { 2966 .procname = "allocated_dquots", 2967 .data = &dqstats.stat[DQST_ALLOC_DQUOTS], 2968 .maxlen = sizeof(unsigned long), 2969 .mode = 0444, 2970 .proc_handler = do_proc_dqstats, 2971 }, 2972 { 2973 .procname = "free_dquots", 2974 .data = &dqstats.stat[DQST_FREE_DQUOTS], 2975 .maxlen = sizeof(unsigned long), 2976 .mode = 0444, 2977 .proc_handler = 
do_proc_dqstats, 2978 }, 2979 { 2980 .procname = "syncs", 2981 .data = &dqstats.stat[DQST_SYNCS], 2982 .maxlen = sizeof(unsigned long), 2983 .mode = 0444, 2984 .proc_handler = do_proc_dqstats, 2985 }, 2986 #ifdef CONFIG_PRINT_QUOTA_WARNING 2987 { 2988 .procname = "warnings", 2989 .data = &flag_print_warnings, 2990 .maxlen = sizeof(int), 2991 .mode = 0644, 2992 .proc_handler = proc_dointvec, 2993 }, 2994 #endif 2995 }; 2996 2997 static int __init dquot_init(void) 2998 { 2999 int i, ret; 3000 unsigned long nr_hash, order; 3001 struct shrinker *dqcache_shrinker; 3002 3003 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); 3004 3005 register_sysctl_init("fs/quota", fs_dqstats_table); 3006 3007 dquot_cachep = kmem_cache_create("dquot", 3008 sizeof(struct dquot), sizeof(unsigned long) * 4, 3009 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 3010 SLAB_PANIC), 3011 NULL); 3012 3013 order = 0; 3014 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order); 3015 if (!dquot_hash) 3016 panic("Cannot create dquot hash table"); 3017 3018 ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL, 3019 _DQST_DQSTAT_LAST); 3020 if (ret) 3021 panic("Cannot create dquot stat counters"); 3022 3023 /* Find power-of-two hlist_heads which can fit into allocation */ 3024 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); 3025 dq_hash_bits = ilog2(nr_hash); 3026 3027 nr_hash = 1UL << dq_hash_bits; 3028 dq_hash_mask = nr_hash - 1; 3029 for (i = 0; i < nr_hash; i++) 3030 INIT_HLIST_HEAD(dquot_hash + i); 3031 3032 pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," 3033 " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); 3034 3035 dqcache_shrinker = shrinker_alloc(0, "dquota-cache"); 3036 if (!dqcache_shrinker) 3037 panic("Cannot allocate dquot shrinker"); 3038 3039 dqcache_shrinker->count_objects = dqcache_shrink_count; 3040 dqcache_shrinker->scan_objects = dqcache_shrink_scan; 3041 3042 shrinker_register(dqcache_shrinker); 3043 3044 return 0; 3045 } 3046 fs_initcall(dquot_init); 3047
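/*
 * Illustrative usage sketch (not part of this file; the myfs_* names are
 * hypothetical): a filesystem keeping quotas in regular quota files can
 * wire up the generic implementation above roughly as follows.
 *
 *	static const struct quotactl_ops myfs_quotactl_ops = {
 *		.quota_on	= dquot_quota_on,
 *		.quota_off	= dquot_quota_off,
 *		.quota_sync	= dquot_quota_sync,
 *		.get_state	= dquot_get_state,
 *		.set_info	= dquot_set_dqinfo,
 *		.get_dqblk	= dquot_get_dqblk,
 *		.set_dqblk	= dquot_set_dqblk,
 *	};
 *
 *	// in myfs_fill_super():
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &myfs_quotactl_ops;
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 *
 * With that in place quotaon(8) ends up in dquot_quota_on(), and the
 * filesystem's ->open should call dquot_file_open() so that dquots are
 * initialized before the first charge against them.
 */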