// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As the consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>
/*
 * There are five quota SMP locks:
 * * dq_list_lock protects all lists with quotas and quota formats.
 * * dquot->dq_dqb_lock protects data from dq_dqb
 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
 *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
 *   dquot_transfer() can stabilize amount it transfers
 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
 *   pointers in the inode
 * * dq_state_lock protects modifications of quota state (on quotaon and
 *   quotaoff) and readers who care about latest values take it as well.
 *
 * The spinlock ordering is hence:
 *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (eg. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Reading such a pointer needs srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 * inode and before dropping dquot references to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 * inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set)
 * they have to do all pointer modifications before dropping dq_data_lock.
 * This makes sure they cannot race with quotaon which first sets S_NOQUOTA
 * flag and then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
 * memory (or space for it is being allocated) on the first dqget(), when it is
 * being written out, and when it is being released on the last dqput(). The
 * allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */
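/*
 * Illustrative sketch of a nesting the spinlock ordering above permits
 * (hypothetical caller, not code from this file; update_usage() stands in
 * for any dq_dqb/i_bytes update done under the innermost locks):
 *
 *	spin_lock(&dq_data_lock);
 *	spin_lock(&dq_list_lock);
 *	spin_lock(&inode->i_lock);
 *	spin_lock(&dquot->dq_dqb_lock);
 *	update_usage(dquot, inode);
 *	spin_unlock(&dquot->dq_dqb_lock);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&dq_list_lock);
 *	spin_unlock(&dq_data_lock);
 *
 * Acquiring any pair of these locks in the opposite order risks deadlock.
 */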
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

/* Workqueue for quota_release_work */
static struct workqueue_struct *quota_unbound_wq;

void register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(register_quota_format);

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
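/*
 * A quota format module typically registers itself on load and unregisters
 * on unload. A minimal sketch, assuming a hypothetical "myfmt" format and
 * ops table (the real in-tree users are quota_v1.c and quota_v2.c):
 *
 *	static struct quota_format_type myfmt_format = {
 *		.qf_fmt_id	= QFMT_VFS_V1,	// format id from quota.h
 *		.qf_ops		= &myfmt_ops,	// struct quota_format_ops
 *		.qf_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init myfmt_init(void)
 *	{
 *		register_quota_format(&myfmt_format);
 *		return 0;
 *	}
 *
 *	static void __exit myfmt_exit(void)
 *	{
 *		unregister_quota_format(&myfmt_format);
 *	}
 */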
/*
 * Dquot List Management:
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot is dropped, the dquot is added to
 * releasing_dquots. We'll then queue work item which will call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
 * struct.
 *
 * Unused and cleaned up dquots are in the free_dquots list and this list is
 * searched whenever we need an available dquot. Dquots are removed from the
 * list as soon as they are used again and dqstats.free_dquots gives the number
 * of dquots on the list. When dquot is invalidated it's completely released
 * from memory.
 *
 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
 * dirty, and this list is searched when writing dirty dquots back to the
 * quota file. Note that some filesystems do dirty dquot tracking on their
 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb >> L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;

	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash + hashent, dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;

	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}

/*
 * End of list functions needing dq_list_lock
 */
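/*
 * Putting the helpers above together, the life of a dquot through the lists
 * (as implemented later in this file) is roughly:
 *
 *	dqget()			-> put_inuse() + insert_dquot_hash()
 *	last dqput()		-> put_releasing_dquots() + queued release work
 *	quota_release_workfn()	-> remove_free_dquot() + put_dquot_last()
 *	shrinker / invalidate	-> remove_dquot_hash() + remove_free_dquot() +
 *				   remove_inuse() + do_destroy_dquot()
 */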
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);

/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		if (!err && ret < 0)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
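/*
 * How a filesystem opts into the dirty tracking above: its dquot_operations
 * ->mark_dirty method usually points either at dquot_mark_dquot_dirty()
 * (list-based tracking, written back by dquot_writeback_dquots()) or at a
 * journalling variant of its own. A minimal sketch of the latter, with
 * hypothetical myfs_* names:
 *
 *	static int myfs_mark_dquot_dirty(struct dquot *dquot)
 *	{
 *		// journalled quotas: commit the dquot right away instead of
 *		// queueing it on dqi_dirty_list
 *		return myfs_write_dquot(dquot);
 *	}
 */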
/*
 * Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);

/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* A dquot can be inactive only if there was an error during read or
	 * init, so we'd better not write it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
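/*
 * The routines above provide the generic dquot_operations callbacks. A
 * filesystem without its own quota journalling typically wires them up like
 * the default table (defined later in this file, outside this excerpt);
 * myfs_dquot_ops is a hypothetical name for illustration:
 *
 *	static const struct dquot_operations myfs_dquot_ops = {
 *		.write_dquot	= dquot_commit,
 *		.acquire_dquot	= dquot_acquire,
 *		.release_dquot	= dquot_release,
 *		.mark_dirty	= dquot_mark_dquot_dirty,
 *		.alloc_dquot	= dquot_alloc,
 *		.destroy_dquot	= dquot_destroy,
 *	};
 */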
/*
 * Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment the dquot need not exist anymore
			 * (it could have been reclaimed by prune_dqcache()).
			 * Hence we must restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but dquot didn't
		 * get fully cleaned up yet. Restart the scan which flushes the
		 * work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);

static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);

	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}
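/*
 * Example use of dquot_scan_active(): pass a callback that inspects or
 * syncs each active dquot; a negative return value aborts the scan. A
 * minimal sketch with a hypothetical callback:
 *
 *	static int myfs_sync_one(struct dquot *dquot, unsigned long priv)
 *	{
 *		// priv carries caller context; return 0 to continue the scan
 *		return myfs_write_dquot_to_journal(dquot);
 *	}
 *
 *	ret = dquot_scan_active(sb, myfs_sync_one, 0);
 */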
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	flush_delayed_work(&quota_release_work);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);

/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is neither very clever nor fast, but currently I don't know of
	 * any other simple way of getting quota data to disk, and we must get
	 * it there for it to be visible to userspace... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	    percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
	WARN_ON_ONCE(!list_empty(&dquot->dq_free));
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);

struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);
	spin_lock_init(&dquot->dq_dqb_lock);

	return dquot;
}
/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us? */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
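/*
 * Typical reference pattern for dqget()/dqput() (hypothetical caller):
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));
 *
 *	if (IS_ERR(dquot))
 *		return PTR_ERR(dquot);	// e.g. -ESRCH if quota is off
 *	... read or update dquot->dq_dqb under dquot->dq_dqb_lock ...
 *	dqput(dquot);
 */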
static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}

static int dqinit_needed(struct inode *inode, int type)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;

	dquots = i_dquot(inode);
	if (type != -1)
		return !dquots[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!dquots[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			    "thus quota information is probably inconsistent. "
			    "Please run quotacheck(8)");
	}
#endif
	return err;
}
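/*
 * i_dquot() above relies on the filesystem's ->get_dquots callback returning
 * the per-inode dquot pointer array it embeds. A sketch, assuming a
 * hypothetical per-inode structure that carries such an array (this mirrors
 * what in-tree users do):
 *
 *	static struct dquot __rcu **myfs_get_dquots(struct inode *inode)
 *	{
 *		return MYFS_I(inode)->i_dquot;	// struct dquot __rcu *[MAXQUOTAS]
 *	}
 */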
static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}

static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};
static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}

/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}
/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}

static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
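/*
 * Worked example of the checks above (assuming limits are enabled and the
 * caller is not CAP_SYS_RESOURCE): with dqb_ihardlimit = 100,
 * dqb_isoftlimit = 90, dqb_curinodes = 95 and dqb_itime = 0, adding one
 * inode succeeds but starts the grace period (QUOTA_NL_ISOFTWARN,
 * dqb_itime = now + dqi_igrace). Once dqb_itime has expired, further
 * allocations above the softlimit fail with -EDQUOT
 * (QUOTA_NL_ISOFTLONGWARN), and any allocation taking dqb_curinodes past
 * 100 fails with -EDQUOT (QUOTA_NL_IHARDWARN) regardless of grace time.
 */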
static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed the
			 * softlimit, so exceeding it will always be reported.
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here...
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t tspace;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}
/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot pointers have been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
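/*
 * Filesystems call dquot_initialize() before operations that may charge
 * quota, typically outside a transaction (see the comment above
 * __dquot_initialize()). A sketch of the common calling pattern:
 *
 *	err = dquot_initialize(inode);
 *	if (err)
 *		return err;
 *	... start transaction, allocate blocks / inodes ...
 */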
bool dquot_initialize_needed(struct inode *inode)
{
	struct dquot __rcu **dquots;
	int i;

	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
	for (i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
			return true;
	return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);

/*
 * Release all quotas referenced by inode.
 *
 * This function is only called on inode free or when converting a file
 * to a quota file. There are no other users of the i_dquot pointers in
 * either case, so we needn't call synchronize_srcu() after clearing
 * i_dquot.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
			lockdep_is_held(&dq_data_lock));
		rcu_assign_pointer(dquots[cnt], NULL);
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);

/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode *inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

static qsize_t __inode_get_rsv_space(struct inode *inode)
{
	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	return *inode_reserved_space(inode);
}

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = __inode_get_rsv_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
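/*
 * The reservation interface above requires the filesystem to expose where it
 * keeps the per-inode reserved byte count. A sketch, assuming a hypothetical
 * i_reserved_quota field (this mirrors what the in-tree delayed-allocation
 * users do):
 *
 *	static qsize_t *myfs_get_reserved_space(struct inode *inode)
 *	{
 *		return &MYFS_I(inode)->i_reserved_quota;
 *	}
 */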
/*
 * The following functions update i_blocks+i_bytes fields and quota
 * information (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot __rcu **dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		if (reserve) {
			ret = dquot_add_space(dquot, 0, number, flags,
					      &warn[cnt]);
		} else {
			ret = dquot_add_space(dquot, number, 0, flags,
					      &warn[cnt]);
		}
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt],
							 &dquot_srcu);
				if (!dquot)
					continue;
				spin_lock(&dquot->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquot,
								  number);
				else
					dquot_decr_space(dquot, number);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_flush_warn;
	ret = mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
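/*
 * Callers normally reach __dquot_alloc_space() through the inline wrappers
 * in <linux/quotaops.h>, which fill in the flags. For example (wrapper names
 * per quotaops.h):
 *
 *	dquot_alloc_space(inode, nr)	// charge nr bytes, may return -EDQUOT
 *	dquot_reserve_block(inode, nr)	// reserve only (DQUOT_SPACE_RESERVE)
 *	dquot_alloc_block_nofail(inode, nr)	// DQUOT_SPACE_NOFAIL variant
 */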
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt],
							 &dquot_srcu);
				if (!dquot)
					continue;
				/* Back out changes we already did */
				spin_lock(&dquot->dq_dqb_lock);
				dquot_decr_inodes(dquot, 1);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		ret = mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);

/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Return claimed quotas back to reserved quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
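/*
 * The claim/reclaim pair above implements the delayed-allocation flow:
 * space is first reserved (DQUOT_SPACE_RESERVE) at write time, then either
 * converted to real usage once blocks are actually allocated, or given back.
 * Sketch of the flow (wrapper names per <linux/quotaops.h>):
 *
 *	dquot_reserve_block(inode, nr);		// at buffered write: reserve
 *	...
 *	dquot_claim_block(inode, nr);		// blocks allocated: claim
 *	// or, if the allocation is abandoned:
 *	dquot_release_reservation_block(inode, nr);
 */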
spin_unlock(&dquot->dq_dqb_lock); 1922 } 1923 if (reserve) 1924 *inode_reserved_space(inode) -= number; 1925 else 1926 __inode_sub_bytes(inode, number); 1927 spin_unlock(&inode->i_lock); 1928 1929 if (reserve) 1930 goto out_unlock; 1931 mark_all_dquot_dirty(dquots); 1932 out_unlock: 1933 srcu_read_unlock(&dquot_srcu, index); 1934 flush_warnings(warn); 1935 } 1936 EXPORT_SYMBOL(__dquot_free_space); 1937 1938 /* 1939 * This operation can block, but only after everything is updated 1940 */ 1941 void dquot_free_inode(struct inode *inode) 1942 { 1943 unsigned int cnt; 1944 struct dquot_warn warn[MAXQUOTAS]; 1945 struct dquot __rcu * const *dquots; 1946 struct dquot *dquot; 1947 int index; 1948 1949 if (!inode_quota_active(inode)) 1950 return; 1951 1952 dquots = i_dquot(inode); 1953 index = srcu_read_lock(&dquot_srcu); 1954 spin_lock(&inode->i_lock); 1955 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1956 int wtype; 1957 warn[cnt].w_type = QUOTA_NL_NOWARN; 1958 dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1959 if (!dquot) 1960 continue; 1961 spin_lock(&dquot->dq_dqb_lock); 1962 wtype = info_idq_free(dquot, 1); 1963 if (wtype != QUOTA_NL_NOWARN) 1964 prepare_warning(&warn[cnt], dquot, wtype); 1965 dquot_decr_inodes(dquot, 1); 1966 spin_unlock(&dquot->dq_dqb_lock); 1967 } 1968 spin_unlock(&inode->i_lock); 1969 mark_all_dquot_dirty(dquots); 1970 srcu_read_unlock(&dquot_srcu, index); 1971 flush_warnings(warn); 1972 } 1973 EXPORT_SYMBOL(dquot_free_inode); 1974 1975 /* 1976 * Transfer the number of inode and blocks from one diskquota to an other. 1977 * On success, dquot references in transfer_to are consumed and references 1978 * to original dquots that need to be released are placed there. On failure, 1979 * references are kept untouched. 1980 * 1981 * This operation can block, but only after everything is updated 1982 * A transaction must be started when entering this function. 1983 * 1984 * We are holding reference on transfer_from & transfer_to, no need to 1985 * protect them by srcu_read_lock(). 1986 */ 1987 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) 1988 { 1989 qsize_t cur_space; 1990 qsize_t rsv_space = 0; 1991 qsize_t inode_usage = 1; 1992 struct dquot __rcu **dquots; 1993 struct dquot *transfer_from[MAXQUOTAS] = {}; 1994 int cnt, index, ret = 0, err; 1995 char is_valid[MAXQUOTAS] = {}; 1996 struct dquot_warn warn_to[MAXQUOTAS]; 1997 struct dquot_warn warn_from_inodes[MAXQUOTAS]; 1998 struct dquot_warn warn_from_space[MAXQUOTAS]; 1999 2000 if (IS_NOQUOTA(inode)) 2001 return 0; 2002 2003 if (inode->i_sb->dq_op->get_inode_usage) { 2004 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage); 2005 if (ret) 2006 return ret; 2007 } 2008 2009 /* Initialize the arrays */ 2010 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2011 warn_to[cnt].w_type = QUOTA_NL_NOWARN; 2012 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; 2013 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; 2014 } 2015 2016 spin_lock(&dq_data_lock); 2017 spin_lock(&inode->i_lock); 2018 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ 2019 spin_unlock(&inode->i_lock); 2020 spin_unlock(&dq_data_lock); 2021 return 0; 2022 } 2023 cur_space = __inode_get_bytes(inode); 2024 rsv_space = __inode_get_rsv_space(inode); 2025 dquots = i_dquot(inode); 2026 /* 2027 * Build the transfer_from list, check limits, and update usage in 2028 * the target structures. 2029 */ 2030 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2031 /* 2032 * Skip changes for same uid or gid or for turned off quota-type. 
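		 * (callers such as dquot_transfer() leave transfer_to[cnt]
		 * NULL in those cases).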
2033 */ 2034 if (!transfer_to[cnt]) 2035 continue; 2036 /* Avoid races with quotaoff() */ 2037 if (!sb_has_quota_active(inode->i_sb, cnt)) 2038 continue; 2039 is_valid[cnt] = 1; 2040 transfer_from[cnt] = srcu_dereference_check(dquots[cnt], 2041 &dquot_srcu, lockdep_is_held(&dq_data_lock)); 2042 ret = dquot_add_inodes(transfer_to[cnt], inode_usage, 2043 &warn_to[cnt]); 2044 if (ret) 2045 goto over_quota; 2046 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 2047 DQUOT_SPACE_WARN, &warn_to[cnt]); 2048 if (ret) { 2049 spin_lock(&transfer_to[cnt]->dq_dqb_lock); 2050 dquot_decr_inodes(transfer_to[cnt], inode_usage); 2051 spin_unlock(&transfer_to[cnt]->dq_dqb_lock); 2052 goto over_quota; 2053 } 2054 } 2055 2056 /* Decrease usage for source structures and update quota pointers */ 2057 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2058 if (!is_valid[cnt]) 2059 continue; 2060 /* Due to IO error we might not have transfer_from[] structure */ 2061 if (transfer_from[cnt]) { 2062 int wtype; 2063 2064 spin_lock(&transfer_from[cnt]->dq_dqb_lock); 2065 wtype = info_idq_free(transfer_from[cnt], inode_usage); 2066 if (wtype != QUOTA_NL_NOWARN) 2067 prepare_warning(&warn_from_inodes[cnt], 2068 transfer_from[cnt], wtype); 2069 wtype = info_bdq_free(transfer_from[cnt], 2070 cur_space + rsv_space); 2071 if (wtype != QUOTA_NL_NOWARN) 2072 prepare_warning(&warn_from_space[cnt], 2073 transfer_from[cnt], wtype); 2074 dquot_decr_inodes(transfer_from[cnt], inode_usage); 2075 dquot_decr_space(transfer_from[cnt], cur_space); 2076 dquot_free_reserved_space(transfer_from[cnt], 2077 rsv_space); 2078 spin_unlock(&transfer_from[cnt]->dq_dqb_lock); 2079 } 2080 rcu_assign_pointer(dquots[cnt], transfer_to[cnt]); 2081 } 2082 spin_unlock(&inode->i_lock); 2083 spin_unlock(&dq_data_lock); 2084 2085 /* 2086 * These arrays are local and we hold dquot references so we don't need 2087 * the srcu protection but still take dquot_srcu to avoid warning in 2088 * mark_all_dquot_dirty(). 
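	 * (the srcu_dereference() of the dquot pointers there would
	 * otherwise trigger an RCU-lockdep warning).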
2089 */ 2090 index = srcu_read_lock(&dquot_srcu); 2091 err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from); 2092 if (err < 0) 2093 ret = err; 2094 err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to); 2095 if (err < 0) 2096 ret = err; 2097 srcu_read_unlock(&dquot_srcu, index); 2098 2099 flush_warnings(warn_to); 2100 flush_warnings(warn_from_inodes); 2101 flush_warnings(warn_from_space); 2102 /* Pass back references to put */ 2103 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 2104 if (is_valid[cnt]) 2105 transfer_to[cnt] = transfer_from[cnt]; 2106 return ret; 2107 over_quota: 2108 /* Back out changes we already did */ 2109 for (cnt--; cnt >= 0; cnt--) { 2110 if (!is_valid[cnt]) 2111 continue; 2112 spin_lock(&transfer_to[cnt]->dq_dqb_lock); 2113 dquot_decr_inodes(transfer_to[cnt], inode_usage); 2114 dquot_decr_space(transfer_to[cnt], cur_space); 2115 dquot_free_reserved_space(transfer_to[cnt], rsv_space); 2116 spin_unlock(&transfer_to[cnt]->dq_dqb_lock); 2117 } 2118 spin_unlock(&inode->i_lock); 2119 spin_unlock(&dq_data_lock); 2120 flush_warnings(warn_to); 2121 return ret; 2122 } 2123 EXPORT_SYMBOL(__dquot_transfer); 2124 2125 /* Wrapper for transferring ownership of an inode for uid/gid only 2126 * Called from FSXXX_setattr() 2127 */ 2128 int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode, 2129 struct iattr *iattr) 2130 { 2131 struct dquot *transfer_to[MAXQUOTAS] = {}; 2132 struct dquot *dquot; 2133 struct super_block *sb = inode->i_sb; 2134 int ret; 2135 2136 if (!inode_quota_active(inode)) 2137 return 0; 2138 2139 if (i_uid_needs_update(idmap, iattr, inode)) { 2140 kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode), 2141 iattr->ia_vfsuid); 2142 2143 dquot = dqget(sb, make_kqid_uid(kuid)); 2144 if (IS_ERR(dquot)) { 2145 if (PTR_ERR(dquot) != -ESRCH) { 2146 ret = PTR_ERR(dquot); 2147 goto out_put; 2148 } 2149 dquot = NULL; 2150 } 2151 transfer_to[USRQUOTA] = dquot; 2152 } 2153 if (i_gid_needs_update(idmap, iattr, inode)) { 2154 kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode), 2155 iattr->ia_vfsgid); 2156 2157 dquot = dqget(sb, make_kqid_gid(kgid)); 2158 if (IS_ERR(dquot)) { 2159 if (PTR_ERR(dquot) != -ESRCH) { 2160 ret = PTR_ERR(dquot); 2161 goto out_put; 2162 } 2163 dquot = NULL; 2164 } 2165 transfer_to[GRPQUOTA] = dquot; 2166 } 2167 ret = __dquot_transfer(inode, transfer_to); 2168 out_put: 2169 dqput_all(transfer_to); 2170 return ret; 2171 } 2172 EXPORT_SYMBOL(dquot_transfer); 2173 2174 /* 2175 * Write info of quota file to disk 2176 */ 2177 int dquot_commit_info(struct super_block *sb, int type) 2178 { 2179 struct quota_info *dqopt = sb_dqopt(sb); 2180 2181 return dqopt->ops[type]->write_file_info(sb, type); 2182 } 2183 EXPORT_SYMBOL(dquot_commit_info); 2184 2185 int dquot_get_next_id(struct super_block *sb, struct kqid *qid) 2186 { 2187 struct quota_info *dqopt = sb_dqopt(sb); 2188 2189 if (!sb_has_quota_active(sb, qid->type)) 2190 return -ESRCH; 2191 if (!dqopt->ops[qid->type]->get_next_id) 2192 return -ENOSYS; 2193 return dqopt->ops[qid->type]->get_next_id(sb, qid); 2194 } 2195 EXPORT_SYMBOL(dquot_get_next_id); 2196 2197 /* 2198 * Definitions of diskquota operations. 
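 *
 * A filesystem relying on this generic implementation typically wires the
 * table up at mount time together with a quotactl_ops table, for example
 * (an illustrative sketch, not lifted from any particular filesystem):
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;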
2199 */ 2200 const struct dquot_operations dquot_operations = { 2201 .write_dquot = dquot_commit, 2202 .acquire_dquot = dquot_acquire, 2203 .release_dquot = dquot_release, 2204 .mark_dirty = dquot_mark_dquot_dirty, 2205 .write_info = dquot_commit_info, 2206 .alloc_dquot = dquot_alloc, 2207 .destroy_dquot = dquot_destroy, 2208 .get_next_id = dquot_get_next_id, 2209 }; 2210 EXPORT_SYMBOL(dquot_operations); 2211 2212 /* 2213 * Generic helper for ->open on filesystems supporting disk quotas. 2214 */ 2215 int dquot_file_open(struct inode *inode, struct file *file) 2216 { 2217 int error; 2218 2219 error = generic_file_open(inode, file); 2220 if (!error && (file->f_mode & FMODE_WRITE)) 2221 error = dquot_initialize(inode); 2222 return error; 2223 } 2224 EXPORT_SYMBOL(dquot_file_open); 2225 2226 static void vfs_cleanup_quota_inode(struct super_block *sb, int type) 2227 { 2228 struct quota_info *dqopt = sb_dqopt(sb); 2229 struct inode *inode = dqopt->files[type]; 2230 2231 if (!inode) 2232 return; 2233 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2234 inode_lock(inode); 2235 inode->i_flags &= ~S_NOQUOTA; 2236 inode_unlock(inode); 2237 } 2238 dqopt->files[type] = NULL; 2239 iput(inode); 2240 } 2241 2242 /* 2243 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) 2244 */ 2245 int dquot_disable(struct super_block *sb, int type, unsigned int flags) 2246 { 2247 int cnt; 2248 struct quota_info *dqopt = sb_dqopt(sb); 2249 2250 rwsem_assert_held_write(&sb->s_umount); 2251 2252 /* Cannot turn off usage accounting without turning off limits, or 2253 * suspend quotas and simultaneously turn quotas off. */ 2254 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) 2255 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | 2256 DQUOT_USAGE_ENABLED))) 2257 return -EINVAL; 2258 2259 /* 2260 * Skip everything if there's nothing to do. We have to do this because 2261 * sometimes we are called when fill_super() failed and calling 2262 * sync_fs() in such cases does no good. 2263 */ 2264 if (!sb_any_quota_loaded(sb)) 2265 return 0; 2266 2267 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 2268 if (type != -1 && cnt != type) 2269 continue; 2270 if (!sb_has_quota_loaded(sb, cnt)) 2271 continue; 2272 2273 if (flags & DQUOT_SUSPENDED) { 2274 spin_lock(&dq_state_lock); 2275 dqopt->flags |= 2276 dquot_state_flag(DQUOT_SUSPENDED, cnt); 2277 spin_unlock(&dq_state_lock); 2278 } else { 2279 spin_lock(&dq_state_lock); 2280 dqopt->flags &= ~dquot_state_flag(flags, cnt); 2281 /* Turning off suspended quotas? */ 2282 if (!sb_has_quota_loaded(sb, cnt) && 2283 sb_has_quota_suspended(sb, cnt)) { 2284 dqopt->flags &= ~dquot_state_flag( 2285 DQUOT_SUSPENDED, cnt); 2286 spin_unlock(&dq_state_lock); 2287 vfs_cleanup_quota_inode(sb, cnt); 2288 continue; 2289 } 2290 spin_unlock(&dq_state_lock); 2291 } 2292 2293 /* We still have to keep quota loaded? */ 2294 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) 2295 continue; 2296 2297 /* Note: these are blocking operations */ 2298 drop_dquot_ref(sb, cnt); 2299 invalidate_dquots(sb, cnt); 2300 /* 2301 * Now all dquots should be invalidated, all writes done so we 2302 * should be only users of the info. No locks needed. 
	 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover, we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon(). */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);

int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);

/*
 * Turn quotas on for a device
 */

static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted. They should be thought of as
	 * filesystem metadata, not user data. New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check. Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible). Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit.
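		 * (the quota formats read and write these files via the
		 * filesystem's ->quota_read and ->quota_write methods,
		 * bypassing the pagecache).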
		 */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}

int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
			unsigned int flags)
{
	struct quota_format_type *fmt;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	lockdep_assert_held_write(&sb->s_umount);

	/* Just unsuspend quotas? */
	if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
		return -EINVAL;

	fmt = find_quota_format(format_id);
	if (!fmt)
		return -ESRCH;
	if (!sb->dq_op || !sb->s_qcop ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that the kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data. */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);

	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);

/*
 * More powerful function for turning on quotas on a given quota inode,
 * allowing setting of individual quota flags
 */
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
			   unsigned int flags)
{
	int err;

	err = vfs_setup_quota_inode(inode, type);
	if (err < 0)
		return err;
	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
	if (err < 0)
		vfs_cleanup_quota_inode(inode->i_sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_load_quota_inode);

/* Re-enable quotas on remount read-write */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	rwsem_assert_held_write(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);

int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);
	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = dquot_load_quota_inode(d_inode(path->dentry), type,
					       format_id, DQUOT_USAGE_ENABLED |
					       DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);

/*
 * This function is used when a filesystem needs to initialize quotas at
 * mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
			 int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_noperm_positive_unlocked(&QSTR(qf_name), sb->s_root);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = security_quota_on(dentry);
	if (!error)
		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
					       DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);

static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			/* compatible with XFS */
			ret = -EEXIST;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Back out the enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle,
	 * the quota infrastructure can do this, but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
	    (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Back out the enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type)) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);

int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
	int ret;

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
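		/*
		 * The DQ_LASTSET_B + QIF_* bits set below record which fields
		 * userspace set via Q_SETQUOTA; the quota format handling the
		 * dquot may clear them when it sees fit.
		 */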
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if the user hasn't provided their own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if the user hasn't provided their own...
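			 * (dqi_igrace below is the filesystem-wide default
			 * grace period).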
*/ 2803 dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace; 2804 } 2805 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || 2806 dm->dqb_isoftlimit) 2807 clear_bit(DQ_FAKE_B, &dquot->dq_flags); 2808 else 2809 set_bit(DQ_FAKE_B, &dquot->dq_flags); 2810 spin_unlock(&dquot->dq_dqb_lock); 2811 ret = mark_dquot_dirty(dquot); 2812 if (ret < 0) 2813 return ret; 2814 return 0; 2815 } 2816 2817 int dquot_set_dqblk(struct super_block *sb, struct kqid qid, 2818 struct qc_dqblk *di) 2819 { 2820 struct dquot *dquot; 2821 int rc; 2822 2823 dquot = dqget(sb, qid); 2824 if (IS_ERR(dquot)) { 2825 rc = PTR_ERR(dquot); 2826 goto out; 2827 } 2828 rc = do_set_dqblk(dquot, di); 2829 dqput(dquot); 2830 out: 2831 return rc; 2832 } 2833 EXPORT_SYMBOL(dquot_set_dqblk); 2834 2835 /* Generic routine for getting common part of quota file information */ 2836 int dquot_get_state(struct super_block *sb, struct qc_state *state) 2837 { 2838 struct mem_dqinfo *mi; 2839 struct qc_type_state *tstate; 2840 struct quota_info *dqopt = sb_dqopt(sb); 2841 int type; 2842 2843 memset(state, 0, sizeof(*state)); 2844 for (type = 0; type < MAXQUOTAS; type++) { 2845 if (!sb_has_quota_active(sb, type)) 2846 continue; 2847 tstate = state->s_state + type; 2848 mi = sb_dqopt(sb)->info + type; 2849 tstate->flags = QCI_ACCT_ENABLED; 2850 spin_lock(&dq_data_lock); 2851 if (mi->dqi_flags & DQF_SYS_FILE) 2852 tstate->flags |= QCI_SYSFILE; 2853 if (mi->dqi_flags & DQF_ROOT_SQUASH) 2854 tstate->flags |= QCI_ROOT_SQUASH; 2855 if (sb_has_quota_limits_enabled(sb, type)) 2856 tstate->flags |= QCI_LIMITS_ENFORCED; 2857 tstate->spc_timelimit = mi->dqi_bgrace; 2858 tstate->ino_timelimit = mi->dqi_igrace; 2859 if (dqopt->files[type]) { 2860 tstate->ino = dqopt->files[type]->i_ino; 2861 tstate->blocks = dqopt->files[type]->i_blocks; 2862 } 2863 tstate->nextents = 1; /* We don't know... 
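				 * the quota file's real extent count; a
				 * single extent is a safe placeholder.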
*/ 2864 spin_unlock(&dq_data_lock); 2865 } 2866 return 0; 2867 } 2868 EXPORT_SYMBOL(dquot_get_state); 2869 2870 /* Generic routine for setting common part of quota file information */ 2871 int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii) 2872 { 2873 struct mem_dqinfo *mi; 2874 2875 if ((ii->i_fieldmask & QC_WARNS_MASK) || 2876 (ii->i_fieldmask & QC_RT_SPC_TIMER)) 2877 return -EINVAL; 2878 if (!sb_has_quota_active(sb, type)) 2879 return -ESRCH; 2880 mi = sb_dqopt(sb)->info + type; 2881 if (ii->i_fieldmask & QC_FLAGS) { 2882 if ((ii->i_flags & QCI_ROOT_SQUASH && 2883 mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) 2884 return -EINVAL; 2885 } 2886 spin_lock(&dq_data_lock); 2887 if (ii->i_fieldmask & QC_SPC_TIMER) 2888 mi->dqi_bgrace = ii->i_spc_timelimit; 2889 if (ii->i_fieldmask & QC_INO_TIMER) 2890 mi->dqi_igrace = ii->i_ino_timelimit; 2891 if (ii->i_fieldmask & QC_FLAGS) { 2892 if (ii->i_flags & QCI_ROOT_SQUASH) 2893 mi->dqi_flags |= DQF_ROOT_SQUASH; 2894 else 2895 mi->dqi_flags &= ~DQF_ROOT_SQUASH; 2896 } 2897 spin_unlock(&dq_data_lock); 2898 mark_info_dirty(sb, type); 2899 /* Force write to disk */ 2900 return sb->dq_op->write_info(sb, type); 2901 } 2902 EXPORT_SYMBOL(dquot_set_dqinfo); 2903 2904 const struct quotactl_ops dquot_quotactl_sysfile_ops = { 2905 .quota_enable = dquot_quota_enable, 2906 .quota_disable = dquot_quota_disable, 2907 .quota_sync = dquot_quota_sync, 2908 .get_state = dquot_get_state, 2909 .set_info = dquot_set_dqinfo, 2910 .get_dqblk = dquot_get_dqblk, 2911 .get_nextdqblk = dquot_get_next_dqblk, 2912 .set_dqblk = dquot_set_dqblk 2913 }; 2914 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); 2915 2916 static int do_proc_dqstats(const struct ctl_table *table, int write, 2917 void *buffer, size_t *lenp, loff_t *ppos) 2918 { 2919 unsigned int type = (unsigned long *)table->data - dqstats.stat; 2920 s64 value = percpu_counter_sum(&dqstats.counter[type]); 2921 2922 /* Filter negative values for non-monotonic counters */ 2923 if (value < 0 && (type == DQST_ALLOC_DQUOTS || 2924 type == DQST_FREE_DQUOTS)) 2925 value = 0; 2926 2927 /* Update global table */ 2928 dqstats.stat[type] = value; 2929 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 2930 } 2931 2932 static const struct ctl_table fs_dqstats_table[] = { 2933 { 2934 .procname = "lookups", 2935 .data = &dqstats.stat[DQST_LOOKUPS], 2936 .maxlen = sizeof(unsigned long), 2937 .mode = 0444, 2938 .proc_handler = do_proc_dqstats, 2939 }, 2940 { 2941 .procname = "drops", 2942 .data = &dqstats.stat[DQST_DROPS], 2943 .maxlen = sizeof(unsigned long), 2944 .mode = 0444, 2945 .proc_handler = do_proc_dqstats, 2946 }, 2947 { 2948 .procname = "reads", 2949 .data = &dqstats.stat[DQST_READS], 2950 .maxlen = sizeof(unsigned long), 2951 .mode = 0444, 2952 .proc_handler = do_proc_dqstats, 2953 }, 2954 { 2955 .procname = "writes", 2956 .data = &dqstats.stat[DQST_WRITES], 2957 .maxlen = sizeof(unsigned long), 2958 .mode = 0444, 2959 .proc_handler = do_proc_dqstats, 2960 }, 2961 { 2962 .procname = "cache_hits", 2963 .data = &dqstats.stat[DQST_CACHE_HITS], 2964 .maxlen = sizeof(unsigned long), 2965 .mode = 0444, 2966 .proc_handler = do_proc_dqstats, 2967 }, 2968 { 2969 .procname = "allocated_dquots", 2970 .data = &dqstats.stat[DQST_ALLOC_DQUOTS], 2971 .maxlen = sizeof(unsigned long), 2972 .mode = 0444, 2973 .proc_handler = do_proc_dqstats, 2974 }, 2975 { 2976 .procname = "free_dquots", 2977 .data = &dqstats.stat[DQST_FREE_DQUOTS], 2978 .maxlen = sizeof(unsigned long), 2979 .mode = 0444, 2980 .proc_handler = 
do_proc_dqstats, 2981 }, 2982 { 2983 .procname = "syncs", 2984 .data = &dqstats.stat[DQST_SYNCS], 2985 .maxlen = sizeof(unsigned long), 2986 .mode = 0444, 2987 .proc_handler = do_proc_dqstats, 2988 }, 2989 #ifdef CONFIG_PRINT_QUOTA_WARNING 2990 { 2991 .procname = "warnings", 2992 .data = &flag_print_warnings, 2993 .maxlen = sizeof(int), 2994 .mode = 0644, 2995 .proc_handler = proc_dointvec, 2996 }, 2997 #endif 2998 }; 2999 3000 static int __init dquot_init(void) 3001 { 3002 int i, ret; 3003 unsigned long nr_hash, order; 3004 struct shrinker *dqcache_shrinker; 3005 3006 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); 3007 3008 register_sysctl_init("fs/quota", fs_dqstats_table); 3009 3010 dquot_cachep = kmem_cache_create("dquot", 3011 sizeof(struct dquot), sizeof(unsigned long) * 4, 3012 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 3013 SLAB_PANIC), 3014 NULL); 3015 3016 order = 0; 3017 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order); 3018 if (!dquot_hash) 3019 panic("Cannot create dquot hash table"); 3020 3021 ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL, 3022 _DQST_DQSTAT_LAST); 3023 if (ret) 3024 panic("Cannot create dquot stat counters"); 3025 3026 /* Find power-of-two hlist_heads which can fit into allocation */ 3027 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); 3028 dq_hash_bits = ilog2(nr_hash); 3029 3030 nr_hash = 1UL << dq_hash_bits; 3031 dq_hash_mask = nr_hash - 1; 3032 for (i = 0; i < nr_hash; i++) 3033 INIT_HLIST_HEAD(dquot_hash + i); 3034 3035 pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," 3036 " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); 3037 3038 dqcache_shrinker = shrinker_alloc(0, "dquota-cache"); 3039 if (!dqcache_shrinker) 3040 panic("Cannot allocate dquot shrinker"); 3041 3042 dqcache_shrinker->count_objects = dqcache_shrink_count; 3043 dqcache_shrinker->scan_objects = dqcache_shrink_scan; 3044 3045 shrinker_register(dqcache_shrinker); 3046 3047 quota_unbound_wq = alloc_workqueue("quota_events_unbound", 3048 WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE); 3049 if (!quota_unbound_wq) 3050 panic("Cannot create quota_unbound_wq\n"); 3051 3052 return 0; 3053 } 3054 fs_initcall(dquot_init); 3055
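
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a filesystem that relies on this generic dquot layer makes roughly the
 * following calls; the "myfs" names are hypothetical and a real caller
 * must also start journalling transactions where its format requires it.
 *
 *	static int myfs_alloc_blocks(struct inode *inode, qsize_t nr)
 *	{
 *		int err = dquot_alloc_block(inode, nr);	// may return -EDQUOT
 *
 *		if (err)
 *			return err;
 *		err = myfs_do_alloc(inode, nr);		// hypothetical helper
 *		if (err)
 *			dquot_free_block(inode, nr);	// undo the charge
 *		return err;
 *	}
 *
 * Delayed-allocation filesystems instead pair dquot_reserve_block() with
 * dquot_claim_block() (which ends up in dquot_claim_space_nodirty() above).
 * Inode creation and removal pair dquot_alloc_inode() with
 * dquot_free_inode(), chown in ->setattr goes through dquot_transfer(),
 * and ->open of writable files goes through dquot_file_open() so that
 * dquots get attached by dquot_initialize().
 */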