// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "quota.h"
#include "snapshot.h"
#include "super-io.h"

static const char * const bch2_quota_types[] = {
	"user",
	"group",
	"project",
};

static const char * const bch2_quota_counters[] = {
	"space",
	"inodes",
};

static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
				  struct printbuf *err)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);

	if (vstruct_bytes(&q->field) < sizeof(*q)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&q->field), sizeof(*q));
		return -BCH_ERR_invalid_sb_quota;
	}

	return 0;
}

static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);
	unsigned qtyp, counter;

	for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
		prt_printf(out, "%s: flags %llx",
			   bch2_quota_types[qtyp],
			   le64_to_cpu(q->q[qtyp].flags));

		for (counter = 0; counter < Q_COUNTERS; counter++)
			prt_printf(out, " %s timelimit %u warnlimit %u",
				   bch2_quota_counters[counter],
				   le32_to_cpu(q->q[qtyp].c[counter].timelimit),
				   le32_to_cpu(q->q[qtyp].c[counter].warnlimit));

		prt_newline(out);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_quota = {
	.validate	= bch2_sb_quota_validate,
	.to_text	= bch2_sb_quota_to_text,
};

int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
		       enum bkey_invalid_flags flags,
		       struct printbuf *err)
{
	if (k.k->p.inode >= QTYP_NR) {
		prt_printf(err, "invalid quota type (%llu >= %u)",
			   k.k->p.inode, QTYP_NR);
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
	unsigned i;

	for (i = 0; i < Q_COUNTERS; i++)
		prt_printf(out, "%s hardlimit %llu softlimit %llu",
			   bch2_quota_counters[i],
			   le64_to_cpu(dq.v->c[i].hardlimit),
			   le64_to_cpu(dq.v->c[i].softlimit));
}

#ifdef CONFIG_BCACHEFS_QUOTA

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/quota.h>

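/*
 * Debug helpers for dumping the qc_info/qc_dqblk structures handed to us
 * through the VFS quota (quotactl) interface:
 */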
static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_str(out, "i_fieldmask");
	prt_tab(out);
	prt_printf(out, "%x", i->i_fieldmask);
	prt_newline(out);

	prt_str(out, "i_flags");
	prt_tab(out);
	prt_printf(out, "%u", i->i_flags);
	prt_newline(out);

	prt_str(out, "i_spc_timelimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_spc_timelimit);
	prt_newline(out);

	prt_str(out, "i_ino_timelimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_ino_timelimit);
	prt_newline(out);

	prt_str(out, "i_rt_spc_timelimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_rt_spc_timelimit);
	prt_newline(out);

	prt_str(out, "i_spc_warnlimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_spc_warnlimit);
	prt_newline(out);

	prt_str(out, "i_ino_warnlimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_ino_warnlimit);
	prt_newline(out);

	prt_str(out, "i_rt_spc_warnlimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_rt_spc_warnlimit);
	prt_newline(out);
}

static void
qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_str(out, "d_fieldmask");
	prt_tab(out);
	prt_printf(out, "%x", q->d_fieldmask);
	prt_newline(out);

	prt_str(out, "d_spc_hardlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_spc_hardlimit);
	prt_newline(out);

	prt_str(out, "d_spc_softlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_spc_softlimit);
	prt_newline(out);

	prt_str(out, "d_ino_hardlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_hardlimit);
	prt_newline(out);

	prt_str(out, "d_ino_softlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_softlimit);
	prt_newline(out);

	prt_str(out, "d_space");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_space);
	prt_newline(out);

	prt_str(out, "d_ino_count");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_count);
	prt_newline(out);

	prt_str(out, "d_ino_timer");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_timer);
	prt_newline(out);

	prt_str(out, "d_spc_timer");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_spc_timer);
	prt_newline(out);

	prt_str(out, "d_ino_warns");
	prt_tab(out);
	prt_printf(out, "%i", q->d_ino_warns);
	prt_newline(out);

	prt_str(out, "d_spc_warns");
	prt_tab(out);
	prt_printf(out, "%i", q->d_spc_warns);
	prt_newline(out);
}

static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
{
	qtypes >>= i;
	return qtypes ? i + __ffs(qtypes) : QTYP_NR;
}

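/* Iterate over each quota type whose bit is set in _qtypes: */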
#define for_each_set_qtype(_c, _i, _q, _qtypes)			\
	for (_i = 0;							\
	     (_i = __next_qtype(_i, _qtypes),				\
	      _q = &(_c)->quotas[_i],					\
	      _i < QTYP_NR);						\
	     _i++)

static bool ignore_hardlimit(struct bch_memquota_type *q)
{
	if (capable(CAP_SYS_RESOURCE))
		return true;
#if 0
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
		(info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		 !(info->dqi_flags & DQF_ROOT_SQUASH));
#endif
	return false;
}

enum quota_msg {
	SOFTWARN,	/* Softlimit reached */
	SOFTLONGWARN,	/* Grace time expired */
	HARDWARN,	/* Hardlimit reached */

	HARDBELOW,	/* Usage got below inode hardlimit */
	SOFTBELOW,	/* Usage got below inode softlimit */
};

static int quota_nl[][Q_COUNTERS] = {
	[HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
	[SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
	[SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
	[HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
	[SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,

	[HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
	[SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
	[SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
	[HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
	[SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
};

struct quota_msgs {
	u8 nr;
	struct {
		u8 qtype;
		u8 msg;
	} m[QTYP_NR * Q_COUNTERS];
};

static void prepare_msg(unsigned qtype,
			enum quota_counters counter,
			struct quota_msgs *msgs,
			enum quota_msg msg_type)
{
	BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));

	msgs->m[msgs->nr].qtype = qtype;
	msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
	msgs->nr++;
}

static void prepare_warning(struct memquota_counter *qc,
			    unsigned qtype,
			    enum quota_counters counter,
			    struct quota_msgs *msgs,
			    enum quota_msg msg_type)
{
	if (qc->warning_issued & (1 << msg_type))
		return;

	prepare_msg(qtype, counter, msgs, msg_type);
}

static void flush_warnings(struct bch_qid qid,
			   struct super_block *sb,
			   struct quota_msgs *msgs)
{
	unsigned i;

	for (i = 0; i < msgs->nr; i++)
		quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
				   sb->s_dev, msgs->m[i].msg);
}

static int bch2_quota_check_limit(struct bch_fs *c,
				  unsigned qtype,
				  struct bch_memquota *mq,
				  struct quota_msgs *msgs,
				  enum quota_counters counter,
				  s64 v,
				  enum quota_acct_mode mode)
{
	struct bch_memquota_type *q = &c->quotas[qtype];
	struct memquota_counter *qc = &mq->c[counter];
	u64 n = qc->v + v;

	BUG_ON((s64) n < 0);

	if (mode == KEY_TYPE_QUOTA_NOCHECK)
		return 0;

	if (v <= 0) {
		if (n < qc->hardlimit &&
		    (qc->warning_issued & (1 << HARDWARN))) {
			qc->warning_issued &= ~(1 << HARDWARN);
			prepare_msg(qtype, counter, msgs, HARDBELOW);
		}

		if (n < qc->softlimit &&
		    (qc->warning_issued & (1 << SOFTWARN))) {
			qc->warning_issued &= ~(1 << SOFTWARN);
			prepare_msg(qtype, counter, msgs, SOFTBELOW);
		}

		qc->warning_issued = 0;
		return 0;
	}

	if (qc->hardlimit &&
	    qc->hardlimit < n &&
	    !ignore_hardlimit(q)) {
		prepare_warning(qc, qtype, counter, msgs, HARDWARN);
		return -EDQUOT;
	}

	if (qc->softlimit &&
	    qc->softlimit < n) {
		if (qc->timer == 0) {
			qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
			prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
		} else if (ktime_get_real_seconds() >= qc->timer &&
			   !ignore_hardlimit(q)) {
			prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
			return -EDQUOT;
		}
	}

	return 0;
}

int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
		    enum quota_counters counter, s64 v,
		    enum quota_acct_mode mode)
{
	unsigned qtypes = enabled_qtypes(c);
	struct bch_memquota_type *q;
	struct bch_memquota *mq[QTYP_NR];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	memset(&msgs, 0, sizeof(msgs));

	for_each_set_qtype(c, i, q, qtypes) {
		mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
		if (!mq[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mq[i]->c[counter].v += v;
err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(qid, c->vfs_sb, &msgs);

	return ret;
}

static void __bch2_quota_transfer(struct bch_memquota *src_q,
				  struct bch_memquota *dst_q,
				  enum quota_counters counter, s64 v)
{
	BUG_ON(v > src_q->c[counter].v);
	BUG_ON(v + dst_q->c[counter].v < v);

	src_q->c[counter].v -= v;
	dst_q->c[counter].v += v;
}

int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
			struct bch_qid dst,
			struct bch_qid src, u64 space,
			enum quota_acct_mode mode)
{
	struct bch_memquota_type *q;
	struct bch_memquota *src_q[3], *dst_q[3];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	qtypes &= enabled_qtypes(c);

	memset(&msgs, 0, sizeof(msgs));

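	/*
	 * Allocate in-memory counters for both the source and destination
	 * ids before taking the per-type locks:
	 */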
	for_each_set_qtype(c, i, q, qtypes) {
		src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
		dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
		if (!src_q[i] || !dst_q[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
					     dst_q[i]->c[Q_SPC].v + space,
					     mode);
		if (ret)
			goto err;

		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
					     dst_q[i]->c[Q_INO].v + 1,
					     mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes) {
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
	}

err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(dst, c->vfs_sb, &msgs);

	return ret;
}

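/*
 * Apply an on-disk quota key (plus optional fields from a qc_dqblk) to the
 * in-memory quota state:
 */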
static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
			    struct qc_dqblk *qdq)
{
	struct bkey_s_c_quota dq;
	struct bch_memquota_type *q;
	struct bch_memquota *mq;
	unsigned i;

	BUG_ON(k.k->p.inode >= QTYP_NR);

	if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
		return 0;

	switch (k.k->type) {
	case KEY_TYPE_quota:
		dq = bkey_s_c_to_quota(k);
		q = &c->quotas[k.k->p.inode];

		mutex_lock(&q->lock);
		mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
		if (!mq) {
			mutex_unlock(&q->lock);
			return -ENOMEM;
		}

		for (i = 0; i < Q_COUNTERS; i++) {
			mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
			mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
		}

		if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
			mq->c[Q_SPC].timer = qdq->d_spc_timer;
		if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
			mq->c[Q_SPC].warns = qdq->d_spc_warns;
		if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
			mq->c[Q_INO].timer = qdq->d_ino_timer;
		if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
			mq->c[Q_INO].warns = qdq->d_ino_warns;

		mutex_unlock(&q->lock);
	}

	return 0;
}

void bch2_fs_quota_exit(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		genradix_free(&c->quotas[i].table);
}

void bch2_fs_quota_init(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		mutex_init(&c->quotas[i].lock);
}

static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
{
	struct bch_sb_field_quota *sb_quota = bch2_sb_field_get(sb->sb, quota);

	if (sb_quota)
		return sb_quota;

	sb_quota = bch2_sb_field_resize(sb, quota, sizeof(*sb_quota) / sizeof(u64));
	if (sb_quota) {
		unsigned qtype, qc;

		for (qtype = 0; qtype < QTYP_NR; qtype++)
			for (qc = 0; qc < Q_COUNTERS; qc++)
				sb_quota->q[qtype].c[qc].timelimit =
					cpu_to_le32(7 * 24 * 60 * 60);
	}

	return sb_quota;
}

static void bch2_sb_quota_read(struct bch_fs *c)
{
	struct bch_sb_field_quota *sb_quota;
	unsigned i, j;

	sb_quota = bch2_sb_field_get(c->disk_sb.sb, quota);
	if (!sb_quota)
		return;

	for (i = 0; i < QTYP_NR; i++) {
		struct bch_memquota_type *q = &c->quotas[i];

		for (j = 0; j < Q_COUNTERS; j++) {
			q->limits[j].timelimit =
				le32_to_cpu(sb_quota->q[i].c[j].timelimit);
			q->limits[j].warnlimit =
				le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
		}
	}
}

static int bch2_fs_quota_read_inode(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	struct bch_snapshot_tree s_t;
	int ret;

	ret = bch2_snapshot_tree_lookup(trans,
			bch2_snapshot_tree(c, k.k->p.snapshot), &s_t);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
			"%s: snapshot tree %u not found", __func__,
			snapshot_t(c, k.k->p.snapshot)->tree);
	if (ret)
		return ret;

	if (!s_t.master_subvol)
		goto advance;

	ret = bch2_inode_find_by_inum_nowarn_trans(trans,
				(subvol_inum) {
					le32_to_cpu(s_t.master_subvol),
					k.k->p.offset,
				}, &u);
	/*
	 * Inode might be deleted in this snapshot - the easiest way to handle
	 * that is to just skip it here:
	 */
	if (bch2_err_matches(ret, ENOENT))
		goto advance;

	if (ret)
		return ret;

	bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
			KEY_TYPE_QUOTA_NOCHECK);
	bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
			KEY_TYPE_QUOTA_NOCHECK);
advance:
	bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	return 0;
}

int bch2_fs_quota_read(struct bch_fs *c)
{
	struct bch_sb_field_quota *sb_quota;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		mutex_unlock(&c->sb_lock);
		return -BCH_ERR_ENOSPC_sb_quota;
	}

	bch2_sb_quota_read(c);
	mutex_unlock(&c->sb_lock);

	trans = bch2_trans_get(c);

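	/*
	 * Two passes: first load per-id limits from the quotas btree, then
	 * walk every inode (in all snapshots) to rebuild the in-memory usage
	 * counters:
	 */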
	ret = for_each_btree_key2(trans, iter, BTREE_ID_quotas,
			POS_MIN, BTREE_ITER_PREFETCH, k,
			__bch2_quota_set(c, k, NULL)) ?:
	      for_each_btree_key2(trans, iter, BTREE_ID_inodes,
			POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
			bch2_fs_quota_read_inode(trans, &iter, k));

	bch2_trans_put(trans);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

/* Enable/disable/delete quotas for an entire filesystem: */

static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	int ret = 0;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	/* Accounting must be enabled at mount time: */
	if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
		return -EINVAL;

	/* Can't enable enforcement without accounting: */
	if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
		return -EINVAL;

	if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
		return -EINVAL;

	if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
		return -EINVAL;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);

	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}

static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	mutex_lock(&c->sb_lock);
	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (uflags & FS_USER_QUOTA) {
		if (c->opts.usrquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_USR, 0),
					      POS(QTYP_USR, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_GROUP_QUOTA) {
		if (c->opts.grpquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_GRP, 0),
					      POS(QTYP_GRP, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_PROJ_QUOTA) {
		if (c->opts.prjquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_PRJ, 0),
					      POS(QTYP_PRJ, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Return quota status information, such as enforcements, quota file inode
 * numbers etc.
 */
static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct bch_fs *c = sb->s_fs_info;
	unsigned qtypes = enabled_qtypes(c);
	unsigned i;

	memset(state, 0, sizeof(*state));

	for (i = 0; i < QTYP_NR; i++) {
		state->s_state[i].flags |= QCI_SYSFILE;

		if (!(qtypes & (1 << i)))
			continue;

		state->s_state[i].flags |= QCI_ACCT_ENABLED;

		state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
		state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;

		state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
		state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
	}

	return 0;
}

/*
 * Adjust quota timers & warnings
 */
static int bch2_quota_set_info(struct super_block *sb, int type,
			       struct qc_info *info)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	int ret = 0;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_info_to_text(&buf, info);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (type >= QTYP_NR)
		return -EINVAL;

	if (!((1 << type) & enabled_qtypes(c)))
		return -ESRCH;

	if (info->i_fieldmask &
	    ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
		return -EINVAL;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (info->i_fieldmask & QC_SPC_TIMER)
		sb_quota->q[type].c[Q_SPC].timelimit =
			cpu_to_le32(info->i_spc_timelimit);

	if (info->i_fieldmask & QC_SPC_WARNS)
		sb_quota->q[type].c[Q_SPC].warnlimit =
			cpu_to_le32(info->i_spc_warnlimit);

	if (info->i_fieldmask & QC_INO_TIMER)
		sb_quota->q[type].c[Q_INO].timelimit =
			cpu_to_le32(info->i_ino_timelimit);

	if (info->i_fieldmask & QC_INO_WARNS)
		sb_quota->q[type].c[Q_INO].warnlimit =
			cpu_to_le32(info->i_ino_warnlimit);

	bch2_sb_quota_read(c);

	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}

/* Get/set individual quotas: */

static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
{
	dst->d_space = src->c[Q_SPC].v << 9;
	dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
	dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
	dst->d_spc_timer = src->c[Q_SPC].timer;
	dst->d_spc_warns = src->c[Q_SPC].warns;

	dst->d_ino_count = src->c[Q_INO].v;
	dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
	dst->d_ino_softlimit = src->c[Q_INO].softlimit;
	dst->d_ino_timer = src->c[Q_INO].timer;
	dst->d_ino_warns = src->c[Q_INO].warns;
}

static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid.type];
	qid_t qid = from_kqid(&init_user_ns, kqid);
	struct bch_memquota *mq;

	memset(qdq, 0, sizeof(*qdq));

	mutex_lock(&q->lock);
	mq = genradix_ptr(&q->table, qid);
	if (mq)
		__bch2_quota_get(qdq, mq);
	mutex_unlock(&q->lock);

	return 0;
}

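/* Scan forward from the given id for the next id with any non-zero quota state: */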
static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
			       struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid->type];
	qid_t qid = from_kqid(&init_user_ns, *kqid);
	struct genradix_iter iter;
	struct bch_memquota *mq;
	int ret = 0;

	mutex_lock(&q->lock);

	genradix_for_each_from(&q->table, iter, mq, qid)
		if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
			__bch2_quota_get(qdq, mq);
			*kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
			goto found;
		}

	ret = -ENOENT;
found:
	mutex_unlock(&q->lock);
	return bch2_err_class(ret);
}

static int bch2_set_quota_trans(struct btree_trans *trans,
				struct bkey_i_quota *new_quota,
				struct qc_dqblk *qdq)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
			       BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (unlikely(ret))
		return ret;

	if (k.k->type == KEY_TYPE_quota)
		new_quota->v = *bkey_s_c_to_quota(k).v;

	if (qdq->d_fieldmask & QC_SPC_SOFT)
		new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
	if (qdq->d_fieldmask & QC_SPC_HARD)
		new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);

	if (qdq->d_fieldmask & QC_INO_SOFT)
		new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
	if (qdq->d_fieldmask & QC_INO_HARD)
		new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

	ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_set_quota(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bkey_i_quota new_quota;
	int ret;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_dqblk_to_text(&buf, qdq);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	bkey_quota_init(&new_quota.k_i);
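	/* Quota keys are indexed by (quota type, qid): */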
	new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));

	ret = bch2_trans_do(c, NULL, NULL, 0,
			    bch2_set_quota_trans(trans, &new_quota, qdq)) ?:
		__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);

	return bch2_err_class(ret);
}

const struct quotactl_ops bch2_quotactl_operations = {
	.quota_enable = bch2_quota_enable,
	.quota_disable = bch2_quota_disable,
	.rm_xquota = bch2_quota_remove,

	.get_state = bch2_quota_get_state,
	.set_info = bch2_quota_set_info,

	.get_dqblk = bch2_get_quota,
	.get_nextdqblk = bch2_get_next_quota,
	.set_dqblk = bch2_set_quota,
};

#endif /* CONFIG_BCACHEFS_QUOTA */