// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_update.h"
#include "errcode.h"
#include "inode.h"
#include "quota.h"
#include "subvolume.h"
#include "super-io.h"

static const char * const bch2_quota_types[] = {
	"user",
	"group",
	"project",
};

static const char * const bch2_quota_counters[] = {
	"space",
	"inodes",
};

static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
				  struct printbuf *err)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);

	if (vstruct_bytes(&q->field) < sizeof(*q)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&q->field), sizeof(*q));
		return -BCH_ERR_invalid_sb_quota;
	}

	return 0;
}

static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);
	unsigned qtyp, counter;

	for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
		prt_printf(out, "%s: flags %llx",
			   bch2_quota_types[qtyp],
			   le64_to_cpu(q->q[qtyp].flags));

		for (counter = 0; counter < Q_COUNTERS; counter++)
			prt_printf(out, " %s timelimit %u warnlimit %u",
				   bch2_quota_counters[counter],
				   le32_to_cpu(q->q[qtyp].c[counter].timelimit),
				   le32_to_cpu(q->q[qtyp].c[counter].warnlimit));

		prt_newline(out);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_quota = {
	.validate	= bch2_sb_quota_validate,
	.to_text	= bch2_sb_quota_to_text,
};

int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
		       unsigned flags, struct printbuf *err)
{
	if (k.k->p.inode >= QTYP_NR) {
		prt_printf(err, "invalid quota type (%llu >= %u)",
			   k.k->p.inode, QTYP_NR);
		return -BCH_ERR_invalid_bkey;
	}

	if (bkey_val_bytes(k.k) != sizeof(struct bch_quota)) {
		prt_printf(err, "incorrect value size (%zu != %zu)",
			   bkey_val_bytes(k.k), sizeof(struct bch_quota));
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
	unsigned i;

	for (i = 0; i < Q_COUNTERS; i++)
		prt_printf(out, "%s hardlimit %llu softlimit %llu",
			   bch2_quota_counters[i],
			   le64_to_cpu(dq.v->c[i].hardlimit),
			   le64_to_cpu(dq.v->c[i].softlimit));
}

#ifdef CONFIG_BCACHEFS_QUOTA

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/quota.h>

static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_str(out, "i_fieldmask");
	prt_tab(out);
	prt_printf(out, "%x", i->i_fieldmask);
	prt_newline(out);

	prt_str(out, "i_flags");
	prt_tab(out);
	prt_printf(out, "%u", i->i_flags);
	prt_newline(out);

	prt_str(out, "i_spc_timelimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_spc_timelimit);
	prt_newline(out);

	prt_str(out, "i_ino_timelimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_ino_timelimit);
	prt_newline(out);

	prt_str(out, "i_rt_spc_timelimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_rt_spc_timelimit);
	prt_newline(out);

	prt_str(out, "i_spc_warnlimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_spc_warnlimit);
	prt_newline(out);

	prt_str(out, "i_ino_warnlimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_ino_warnlimit);
	prt_newline(out);

	prt_str(out, "i_rt_spc_warnlimit");
	prt_tab(out);
	prt_printf(out, "%u", i->i_rt_spc_warnlimit);
	prt_newline(out);
}

static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_str(out, "d_fieldmask");
	prt_tab(out);
	prt_printf(out, "%x", q->d_fieldmask);
	prt_newline(out);

	prt_str(out, "d_spc_hardlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_spc_hardlimit);
	prt_newline(out);

	prt_str(out, "d_spc_softlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_spc_softlimit);
	prt_newline(out);

	prt_str(out, "d_ino_hardlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_hardlimit);
	prt_newline(out);

	prt_str(out, "d_ino_softlimit");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_softlimit);
	prt_newline(out);

	prt_str(out, "d_space");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_space);
	prt_newline(out);

	prt_str(out, "d_ino_count");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_count);
	prt_newline(out);

	prt_str(out, "d_ino_timer");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_ino_timer);
	prt_newline(out);

	prt_str(out, "d_spc_timer");
	prt_tab(out);
	prt_printf(out, "%llu", q->d_spc_timer);
	prt_newline(out);

	prt_str(out, "d_ino_warns");
	prt_tab(out);
	prt_printf(out, "%i", q->d_ino_warns);
	prt_newline(out);

	prt_str(out, "d_spc_warns");
	prt_tab(out);
	prt_printf(out, "%i", q->d_spc_warns);
	prt_newline(out);
}

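/*
 * Iterate over the quota types whose bits are set in a mask: __next_qtype()
 * returns the index of the next set bit at or after @i, or QTYP_NR when none
 * remain, and for_each_set_qtype() uses it to visit each enabled type's
 * bch_memquota_type in @_c->quotas.
 */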
static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
{
	qtypes >>= i;
	return qtypes ? i + __ffs(qtypes) : QTYP_NR;
}

#define for_each_set_qtype(_c, _i, _q, _qtypes)				\
	for (_i = 0;							\
	     (_i = __next_qtype(_i, _qtypes),				\
	      _q = &(_c)->quotas[_i],					\
	      _i < QTYP_NR);						\
	     _i++)

static bool ignore_hardlimit(struct bch_memquota_type *q)
{
	if (capable(CAP_SYS_RESOURCE))
		return true;
#if 0
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
#endif
	return false;
}

enum quota_msg {
	SOFTWARN,	/* Softlimit reached */
	SOFTLONGWARN,	/* Grace time expired */
	HARDWARN,	/* Hardlimit reached */

	HARDBELOW,	/* Usage got below inode hardlimit */
	SOFTBELOW,	/* Usage got below inode softlimit */
};

static int quota_nl[][Q_COUNTERS] = {
	[HARDWARN][Q_SPC]	= QUOTA_NL_BHARDWARN,
	[SOFTLONGWARN][Q_SPC]	= QUOTA_NL_BSOFTLONGWARN,
	[SOFTWARN][Q_SPC]	= QUOTA_NL_BSOFTWARN,
	[HARDBELOW][Q_SPC]	= QUOTA_NL_BHARDBELOW,
	[SOFTBELOW][Q_SPC]	= QUOTA_NL_BSOFTBELOW,

	[HARDWARN][Q_INO]	= QUOTA_NL_IHARDWARN,
	[SOFTLONGWARN][Q_INO]	= QUOTA_NL_ISOFTLONGWARN,
	[SOFTWARN][Q_INO]	= QUOTA_NL_ISOFTWARN,
	[HARDBELOW][Q_INO]	= QUOTA_NL_IHARDBELOW,
	[SOFTBELOW][Q_INO]	= QUOTA_NL_ISOFTBELOW,
};

struct quota_msgs {
	u8		nr;
	struct {
		u8	qtype;
		u8	msg;
	}		m[QTYP_NR * Q_COUNTERS];
};

static void prepare_msg(unsigned qtype,
			enum quota_counters counter,
			struct quota_msgs *msgs,
			enum quota_msg msg_type)
{
	BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));

	msgs->m[msgs->nr].qtype	= qtype;
	msgs->m[msgs->nr].msg	= quota_nl[msg_type][counter];
	msgs->nr++;
}

static void prepare_warning(struct memquota_counter *qc,
			    unsigned qtype,
			    enum quota_counters counter,
			    struct quota_msgs *msgs,
			    enum quota_msg msg_type)
{
	if (qc->warning_issued & (1 << msg_type))
		return;

	prepare_msg(qtype, counter, msgs, msg_type);
}

static void flush_warnings(struct bch_qid qid,
			   struct super_block *sb,
			   struct quota_msgs *msgs)
{
	unsigned i;

	for (i = 0; i < msgs->nr; i++)
		quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
				   sb->s_dev, msgs->m[i].msg);
}

static int bch2_quota_check_limit(struct bch_fs *c,
				  unsigned qtype,
				  struct bch_memquota *mq,
				  struct quota_msgs *msgs,
				  enum quota_counters counter,
				  s64 v,
				  enum quota_acct_mode mode)
{
	struct bch_memquota_type *q = &c->quotas[qtype];
	struct memquota_counter *qc = &mq->c[counter];
	u64 n = qc->v + v;

	BUG_ON((s64) n < 0);

	if (mode == KEY_TYPE_QUOTA_NOCHECK)
		return 0;

	if (v <= 0) {
		if (n < qc->hardlimit &&
		    (qc->warning_issued & (1 << HARDWARN))) {
			qc->warning_issued &= ~(1 << HARDWARN);
			prepare_msg(qtype, counter, msgs, HARDBELOW);
		}

		if (n < qc->softlimit &&
		    (qc->warning_issued & (1 << SOFTWARN))) {
			qc->warning_issued &= ~(1 << SOFTWARN);
			prepare_msg(qtype, counter, msgs, SOFTBELOW);
		}

		qc->warning_issued = 0;
		return 0;
	}

	if (qc->hardlimit &&
	    qc->hardlimit < n &&
	    !ignore_hardlimit(q)) {
		prepare_warning(qc, qtype, counter, msgs, HARDWARN);
		return -EDQUOT;
	}

	if (qc->softlimit &&
	    qc->softlimit < n) {
		if (qc->timer == 0) {
			qc->timer = ktime_get_real_seconds() +
				q->limits[counter].timelimit;
			prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
		} else if (ktime_get_real_seconds() >= qc->timer &&
			   !ignore_hardlimit(q)) {
			prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
			return -EDQUOT;
		}
	}

	return 0;
}

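/*
 * Charge (or, for negative @v, release) @v units of @counter against @qid for
 * every quota type enabled on the filesystem. Unless @mode is
 * KEY_TYPE_QUOTA_NOCHECK, limits are checked first and nothing is charged if
 * any of them would be exceeded (-EDQUOT).
 */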
int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
		    enum quota_counters counter, s64 v,
		    enum quota_acct_mode mode)
{
	unsigned qtypes = enabled_qtypes(c);
	struct bch_memquota_type *q;
	struct bch_memquota *mq[QTYP_NR];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	memset(&msgs, 0, sizeof(msgs));

	for_each_set_qtype(c, i, q, qtypes) {
		mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
		if (!mq[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mq[i]->c[counter].v += v;
err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(qid, c->vfs_sb, &msgs);

	return ret;
}

static void __bch2_quota_transfer(struct bch_memquota *src_q,
				  struct bch_memquota *dst_q,
				  enum quota_counters counter, s64 v)
{
	BUG_ON(v > src_q->c[counter].v);
	BUG_ON(v + dst_q->c[counter].v < v);

	src_q->c[counter].v -= v;
	dst_q->c[counter].v += v;
}

int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
			struct bch_qid dst,
			struct bch_qid src, u64 space,
			enum quota_acct_mode mode)
{
	struct bch_memquota_type *q;
	struct bch_memquota *src_q[3], *dst_q[3];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	qtypes &= enabled_qtypes(c);

	memset(&msgs, 0, sizeof(msgs));

	for_each_set_qtype(c, i, q, qtypes) {
		src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
		dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
		if (!src_q[i] || !dst_q[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
					     dst_q[i]->c[Q_SPC].v + space,
					     mode);
		if (ret)
			goto err;

		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
					     dst_q[i]->c[Q_INO].v + 1,
					     mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes) {
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
	}

err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(dst, c->vfs_sb, &msgs);

	return ret;
}

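/*
 * Load a quota key into the in-memory quota tables: copy the hard/soft limits
 * from the bkey and, if @qdq was supplied by quotactl, the timer/warning
 * fields it selects via d_fieldmask. Quota types that aren't enabled are
 * ignored.
 */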
static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
			    struct qc_dqblk *qdq)
{
	struct bkey_s_c_quota dq;
	struct bch_memquota_type *q;
	struct bch_memquota *mq;
	unsigned i;

	BUG_ON(k.k->p.inode >= QTYP_NR);

	if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
		return 0;

	switch (k.k->type) {
	case KEY_TYPE_quota:
		dq = bkey_s_c_to_quota(k);
		q = &c->quotas[k.k->p.inode];

		mutex_lock(&q->lock);
		mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
		if (!mq) {
			mutex_unlock(&q->lock);
			return -ENOMEM;
		}

		for (i = 0; i < Q_COUNTERS; i++) {
			mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
			mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
		}

		if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
			mq->c[Q_SPC].timer = cpu_to_le64(qdq->d_spc_timer);
		if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
			mq->c[Q_SPC].warns = cpu_to_le64(qdq->d_spc_warns);
		if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
			mq->c[Q_INO].timer = cpu_to_le64(qdq->d_ino_timer);
		if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
			mq->c[Q_INO].warns = cpu_to_le64(qdq->d_ino_warns);

		mutex_unlock(&q->lock);
	}

	return 0;
}

void bch2_fs_quota_exit(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		genradix_free(&c->quotas[i].table);
}

void bch2_fs_quota_init(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		mutex_init(&c->quotas[i].lock);
}

static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
{
	struct bch_sb_field_quota *sb_quota = bch2_sb_get_quota(sb->sb);

	if (sb_quota)
		return sb_quota;

	sb_quota = bch2_sb_resize_quota(sb, sizeof(*sb_quota) / sizeof(u64));
	if (sb_quota) {
		unsigned qtype, qc;

		for (qtype = 0; qtype < QTYP_NR; qtype++)
			for (qc = 0; qc < Q_COUNTERS; qc++)
				sb_quota->q[qtype].c[qc].timelimit =
					cpu_to_le32(7 * 24 * 60 * 60);
	}

	return sb_quota;
}

static void bch2_sb_quota_read(struct bch_fs *c)
{
	struct bch_sb_field_quota *sb_quota;
	unsigned i, j;

	sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
	if (!sb_quota)
		return;

	for (i = 0; i < QTYP_NR; i++) {
		struct bch_memquota_type *q = &c->quotas[i];

		for (j = 0; j < Q_COUNTERS; j++) {
			q->limits[j].timelimit =
				le32_to_cpu(sb_quota->q[i].c[j].timelimit);
			q->limits[j].warnlimit =
				le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
		}
	}
}

static int bch2_fs_quota_read_inode(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	struct bch_subvolume subvolume;
	int ret;

	ret = bch2_snapshot_get_subvol(trans, k.k->p.snapshot, &subvolume);
	if (ret)
		return ret;

	/*
	 * We don't do quota accounting in snapshots:
	 */
	if (BCH_SUBVOLUME_SNAP(&subvolume))
		goto advance;

	if (!bkey_is_inode(k.k))
		goto advance;

	ret = bch2_inode_unpack(k, &u);
	if (ret)
		return ret;

	bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
			KEY_TYPE_QUOTA_NOCHECK);
	bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
			KEY_TYPE_QUOTA_NOCHECK);
advance:
	bch2_btree_iter_set_pos(iter, POS(iter->pos.inode, iter->pos.offset + 1));
	return 0;
}

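/*
 * Initialize in-memory quota state from disk: read limits from the superblock
 * (creating the quota section if it doesn't exist yet), load every key in the
 * quotas btree, then walk all inodes and account their sectors and inode
 * count without enforcing limits.
 */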
int bch2_fs_quota_read(struct bch_fs *c)
{
	struct bch_sb_field_quota *sb_quota;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		mutex_unlock(&c->sb_lock);
		return -BCH_ERR_ENOSPC_sb_quota;
	}

	bch2_sb_quota_read(c);
	mutex_unlock(&c->sb_lock);

	bch2_trans_init(&trans, c, 0, 0);

	ret = for_each_btree_key2(&trans, iter, BTREE_ID_quotas,
			POS_MIN, BTREE_ITER_PREFETCH, k,
		__bch2_quota_set(c, k, NULL)) ?:
	      for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
			POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
		bch2_fs_quota_read_inode(&trans, &iter, k));
	if (ret)
		bch_err(c, "err in quota_read: %s", bch2_err_str(ret));

	bch2_trans_exit(&trans);
	return ret;
}

/* Enable/disable/delete quotas for an entire filesystem: */

static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	int ret = 0;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	/* Accounting must be enabled at mount time: */
	if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
		return -EINVAL;

	/* Can't enable enforcement without accounting: */
	if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
		return -EINVAL;

	if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
		return -EINVAL;

	if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
		return -EINVAL;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);

	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}

static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	mutex_lock(&c->sb_lock);
	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (uflags & FS_USER_QUOTA) {
		if (c->opts.usrquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_USR, 0),
					      POS(QTYP_USR, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_GROUP_QUOTA) {
		if (c->opts.grpquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_GRP, 0),
					      POS(QTYP_GRP, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_PROJ_QUOTA) {
		if (c->opts.prjquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_PRJ, 0),
					      POS(QTYP_PRJ, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Return quota status information, such as enforcements, quota file inode
 * numbers etc.
 */
static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct bch_fs *c = sb->s_fs_info;
	unsigned qtypes = enabled_qtypes(c);
	unsigned i;

	memset(state, 0, sizeof(*state));

	for (i = 0; i < QTYP_NR; i++) {
		state->s_state[i].flags |= QCI_SYSFILE;

		if (!(qtypes & (1 << i)))
			continue;

		state->s_state[i].flags |= QCI_ACCT_ENABLED;

		state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
		state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;

		state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
		state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
	}

	return 0;
}

/*
 * Adjust quota timers & warnings
 */
static int bch2_quota_set_info(struct super_block *sb, int type,
			       struct qc_info *info)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	struct bch_memquota_type *q;
	int ret = 0;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_info_to_text(&buf, info);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (type >= QTYP_NR)
		return -EINVAL;

	if (!((1 << type) & enabled_qtypes(c)))
		return -ESRCH;

	if (info->i_fieldmask &
	    ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
		return -EINVAL;

	q = &c->quotas[type];

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (info->i_fieldmask & QC_SPC_TIMER)
		sb_quota->q[type].c[Q_SPC].timelimit =
			cpu_to_le32(info->i_spc_timelimit);

	if (info->i_fieldmask & QC_SPC_WARNS)
		sb_quota->q[type].c[Q_SPC].warnlimit =
			cpu_to_le32(info->i_spc_warnlimit);

	if (info->i_fieldmask & QC_INO_TIMER)
		sb_quota->q[type].c[Q_INO].timelimit =
			cpu_to_le32(info->i_ino_timelimit);

	if (info->i_fieldmask & QC_INO_WARNS)
		sb_quota->q[type].c[Q_INO].warnlimit =
			cpu_to_le32(info->i_ino_warnlimit);

	bch2_sb_quota_read(c);

	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}

/* Get/set individual quotas: */

static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
{
	dst->d_space		= src->c[Q_SPC].v << 9;
	dst->d_spc_hardlimit	= src->c[Q_SPC].hardlimit << 9;
	dst->d_spc_softlimit	= src->c[Q_SPC].softlimit << 9;
	dst->d_spc_timer	= src->c[Q_SPC].timer;
	dst->d_spc_warns	= src->c[Q_SPC].warns;

	dst->d_ino_count	= src->c[Q_INO].v;
	dst->d_ino_hardlimit	= src->c[Q_INO].hardlimit;
	dst->d_ino_softlimit	= src->c[Q_INO].softlimit;
	dst->d_ino_timer	= src->c[Q_INO].timer;
	dst->d_ino_warns	= src->c[Q_INO].warns;
}

static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid.type];
	qid_t qid = from_kqid(&init_user_ns, kqid);
	struct bch_memquota *mq;

	memset(qdq, 0, sizeof(*qdq));

	mutex_lock(&q->lock);
	mq = genradix_ptr(&q->table, qid);
	if (mq)
		__bch2_quota_get(qdq, mq);
	mutex_unlock(&q->lock);

	return 0;
}

static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
			       struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid->type];
	qid_t qid = from_kqid(&init_user_ns, *kqid);
	struct genradix_iter iter;
	struct bch_memquota *mq;
	int ret = 0;

	mutex_lock(&q->lock);

	genradix_for_each_from(&q->table, iter, mq, qid)
		if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
			__bch2_quota_get(qdq, mq);
			*kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
			goto found;
		}

	ret = -ENOENT;
found:
	mutex_unlock(&q->lock);
	return ret;
}

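/*
 * Update a quota key in the quotas btree: start from the existing key at this
 * position (if any), overwrite only the limits selected by qdq->d_fieldmask
 * (space limits are converted from bytes to 512-byte sectors), and write the
 * result back within @trans.
 */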
static int bch2_set_quota_trans(struct btree_trans *trans,
				struct bkey_i_quota *new_quota,
				struct qc_dqblk *qdq)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);

	ret = bkey_err(k);
	if (unlikely(ret))
		return ret;

	if (k.k->type == KEY_TYPE_quota)
		new_quota->v = *bkey_s_c_to_quota(k).v;

	if (qdq->d_fieldmask & QC_SPC_SOFT)
		new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
	if (qdq->d_fieldmask & QC_SPC_HARD)
		new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);

	if (qdq->d_fieldmask & QC_INO_SOFT)
		new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
	if (qdq->d_fieldmask & QC_INO_HARD)
		new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

	ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_set_quota(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bkey_i_quota new_quota;
	int ret;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_dqblk_to_text(&buf, qdq);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	bkey_quota_init(&new_quota.k_i);
	new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));

	ret = bch2_trans_do(c, NULL, NULL, 0,
			    bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
		__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);

	return ret;
}

const struct quotactl_ops bch2_quotactl_operations = {
	.quota_enable		= bch2_quota_enable,
	.quota_disable		= bch2_quota_disable,
	.rm_xquota		= bch2_quota_remove,

	.get_state		= bch2_quota_get_state,
	.set_info		= bch2_quota_set_info,

	.get_dqblk		= bch2_get_quota,
	.get_nextdqblk		= bch2_get_next_quota,
	.set_dqblk		= bch2_set_quota,
};

#endif /* CONFIG_BCACHEFS_QUOTA */