// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "quota.h"
#include "snapshot.h"
#include "super-io.h"

static const char * const bch2_quota_types[] = {
	"user",
	"group",
	"project",
};

static const char * const bch2_quota_counters[] = {
	"space",
	"inodes",
};

static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
				  enum bch_validate_flags flags, struct printbuf *err)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);

	if (vstruct_bytes(&q->field) < sizeof(*q)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&q->field), sizeof(*q));
		return -BCH_ERR_invalid_sb_quota;
	}

	return 0;
}

static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);
	unsigned qtyp, counter;

	for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
		prt_printf(out, "%s: flags %llx",
			   bch2_quota_types[qtyp],
			   le64_to_cpu(q->q[qtyp].flags));

		for (counter = 0; counter < Q_COUNTERS; counter++)
			prt_printf(out, " %s timelimit %u warnlimit %u",
				   bch2_quota_counters[counter],
				   le32_to_cpu(q->q[qtyp].c[counter].timelimit),
				   le32_to_cpu(q->q[qtyp].c[counter].warnlimit));

		prt_newline(out);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_quota = {
	.validate	= bch2_sb_quota_validate,
	.to_text	= bch2_sb_quota_to_text,
};

int bch2_quota_validate(struct bch_fs *c, struct bkey_s_c k,
			enum bch_validate_flags flags)
{
	int ret = 0;

	bkey_fsck_err_on(k.k->p.inode >= QTYP_NR,
			 c, quota_type_invalid,
			 "invalid quota type (%llu >= %u)",
			 k.k->p.inode, QTYP_NR);
fsck_err:
	return ret;
}

void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
	unsigned i;

	for (i = 0; i < Q_COUNTERS; i++)
		prt_printf(out, "%s hardlimit %llu softlimit %llu",
			   bch2_quota_counters[i],
			   le64_to_cpu(dq.v->c[i].hardlimit),
			   le64_to_cpu(dq.v->c[i].softlimit));
}

#ifdef CONFIG_BCACHEFS_QUOTA

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/quota.h>

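/*
 * Debugging helpers: pretty-print the VFS quotactl structures (struct qc_info
 * and struct qc_dqblk) to a printbuf, one "field\tvalue" line per member.
 */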
static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_printf(out, "i_fieldmask\t%x\n", i->i_fieldmask);
	prt_printf(out, "i_flags\t%u\n", i->i_flags);
	prt_printf(out, "i_spc_timelimit\t%u\n", i->i_spc_timelimit);
	prt_printf(out, "i_ino_timelimit\t%u\n", i->i_ino_timelimit);
	prt_printf(out, "i_rt_spc_timelimit\t%u\n", i->i_rt_spc_timelimit);
	prt_printf(out, "i_spc_warnlimit\t%u\n", i->i_spc_warnlimit);
	prt_printf(out, "i_ino_warnlimit\t%u\n", i->i_ino_warnlimit);
	prt_printf(out, "i_rt_spc_warnlimit\t%u\n", i->i_rt_spc_warnlimit);
}

static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_printf(out, "d_fieldmask\t%x\n", q->d_fieldmask);
	prt_printf(out, "d_spc_hardlimit\t%llu\n", q->d_spc_hardlimit);
	prt_printf(out, "d_spc_softlimit\t%llu\n", q->d_spc_softlimit);
	prt_printf(out, "d_ino_hardlimit\t%llu\n", q->d_ino_hardlimit);
	prt_printf(out, "d_ino_softlimit\t%llu\n", q->d_ino_softlimit);
	prt_printf(out, "d_space\t%llu\n", q->d_space);
	prt_printf(out, "d_ino_count\t%llu\n", q->d_ino_count);
	prt_printf(out, "d_ino_timer\t%llu\n", q->d_ino_timer);
	prt_printf(out, "d_spc_timer\t%llu\n", q->d_spc_timer);
	prt_printf(out, "d_ino_warns\t%i\n", q->d_ino_warns);
	prt_printf(out, "d_spc_warns\t%i\n", q->d_spc_warns);
}

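/*
 * Iterate over the quota types (user/group/project) whose bit is set in
 * @_qtypes: __next_qtype() finds the next set bit at or after @i, returning
 * QTYP_NR when none remain.
 */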
static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
{
	qtypes >>= i;
	return qtypes ? i + __ffs(qtypes) : QTYP_NR;
}

#define for_each_set_qtype(_c, _i, _q, _qtypes)			\
	for (_i = 0;						\
	     (_i = __next_qtype(_i, _qtypes),			\
	      _q = &(_c)->quotas[_i],				\
	      _i < QTYP_NR);					\
	     _i++)

static bool ignore_hardlimit(struct bch_memquota_type *q)
{
	if (capable(CAP_SYS_RESOURCE))
		return true;
#if 0
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
#endif
	return false;
}

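/*
 * Events we may want to report to userspace via the quota netlink interface;
 * quota_nl[] maps each (event, counter) pair to its QUOTA_NL_* warning type.
 */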
enum quota_msg {
	SOFTWARN,	/* Softlimit reached */
	SOFTLONGWARN,	/* Grace time expired */
	HARDWARN,	/* Hardlimit reached */

	HARDBELOW,	/* Usage got below inode hardlimit */
	SOFTBELOW,	/* Usage got below inode softlimit */
};

static int quota_nl[][Q_COUNTERS] = {
	[HARDWARN][Q_SPC]	= QUOTA_NL_BHARDWARN,
	[SOFTLONGWARN][Q_SPC]	= QUOTA_NL_BSOFTLONGWARN,
	[SOFTWARN][Q_SPC]	= QUOTA_NL_BSOFTWARN,
	[HARDBELOW][Q_SPC]	= QUOTA_NL_BHARDBELOW,
	[SOFTBELOW][Q_SPC]	= QUOTA_NL_BSOFTBELOW,

	[HARDWARN][Q_INO]	= QUOTA_NL_IHARDWARN,
	[SOFTLONGWARN][Q_INO]	= QUOTA_NL_ISOFTLONGWARN,
	[SOFTWARN][Q_INO]	= QUOTA_NL_ISOFTWARN,
	[HARDBELOW][Q_INO]	= QUOTA_NL_IHARDBELOW,
	[SOFTBELOW][Q_INO]	= QUOTA_NL_ISOFTBELOW,
};

struct quota_msgs {
	u8 nr;
	struct {
		u8 qtype;
		u8 msg;
	} m[QTYP_NR * Q_COUNTERS];
};

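/*
 * Warnings are collected in a struct quota_msgs while the per-type locks are
 * held, then sent to userspace by flush_warnings() after the locks are
 * dropped.
 */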
static void prepare_msg(unsigned qtype,
			enum quota_counters counter,
			struct quota_msgs *msgs,
			enum quota_msg msg_type)
{
	BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));

	msgs->m[msgs->nr].qtype = qtype;
	msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
	msgs->nr++;
}

static void prepare_warning(struct memquota_counter *qc,
			    unsigned qtype,
			    enum quota_counters counter,
			    struct quota_msgs *msgs,
			    enum quota_msg msg_type)
{
	if (qc->warning_issued & (1 << msg_type))
		return;

	prepare_msg(qtype, counter, msgs, msg_type);
}

static void flush_warnings(struct bch_qid qid,
			   struct super_block *sb,
			   struct quota_msgs *msgs)
{
	unsigned i;

	for (i = 0; i < msgs->nr; i++)
		quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
				   sb->s_dev, msgs->m[i].msg);
}

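/*
 * Check whether adding @v to @counter of @mq would exceed its limits: returns
 * -EDQUOT if the hard limit, or the soft limit past its grace period, would
 * be exceeded, queueing the appropriate warnings. When usage is decreasing,
 * limits cannot be exceeded and previously issued warnings are cleared.
 */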
static int bch2_quota_check_limit(struct bch_fs *c,
				  unsigned qtype,
				  struct bch_memquota *mq,
				  struct quota_msgs *msgs,
				  enum quota_counters counter,
				  s64 v,
				  enum quota_acct_mode mode)
{
	struct bch_memquota_type *q = &c->quotas[qtype];
	struct memquota_counter *qc = &mq->c[counter];
	u64 n = qc->v + v;

	BUG_ON((s64) n < 0);

	if (mode == KEY_TYPE_QUOTA_NOCHECK)
		return 0;

	if (v <= 0) {
		if (n < qc->hardlimit &&
		    (qc->warning_issued & (1 << HARDWARN))) {
			qc->warning_issued &= ~(1 << HARDWARN);
			prepare_msg(qtype, counter, msgs, HARDBELOW);
		}

		if (n < qc->softlimit &&
		    (qc->warning_issued & (1 << SOFTWARN))) {
			qc->warning_issued &= ~(1 << SOFTWARN);
			prepare_msg(qtype, counter, msgs, SOFTBELOW);
		}

		qc->warning_issued = 0;
		return 0;
	}

	if (qc->hardlimit &&
	    qc->hardlimit < n &&
	    !ignore_hardlimit(q)) {
		prepare_warning(qc, qtype, counter, msgs, HARDWARN);
		return -EDQUOT;
	}

	if (qc->softlimit &&
	    qc->softlimit < n) {
		if (qc->timer == 0) {
			qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
			prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
		} else if (ktime_get_real_seconds() >= qc->timer &&
			   !ignore_hardlimit(q)) {
			prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
			return -EDQUOT;
		}
	}

	return 0;
}

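/*
 * Account a change of @v to @counter for @qid against every enabled quota
 * type. With KEY_TYPE_QUOTA_NOCHECK the change is applied unconditionally;
 * otherwise limits are checked first and nothing is applied if any type
 * would go over quota.
 */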
int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
		    enum quota_counters counter, s64 v,
		    enum quota_acct_mode mode)
{
	unsigned qtypes = enabled_qtypes(c);
	struct bch_memquota_type *q;
	struct bch_memquota *mq[QTYP_NR];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	memset(&msgs, 0, sizeof(msgs));

	for_each_set_qtype(c, i, q, qtypes) {
		mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
		if (!mq[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mq[i]->c[counter].v += v;
err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(qid, c->vfs_sb, &msgs);

	return ret;
}

static void __bch2_quota_transfer(struct bch_memquota *src_q,
				  struct bch_memquota *dst_q,
				  enum quota_counters counter, s64 v)
{
	BUG_ON(v > src_q->c[counter].v);
	BUG_ON(v + dst_q->c[counter].v < v);

	src_q->c[counter].v -= v;
	dst_q->c[counter].v += v;
}

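/*
 * Move @space sectors and one inode from @src to @dst (e.g. on chown): the
 * destination is checked against its limits for every enabled quota type
 * before anything is moved.
 */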
int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
			struct bch_qid dst,
			struct bch_qid src, u64 space,
			enum quota_acct_mode mode)
{
	struct bch_memquota_type *q;
	struct bch_memquota *src_q[3], *dst_q[3];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	qtypes &= enabled_qtypes(c);

	memset(&msgs, 0, sizeof(msgs));

	for_each_set_qtype(c, i, q, qtypes) {
		src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
		dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
		if (!src_q[i] || !dst_q[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
					     dst_q[i]->c[Q_SPC].v + space,
					     mode);
		if (ret)
			goto err;

		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
					     dst_q[i]->c[Q_INO].v + 1,
					     mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes) {
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
	}

err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(dst, c->vfs_sb, &msgs);

	return ret;
}

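/*
 * Load one quota key from the btree into the in-memory limits for its quota
 * type; @qdq, if non-NULL, supplies timer/warning state not stored on disk.
 */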
static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
			    struct qc_dqblk *qdq)
{
	struct bkey_s_c_quota dq;
	struct bch_memquota_type *q;
	struct bch_memquota *mq;
	unsigned i;

	BUG_ON(k.k->p.inode >= QTYP_NR);

	if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
		return 0;

	switch (k.k->type) {
	case KEY_TYPE_quota:
		dq = bkey_s_c_to_quota(k);
		q = &c->quotas[k.k->p.inode];

		mutex_lock(&q->lock);
		mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
		if (!mq) {
			mutex_unlock(&q->lock);
			return -ENOMEM;
		}

		for (i = 0; i < Q_COUNTERS; i++) {
			mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
			mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
		}

		if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
			mq->c[Q_SPC].timer = qdq->d_spc_timer;
		if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
			mq->c[Q_SPC].warns = qdq->d_spc_warns;
		if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
			mq->c[Q_INO].timer = qdq->d_ino_timer;
		if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
			mq->c[Q_INO].warns = qdq->d_ino_warns;

		mutex_unlock(&q->lock);
	}

	return 0;
}

void bch2_fs_quota_exit(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		genradix_free(&c->quotas[i].table);
}

void bch2_fs_quota_init(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		mutex_init(&c->quotas[i].lock);
}

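/*
 * Get the quota section of the superblock, creating it with a default grace
 * period of one week for every counter if it doesn't exist yet.
 */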
static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
{
	struct bch_sb_field_quota *sb_quota = bch2_sb_field_get(sb->sb, quota);

	if (sb_quota)
		return sb_quota;

	sb_quota = bch2_sb_field_resize(sb, quota, sizeof(*sb_quota) / sizeof(u64));
	if (sb_quota) {
		unsigned qtype, qc;

		for (qtype = 0; qtype < QTYP_NR; qtype++)
			for (qc = 0; qc < Q_COUNTERS; qc++)
				sb_quota->q[qtype].c[qc].timelimit =
					cpu_to_le32(7 * 24 * 60 * 60);
	}

	return sb_quota;
}

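/* Copy the time/warn limits from the superblock into the in-memory quotas: */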
static void bch2_sb_quota_read(struct bch_fs *c)
{
	struct bch_sb_field_quota *sb_quota;
	unsigned i, j;

	sb_quota = bch2_sb_field_get(c->disk_sb.sb, quota);
	if (!sb_quota)
		return;

	for (i = 0; i < QTYP_NR; i++) {
		struct bch_memquota_type *q = &c->quotas[i];

		for (j = 0; j < Q_COUNTERS; j++) {
			q->limits[j].timelimit =
				le32_to_cpu(sb_quota->q[i].c[j].timelimit);
			q->limits[j].warnlimit =
				le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
		}
	}
}

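/*
 * Account one inode key during quota initialization: only inodes in the
 * snapshot tree's master subvolume are counted, and the iterator is advanced
 * past all other snapshots of the same inode number.
 */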
static int bch2_fs_quota_read_inode(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	struct bch_snapshot_tree s_t;
	u32 tree = bch2_snapshot_tree(c, k.k->p.snapshot);

	int ret = bch2_snapshot_tree_lookup(trans, tree, &s_t);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
				"%s: snapshot tree %u not found", __func__, tree);
	if (ret)
		return ret;

	if (!s_t.master_subvol)
		goto advance;

	ret = bch2_inode_find_by_inum_nowarn_trans(trans,
				(subvol_inum) {
					le32_to_cpu(s_t.master_subvol),
					k.k->p.offset,
				}, &u);
	/*
	 * Inode might be deleted in this snapshot - the easiest way to handle
	 * that is to just skip it here:
	 */
	if (bch2_err_matches(ret, ENOENT))
		goto advance;

	if (ret)
		return ret;

	bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
			KEY_TYPE_QUOTA_NOCHECK);
	bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
			KEY_TYPE_QUOTA_NOCHECK);
advance:
	bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	return 0;
}

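/*
 * Initialize quotas at mount time: read limits from the superblock, then walk
 * the quotas and inodes btrees to rebuild the in-memory usage counts.
 */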
int bch2_fs_quota_read(struct bch_fs *c)
{
	mutex_lock(&c->sb_lock);
	struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		mutex_unlock(&c->sb_lock);
		return -BCH_ERR_ENOSPC_sb_quota;
	}

	bch2_sb_quota_read(c);
	mutex_unlock(&c->sb_lock);

	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
				   BTREE_ITER_prefetch, k,
			__bch2_quota_set(c, k, NULL)) ?:
		for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
				   BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
			bch2_fs_quota_read_inode(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}

/* Enable/disable/delete quotas for an entire filesystem: */

static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	int ret = 0;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	/* Accounting must be enabled at mount time: */
	if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
		return -EINVAL;

	/* Can't enable enforcement without accounting: */
	if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
		return -EINVAL;

	if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
		return -EINVAL;

	if ((uflags & FS_QUOTA_PDQ_ENFD) && !c->opts.prjquota)
		return -EINVAL;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);

	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}

static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	mutex_lock(&c->sb_lock);
	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (uflags & FS_USER_QUOTA) {
		if (c->opts.usrquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_USR, 0),
					      POS(QTYP_USR, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_GROUP_QUOTA) {
		if (c->opts.grpquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_GRP, 0),
					      POS(QTYP_GRP, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_PROJ_QUOTA) {
		if (c->opts.prjquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_PRJ, 0),
					      POS(QTYP_PRJ, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Return quota status information, such as enforcements, quota file inode
 * numbers etc.
 */
static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct bch_fs *c = sb->s_fs_info;
	unsigned qtypes = enabled_qtypes(c);
	unsigned i;

	memset(state, 0, sizeof(*state));

	for (i = 0; i < QTYP_NR; i++) {
		state->s_state[i].flags |= QCI_SYSFILE;

		if (!(qtypes & (1 << i)))
			continue;

		state->s_state[i].flags |= QCI_ACCT_ENABLED;

		state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
		state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;

		state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
		state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
	}

	return 0;
}

/*
 * Adjust quota timers & warnings
 */
static int bch2_quota_set_info(struct super_block *sb, int type,
			       struct qc_info *info)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	int ret = 0;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_info_to_text(&buf, info);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (type >= QTYP_NR)
		return -EINVAL;

	if (!((1 << type) & enabled_qtypes(c)))
		return -ESRCH;

	if (info->i_fieldmask &
	    ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
		return -EINVAL;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (info->i_fieldmask & QC_SPC_TIMER)
		sb_quota->q[type].c[Q_SPC].timelimit =
			cpu_to_le32(info->i_spc_timelimit);

	if (info->i_fieldmask & QC_SPC_WARNS)
		sb_quota->q[type].c[Q_SPC].warnlimit =
			cpu_to_le32(info->i_spc_warnlimit);

	if (info->i_fieldmask & QC_INO_TIMER)
		sb_quota->q[type].c[Q_INO].timelimit =
			cpu_to_le32(info->i_ino_timelimit);

	if (info->i_fieldmask & QC_INO_WARNS)
		sb_quota->q[type].c[Q_INO].warnlimit =
			cpu_to_le32(info->i_ino_warnlimit);

	bch2_sb_quota_read(c);

	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}

/* Get/set individual quotas: */

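/*
 * Fill a struct qc_dqblk from our in-memory counters; space is tracked
 * internally in 512-byte sectors, so it's converted to bytes here.
 */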
static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
{
	dst->d_space = src->c[Q_SPC].v << 9;
	dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
	dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
	dst->d_spc_timer = src->c[Q_SPC].timer;
	dst->d_spc_warns = src->c[Q_SPC].warns;

	dst->d_ino_count = src->c[Q_INO].v;
	dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
	dst->d_ino_softlimit = src->c[Q_INO].softlimit;
	dst->d_ino_timer = src->c[Q_INO].timer;
	dst->d_ino_warns = src->c[Q_INO].warns;
}

static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid.type];
	qid_t qid = from_kqid(&init_user_ns, kqid);
	struct bch_memquota *mq;

	memset(qdq, 0, sizeof(*qdq));

	mutex_lock(&q->lock);
	mq = genradix_ptr(&q->table, qid);
	if (mq)
		__bch2_quota_get(qdq, mq);
	mutex_unlock(&q->lock);

	return 0;
}

static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
			       struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid->type];
	qid_t qid = from_kqid(&init_user_ns, *kqid);
	struct genradix_iter iter;
	struct bch_memquota *mq;
	int ret = 0;

	mutex_lock(&q->lock);

	genradix_for_each_from(&q->table, iter, mq, qid)
		if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
			__bch2_quota_get(qdq, mq);
			*kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
			goto found;
		}

	ret = -ENOENT;
found:
	mutex_unlock(&q->lock);
	return bch2_err_class(ret);
}

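/*
 * Update (or create) a quota key in the quotas btree, applying only the
 * fields selected by qdq->d_fieldmask on top of the existing key.
 */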
static int bch2_set_quota_trans(struct btree_trans *trans,
				struct bkey_i_quota *new_quota,
				struct qc_dqblk *qdq)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
			       BTREE_ITER_slots|BTREE_ITER_intent);
	ret = bkey_err(k);
	if (unlikely(ret))
		return ret;

	if (k.k->type == KEY_TYPE_quota)
		new_quota->v = *bkey_s_c_to_quota(k).v;

	if (qdq->d_fieldmask & QC_SPC_SOFT)
		new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
	if (qdq->d_fieldmask & QC_SPC_HARD)
		new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);

	if (qdq->d_fieldmask & QC_INO_SOFT)
		new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
	if (qdq->d_fieldmask & QC_INO_HARD)
		new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

	ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_set_quota(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bkey_i_quota new_quota;
	int ret;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_dqblk_to_text(&buf, qdq);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	bkey_quota_init(&new_quota.k_i);
	new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));

	ret = bch2_trans_commit_do(c, NULL, NULL, 0,
				   bch2_set_quota_trans(trans, &new_quota, qdq)) ?:
		__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);

	return bch2_err_class(ret);
}

const struct quotactl_ops bch2_quotactl_operations = {
	.quota_enable	= bch2_quota_enable,
	.quota_disable	= bch2_quota_disable,
	.rm_xquota	= bch2_quota_remove,

	.get_state	= bch2_quota_get_state,
	.set_info	= bch2_quota_set_info,

	.get_dqblk	= bch2_get_quota,
	.get_nextdqblk	= bch2_get_next_quota,
	.set_dqblk	= bch2_set_quota,
};

#endif /* CONFIG_BCACHEFS_QUOTA */