xref: /linux/fs/bcachefs/nocow_locking.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "nocow_locking.h"
#include "util.h"

#include <linux/closure.h>

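/*
 * Bucket nocow locks: a fixed-size table of two-mode, shared
 * reference-count locks on (device, bucket) pairs, taken by the
 * no-copy-on-write write path so that a bucket cannot be locked for
 * copy and for update at the same time.
 *
 * Each struct nocow_lock_bucket holds a small array of slots: slot i
 * pairs an encoded dev/bucket (->b[i]) with a signed count (->l[i]).
 * A positive count means the bucket is locked for update, a negative
 * count means it is locked for copy; any number of holders of the same
 * mode may share a slot.  Contended lockers sleep on the hash bucket's
 * closure waitlist.
 */

/* Is @bucket currently locked, in either mode? */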
bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t, struct bpos bucket)
{
	u64 dev_bucket = bucket_to_u64(bucket);
	struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket && atomic_read(&l->l[i]))
			return true;
	return false;
}

#define sign(v)		((v) < 0 ? -1 : (v) > 0 ? 1 : 0)

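/*
 * Drop one reference on @bucket; @flags selects the mode being released
 * (nonzero = update, zero = copy) and must match the mode the lock is
 * held in.  The final unlock in either mode wakes any waiters.
 */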
void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t, struct bpos bucket, int flags)
{
	u64 dev_bucket = bucket_to_u64(bucket);
	struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
	int lock_val = flags ? 1 : -1;
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket) {
			int v = atomic_sub_return(lock_val, &l->l[i]);

			BUG_ON(v && sign(v) != lock_val);
			if (!v)
				closure_wake_up(&l->wait);
			return;
		}

	BUG();
}

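/*
 * Try to lock @dev_bucket in the mode given by @flags: reuse an
 * existing slot for it if there is one, or claim an empty slot.  Fails
 * if the bucket is held in the opposing mode, if the count would
 * overflow, or if every slot in the hash bucket is in use.
 */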
bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *l,
				 u64 dev_bucket, int flags)
{
	int v, lock_val = flags ? 1 : -1;
	unsigned i;

	spin_lock(&l->lock);

	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket)
			goto got_entry;

	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (!atomic_read(&l->l[i])) {
			l->b[i] = dev_bucket;
			goto take_lock;
		}
fail:
	spin_unlock(&l->lock);
	return false;
got_entry:
	v = atomic_read(&l->l[i]);
	/* Held in the opposing mode? */
	if (lock_val > 0 ? v < 0 : v > 0)
		goto fail;
take_lock:
	v = atomic_read(&l->l[i]);
	/* Overflow? */
	if (v && sign(v + lock_val) != sign(v))
		goto fail;

	atomic_add(lock_val, &l->l[i]);
	spin_unlock(&l->lock);
	return true;
}

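/*
 * Blocking version of the trylock: sleep on the hash bucket's closure
 * waitlist until the trylock succeeds, accounting time spent waiting
 * to the nocow_lock_contended time stats.
 */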
void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
			      struct nocow_lock_bucket *l,
			      u64 dev_bucket, int flags)
{
	if (!__bch2_bucket_nocow_trylock(l, dev_bucket, flags)) {
		struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
		u64 start_time = local_clock();

		__closure_wait_event(&l->wait, __bch2_bucket_nocow_trylock(l, dev_bucket, flags));
		bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
	}
}

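/* Print all held nocow locks to @out, summarizing runs of empty entries. */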
void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_table *t)
{
	unsigned i, nr_zero = 0;
	struct nocow_lock_bucket *l;

	for (l = t->l; l < t->l + ARRAY_SIZE(t->l); l++) {
		unsigned v = 0;

		for (i = 0; i < ARRAY_SIZE(l->l); i++)
			v |= atomic_read(&l->l[i]);

		if (!v) {
			nr_zero++;
			continue;
		}

		if (nr_zero)
			prt_printf(out, "(%u empty entries)\n", nr_zero);
		nr_zero = 0;

		for (i = 0; i < ARRAY_SIZE(l->l); i++) {
			int v = atomic_read(&l->l[i]);
			if (v) {
				bch2_bpos_to_text(out, u64_to_bucket(l->b[i]));
				prt_printf(out, ": %s %u ", v < 0 ? "copy" : "update", abs(v));
			}
		}
		prt_newline(out);
	}

	if (nr_zero)
		prt_printf(out, "(%u empty entries)\n", nr_zero);
}

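/* Nothing to free; just assert that no nocow locks are still held. */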
void bch2_fs_nocow_locking_exit(struct bch_fs *c)
{
	struct bucket_nocow_lock_table *t = &c->nocow_locks;

	for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
		for (unsigned j = 0; j < ARRAY_SIZE(l->l); j++)
			BUG_ON(atomic_read(&l->l[j]));
}

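/*
 * The lock table is embedded in struct bch_fs, so there is nothing to
 * allocate here; only the per-hash-bucket spinlocks need initializing.
 */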
int bch2_fs_nocow_locking_init(struct bch_fs *c)
{
	struct bucket_nocow_lock_table *t = &c->nocow_locks;

	for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
		spin_lock_init(&l->lock);

	return 0;
}