// SPDX-License-Identifier: GPL-2.0-or-later
/* Volume-level cache cookie handling.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/export.h>
#include <linux/slab.h>
#include "internal.h"

#define fscache_volume_hash_shift 10
static struct hlist_bl_head fscache_volume_hash[1 << fscache_volume_hash_shift];
static atomic_t fscache_volume_debug_id;
static LIST_HEAD(fscache_volumes);

static void fscache_create_volume_work(struct work_struct *work);

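/*
 * Take a reference on a volume cookie, noting the reason in the trace log.
 */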
struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
                                          enum fscache_volume_trace where)
{
        int ref;

        __refcount_inc(&volume->ref, &ref);
        trace_fscache_volume(volume->debug_id, ref + 1, where);
        return volume;
}

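/*
 * Take a reference on a volume cookie, but only if its refcount hasn't
 * already dropped to zero (i.e. it isn't already being destroyed).
 */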
struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
                                              enum fscache_volume_trace where)
{
        int ref;

        if (!__refcount_inc_not_zero(&volume->ref, &ref))
                return NULL;

        trace_fscache_volume(volume->debug_id, ref + 1, where);
        return volume;
}
EXPORT_SYMBOL(fscache_try_get_volume);

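/*
 * Note an interesting point in a volume cookie's lifetime in the trace log
 * without changing its refcount.
 */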
static void fscache_see_volume(struct fscache_volume *volume,
                               enum fscache_volume_trace where)
{
        int ref = refcount_read(&volume->ref);

        trace_fscache_volume(volume->debug_id, ref, where);
}

/*
 * Pin the cache behind a volume so that we can access it.
 */
static void __fscache_begin_volume_access(struct fscache_volume *volume,
                                          struct fscache_cookie *cookie,
                                          enum fscache_access_trace why)
{
        int n_accesses;

        n_accesses = atomic_inc_return(&volume->n_accesses);
        smp_mb__after_atomic();
        trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
                                    refcount_read(&volume->ref),
                                    n_accesses, why);
}

/**
 * fscache_begin_volume_access - Pin a cache so a volume can be accessed
 * @volume: The volume cookie
 * @cookie: A datafile cookie for a tracing reference (or NULL)
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing a volume.  Returns true if successful.  This works as follows:
 *
 * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
 *     then we return false to indicate access was not permitted.
 *
 * (2) If the cache tests as live, then we increment the volume's n_accesses
 *     count and then recheck the cache liveness, ending the access if it
 *     ceased to be live.
 *
 * (3) When we end the access, we decrement the volume's n_accesses and wake
 *     up any waiters if it reaches 0.
 *
 * (4) Whilst the cache is caching, the volume's n_accesses is kept
 *     artificially incremented to prevent wakeups from happening.
 *
 * (5) When the cache is taken offline, the state is changed to prevent new
 *     accesses, the volume's n_accesses is decremented and we wait for it to
 *     become 0.
 *
 * The datafile @cookie and the @why indicator are merely provided for tracing
 * purposes.
 */
bool fscache_begin_volume_access(struct fscache_volume *volume,
                                 struct fscache_cookie *cookie,
                                 enum fscache_access_trace why)
{
        if (!fscache_cache_is_live(volume->cache))
                return false;
        __fscache_begin_volume_access(volume, cookie, why);
        if (!fscache_cache_is_live(volume->cache)) {
                fscache_end_volume_access(volume, cookie, fscache_access_unlive);
                return false;
        }
        return true;
}

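/*
 * Typical usage (illustrative sketch only: fscache_volume_io() and
 * netfs_fallback_read() are hypothetical helpers, and "why" stands for
 * whichever enum fscache_access_trace reason fits the caller): bracket any
 * work that touches the cache behind a volume with a begin/end pair and fall
 * back to uncached behaviour if the cache is no longer live:
 *
 *      if (!fscache_begin_volume_access(volume, cookie, why))
 *              return netfs_fallback_read(...);   // Cache unavailable
 *      ret = fscache_volume_io(volume, ...);      // Safe: cache is pinned
 *      fscache_end_volume_access(volume, cookie, why);
 *
 * fscache_end_volume_access() is defined below.
 */
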
/**
 * fscache_end_volume_access - Unpin a cache at the end of an access.
 * @volume: The volume cookie
 * @cookie: A datafile cookie for a tracing reference (or NULL)
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache volume after we've accessed it.  The datafile @cookie and the
 * @why indicator are merely provided for tracing purposes.
 */
void fscache_end_volume_access(struct fscache_volume *volume,
                               struct fscache_cookie *cookie,
                               enum fscache_access_trace why)
{
        int n_accesses;

        smp_mb__before_atomic();
        n_accesses = atomic_dec_return(&volume->n_accesses);
        trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
                                    refcount_read(&volume->ref),
                                    n_accesses, why);
        if (n_accesses == 0)
                wake_up_var(&volume->n_accesses);
}
EXPORT_SYMBOL(fscache_end_volume_access);

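/*
 * See if two volume cookies refer to the same volume: same cache, same key
 * hash and the same length-prefixed, padded key.
 */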
static bool fscache_volume_same(const struct fscache_volume *a,
                                const struct fscache_volume *b)
{
        size_t klen;

        if (a->key_hash != b->key_hash ||
            a->cache != b->cache ||
            a->key[0] != b->key[0])
                return false;

        klen = round_up(a->key[0] + 1, sizeof(__le32));
        return memcmp(a->key, b->key, klen) == 0;
}

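/*
 * Check whether this volume is still waiting for a colliding, relinquished
 * volume to finish being torn down before it can take its place in the hash.
 */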
static bool fscache_is_acquire_pending(struct fscache_volume *volume)
{
        return test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &volume->flags);
}

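/*
 * Wait for a colliding, relinquished volume to finish going away.  Give it
 * 20s, then log a notice and keep waiting indefinitely.
 */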
static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
                                             unsigned int collidee_debug_id)
{
        wait_on_bit_timeout(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
                            TASK_UNINTERRUPTIBLE, 20 * HZ);
        if (fscache_is_acquire_pending(candidate)) {
                pr_notice("Potential volume collision new=%08x old=%08x\n",
                          candidate->debug_id, collidee_debug_id);
                fscache_stat(&fscache_n_volumes_collision);
                wait_on_bit(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
                            TASK_UNINTERRUPTIBLE);
        }
}

/*
 * Attempt to insert the new volume into the hash.  If there's a collision, we
 * wait for the old volume to complete if it's being relinquished, and return
 * an error otherwise.
 */
static bool fscache_hash_volume(struct fscache_volume *candidate)
{
        struct fscache_volume *cursor;
        struct hlist_bl_head *h;
        struct hlist_bl_node *p;
        unsigned int bucket, collidee_debug_id = 0;

        bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
        h = &fscache_volume_hash[bucket];

        hlist_bl_lock(h);
        hlist_bl_for_each_entry(cursor, p, h, hash_link) {
                if (fscache_volume_same(candidate, cursor)) {
                        if (!test_bit(FSCACHE_VOLUME_RELINQUISHED, &cursor->flags))
                                goto collision;
                        fscache_see_volume(cursor, fscache_volume_get_hash_collision);
                        set_bit(FSCACHE_VOLUME_COLLIDED_WITH, &cursor->flags);
                        set_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags);
                        collidee_debug_id = cursor->debug_id;
                        break;
                }
        }

        hlist_bl_add_head(&candidate->hash_link, h);
        hlist_bl_unlock(h);

        if (fscache_is_acquire_pending(candidate))
                fscache_wait_on_volume_collision(candidate, collidee_debug_id);
        return true;

collision:
        fscache_see_volume(cursor, fscache_volume_collision);
        hlist_bl_unlock(h);
        return false;
}

/*
 * Allocate and initialise a volume representation cookie.
 */
static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
                                                   const char *cache_name,
                                                   const void *coherency_data,
                                                   size_t coherency_len)
{
        struct fscache_volume *volume;
        struct fscache_cache *cache;
        size_t klen, hlen;
        u8 *key;

        klen = strlen(volume_key);
        if (klen > NAME_MAX)
                return NULL;

        if (!coherency_data)
                coherency_len = 0;

        cache = fscache_lookup_cache(cache_name, false);
        if (IS_ERR(cache))
                return NULL;

        volume = kzalloc(struct_size(volume, coherency, coherency_len),
                         GFP_KERNEL);
        if (!volume)
                goto err_cache;

        volume->cache = cache;
        volume->coherency_len = coherency_len;
        if (coherency_data)
                memcpy(volume->coherency, coherency_data, coherency_len);
        INIT_LIST_HEAD(&volume->proc_link);
        INIT_WORK(&volume->work, fscache_create_volume_work);
        refcount_set(&volume->ref, 1);
        spin_lock_init(&volume->lock);

        /* Stick the length on the front of the key and pad it out to make
         * hashing easier.
         */
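        /* For example (illustration only), volume_key "afs,example.com"
         * (klen 15) gives hlen = round_up(1 + 15 + 1, 4) = 20 and a buffer
         * laid out as { 15, 'a', 'f', 's', ',', ..., 'm', 0, 0, 0, 0 }.
         */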
        hlen = round_up(1 + klen + 1, sizeof(__le32));
        key = kzalloc(hlen, GFP_KERNEL);
        if (!key)
                goto err_vol;
        key[0] = klen;
        memcpy(key + 1, volume_key, klen);

        volume->key = key;
        volume->key_hash = fscache_hash(0, key, hlen);

        volume->debug_id = atomic_inc_return(&fscache_volume_debug_id);
        down_write(&fscache_addremove_sem);
        atomic_inc(&cache->n_volumes);
        list_add_tail(&volume->proc_link, &fscache_volumes);
        fscache_see_volume(volume, fscache_volume_new_acquire);
        fscache_stat(&fscache_n_volumes);
        up_write(&fscache_addremove_sem);
        _leave(" = v=%x", volume->debug_id);
        return volume;

err_vol:
        kfree(volume);
err_cache:
        fscache_put_cache(cache, fscache_cache_put_alloc_volume);
        fscache_stat(&fscache_n_volumes_nomem);
        return NULL;
}

/*
 * Create a volume's representation on disk.  We hold a volume ref and a cache
 * access that we have to release.
 */
static void fscache_create_volume_work(struct work_struct *work)
{
        const struct fscache_cache_ops *ops;
        struct fscache_volume *volume =
                container_of(work, struct fscache_volume, work);

        fscache_see_volume(volume, fscache_volume_see_create_work);

        ops = volume->cache->ops;
        if (ops->acquire_volume)
                ops->acquire_volume(volume);
        fscache_end_cache_access(volume->cache,
                                 fscache_access_acquire_volume_end);

        clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
        fscache_put_volume(volume, fscache_volume_put_create_work);
}

/*
 * Dispatch a worker thread to create a volume's representation on disk.
 */
void fscache_create_volume(struct fscache_volume *volume, bool wait)
{
        if (test_and_set_bit(FSCACHE_VOLUME_CREATING, &volume->flags))
                goto maybe_wait;
        if (volume->cache_priv)
                goto no_wait; /* We raced */
        if (!fscache_begin_cache_access(volume->cache,
                                        fscache_access_acquire_volume))
                goto no_wait;

        fscache_get_volume(volume, fscache_volume_get_create_work);
        if (!schedule_work(&volume->work))
                fscache_put_volume(volume, fscache_volume_put_create_work);

maybe_wait:
        if (wait) {
                fscache_see_volume(volume, fscache_volume_wait_create_work);
                wait_on_bit(&volume->flags, FSCACHE_VOLUME_CREATING,
                            TASK_UNINTERRUPTIBLE);
        }
        return;
no_wait:
        clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
        wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
}

/*
 * Acquire a volume representation cookie and link it to a (proposed) cache.
 */
struct fscache_volume *__fscache_acquire_volume(const char *volume_key,
                                                const char *cache_name,
                                                const void *coherency_data,
                                                size_t coherency_len)
{
        struct fscache_volume *volume;

        volume = fscache_alloc_volume(volume_key, cache_name,
                                      coherency_data, coherency_len);
        if (!volume)
                return ERR_PTR(-ENOMEM);

        if (!fscache_hash_volume(volume)) {
                fscache_put_volume(volume, fscache_volume_put_hash_collision);
                return ERR_PTR(-EBUSY);
        }

        fscache_create_volume(volume, false);
        return volume;
}
EXPORT_SYMBOL(__fscache_acquire_volume);

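/*
 * Wake a colliding volume that has been waiting in fscache_hash_volume() for
 * this relinquished volume to drop out of the hash bucket.
 */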
static void fscache_wake_pending_volume(struct fscache_volume *volume,
                                        struct hlist_bl_head *h)
{
        struct fscache_volume *cursor;
        struct hlist_bl_node *p;

        hlist_bl_for_each_entry(cursor, p, h, hash_link) {
                if (fscache_volume_same(cursor, volume)) {
                        fscache_see_volume(cursor, fscache_volume_see_hash_wake);
                        clear_and_wake_up_bit(FSCACHE_VOLUME_ACQUIRE_PENDING,
                                              &cursor->flags);
                        return;
                }
        }
}

/*
 * Remove a volume cookie from the hash table.
 */
static void fscache_unhash_volume(struct fscache_volume *volume)
{
        struct hlist_bl_head *h;
        unsigned int bucket;

        bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
        h = &fscache_volume_hash[bucket];

        hlist_bl_lock(h);
        hlist_bl_del(&volume->hash_link);
        if (test_bit(FSCACHE_VOLUME_COLLIDED_WITH, &volume->flags))
                fscache_wake_pending_volume(volume, h);
        hlist_bl_unlock(h);
}

/*
 * Drop a cache's volume attachments.
 */
static void fscache_free_volume(struct fscache_volume *volume)
{
        struct fscache_cache *cache = volume->cache;

        if (volume->cache_priv) {
                __fscache_begin_volume_access(volume, NULL,
                                              fscache_access_relinquish_volume);
                if (volume->cache_priv)
                        cache->ops->free_volume(volume);
                fscache_end_volume_access(volume, NULL,
                                          fscache_access_relinquish_volume_end);
        }

        down_write(&fscache_addremove_sem);
        list_del_init(&volume->proc_link);
        atomic_dec(&volume->cache->n_volumes);
        up_write(&fscache_addremove_sem);

        if (!hlist_bl_unhashed(&volume->hash_link))
                fscache_unhash_volume(volume);

        trace_fscache_volume(volume->debug_id, 0, fscache_volume_free);
        kfree(volume->key);
        kfree(volume);
        fscache_stat_d(&fscache_n_volumes);
        fscache_put_cache(cache, fscache_cache_put_volume);
}

/*
 * Drop a reference to a volume cookie.
 */
void fscache_put_volume(struct fscache_volume *volume,
                        enum fscache_volume_trace where)
{
        if (volume) {
                unsigned int debug_id = volume->debug_id;
                bool zero;
                int ref;

                zero = __refcount_dec_and_test(&volume->ref, &ref);
                trace_fscache_volume(debug_id, ref - 1, where);
                if (zero)
                        fscache_free_volume(volume);
        }
}
EXPORT_SYMBOL(fscache_put_volume);

/*
 * Relinquish a volume representation cookie.
 */
void __fscache_relinquish_volume(struct fscache_volume *volume,
                                 const void *coherency_data,
                                 bool invalidate)
{
        if (WARN_ON(test_and_set_bit(FSCACHE_VOLUME_RELINQUISHED, &volume->flags)))
                return;

        if (invalidate) {
                set_bit(FSCACHE_VOLUME_INVALIDATE, &volume->flags);
        } else if (coherency_data) {
                memcpy(volume->coherency, coherency_data, volume->coherency_len);
        }

        fscache_put_volume(volume, fscache_volume_put_relinquish);
}
EXPORT_SYMBOL(__fscache_relinquish_volume);

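/*
 * Lifecycle sketch (illustrative only): a network filesystem normally goes
 * through the fscache_acquire_volume()/fscache_relinquish_volume() wrappers
 * rather than calling the __fscache_* functions above directly, and the
 * volume key shown here is made up for the example:
 *
 *      volume = fscache_acquire_volume("mynetfs,server1,vol1", NULL, NULL, 0);
 *      if (IS_ERR(volume))
 *              volume = NULL;          // e.g. -EBUSY on a key collision
 *      ...
 *      fscache_relinquish_volume(volume, NULL, false);
 */
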
/**
 * fscache_withdraw_volume - Withdraw a volume from being cached
 * @volume: Volume cookie
 *
 * Withdraw a cache volume from service, waiting for all accesses to complete
 * before returning.
 */
void fscache_withdraw_volume(struct fscache_volume *volume)
{
        int n_accesses;

        _debug("withdraw V=%x", volume->debug_id);

        /* Allow wakeups on dec-to-0 */
        n_accesses = atomic_dec_return(&volume->n_accesses);
        trace_fscache_access_volume(volume->debug_id, 0,
                                    refcount_read(&volume->ref),
                                    n_accesses, fscache_access_cache_unpin);

        wait_var_event(&volume->n_accesses,
                       atomic_read(&volume->n_accesses) == 0);
}
EXPORT_SYMBOL(fscache_withdraw_volume);

#ifdef CONFIG_PROC_FS
/*
 * Generate a list of volumes in /proc/fs/fscache/volumes
 */
static int fscache_volumes_seq_show(struct seq_file *m, void *v)
{
        struct fscache_volume *volume;

        if (v == &fscache_volumes) {
                seq_puts(m,
                         "VOLUME   REF   nCOOK ACC FL CACHE           KEY\n"
                         "======== ===== ===== === == =============== ================\n");
                return 0;
        }

        volume = list_entry(v, struct fscache_volume, proc_link);
        seq_printf(m,
                   "%08x %5d %5d %3d %02lx %-15.15s %s\n",
                   volume->debug_id,
                   refcount_read(&volume->ref),
                   atomic_read(&volume->n_cookies),
                   atomic_read(&volume->n_accesses),
                   volume->flags,
                   volume->cache->name ?: "-",
                   volume->key + 1);
        return 0;
}

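/*
 * Iterate the volume list for the proc file, holding fscache_addremove_sem
 * to keep the list stable for the duration of the walk.
 */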
static void *fscache_volumes_seq_start(struct seq_file *m, loff_t *_pos)
        __acquires(&fscache_addremove_sem)
{
        down_read(&fscache_addremove_sem);
        return seq_list_start_head(&fscache_volumes, *_pos);
}

static void *fscache_volumes_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
        return seq_list_next(v, &fscache_volumes, _pos);
}

static void fscache_volumes_seq_stop(struct seq_file *m, void *v)
        __releases(&fscache_addremove_sem)
{
        up_read(&fscache_addremove_sem);
}

const struct seq_operations fscache_volumes_seq_ops = {
        .start  = fscache_volumes_seq_start,
        .next   = fscache_volumes_seq_next,
        .stop   = fscache_volumes_seq_stop,
        .show   = fscache_volumes_seq_show,
};
#endif /* CONFIG_PROC_FS */