/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
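
/*
 * Summary of the entry life cycle in terms of the operations below:
 *
 *	mb_cache_entry_alloc()		allocate an invalid entry; the
 *					caller holds the only handle
 *	mb_cache_entry_insert()		make the entry valid (hashed);
 *					the caller still holds its handle
 *	mb_cache_entry_release()	drop the handle; a valid entry
 *					with no handles goes on the lru
 *					list, an invalid one is freed
 *	mb_cache_entry_get(),
 *	mb_cache_entry_find_first(),
 *	mb_cache_entry_find_next()	look up an entry, take a handle
 *					and remove it from the lru list
 *	mb_cache_entry_free()		unhash (invalidate) and release
 */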

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while (0)
#else
# define mb_debug(f...) do { } while (0)
# define mb_assert(c) do { } while (0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while (0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

struct mb_cache {
	struct list_head		c_cache_list;
	const char			*c_name;
	struct mb_cache_op		c_op;
	atomic_t			c_entry_count;
	int				c_bucket_bits;
#ifndef MB_CACHE_INDEXES_COUNT
	int				c_indexes_count;
#endif
	struct kmem_cache		*c_entry_cache;
	struct list_head		*c_block_hash;
	struct list_head		*c_indexes_hash[0];
};


/*
 * Global data: list of all mbcaches, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline int
mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
	return MB_CACHE_INDEXES_COUNT;
#else
	return cache->c_indexes_count;
#endif
}

/*
 * The shrinker that mbcache registers with the kernel so that cache
 * entries can be reclaimed dynamically under memory pressure.
 */

static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan,
			      gfp_t gfp_mask);

static struct shrinker mb_cache_shrinker = {
	.shrink = mb_cache_shrink_fn,
	.seeks = DEFAULT_SEEKS,
};

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	int n;

	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		for (n = 0; n < mb_cache_indexes(ce->e_cache); n++)
			list_del(&ce->e_indexes[n].o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
		/* free failed -- put back on the lru list
		   for freeing later. */
		spin_lock(&mb_cache_spinlock);
		list_add(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	} else {
		kmem_cache_free(cache->c_entry_cache, ce);
		atomic_dec(&cache->c_entry_count);
	}
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects in the caches, scaled by
 * sysctl_vfs_cache_pressure (i.e. count / 100 * vfs_cache_pressure).
 */
static int
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &mb_cache_list) {
		struct mb_cache *cache =
			list_entry(l, struct mb_cache, c_cache_list);
		mb_debug("cache %s (%d)", cache->c_name,
			  atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	mb_debug("trying to free %d entries", nr_to_scan);
	if (nr_to_scan == 0) {
		spin_unlock(&mb_cache_spinlock);
		goto out;
	}
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
	}
out:
	return (count / 100) * sysctl_vfs_cache_pressure;
}


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are of equal size. Cache entries may be from
 * multiple devices. Returns NULL if no more memory was available.
 *
 * @name: name of the cache (informal)
 * @cache_op: contains the callback called when freeing a cache entry
 * @entry_size: The size of a cache entry, including
 *              struct mb_cache_entry
 * @indexes_count: number of additional indexes in the cache. Must equal
 *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
 *                 hardwired.
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
		size_t entry_size, int indexes_count, int bucket_bits)
{
	int m = 0, n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (entry_size < sizeof(struct mb_cache_entry) +
	    indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
		return NULL;

	cache = kmalloc(sizeof(struct mb_cache) +
	                indexes_count * sizeof(struct list_head), GFP_KERNEL);
	if (!cache)
		goto fail;
	cache->c_name = name;
	cache->c_op.free = NULL;
	if (cache_op)
		cache->c_op.free = cache_op->free;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
#else
	cache->c_indexes_count = indexes_count;
#endif
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
	                              GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	for (m = 0; m < indexes_count; m++) {
		cache->c_indexes_hash[m] = kmalloc(bucket_count *
		                                 sizeof(struct list_head),
		                                 GFP_KERNEL);
		if (!cache->c_indexes_hash[m])
			goto fail;
		for (n = 0; n < bucket_count; n++)
			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
	}
	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail:
	if (cache) {
		while (--m >= 0)
			kfree(cache->c_indexes_hash[m]);
		kfree(cache->c_block_hash);
		kfree(cache);
	}
	return NULL;
}
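
/*
 * Example (a hypothetical caller, modelled on how the ext2/ext3 xattr
 * code uses this API; the names here are illustrative): create a cache
 * with one additional index at module init, destroy it at module exit:
 *
 *	static struct mb_cache *my_xattr_cache;
 *
 *	my_xattr_cache = mb_cache_create("my_xattr", NULL,
 *		sizeof(struct mb_cache_entry) +
 *		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
 *		1, 6);
 *	if (!my_xattr_cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(my_xattr_cache);
 */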


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}
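
/*
 * Example (a hypothetical caller): a filesystem that caches entries for
 * a device would typically call this when it stops using the device,
 * e.g. from its put_super path:
 *
 *	mb_cache_shrink(sb->s_bdev);
 */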


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int n;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			  cache->c_name,
			  atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	for (n = 0; n < mb_cache_indexes(cache); n++)
		kfree(cache->c_indexes_hash[n]);
	kfree(cache->c_block_hash);
	kfree(cache);
}


/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce;

	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
	if (ce) {
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_used = 1 + MB_CACHE_WRITER;
		ce->e_queued = 0;
	}
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, if another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @keys: array of additional keys. There must be indexes_count entries
 *        in the array (as specified when creating the cache).
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int keys[])
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY, n;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *lce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (lce->e_bdev == bdev && lce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	for (n = 0; n < mb_cache_indexes(cache); n++) {
		ce->e_indexes[n].o_key = keys[n];
		bucket = hash_long(keys[n], cache->c_bucket_bits);
		list_add(&ce->e_indexes[n].o_list,
			 &cache->c_indexes_hash[n][bucket]);
	}
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
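
/*
 * Example (a hypothetical caller, modelled on the ext2 xattr cache;
 * bdev, block and hash are values computed by the caller, with hash
 * standing in for the single additional index key):
 *
 *	struct mb_cache_entry *ce;
 *	int error;
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (!ce)
 *		return -ENOMEM;
 *	error = mb_cache_entry_insert(ce, bdev, block, &hash);
 *	if (error)
 *		mb_cache_entry_free(ce);	/* -EBUSY: already cached */
 *	else
 *		mb_cache_entry_release(ce);
 */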


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Releases a handle to a cache entry and invalidates the entry: the
 * entry is unhashed, so when the last handle is dropped it is freed
 * rather than put back on the lru list.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
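
/*
 * Example (a hypothetical caller): invalidate the cache entry for a
 * metadata block that is about to be freed on disk:
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);
 */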

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      int index, struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry,
			           e_indexes[index].o_list);
		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, int index,
			  struct block_device *bdev, unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = cache->c_indexes_hash[index][bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
	                           index, bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 * 	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = prev->e_indexes[index].o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
	                           index, bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}
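
/*
 * Example (a hypothetical caller): because a lookup can race with an
 * entry being unhashed, the find functions may return ERR_PTR(-EAGAIN)
 * as well as NULL, so callers should check both; on -EAGAIN the search
 * can simply be restarted:
 *
 *	ce = mb_cache_entry_find_first(cache, 0, bdev, key);
 *	while (!IS_ERR(ce) && ce != NULL) {
 *		if (entry_matches(ce))		/* caller-defined test */
 *			return ce;
 *		ce = mb_cache_entry_find_next(ce, 0, bdev, key);
 *	}
 */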

#endif  /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)