xref: /linux/fs/bcachefs/fast_list.c (revision 25489a4f556414445d342951615178368ee45cde)
// SPDX-License-Identifier: GPL-2.0

/*
 * Fast, unordered lists
 *
 * Supports add, remove, and iterate
 *
 * Underneath, they're a radix tree and an IDA, with a percpu buffer for slot
 * allocation and freeing.
 *
 * This means that adding, removing, and iterating over items is lockless,
 * except when refilling/emptying the percpu slot buffers.
 */

#include "fast_list.h"
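
/*
 * Example usage (an illustrative sketch, not part of the original file;
 * "my_obj" is a made-up caller type):
 *
 *	struct fast_list list;
 *	struct my_obj *obj;
 *
 *	int ret = fast_list_init(&list);
 *	if (ret)
 *		return ret;
 *
 *	int idx = fast_list_add(&list, obj);	// save idx for removal
 *	if (idx < 0)
 *		return idx;
 *	...
 *	fast_list_remove(&list, idx);
 *
 *	fast_list_exit(&list);			// list must be empty by now
 */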

/*
 * Per-cpu cache of free slot indices. With 31 entries the struct works out
 * to 128 bytes (4 + 31 * 4), presumably sized so each percpu buffer is a
 * neat power of two.
 */
struct fast_list_pcpu {
	u32			nr;
	u32			entries[31];
};

/*
 * Allocate a slot index and make sure the radix tree node backing it exists.
 * Slot 0 is never handed out (the IDA range starts at 1), so 0 doubles as
 * the failure value.
 */
static int fast_list_alloc_idx(struct fast_list *l, gfp_t gfp)
{
	int idx = ida_alloc_range(&l->slots_allocated, 1, INT_MAX, gfp);
	if (unlikely(idx < 0))
		return 0;

	if (unlikely(!genradix_ptr_alloc_inlined(&l->items, idx, gfp))) {
		ida_free(&l->slots_allocated, idx);
		return 0;
	}

	return idx;
}

/**
 * fast_list_get_idx - get a slot in a fast_list
 * @l:		list to get slot in
 *
 * This allocates a slot in the radix tree without storing to it, so that the
 * potential memory allocation failure can be handled early, with the actual
 * list add done later from a context where an allocation failure can't be
 * handled.
 *
 * Returns: positive integer on success, -ENOMEM on failure
 */
int fast_list_get_idx(struct fast_list *l)
{
	unsigned long flags;
	int idx;
retry:
	local_irq_save(flags);
	struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer);

	if (unlikely(!lp->nr)) {
		u32 entries[16], nr = 0;

		/*
		 * Buffer is empty: refill from the IDA with interrupts
		 * enabled, since the GFP_KERNEL allocations may block:
		 */
		local_irq_restore(flags);
		while (nr < ARRAY_SIZE(entries) &&
		       (idx = fast_list_alloc_idx(l, GFP_KERNEL)))
			entries[nr++] = idx;
		local_irq_save(flags);

		/* We may have migrated to another CPU in the meantime: */
		lp = this_cpu_ptr(l->buffer);

		while (nr && lp->nr < ARRAY_SIZE(lp->entries))
			lp->entries[lp->nr++] = entries[--nr];

		if (unlikely(nr)) {
			/*
			 * The buffer we landed on is full; give back what
			 * didn't fit and retry:
			 */
			local_irq_restore(flags);
			while (nr)
				ida_free(&l->slots_allocated, entries[--nr]);
			goto retry;
		}

		if (unlikely(!lp->nr)) {
			local_irq_restore(flags);
			return -ENOMEM;
		}
	}

	idx = lp->entries[--lp->nr];
	local_irq_restore(flags);

	return idx;
}
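
/*
 * Illustrative sketch (not from the original file) of the two-phase pattern
 * fast_list_get_idx() enables: reserve the slot where an allocation failure
 * can still be handled, fill it in later where it can't. This mirrors what
 * fast_list_add() does below.
 *
 *	idx = fast_list_get_idx(&list);
 *	if (idx < 0)
 *		return idx;			// handle -ENOMEM here...
 *	...
 *	*genradix_ptr_inlined(&list.items, idx) = item;	// ...not here
 */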

/**
 * fast_list_add - add an item to a fast_list
 * @l:		list
 * @item:	item to add
 *
 * Allocates a slot in the radix tree, stores @item to it, and returns the
 * slot index, which must be passed to fast_list_remove().
 *
 * Returns: positive integer on success, -ENOMEM on failure
 */
int fast_list_add(struct fast_list *l, void *item)
{
	int idx = fast_list_get_idx(l);
	if (idx < 0)
		return idx;

	*genradix_ptr_inlined(&l->items, idx) = item;
	return idx;
}

/**
 * fast_list_remove - remove an item from a fast_list
 * @l:		list
 * @idx:	item's slot index
 *
 * Zeroes out the entry in the radix tree and frees its slot for reuse by
 * future fast_list_add() calls.
 */
void fast_list_remove(struct fast_list *l, unsigned idx)
{
	u32 entries[16], nr = 0;
	unsigned long flags;

	/* Slot 0 is never allocated, so 0 means "not on the list": */
	if (!idx)
		return;

	*genradix_ptr_inlined(&l->items, idx) = NULL;

	local_irq_save(flags);
	struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer);

	/*
	 * If the percpu buffer is full, set aside a batch of entries to free
	 * back to the IDA after interrupts are re-enabled:
	 */
	if (unlikely(lp->nr == ARRAY_SIZE(lp->entries)))
		while (nr < ARRAY_SIZE(entries))
			entries[nr++] = lp->entries[--lp->nr];

	lp->entries[lp->nr++] = idx;
	local_irq_restore(flags);

	if (unlikely(nr))
		while (nr)
			ida_free(&l->slots_allocated, entries[--nr]);
}

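/**
 * fast_list_exit - tear down a fast_list
 * @l:		list to tear down
 *
 * Frees the percpu buffer and all memory owned by the IDA and the radix
 * tree; the list should be empty by the time this is called.
 */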
void fast_list_exit(struct fast_list *l)
{
	/* XXX: warn if list isn't empty */
	free_percpu(l->buffer);
	ida_destroy(&l->slots_allocated);
	genradix_free(&l->items);
}

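/**
 * fast_list_init - initialize a fast_list
 * @l:		list to initialize
 *
 * Returns: 0 on success, -ENOMEM if the percpu buffer allocation fails
 */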
int fast_list_init(struct fast_list *l)
{
	genradix_init(&l->items);
	ida_init(&l->slots_allocated);
	l->buffer = alloc_percpu(*l->buffer);
	if (!l->buffer)
		return -ENOMEM;
	return 0;
}