/* SPDX-License-Identifier: GPL-2.0 */

 * objpool: ring-array based lockless MPMC queue
 *
 * By leveraging a percpu ring-array to mitigate hot spots of memory
 * contention, it delivers near-linear scalability for highly parallel
 * scenarios.
 *
 * 1) The maximum number of objects (capacity) is fixed after objpool creation
 * 2) All pre-allocated objects are managed in a percpu ring array,
 * struct objpool_slot - percpu ring array of objpool
 * @mask: bit mask for modulo capacity to compute array indexes
 *
 * Represents a cpu-local, array-based ring buffer; its size is specialized
 * during objpool initialization, and the slot is kept compact in
 * contiguous memory: the objects assigned to a CPU are stored just after
 * the slot body.
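/*
 * A minimal sketch of the slot layout described above (illustrative, not
 * the header's definition): the field set is inferred from the accessors
 * further below (head/tail/last/mask/entries); exact field order, naming
 * and attributes in the real header may differ.
 */
struct objpool_slot_sketch {
	uint32_t	head;		/* sequence of the next object to pop */
	uint32_t	tail;		/* sequence claimed by the next push */
	uint32_t	last;		/* last published (pop-visible) sequence */
	uint32_t	mask;		/* capacity - 1; capacity is a power of 2 */
	void		*entries[];	/* objects stored right after the body */
};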
 * caller-specified callback for initial object setup; it's only called
 * once for each object
 */

/* caller-specified cleanup callback for objpool destruction */
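/*
 * Illustrative callback matching the setup hook described above. The names
 * my_node and my_objinit are hypothetical; the signature assumed here,
 * int (*)(void *obj, void *context), follows the mainline
 * objpool_init_obj_cb typedef.
 */
struct my_node {
	int payload;
};

static int my_objinit(void *obj, void *context)
{
	struct my_node *node = obj;

	node->payload = 0;	/* objects arrive zeroed; extra setup goes here */
	return 0;
}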
 * struct objpool_head - object pooling metadata
 * @nr_objs: total number of objects to be pre-allocated with this objpool
 * @capacity: max number of objects that a single objpool_slot can manage
 * @context: caller-provided context

	int			capacity;	/* max objects per slot */
 * objpool_init() - initialize objpool and pre-allocated objects
 * @nr_objs: total objects to be pre-allocated by this object pool
 *
 * All pre-allocated objects are zeroed after memory allocation.
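/*
 * Illustrative setup (not part of this header; a real module would
 * #include <linux/objpool.h>): a pool of 128 my_node objects. The parameter
 * list assumes the mainline signature objpool_init(pool, nr_objs,
 * object_size, gfp, context, objinit, release); the release callback may be
 * NULL when no custom cleanup is needed.
 */
static struct objpool_head my_pool;

static int my_pool_setup(void)
{
	return objpool_init(&my_pool, 128, sizeof(struct my_node),
			    GFP_KERNEL, NULL, my_objinit, NULL);
}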
static inline void *__objpool_try_get_slot(struct objpool_head *pool, int cpu)
{
	struct objpool_slot *slot = pool->cpu_slots[cpu];
	uint32_t head = smp_load_acquire(&slot->head);
	void *obj;

	while (head != READ_ONCE(slot->last)) {
		/*
		 * The valid range of 'head' is limited by the condition
		 * 'last != head && last - head <= nr_objs', which is
		 * equivalent to 'last - head - 1 < nr_objs' as 'last' and
		 * 'head' are both unsigned 32-bit values.
		 */
		if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
			head = READ_ONCE(slot->head);	/* stale; reload */
			continue;
		}
		/* fetch the object before committing 'head' */
		obj = READ_ONCE(slot->entries[head & slot->mask]);
		if (try_cmpxchg_release(&slot->head, &head, head + 1))
			return obj;
	}
	return NULL;
}
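/*
 * Spot checks of the unsigned-wraparound equivalence used above, with an
 * illustrative nr_objs of 4 (static_assert per include/linux/build_bug.h):
 */
static_assert((uint32_t)(0u - 0u - 1u) >= 4);	/* last == head: empty, retry */
static_assert((uint32_t)(4u - 0u - 1u) < 4);	/* last - head == 4: in range */
static_assert((uint32_t)(5u - 0u - 1u) >= 4);	/* last - head == 5: overrun */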
 * objpool_pop() - allocate an object from objpool
	void *obj = NULL;
	int i, cpu;

	cpu = raw_smp_processor_id();
	for (i = 0; i < pool->nr_possible_cpus; i++) {
		obj = __objpool_try_get_slot(pool, cpu);
		if (obj)
			break;
		/* empty ring; try the next possible CPU, wrapping around */
		cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
	}
	return obj;
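/*
 * Illustrative round trip (my_use_pool is hypothetical; my_node comes from
 * the sketch above). objpool_pop() returns NULL once every per-CPU ring is
 * drained, so callers must handle allocation failure; the (obj, pool)
 * argument order for objpool_push() mirrors __objpool_try_add_slot() below.
 */
static void my_use_pool(struct objpool_head *pool)
{
	struct my_node *node = objpool_pop(pool);

	if (!node)
		return;		/* all rings empty */

	node->payload++;
	objpool_push(node, pool);	/* give the object back to the pool */
}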
static inline int
__objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
{
	struct objpool_slot *slot = pool->cpu_slots[cpu];
	uint32_t head, tail;

	/* claim a tail sequence; retry if another pusher wins the race */
	tail = READ_ONCE(slot->tail);
	do {
		head = READ_ONCE(slot->head);
		WARN_ON_ONCE(tail - head > pool->nr_objs);
	} while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));

	/* store the object, then publish it for pop() */
	WRITE_ONCE(slot->entries[tail & slot->mask], obj);
	smp_store_release(&slot->last, tail + 1);
	return 0;
}
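/*
 * Recap of the producer ordering above: a pusher first claims a tail
 * sequence with try_cmpxchg_acquire() so concurrent pushers get distinct
 * slots, then stores the object, and only then publishes it by advancing
 * 'last' with release semantics, ensuring a consumer that observes the new
 * 'last' also observes the stored entry.
 */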
 * objpool_push() - reclaim the object and return it to the objpool
 * objpool_drop() - discard the object and deref objpool
 *
 * return: 0 if objpool was released; -EAGAIN if there are still
 * outstanding objects
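/*
 * Illustrative teardown helper (my_pool_teardown is hypothetical, and the
 * (obj, pool) argument order is assumed to match the push-side helpers
 * above): each popped object is handed to objpool_drop(), which returns
 * -EAGAIN while references remain and 0 once the pool itself is released.
 */
static void my_pool_teardown(struct objpool_head *pool)
{
	void *obj;

	while ((obj = objpool_pop(pool)))
		objpool_drop(obj, pool);
}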
 * objpool_free() - release objpool forcibly (all objects to be freed)

 * objpool_fini() - deref object pool (also releasing unused objects)
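/*
 * Illustrative shutdown path (my_pool_exit is hypothetical): when per-object
 * lifetime bookkeeping is not needed, a single objpool_fini() call reclaims
 * the unused objects still sitting in the rings and drops the pool's own
 * reference; objects still in flight are later returned via objpool_drop().
 */
static void my_pool_exit(void)
{
	objpool_fini(&my_pool);
}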