Lines Matching +full:lock +full:-less (lib/percpu_counter.c)

1 // SPDX-License-Identifier: GPL-2.0
65 raw_spin_lock_irqsave(&fbc->lock, flags); in percpu_counter_set()
67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set()
70 fbc->count = amount; in percpu_counter_set()
71 raw_spin_unlock_irqrestore(&fbc->lock, flags); in percpu_counter_set()
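
The matches above track lib/percpu_counter.c; lines 65-71 fall inside percpu_counter_set(), which takes fbc->lock with interrupts disabled, zeroes every possible CPU's slot, and only then stores the new total, so a concurrent reader never sees old per-CPU deltas combined with the new count. A minimal userspace sketch of the same pattern (struct counter, counter_set() and NSLOTS are illustrative stand-ins, not kernel API; a pthread mutex plays the role of the raw spinlock and per-thread slots stand in for per-CPU counters):

    #include <pthread.h>

    #define NSLOTS 8                      /* stand-in for the number of CPUs */

    struct counter {
        pthread_mutex_t lock;             /* plays the role of fbc->lock */
        long long count;                  /* fbc->count */
        int slots[NSLOTS];                /* fbc->counters, one per "CPU" */
    };

    static void counter_set(struct counter *c, long long amount)
    {
        pthread_mutex_lock(&c->lock);     /* ~ raw_spin_lock_irqsave() */
        for (int i = 0; i < NSLOTS; i++)  /* ~ for_each_possible_cpu() */
            c->slots[i] = 0;
        c->count = amount;
        pthread_mutex_unlock(&c->lock);   /* ~ raw_spin_unlock_irqrestore() */
    }
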
83 * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
90 * 1. the fast path uses local cmpxchg (note: no lock prefix)
98 count = this_cpu_read(*fbc->counters); in percpu_counter_add_batch()
101 raw_spin_lock_irqsave(&fbc->lock, flags); in percpu_counter_add_batch()
106 count = __this_cpu_read(*fbc->counters); in percpu_counter_add_batch()
107 fbc->count += count + amount; in percpu_counter_add_batch()
108 __this_cpu_sub(*fbc->counters, count); in percpu_counter_add_batch()
109 raw_spin_unlock_irqrestore(&fbc->lock, flags); in percpu_counter_add_batch()
112 } while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount)); in percpu_counter_add_batch()
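
Lines 90-112 are the CONFIG_HAVE_CMPXCHG_LOCAL flavor of percpu_counter_add_batch(): the fast path retries a CPU-local cmpxchg (no lock prefix, as the comment at line 90 notes), and only when the slot would reach the batch does it fall back to folding everything into fbc->count under the irq-safe lock. A hedged sketch with C11 atomics standing in for this_cpu_try_cmpxchg() (counter_add() and the slot argument are assumptions of this sketch, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct counter {
        pthread_mutex_t lock;
        long long count;
    };

    static void counter_add(struct counter *c, _Atomic int *slot,
                            int amount, int batch)
    {
        int cur = atomic_load_explicit(slot, memory_order_relaxed);

        do {
            if (abs(cur + amount) >= batch) {
                /* Slow path: fold slot + amount into the shared total,
                 * then pull the folded part back out of the slot. */
                pthread_mutex_lock(&c->lock);
                cur = atomic_load_explicit(slot, memory_order_relaxed);
                c->count += cur + amount;
                atomic_fetch_sub_explicit(slot, cur, memory_order_relaxed);
                pthread_mutex_unlock(&c->lock);
                return;
            }
            /* Fast path: the CAS keeps the read-modify-write atomic against
             * whatever else touches this slot (interrupts, in the kernel). */
        } while (!atomic_compare_exchange_weak_explicit(slot, &cur,
                     cur + amount, memory_order_relaxed, memory_order_relaxed));
    }

The re-read under the lock mirrors line 106: by the time the lock is held, the task may have migrated to another CPU or the slot may have changed, so the fast-path snapshot cannot be reused.
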
117 * - The slow path would be ok as protected by an irq-safe spinlock.
118 * - this_cpu_add would be ok as it is irq-safe by definition.
126 count = __this_cpu_read(*fbc->counters) + amount; in percpu_counter_add_batch()
128 raw_spin_lock(&fbc->lock); in percpu_counter_add_batch()
129 fbc->count += count; in percpu_counter_add_batch()
130 __this_cpu_sub(*fbc->counters, count - amount); in percpu_counter_add_batch()
131 raw_spin_unlock(&fbc->lock); in percpu_counter_add_batch()
133 this_cpu_add(*fbc->counters, amount); in percpu_counter_add_batch()
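
Lines 117-133 are the fallback variant for architectures without a cheap local cmpxchg. The comment at lines 117-118 makes the key point: each half would be irq-safe on its own, but the fast-path/slow-path decision and the actual update must also be atomic with respect to each other, hence local_irq_save() around both. In a userspace sketch the closest stand-in is blocking signals (illustrative only; counter_add() and slot are again assumed names):

    #include <pthread.h>
    #include <signal.h>
    #include <stdlib.h>

    struct counter {
        pthread_mutex_t lock;
        long long count;
    };

    static void counter_add(struct counter *c, int *slot, int amount, int batch)
    {
        sigset_t all, old;

        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &old);    /* ~ local_irq_save() */

        int count = *slot + amount;
        if (abs(count) >= batch) {
            pthread_mutex_lock(&c->lock);          /* slow path, line 128 */
            c->count += count;
            *slot -= count - amount;               /* slot ends up at zero */
            pthread_mutex_unlock(&c->lock);
        } else {
            *slot += amount;                       /* cheap common case */
        }

        pthread_sigmask(SIG_SETMASK, &old, NULL);  /* ~ local_irq_restore() */
    }
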
151 raw_spin_lock_irqsave(&fbc->lock, flags); in percpu_counter_sync()
152 count = __this_cpu_read(*fbc->counters); in percpu_counter_sync()
153 fbc->count += count; in percpu_counter_sync()
154 __this_cpu_sub(*fbc->counters, count); in percpu_counter_sync()
155 raw_spin_unlock_irqrestore(&fbc->lock, flags); in percpu_counter_sync()
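
Lines 151-155 are percpu_counter_sync(): under the irq-saved lock, the calling CPU's slot is read, added to fbc->count, and subtracted back out of the slot, i.e. the local delta moves into the shared total while every other CPU's slot is left alone. The same three steps in a sketch (names assumed):

    #include <pthread.h>

    struct counter {
        pthread_mutex_t lock;
        long long count;
    };

    static void counter_sync(struct counter *c, int *my_slot)
    {
        pthread_mutex_lock(&c->lock);
        c->count += *my_slot;   /* lines 152-153: read, then add */
        *my_slot = 0;           /* line 154: net effect of __this_cpu_sub() */
        pthread_mutex_unlock(&c->lock);
    }
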
160 * Add up all the per-cpu counts, return the result. This is a more accurate
161 * but much slower version of percpu_counter_read_positive().
177 raw_spin_lock_irqsave(&fbc->lock, flags); in __percpu_counter_sum()
178 ret = fbc->count; in __percpu_counter_sum()
180 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in __percpu_counter_sum()
183 raw_spin_unlock_irqrestore(&fbc->lock, flags); in __percpu_counter_sum()
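
Lines 177-183 are the body of __percpu_counter_sum(): with the lock held, start from fbc->count and add every CPU's slot, giving the accurate-but-slow read that the comment at line 160 promises. Sketch (in the kernel the loop walks online plus dying CPUs, not a fixed array):

    #include <pthread.h>

    #define NSLOTS 8

    struct counter {
        pthread_mutex_t lock;
        long long count;
        int slots[NSLOTS];
    };

    static long long counter_sum(struct counter *c)
    {
        long long ret;

        pthread_mutex_lock(&c->lock);
        ret = c->count;
        for (int i = 0; i < NSLOTS; i++)
            ret += c->slots[i];
        pthread_mutex_unlock(&c->lock);
        return ret;
    }
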
202 return -ENOMEM; in __percpu_counter_init_many()
206 raw_spin_lock_init(&fbc[i].lock); in __percpu_counter_init_many()
207 lockdep_set_class(&fbc[i].lock, key); in __percpu_counter_init_many()
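
Lines 202-207 come from __percpu_counter_init_many(): one per-CPU allocation backs all nr counters (returning -ENOMEM if it fails), and each counter's raw spinlock is initialized and tagged with the caller's lockdep class. A simplified sketch of that shape, with lockdep and GFP flags dropped (counter_init_many() is a made-up name):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    #define NSLOTS 8

    struct counter {
        pthread_mutex_t lock;
        long long count;
        int *slots;
    };

    static int counter_init_many(struct counter *c, long long amount, int nr)
    {
        /* One allocation carved up among all nr counters, as at line 202. */
        int *mem = calloc((size_t)nr * NSLOTS, sizeof(*mem));

        if (!mem)
            return -ENOMEM;

        for (int i = 0; i < nr; i++) {
            pthread_mutex_init(&c[i].lock, NULL);  /* ~ raw_spin_lock_init() */
            c[i].count = amount;
            c[i].slots = mem + (size_t)i * NSLOTS;
        }
        return 0;
    }
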
277 raw_spin_lock(&fbc->lock); in percpu_counter_cpu_dead()
278 pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_cpu_dead()
279 fbc->count += *pcount; in percpu_counter_cpu_dead()
281 raw_spin_unlock(&fbc->lock); in percpu_counter_cpu_dead()
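
Lines 277-281 are from percpu_counter_cpu_dead(), the CPU-hotplug callback: for every counter on the kernel's global list, the dead CPU's slot is folded into fbc->count and zeroed, so no delta is stranded in per-CPU memory that will never be summed again. Sketch over a plain array instead of that list:

    #include <pthread.h>

    #define NSLOTS 8

    struct counter {
        pthread_mutex_t lock;
        long long count;
        int slots[NSLOTS];
    };

    static void counter_cpu_dead(struct counter *counters, int n, int tid)
    {
        for (int i = 0; i < n; i++) {
            struct counter *c = &counters[i];

            pthread_mutex_lock(&c->lock);
            c->count += c->slots[tid];   /* lines 278-279: reclaim the delta */
            c->slots[tid] = 0;
            pthread_mutex_unlock(&c->lock);
        }
    }
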
290 * Return 1 if greater, 0 if equal and -1 if less
298 if (abs(count - rhs) > (batch * num_online_cpus())) { in __percpu_counter_compare()
302 return -1; in __percpu_counter_compare()
309 return -1; in __percpu_counter_compare()
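
Lines 290-309 are __percpu_counter_compare(). Since every slot stays below batch, the cached fbc->count can be off by at most batch * num_online_cpus(); when |count - rhs| exceeds that margin (line 298) the comparison is decided without touching the lock, and only a close call pays for the exact sum. The same two-stage idea as a sketch (the lockless read of count is racy by design, as in the kernel):

    #include <pthread.h>
    #include <stdlib.h>

    #define NSLOTS 8

    struct counter {
        pthread_mutex_t lock;
        long long count;
        int slots[NSLOTS];
    };

    static long long counter_sum(struct counter *c)   /* exact, locked */
    {
        long long ret;

        pthread_mutex_lock(&c->lock);
        ret = c->count;
        for (int i = 0; i < NSLOTS; i++)
            ret += c->slots[i];
        pthread_mutex_unlock(&c->lock);
        return ret;
    }

    static int counter_compare(struct counter *c, long long rhs, int batch)
    {
        long long count = c->count;       /* cheap approximate read */

        /* Outside the worst-case error margin the answer is certain. */
        if (llabs(count - rhs) > (long long)batch * NSLOTS)
            return count > rhs ? 1 : -1;

        count = counter_sum(c);           /* too close to call: sum exactly */
        if (count > rhs)
            return 1;
        return count < rhs ? -1 : 0;
    }
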
316 * Compare counter, and add amount if total is: less than or equal to limit if
317 * amount is positive, or greater than or equal to limit if amount is negative.
321 * When negative amounts (subs) are given to percpu_counter_limited_add(),
322 * the limit would most naturally be 0 - but other limits are also allowed.
340 count = __this_cpu_read(*fbc->counters); in __percpu_counter_limited_add()
342 /* Skip taking the lock when safe */ in __percpu_counter_limited_add()
344 ((amount > 0 && fbc->count + unknown <= limit) || in __percpu_counter_limited_add()
345 (amount < 0 && fbc->count - unknown >= limit))) { in __percpu_counter_limited_add()
346 this_cpu_add(*fbc->counters, amount); in __percpu_counter_limited_add()
351 raw_spin_lock(&fbc->lock); in __percpu_counter_limited_add()
352 count = fbc->count + amount; in __percpu_counter_limited_add()
356 if (count - unknown > limit) in __percpu_counter_limited_add()
363 if (count - unknown >= limit) in __percpu_counter_limited_add()
372 pcount = per_cpu_ptr(fbc->counters, cpu); in __percpu_counter_limited_add()
385 count = __this_cpu_read(*fbc->counters); in __percpu_counter_limited_add()
386 fbc->count += count + amount; in __percpu_counter_limited_add()
387 __this_cpu_sub(*fbc->counters, count); in __percpu_counter_limited_add()
389 raw_spin_unlock(&fbc->lock); in __percpu_counter_limited_add()
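
Lines 316-389 are __percpu_counter_limited_add(): add amount only if the resulting total stays on the right side of limit, skipping the lock entirely (line 342) when even the worst-case drift hidden in the other CPUs' slots ("unknown", at most batch times the CPU count) cannot change the outcome. A heavily simplified sketch of that idea; the kernel version additionally runs under local_irq_save(), scans the slots incrementally, and rechecks the margin as it goes, all of which is collapsed into one exact sum here:

    #include <pthread.h>
    #include <stdbool.h>

    #define NSLOTS 8

    struct counter {
        pthread_mutex_t lock;
        long long count;
        int slots[NSLOTS];
    };

    static bool counter_limited_add(struct counter *c, int *my_slot,
                                    long long limit, int amount, int batch)
    {
        long long unknown = (long long)batch * NSLOTS;

        /* Fast path (lines 343-346): worst-case drift still keeps the
         * total on the right side of the limit, so no lock is needed. */
        if ((amount > 0 && c->count + unknown <= limit) ||
            (amount < 0 && c->count - unknown >= limit)) {
            *my_slot += amount;           /* ~ this_cpu_add() */
            return true;
        }

        pthread_mutex_lock(&c->lock);
        long long count = c->count + amount;
        for (int i = 0; i < NSLOTS; i++)  /* exact total including all slots */
            count += c->slots[i];

        bool good = (amount > 0) ? (count <= limit) : (count >= limit);
        if (good) {
            /* Fold this thread's slot plus the new amount into the shared
             * total, mirroring lines 385-387. */
            c->count += *my_slot + amount;
            *my_slot = 0;
        }
        pthread_mutex_unlock(&c->lock);
        return good;
    }
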