Lines Matching +full:a +full:- +full:bit
1 // SPDX-License-Identifier: GPL-2.0
16 * For requesting a deferred user space stack trace from NMI context
17 * the architecture must support a safe cmpxchg in NMI context.
19 * for a deferred user space stack trace from an NMI context. If it
20 * does, then it will get -EINVAL.
28 return try_cmpxchg(&info->id.cnt, &old, cnt); in try_assign_cnt()
35 info->id.cnt = cnt; in try_assign_cnt()
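The two try_assign_cnt() fragments above are the NMI-safe and non-NMI-safe variants the comment is describing. A minimal sketch of how that pair fits together, assuming the guard macro name (UNWIND_NMI_SAFE_CMPXCHG here) stands in for whatever the build actually tests:

/* Sketch only: the guard name is an assumption, not the real config symbol */
#ifdef UNWIND_NMI_SAFE_CMPXCHG		/* arch has NMI-safe cmpxchg */
static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
{
	u32 old = 0;

	/* An NMI may race with task context, so claim the id atomically */
	return try_cmpxchg(&info->id.cnt, &old, cnt);
}
#else
static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
{
	/* No NMI requests possible here: a plain test and store suffices */
	if (info->id.cnt)
		return false;
	info->id.cnt = cnt;
	return true;
}
#endif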
40 /* Make the cache fit in a 4K page */
42 ((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
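The arithmetic behind the 4K bound, written out (the unwind_cache fields here are assumed from how they are used elsewhere in this listing):

/*
 * Assuming roughly:
 *	struct unwind_cache {
 *		unsigned long	unwind_completed;
 *		unsigned int	nr_entries;
 *		unsigned long	entries[];
 *	};
 * the allocation below becomes
 *	struct_size(cache, entries, UNWIND_MAX_ENTRIES)
 *	  = sizeof(struct unwind_cache) + UNWIND_MAX_ENTRIES * sizeof(long)
 *	 <= sizeof(struct unwind_cache) + (SZ_4K - sizeof(struct unwind_cache))
 *	  = SZ_4K
 * (the integer division rounds down), so the cache plus all of its
 * entries never exceeds one 4K page.
 */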
56 return test_bit(UNWIND_PENDING_BIT, &info->unwind_mask); in unwind_pending()
60 * This is a unique percpu identifier for a given task entry context.
62 * user space, so that each "entry context" on the CPU gets a unique ID. In
64 * deferred unwind request after a given entry-from-user.
66 * It's combined with the CPU id to make a systemwide-unique "context cookie".
71 * The context cookie is a unique identifier that is assigned to a user
82 if (info->id.cpu) in get_cookie()
83 return info->id.id; in get_cookie()
92 info->id.cpu = smp_processor_id() + 1; /* Must be non-zero */ in get_cookie()

94 return info->id.id; in get_cookie()
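From these fragments, get_cookie() appears to fill in a union that overlays a {cpu, counter} pair on a single 64-bit value; a sketch of that assumed layout:

/* Assumed layout of info->id, inferred from the accesses above */
union unwind_task_id {
	struct {
		u32	cpu;	/* smp_processor_id() + 1, so never zero */
		u32	cnt;	/* per-CPU count of entries from user space */
	};
	u64	id;		/* the combined systemwide-unique cookie */
};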
98 * unwind_user_faultable - Produce a user stacktrace in faultable context
101 * This must be called in a known faultable context (usually when entering
111 struct unwind_task_info *info = &current->unwind_info; in unwind_user_faultable()
117 if (!current->mm) in unwind_user_faultable()
118 return -EINVAL; in unwind_user_faultable()
120 if (!info->cache) { in unwind_user_faultable()
121 info->cache = kzalloc(struct_size(cache, entries, UNWIND_MAX_ENTRIES), in unwind_user_faultable()
123 if (!info->cache) in unwind_user_faultable()
124 return -ENOMEM; in unwind_user_faultable()
127 cache = info->cache; in unwind_user_faultable()
128 trace->entries = cache->entries; in unwind_user_faultable()
130 if (cache->nr_entries) { in unwind_user_faultable()
135 trace->nr = cache->nr_entries; in unwind_user_faultable()
139 trace->nr = 0; in unwind_user_faultable()
142 cache->nr_entries = trace->nr; in unwind_user_faultable()
145 set_bit(UNWIND_USED_BIT, &info->unwind_mask); in unwind_user_faultable()
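A hedged usage sketch for unwind_user_faultable(): the caller supplies an unwind_stacktrace and, on success, reads the entries that now point into the per-task cache (the printing helper is purely illustrative):

/* Illustrative caller, not part of the file above */
static void show_current_user_stack(void)
{
	struct unwind_stacktrace trace;
	unsigned int i;

	/* Must run in task context where user page faults are allowed */
	if (unwind_user_faultable(&trace) < 0)
		return;

	for (i = 0; i < trace.nr; i++)
		pr_info("  user frame %u: %lx\n", i, trace.entries[i]);
}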
152 struct unwind_task_info *info = &task->unwind_info; in process_unwind_deferred()
161 /* Clear pending bit but make sure to have the current bits */ in process_unwind_deferred()
163 (atomic_long_t *)&info->unwind_mask); in process_unwind_deferred()
173 if (info->cache) in process_unwind_deferred()
174 bits &= ~(info->cache->unwind_completed); in process_unwind_deferred()
176 cookie = info->id.id; in process_unwind_deferred()
181 if (test_bit(work->bit, &bits)) { in process_unwind_deferred()
182 work->func(work, &trace, cookie); in process_unwind_deferred()
183 if (info->cache) in process_unwind_deferred()
184 info->cache->unwind_completed |= BIT(work->bit); in process_unwind_deferred()
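The loop above shows the shape of a registered callback: it receives the unwind_work it was registered with, the unwound user trace, and the context cookie. A minimal sketch of such a callback (the body is illustrative):

/* Illustrative callback with the signature the loop above invokes */
static void my_unwind_callback(struct unwind_work *work,
			       struct unwind_stacktrace *trace, u64 cookie)
{
	/*
	 * Runs in task context before the task returns to user space.
	 * 'cookie' matches what unwind_deferred_request() handed back,
	 * so kernel-side events can be stitched to this user stack.
	 */
	pr_info("deferred unwind: cookie=%llx frames=%u\n",
		(unsigned long long)cookie, trace->nr);
}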
196 struct unwind_task_info *info = &current->unwind_info; in unwind_deferred_task_exit()
203 task_work_cancel(task, &info->work); in unwind_deferred_task_exit()
207 * unwind_deferred_request - Request a user stacktrace on task kernel exit
211 * Schedule a user space unwind to be done in task work before exiting the
215 * request for a user space stacktrace for this task since it entered the
216 * kernel. It can be from a request by any caller of this infrastructure.
218 * used to stitch kernel and user stack traces together in post-processing.
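A hedged sketch of the request side: a tracer calls this from its event hook, keeps the returned cookie with the kernel-side event, and treats a return of 1 as "already queued for this entry context" (struct my_tracer and record_event() are made-up names):

/* Illustrative request site; only the unwind_* calls are real API */
static void my_event_hook(struct my_tracer *tr)
{
	u64 cookie;
	int ret;

	ret = unwind_deferred_request(&tr->unwind_work, &cookie);
	if (ret < 0)
		return;	/* kthread, exiting task, or unsupported context */

	/* ret == 1: a callback for this entry context was already queued */
	record_event(tr, cookie);
}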
233 struct unwind_task_info *info = &current->unwind_info; in unwind_deferred_request()
235 unsigned long bit; in unwind_deferred_request() local
240 if ((current->flags & (PF_KTHREAD | PF_EXITING)) || in unwind_deferred_request()
242 return -EINVAL; in unwind_deferred_request()
246 * Trigger a warning to make it obvious that an architecture in unwind_deferred_request()
250 return -EINVAL; in unwind_deferred_request()
253 bit = READ_ONCE(work->bit); in unwind_deferred_request()
254 if (WARN_ON_ONCE(bit < 0)) in unwind_deferred_request()
255 return -EINVAL; in unwind_deferred_request()
258 bit = BIT(bit); in unwind_deferred_request()
264 old = READ_ONCE(info->unwind_mask); in unwind_deferred_request()
267 if (old & bit) in unwind_deferred_request()
271 * This work's bit hasn't been set yet. Now set it with the PENDING in unwind_deferred_request()
272 * bit and fetch the current value of unwind_mask. If either the in unwind_deferred_request()
273 * work's bit or PENDING was already set, then this is already queued in unwind_deferred_request()
274 * to have a callback. in unwind_deferred_request()
276 bits = UNWIND_PENDING | bit; in unwind_deferred_request()
277 old = atomic_long_fetch_or(bits, (atomic_long_t *)&info->unwind_mask); in unwind_deferred_request()
280 * If the work's bit was set, whatever set it had better in unwind_deferred_request()
281 * have also set pending and queued a callback. in unwind_deferred_request()
284 return old & bit; in unwind_deferred_request()
288 ret = task_work_add(current, &info->work, TWA_RESUME); in unwind_deferred_request()
291 WRITE_ONCE(info->unwind_mask, 0); in unwind_deferred_request()
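The atomic dance in the lines above can be read as one fetch_or that both claims this work's bit and sets PENDING, with the previous mask value deciding who queues the task work; a paraphrase in comment form:

/*
 * Paraphrase of the protocol above:
 *
 *	old = atomic_long_fetch_or(UNWIND_PENDING | bit, &unwind_mask);
 *
 *	old already had this work's bit
 *		-> an earlier request covered this entry context;
 *		   PENDING had better have been set and a task_work
 *		   queued by whoever set it (hence the warning).
 *	old had PENDING but not this work's bit
 *		-> a task_work is already queued for another work;
 *		   it will now see this bit as well, nothing to queue.
 *	old had neither
 *		-> this caller claimed the request and must queue the
 *		   task_work itself; if task_work_add() fails, the
 *		   mask is reset so a later request can try again.
 */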
299 int bit; in unwind_deferred_cancel() local
304 bit = work->bit; in unwind_deferred_cancel()
306 /* No work should be using a reserved bit */ in unwind_deferred_cancel()
307 if (WARN_ON_ONCE(BIT(bit) & RESERVED_BITS)) in unwind_deferred_cancel()
311 list_del_rcu(&work->list); in unwind_deferred_cancel()
314 work->bit = -1; in unwind_deferred_cancel()
316 __clear_bit(bit, &unwind_mask); in unwind_deferred_cancel()
321 /* Clear this bit from all threads */ in unwind_deferred_cancel()
323 clear_bit(bit, &t->unwind_info.unwind_mask); in unwind_deferred_cancel()
324 if (t->unwind_info.cache) in unwind_deferred_cancel()
325 clear_bit(bit, &t->unwind_info.cache->unwind_completed); in unwind_deferred_cancel()
335 /* See if there's a bit in the mask available */ in unwind_deferred_init()
337 return -EBUSY; in unwind_deferred_init()
339 work->bit = ffz(unwind_mask); in unwind_deferred_init()
340 __set_bit(work->bit, &unwind_mask); in unwind_deferred_init()
342 list_add_rcu(&work->list, &callbacks); in unwind_deferred_init()
343 work->func = func; in unwind_deferred_init()
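Putting the registration side together, a hedged sketch of a tracer reserving its bit before any requests and releasing it on teardown (everything but the unwind_* calls and my_unwind_callback() from the earlier sketch is made up):

/* Illustrative registration/teardown around the API above */
static struct unwind_work my_work;

static int my_tracer_start(void)
{
	int ret;

	/* Reserves a free bit in unwind_mask and records the callback */
	ret = unwind_deferred_init(&my_work, my_unwind_callback);
	if (ret)
		return ret;	/* -EBUSY: every callback bit is taken */

	return 0;
}

static void my_tracer_stop(void)
{
	/* Releases the bit and clears it from every task's mask/cache */
	unwind_deferred_cancel(&my_work);
}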
349 struct unwind_task_info *info = &task->unwind_info; in unwind_task_init()
352 init_task_work(&info->work, unwind_deferred_task_work); in unwind_task_init()
353 info->unwind_mask = 0; in unwind_task_init()
358 struct unwind_task_info *info = &task->unwind_info; in unwind_task_free()
360 kfree(info->cache); in unwind_task_free()
361 task_work_cancel(task, &info->work); in unwind_task_free()