// SPDX-License-Identifier: GPL-2.0
/*
 * Deferred user space unwinding
 */
#include <linux/sched/task_stack.h>
#include <linux/unwind_deferred.h>
#include <linux/sched/clock.h>
#include <linux/task_work.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/mm.h>

/*
 * Requesting a deferred user space stack trace from NMI context requires
 * that the architecture support a safe cmpxchg in NMI context.
 * Architectures without that support cannot request a deferred user space
 * stack trace from NMI context; such a request will fail with -EINVAL.
 */
#if defined(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)
# define CAN_USE_IN_NMI		1
static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
{
	u32 old = 0;

	return try_cmpxchg(&info->id.cnt, &old, cnt);
}
#else
# define CAN_USE_IN_NMI		0
/* When NMIs are not allowed, this always succeeds */
static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
{
	info->id.cnt = cnt;
	return true;
}
#endif

/* Make the cache fit in a 4K page */
#define UNWIND_MAX_ENTRIES					\
	((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))

/* Guards adding to and reading the list of callbacks */
static DEFINE_MUTEX(callback_mutex);
static LIST_HEAD(callbacks);

#define RESERVED_BITS	(UNWIND_PENDING)

/* Zero'd bits are available for assigning callback users */
static unsigned long unwind_mask = RESERVED_BITS;

static inline bool unwind_pending(struct unwind_task_info *info)
{
	return test_bit(UNWIND_PENDING_BIT, &info->unwind_mask);
}

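/*
 * Overview of the bit allocation scheme implemented below:
 *
 * The global unwind_mask tracks which bits of a task's
 * unwind_info.unwind_mask are in use.  UNWIND_PENDING is reserved, and
 * unwind_deferred_init() hands each registered struct unwind_work one of
 * the remaining zero bits.  unwind_deferred_request() sets the work's bit
 * together with UNWIND_PENDING on the requesting task, and the queued
 * task work clears UNWIND_PENDING and then calls back every work whose
 * bit was set for this entry context.
 */
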
/*
 * This is a unique percpu identifier for a given task entry context.
 * Conceptually, it's incremented every time the CPU enters the kernel from
 * user space, so that each "entry context" on the CPU gets a unique ID. In
 * reality, as an optimization, it's only incremented on demand for the first
 * deferred unwind request after a given entry-from-user.
 *
 * It's combined with the CPU id to make a systemwide-unique "context cookie".
 */
static DEFINE_PER_CPU(u32, unwind_ctx_ctr);

/*
 * The context cookie is a unique identifier that is assigned to a user
 * space stacktrace. As the user space stacktrace remains the same while
 * the task is in the kernel, the cookie is an identifier for the stacktrace.
 * It is possible, however, for the stacktrace to get another cookie if
 * another request is made after the cookie was cleared and before the task
 * reenters user space.
 */
static u64 get_cookie(struct unwind_task_info *info)
{
	u32 cnt = 1;

	if (info->id.cpu)
		return info->id.id;

	/* LSB is always set to ensure 0 is an invalid value */
	cnt |= __this_cpu_read(unwind_ctx_ctr) + 2;
	if (try_assign_cnt(info, cnt)) {
		/* Update the per cpu counter */
		__this_cpu_write(unwind_ctx_ctr, cnt);
	}
	/* Interrupts are disabled, the CPU will always be the same */
	info->id.cpu = smp_processor_id() + 1; /* Must be non-zero */

	return info->id.id;
}

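/*
 * Worked example of the cookie composition above (assuming the id union
 * overlays its cpu and cnt members onto the 64-bit id): a first request on
 * CPU 2 while unwind_ctx_ctr is 4 records cnt == 7 ((4 + 2) | 1) and
 * cpu == 3 (smp_processor_id() + 1), and advances unwind_ctx_ctr to 7.
 * The cookie can therefore never be zero and never collides with a cookie
 * generated on another CPU.
 */
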
/**
 * unwind_user_faultable - Produce a user stacktrace in faultable context
 * @trace: The descriptor that will store the user stacktrace
 *
 * This must be called in a known faultable context (usually when entering
 * or exiting user space). Depending on the available implementations,
 * the @trace will be loaded with the addresses of the user space stacktrace
 * if it can be found.
 *
 * Return: 0 on success and negative on error
 *         On success @trace will contain the user space stacktrace
 */
int unwind_user_faultable(struct unwind_stacktrace *trace)
{
	struct unwind_task_info *info = &current->unwind_info;
	struct unwind_cache *cache;

	/* Should always be called from faultable context */
	might_fault();

	if (current->flags & PF_EXITING)
		return -EINVAL;

	if (!info->cache) {
		info->cache = kzalloc(struct_size(cache, entries, UNWIND_MAX_ENTRIES),
				      GFP_KERNEL);
		if (!info->cache)
			return -ENOMEM;
	}

	cache = info->cache;
	trace->entries = cache->entries;

	if (cache->nr_entries) {
		/*
		 * The user stack has already been previously unwound in this
		 * entry context. Skip the unwind and use the cache.
		 */
		trace->nr = cache->nr_entries;
		return 0;
	}

	trace->nr = 0;
	unwind_user(trace, UNWIND_MAX_ENTRIES);

	cache->nr_entries = trace->nr;

	return 0;
}

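/*
 * Note that on success @trace->entries points into the task's cache
 * (info->cache), which is allocated on first use and freed in
 * unwind_task_free(); the cached nr_entries is what lets a second call
 * within the same entry context skip the unwind and reuse the result.
 */
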
static void unwind_deferred_task_work(struct callback_head *head)
{
	struct unwind_task_info *info = container_of(head, struct unwind_task_info, work);
	struct unwind_stacktrace trace;
	struct unwind_work *work;
	unsigned long bits;
	u64 cookie;

	if (WARN_ON_ONCE(!unwind_pending(info)))
		return;

	/* Clear the pending bit while fetching the current work bits */
	bits = atomic_long_fetch_andnot(UNWIND_PENDING,
					(atomic_long_t *)&info->unwind_mask);

	/*
	 * From here on out, the callback must always be called, even if it's
	 * just an empty trace.
	 */
	trace.nr = 0;
	trace.entries = NULL;

	unwind_user_faultable(&trace);

	cookie = info->id.id;

	guard(mutex)(&callback_mutex);
	list_for_each_entry(work, &callbacks, list) {
		if (test_bit(work->bit, &bits))
			work->func(work, &trace, cookie);
	}
}

/**
 * unwind_deferred_request - Request a user stacktrace on task kernel exit
 * @work: Unwind descriptor requesting the trace
 * @cookie: The cookie of the first request made for this task
 *
 * Schedule a user space unwind to be done in task work before exiting the
 * kernel.
 *
 * The @cookie output is the generated cookie of the very first request
 * for a user space stacktrace for this task since it entered the kernel.
 * It can be from a request by any caller of this infrastructure.
 * Its value will also be passed to the callback function. It can be
 * used to stitch kernel and user stack traces together in post-processing.
 *
 * It's valid to call this function multiple times for the same @work within
 * the same task entry context. Each call will return the same cookie
 * while the task hasn't left the kernel. If the callback has already been
 * called for this entry context, it will not be called again; the request
 * simply reports that it was already handled.
 *
 * Return: 0 if the callback was successfully queued.
 *         1 if the callback is pending or was already executed.
 *         Negative if there's an error.
 *         @cookie holds the cookie of the first request by any user
 */
int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
{
	struct unwind_task_info *info = &current->unwind_info;
	unsigned long old, bits;
	unsigned long bit = BIT(work->bit);
	int ret;

	*cookie = 0;

	if ((current->flags & (PF_KTHREAD | PF_EXITING)) ||
	    !user_mode(task_pt_regs(current)))
		return -EINVAL;

	/*
	 * NMI requires having safe cmpxchg operations.
	 * Trigger a warning to make it obvious that an architecture
	 * is using this in NMI when it should not be.
	 */
	if (WARN_ON_ONCE(!CAN_USE_IN_NMI && in_nmi()))
		return -EINVAL;

	guard(irqsave)();

	*cookie = get_cookie(info);

	old = READ_ONCE(info->unwind_mask);

	/* Is this already queued or executed? */
	if (old & bit)
		return 1;

	/*
	 * This work's bit hasn't been set yet. Now set it along with the
	 * PENDING bit and fetch the current value of unwind_mask. If either
	 * the work's bit or PENDING was already set, then a callback is
	 * already queued.
	 */
	bits = UNWIND_PENDING | bit;
	old = atomic_long_fetch_or(bits, (atomic_long_t *)&info->unwind_mask);
	if (old & bits) {
		/*
		 * If the work's bit was set, whatever set it had better
		 * have also set pending and queued a callback.
		 */
		WARN_ON_ONCE(!(old & UNWIND_PENDING));
		return !!(old & bit);
	}

	/* The work has been claimed, now schedule it. */
	ret = task_work_add(current, &info->work, TWA_RESUME);

	if (WARN_ON_ONCE(ret))
		WRITE_ONCE(info->unwind_mask, 0);

	return ret;
}

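/*
 * Minimal usage sketch (hypothetical caller; the names my_work,
 * my_unwind_cb and record_user_stack are purely illustrative and not part
 * of this file).  A user registers one struct unwind_work, requests a
 * deferred unwind from its event context, and consumes
 * trace->entries[0 .. trace->nr) together with the cookie in the callback,
 * which runs before the task returns to user space:
 *
 *	static struct unwind_work my_work;
 *
 *	static void my_unwind_cb(struct unwind_work *work,
 *				 struct unwind_stacktrace *trace, u64 cookie)
 *	{
 *		record_user_stack(trace, cookie);
 *	}
 *
 *	// at registration time:
 *	ret = unwind_deferred_init(&my_work, my_unwind_cb);
 *
 *	// in the event handler (task context, or NMI where supported):
 *	u64 cookie;
 *	ret = unwind_deferred_request(&my_work, &cookie);
 *
 *	// at teardown:
 *	unwind_deferred_cancel(&my_work);
 *
 * unwind_deferred_request() returns 0 if the callback was queued, 1 if it
 * is already pending or already ran for this entry context, and negative
 * on error.
 */
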
void unwind_deferred_cancel(struct unwind_work *work)
{
	struct task_struct *g, *t;

	if (!work)
		return;

	/* No work should be using a reserved bit */
	if (WARN_ON_ONCE(BIT(work->bit) & RESERVED_BITS))
		return;

	guard(mutex)(&callback_mutex);
	list_del(&work->list);

	__clear_bit(work->bit, &unwind_mask);

	guard(rcu)();
	/* Clear this bit from all threads */
	for_each_process_thread(g, t) {
		clear_bit(work->bit, &t->unwind_info.unwind_mask);
	}
}

int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
{
	memset(work, 0, sizeof(*work));

	guard(mutex)(&callback_mutex);

	/* See if there's a bit in the mask available */
	if (unwind_mask == ~0UL)
		return -EBUSY;

	work->bit = ffz(unwind_mask);
	__set_bit(work->bit, &unwind_mask);

	list_add(&work->list, &callbacks);
	work->func = func;
	return 0;
}

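/*
 * Because unwind_mask is a single unsigned long with UNWIND_PENDING
 * reserved, only as many works as there are remaining bits can be
 * registered at the same time; once every bit is taken,
 * unwind_deferred_init() returns -EBUSY until unwind_deferred_cancel()
 * frees a bit again.
 */
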
void unwind_task_init(struct task_struct *task)
{
	struct unwind_task_info *info = &task->unwind_info;

	memset(info, 0, sizeof(*info));
	init_task_work(&info->work, unwind_deferred_task_work);
	info->unwind_mask = 0;
}

void unwind_task_free(struct task_struct *task)
{
	struct unwind_task_info *info = &task->unwind_info;

	kfree(info->cache);
	task_work_cancel(task, &info->work);
}