xref: /linux/lib/smp_processor_id.c (revision d257f9bf06129613de539ea71ecea60848b662cd)
/*
 * lib/smp_processor_id.c
 *
 * DEBUG_PREEMPT variant of smp_processor_id().
 */
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>

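/*
 * Common helper for the wrappers below: return the current CPU number
 * and, unless the caller is in a context where it cannot migrate,
 * report (rate-limited, with a stack dump) that a CPU-local accessor
 * was used in preemptible code.
 */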
notrace static unsigned int check_preemption_disabled(const char *what1,
							const char *what2)
{
	int this_cpu = raw_smp_processor_id();

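	/*
	 * Disabled preemption or disabled interrupts keep this task on
	 * this CPU, so the CPU number cannot go stale under the caller:
	 */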
	if (likely(preempt_count()))
		goto out;

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state < SYSTEM_SCHEDULING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

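	/* Report the caller's preempt count, minus the disable done above. */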
	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
		what1, what2, preempt_count() - 1, current->comm, current->pid);

	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}

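/*
 * DEBUG_PREEMPT variant of smp_processor_id(): <linux/smp.h> redirects
 * smp_processor_id() to this function when CONFIG_DEBUG_PREEMPT is set.
 */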
notrace unsigned int debug_smp_processor_id(void)
{
	return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);

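/*
 * Preemption check used by the __this_cpu_*() accessors; "op" names the
 * operation so the report reads e.g. "using __this_cpu_read() in ...".
 */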
notrace void __this_cpu_preempt_check(const char *op)
{
	check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
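For context, a minimal sketch of how callers reach debug_smp_processor_id():
with CONFIG_DEBUG_PREEMPT enabled, <linux/smp.h> maps smp_processor_id() onto
the debug variant (simplified, not verbatim from this revision):

/* Sketch of the <linux/smp.h> wiring, assuming CONFIG_DEBUG_PREEMPT. */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif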