xref: /linux/arch/arm64/kernel/watchdog_hld.c (revision 746680ec6696585e30db3e18c93a63df9cbec39c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/nmi.h>
#include <linux/cpufreq.h>
#include <linux/perf/arm_pmu.h>

/*
 * Safe maximum CPU frequency in case a particular platform doesn't
 * implement a cpufreq driver. Although the architecture doesn't put any
 * restriction on the maximum frequency, 5 GHz seems to be a safe maximum
 * given that the available Arm CPUs on the market are clocked well below
 * 5 GHz. On the other hand, we can't make it much higher, as that would
 * lead to a large hard-lockup detection timeout on parts which run
 * slower (e.g. 1 GHz on Developerbox) and don't have a cpufreq driver.
 */
#define SAFE_MAX_CPU_FREQ	5000000000UL // 5 GHz
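/*
 * Convert watchdog_thresh (in seconds) into a perf sample period in CPU
 * cycles, based on the CPU's maximum frequency (cpufreq reports kHz,
 * hence the multiply by 1000). Without a cpufreq driver we fall back to
 * SAFE_MAX_CPU_FREQ: e.g. with the default watchdog_thresh of 10 seconds
 * that yields a period of 5000000000 * 10 = 5 * 10^10 cycles.
 */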
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	unsigned int cpu = smp_processor_id();
	unsigned long max_cpu_freq;

	max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
	if (!max_cpu_freq)
		max_cpu_freq = SAFE_MAX_CPU_FREQ;

	return (u64)max_cpu_freq * watchdog_thresh;
}

bool __init arch_perf_nmi_is_available(void)
{
	/*
	 * hardlockup_detector_perf_init() will succeed even if pseudo-NMI
	 * is turned off; in that case, however, the PMU interrupt acts as
	 * a normal interrupt rather than an NMI and the hardlockup
	 * detector would be broken.
	 */
	return arm_pmu_irq_is_nmi();
}

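/*
 * Recompute this CPU's sample period once its cpufreq policy is created
 * and the real maximum frequency is known. Meant to run on the target
 * CPU itself (see the smp_call_on_cpu() in the notifier below), so
 * smp_processor_id() names the CPU whose event is adjusted. If no
 * maximum frequency is reported, the SAFE_MAX_CPU_FREQ based period is
 * left in place.
 */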
static int watchdog_perf_update_period(void *data)
{
	int cpu = smp_processor_id();
	u64 max_cpu_freq, new_period;

	max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
	if (!max_cpu_freq)
		return 0;

	new_period = watchdog_thresh * max_cpu_freq;
	hardlockup_detector_perf_adjust_period(new_period);

	return 0;
}

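/*
 * cpufreq policy notifier: when a policy is created, have every CPU it
 * covers refresh its hard-lockup sample period using the now-known
 * maximum frequency.
 */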
static int watchdog_freq_notifier_callback(struct notifier_block *nb,
					   unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return NOTIFY_DONE;

	/*
	 * Let each online CPU related to the policy update the period on
	 * its own. This serializes with the framework's starting/stopping
	 * of the lockup detector (softlockup_{start,stop}_all) and avoids
	 * a potential race condition. Otherwise we may hit the theoretical
	 * race below (cores 0 and 1 share the same policy):
	 *
	 * [core 0]                      [core 1]
	 *                               hardlockup_detector_event_create()
	 *                                 hw_nmi_get_sample_period()
	 * (cpufreq registered, notifier callback invoked)
	 * watchdog_freq_notifier_callback()
	 *   watchdog_perf_update_period()
	 *   (since core 1's event is not yet created,
	 *    the period is not set)
	 *                                 perf_event_create_kernel_counter()
	 *                                 (event's period is based on SAFE_MAX_CPU_FREQ)
	 */
	for_each_cpu(cpu, policy->cpus)
		smp_call_on_cpu(cpu, watchdog_perf_update_period, NULL, false);

	return NOTIFY_DONE;
}

static struct notifier_block watchdog_freq_notifier = {
	.notifier_call = watchdog_freq_notifier_callback,
};

static int __init init_watchdog_freq_notifier(void)
{
	return cpufreq_register_notifier(&watchdog_freq_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(init_watchdog_freq_notifier);