// SPDX-License-Identifier: GPL-2.0+
/*
 *	watchdog_core.c
 *
 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *						All Rights Reserved.
 *
 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *	This source code is part of the generic code that can be used
 *	by all the watchdog timer drivers.
 *
 *	Based on source code of the following authors:
 *	  Matt Domsch <Matt_Domsch@dell.com>,
 *	  Rob Radez <rob@osinvestor.com>,
 *	  Rusty Lynch <rusty@linux.co.intel.com>
 *	  Satyam Sharma <satyam@infradead.org>
 *	  Randy Dunlap <randy.dunlap@oracle.com>
 *
 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 *	admit liability nor provide warranty for any of this software.
 *	This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>	/* For EXPORT_SYMBOL/module stuff/... */
#include <linux/types.h>	/* For standard types */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/reboot.h>	/* For restart handler */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/idr.h>		/* For ida_* macros */
#include <linux/err.h>		/* For IS_ERR macros */
#include <linux/of.h>		/* For of_property_read_u32/of_alias_get_id */
#include <linux/suspend.h>	/* For register_pm_notifier/PM_* events */

#include "watchdog_core.h"	/* For watchdog_dev_register/... */

#define CREATE_TRACE_POINTS
#include <trace/events/watchdog.h>

static DEFINE_IDA(watchdog_ida);

static int stop_on_reboot = -1;
module_param(stop_on_reboot, int, 0444);
MODULE_PARM_DESC(stop_on_reboot, "Stop watchdogs on reboot (0=keep watching, 1=stop)");
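
/*
 * Illustrative note, not taken from this file: since stop_on_reboot is a
 * read-only (0444) module parameter, the reboot policy can only be forced at
 * boot or module load time, e.g. (assuming the core is built as the usual
 * "watchdog" module/builtin):
 *
 *	watchdog.stop_on_reboot=1	on the kernel command line, or
 *	modprobe watchdog stop_on_reboot=1
 *
 * Leaving the parameter at -1 keeps each driver's own WDOG_STOP_ON_REBOOT
 * default, as handled in __watchdog_register_device() below.
 */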

/*
 * Deferred Registration infrastructure.
 *
 * Sometimes watchdog drivers need to be loaded as soon as possible, for
 * example when it is impossible to disable the watchdog. Raising the initcall
 * level of the watchdog driver is one way to do so, but then the misc device
 * may not be ready yet (it only becomes available at subsys_initcall time),
 * and watchdog_core needs the misc device to register the watchdog as a char
 * device.
 *
 * The deferred registration infrastructure offers a way for the watchdog
 * subsystem to register a watchdog properly, even before the misc device is
 * ready.
 */
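
/*
 * Illustrative sketch (hypothetical foo_wdt driver, not part of this file):
 * a driver whose hardware watchdog cannot be disabled may register early,
 * e.g.
 *
 *	static int __init foo_wdt_init(void)
 *	{
 *		...
 *		return watchdog_register_device(&foo_wdd);
 *	}
 *	arch_initcall(foo_wdt_init);
 *
 * Because arch_initcall runs before this file's subsys_initcall_sync, the
 * device is only queued on wtd_deferred_reg_list here; the real registration
 * happens later from watchdog_deferred_registration().
 */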

static DEFINE_MUTEX(wtd_deferred_reg_mutex);
static LIST_HEAD(wtd_deferred_reg_list);
static bool wtd_deferred_reg_done;

static void watchdog_deferred_registration_add(struct watchdog_device *wdd)
{
	list_add_tail(&wdd->deferred,
		      &wtd_deferred_reg_list);
}

static void watchdog_deferred_registration_del(struct watchdog_device *wdd)
{
	struct list_head *p, *n;
	struct watchdog_device *wdd_tmp;

	list_for_each_safe(p, n, &wtd_deferred_reg_list) {
		wdd_tmp = list_entry(p, struct watchdog_device,
				     deferred);
		if (wdd_tmp == wdd) {
			list_del(&wdd_tmp->deferred);
			break;
		}
	}
}

static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
{
	/*
	 * Check that we have valid min and max timeout values; if not,
	 * reset them both to 0 (= not used or unknown).
	 */
	if (!wdd->max_hw_heartbeat_ms && wdd->min_timeout > wdd->max_timeout) {
		pr_info("Invalid min and max timeout values, resetting to 0!\n");
		wdd->min_timeout = 0;
		wdd->max_timeout = 0;
	}
}

/**
 * watchdog_init_timeout() - initialize the timeout field
 * @wdd: watchdog device
 * @timeout_parm: timeout module parameter
 * @dev: Device that stores the timeout-sec property
 *
 * Initialize the timeout field of the watchdog_device struct with either the
 * timeout module parameter (if it is a valid value) or the timeout-sec
 * property (only if it is a valid value and the timeout_parm is out of
 * bounds). If neither is valid, the old value is kept (which should normally
 * be the default timeout value). Note that for the module parameter, '0'
 * means 'use the default', while it is an invalid value for the timeout-sec
 * property; simply omit the property to use the default value.
 *
 * A zero is returned on success or -EINVAL if all provided values are out of
 * bounds.
 */
int watchdog_init_timeout(struct watchdog_device *wdd,
				unsigned int timeout_parm, struct device *dev)
{
	const char *dev_str = wdd->parent ? dev_name(wdd->parent) :
			      (const char *)wdd->info->identity;
	unsigned int t = 0;
	int ret = 0;

	watchdog_check_min_max_timeout(wdd);

	/* check the driver supplied value (likely a module parameter) first */
	if (timeout_parm) {
		if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
			wdd->timeout = timeout_parm;
			return 0;
		}
		pr_err("%s: driver supplied timeout (%u) out of range\n",
			dev_str, timeout_parm);
		ret = -EINVAL;
	}

	/* try to get the "timeout-sec" property */
	if (dev && dev->of_node &&
	    of_property_read_u32(dev->of_node, "timeout-sec", &t) == 0) {
		if (t && !watchdog_timeout_invalid(wdd, t)) {
			wdd->timeout = t;
			return 0;
		}
		pr_err("%s: DT supplied timeout (%u) out of range\n", dev_str, t);
		ret = -EINVAL;
	}

	if (ret < 0 && wdd->timeout)
		pr_warn("%s: falling back to default timeout (%u)\n", dev_str,
			wdd->timeout);

	return ret;
}
EXPORT_SYMBOL_GPL(watchdog_init_timeout);
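
/*
 * Illustrative driver-side sketch (hypothetical names such as foo_wdt_probe,
 * heartbeat and foo_wdd, not part of this file):
 *
 *	static unsigned int heartbeat;
 *	module_param(heartbeat, uint, 0);
 *
 *	static int foo_wdt_probe(struct platform_device *pdev)
 *	{
 *		...
 *		foo_wdd->min_timeout = 1;
 *		foo_wdd->max_timeout = 255;
 *		foo_wdd->timeout = 30;		// preset default
 *		watchdog_init_timeout(foo_wdd, heartbeat, &pdev->dev);
 *		...
 *	}
 *
 * A zero 'heartbeat' lets a valid "timeout-sec" device tree property win; if
 * neither source provides a usable value, the preset default of 30 seconds is
 * kept and -EINVAL is returned only when an out-of-range value was actually
 * supplied.
 */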

static int watchdog_reboot_notifier(struct notifier_block *nb,
				    unsigned long code, void *data)
{
	struct watchdog_device *wdd;

	wdd = container_of(nb, struct watchdog_device, reboot_nb);
	if (code == SYS_DOWN || code == SYS_HALT || code == SYS_POWER_OFF) {
		if (watchdog_hw_running(wdd)) {
			int ret;

			ret = wdd->ops->stop(wdd);
			trace_watchdog_stop(wdd, ret);
			if (ret)
				return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}

static int watchdog_restart_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
						   restart_nb);
	int ret;

	ret = wdd->ops->restart(wdd, action, data);
	if (ret)
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

static int watchdog_pm_notifier(struct notifier_block *nb, unsigned long mode,
				void *data)
{
	struct watchdog_device *wdd;
	int ret = 0;

	wdd = container_of(nb, struct watchdog_device, pm_nb);

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_RESTORE_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = watchdog_dev_suspend(wdd);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		ret = watchdog_dev_resume(wdd);
		break;
	}

	if (ret)
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

/**
 * watchdog_set_restart_priority - Change priority of restart handler
 * @wdd: watchdog device
 * @priority: priority of the restart handler, should follow these guidelines:
 *   0:   use watchdog's restart function as last resort, it has limited
 *        restart capabilities
 *   128: default restart handler, use if no other handler is expected to be
 *        available and/or if it is sufficient to restart the entire system
 *   255: preempt all other handlers
 *
 * If a wdd->ops->restart function is provided when watchdog_register_device is
 * called, it will be registered as a restart handler with the priority given
 * here.
 */
void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority)
{
	wdd->restart_nb.priority = priority;
}
EXPORT_SYMBOL_GPL(watchdog_set_restart_priority);
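
/*
 * Illustrative sketch (hypothetical foo_wdt_* names, not part of this file):
 * a driver whose watchdog can also reset the whole board typically wires up
 * ops->restart and picks a priority before registering:
 *
 *	static const struct watchdog_ops foo_wdt_ops = {
 *		.owner	 = THIS_MODULE,
 *		.start	 = foo_wdt_start,
 *		.stop	 = foo_wdt_stop,
 *		.restart = foo_wdt_restart,
 *	};
 *
 *	watchdog_set_restart_priority(foo_wdd, 128);
 *	watchdog_register_device(foo_wdd);
 *
 * With the (usually zero-initialized) default priority of 0, the watchdog is
 * only used as a last-resort restart method.
 */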

static int __watchdog_register_device(struct watchdog_device *wdd)
{
	int ret, id = -1;

	if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
		return -EINVAL;

	/* Mandatory operations need to be supported */
	if (!wdd->ops->start || (!wdd->ops->stop && !wdd->max_hw_heartbeat_ms))
		return -EINVAL;

	watchdog_check_min_max_timeout(wdd);

	/*
	 * Note: now that all watchdog_device data has been verified, we
	 * will not check this anymore in other functions. If data gets
	 * corrupted in a later stage then we expect a kernel panic!
	 */

	/* Use alias for watchdog id if possible */
	if (wdd->parent) {
		ret = of_alias_get_id(wdd->parent->of_node, "watchdog");
		if (ret >= 0)
			id = ida_alloc_range(&watchdog_ida, ret, ret,
					     GFP_KERNEL);
	}

	if (id < 0)
		id = ida_alloc_max(&watchdog_ida, MAX_DOGS - 1, GFP_KERNEL);

	if (id < 0)
		return id;
	wdd->id = id;

	ret = watchdog_dev_register(wdd);
	if (ret) {
		ida_free(&watchdog_ida, id);
		if (!(id == 0 && ret == -EBUSY))
			return ret;

		/* Retry in case a legacy watchdog module exists */
		id = ida_alloc_range(&watchdog_ida, 1, MAX_DOGS - 1,
				     GFP_KERNEL);
		if (id < 0)
			return id;
		wdd->id = id;

		ret = watchdog_dev_register(wdd);
		if (ret) {
			ida_free(&watchdog_ida, id);
			return ret;
		}
	}

	/* Module parameter to force watchdog policy on reboot. */
	if (stop_on_reboot != -1) {
		if (stop_on_reboot)
			set_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
		else
			clear_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
	}

	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
		if (!wdd->ops->stop)
			pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
		else {
			wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;

			ret = register_reboot_notifier(&wdd->reboot_nb);
			if (ret) {
				pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
					wdd->id, ret);
				watchdog_dev_unregister(wdd);
				ida_free(&watchdog_ida, id);
				return ret;
			}
		}
	}

	if (wdd->ops->restart) {
		wdd->restart_nb.notifier_call = watchdog_restart_notifier;

		ret = register_restart_handler(&wdd->restart_nb);
		if (ret)
			pr_warn("watchdog%d: Cannot register restart handler (%d)\n",
				wdd->id, ret);
	}

	if (test_bit(WDOG_NO_PING_ON_SUSPEND, &wdd->status)) {
		wdd->pm_nb.notifier_call = watchdog_pm_notifier;

		ret = register_pm_notifier(&wdd->pm_nb);
		if (ret)
			pr_warn("watchdog%d: Cannot register pm handler (%d)\n",
				wdd->id, ret);
	}

	return 0;
}

/**
 * watchdog_register_device() - register a watchdog device
 * @wdd: watchdog device
 *
 * Register a watchdog device with the kernel so that the
 * watchdog timer can be accessed from userspace.
 *
 * A zero is returned on success and a negative errno code for
 * failure.
 */

int watchdog_register_device(struct watchdog_device *wdd)
{
	const char *dev_str;
	int ret = 0;

	mutex_lock(&wtd_deferred_reg_mutex);
	if (wtd_deferred_reg_done)
		ret = __watchdog_register_device(wdd);
	else
		watchdog_deferred_registration_add(wdd);
	mutex_unlock(&wtd_deferred_reg_mutex);

	if (ret) {
		dev_str = wdd->parent ? dev_name(wdd->parent) :
			  (const char *)wdd->info->identity;
		pr_err("%s: failed to register watchdog device (err = %d)\n",
			dev_str, ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(watchdog_register_device);
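
/*
 * Illustrative pairing (hypothetical foo_wdd, not part of this file): a
 * driver that does not use the devm variant registers in probe() and must
 * unregister in remove():
 *
 *	// in the probe path:
 *	ret = watchdog_register_device(&foo_wdd);
 *	if (ret)
 *		return ret;
 *
 *	// in the remove path:
 *	watchdog_unregister_device(&foo_wdd);
 *
 * Note that a failure is already logged here, so callers do not need to print
 * their own error message.
 */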

static void __watchdog_unregister_device(struct watchdog_device *wdd)
{
	if (wdd == NULL)
		return;

	if (wdd->ops->restart)
		unregister_restart_handler(&wdd->restart_nb);

	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
		unregister_reboot_notifier(&wdd->reboot_nb);

	watchdog_dev_unregister(wdd);
	ida_free(&watchdog_ida, wdd->id);
}

/**
 * watchdog_unregister_device() - unregister a watchdog device
 * @wdd: watchdog device to unregister
 *
 * Unregister a watchdog device that was previously successfully
 * registered with watchdog_register_device().
 */

void watchdog_unregister_device(struct watchdog_device *wdd)
{
	mutex_lock(&wtd_deferred_reg_mutex);
	if (wtd_deferred_reg_done)
		__watchdog_unregister_device(wdd);
	else
		watchdog_deferred_registration_del(wdd);
	mutex_unlock(&wtd_deferred_reg_mutex);
}
EXPORT_SYMBOL_GPL(watchdog_unregister_device);

static void devm_watchdog_unregister_device(struct device *dev, void *res)
{
	watchdog_unregister_device(*(struct watchdog_device **)res);
}

/**
 * devm_watchdog_register_device() - resource managed watchdog_register_device()
 * @dev: device that is registering this watchdog device
 * @wdd: watchdog device
 *
 * Managed watchdog_register_device(). For a watchdog device registered by this
 * function, watchdog_unregister_device() is automatically called on driver
 * detach. See watchdog_register_device() for more information.
 */
int devm_watchdog_register_device(struct device *dev,
				struct watchdog_device *wdd)
{
	struct watchdog_device **rcwdd;
	int ret;

	rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd),
			     GFP_KERNEL);
	if (!rcwdd)
		return -ENOMEM;

	ret = watchdog_register_device(wdd);
	if (!ret) {
		*rcwdd = wdd;
		devres_add(dev, rcwdd);
	} else {
		devres_free(rcwdd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_watchdog_register_device);
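
/*
 * Illustrative sketch (hypothetical foo_wdt_probe/foo_wdd, not part of this
 * file): with the managed variant no remove() cleanup is needed, since the
 * devres release callback unregisters the watchdog on driver detach:
 *
 *	static int foo_wdt_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return devm_watchdog_register_device(&pdev->dev, foo_wdd);
 *	}
 */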

static int __init watchdog_deferred_registration(void)
{
	mutex_lock(&wtd_deferred_reg_mutex);
	wtd_deferred_reg_done = true;
	while (!list_empty(&wtd_deferred_reg_list)) {
		struct watchdog_device *wdd;

		wdd = list_first_entry(&wtd_deferred_reg_list,
				       struct watchdog_device, deferred);
		list_del(&wdd->deferred);
		__watchdog_register_device(wdd);
	}
	mutex_unlock(&wtd_deferred_reg_mutex);
	return 0;
}

static int __init watchdog_init(void)
{
	int err;

	err = watchdog_dev_init();
	if (err < 0)
		return err;

	watchdog_deferred_registration();
	return 0;
}

static void __exit watchdog_exit(void)
{
	watchdog_dev_exit();
	ida_destroy(&watchdog_ida);
}

subsys_initcall_sync(watchdog_init);
module_exit(watchdog_exit);

MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("WatchDog Timer Driver Core");
MODULE_LICENSE("GPL");