// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2010-2011 Picochip Ltd., Jamie Iles
 * https://www.picochip.com
 *
 * This file implements a driver for the Synopsys DesignWare watchdog device
 * found in many subsystems. The watchdog has 16 different timeout periods
 * and these are a function of the input clock frequency.
 *
 * The DesignWare watchdog cannot be stopped once it has been started, so we
 * do not implement a stop function. The watchdog core will continue to send
 * heartbeat requests after the watchdog device has been closed.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/reset.h>
#include <linux/watchdog.h>

#define WDOG_CONTROL_REG_OFFSET			0x00
#define WDOG_CONTROL_REG_WDT_EN_MASK		0x01
#define WDOG_CONTROL_REG_RESP_MODE_MASK		0x02
#define WDOG_TIMEOUT_RANGE_REG_OFFSET		0x04
#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT	4
#define WDOG_CURRENT_COUNT_REG_OFFSET		0x08
#define WDOG_COUNTER_RESTART_REG_OFFSET		0x0c
#define WDOG_COUNTER_RESTART_KICK_VALUE		0x76
#define WDOG_INTERRUPT_STATUS_REG_OFFSET	0x10
#define WDOG_INTERRUPT_CLEAR_REG_OFFSET		0x14
#define WDOG_COMP_PARAMS_5_REG_OFFSET		0xe4
#define WDOG_COMP_PARAMS_4_REG_OFFSET		0xe8
#define WDOG_COMP_PARAMS_3_REG_OFFSET		0xec
#define WDOG_COMP_PARAMS_2_REG_OFFSET		0xf0
#define WDOG_COMP_PARAMS_1_REG_OFFSET		0xf4
#define WDOG_COMP_PARAMS_1_USE_FIX_TOP		BIT(6)
#define WDOG_COMP_VERSION_REG_OFFSET		0xf8
#define WDOG_COMP_TYPE_REG_OFFSET		0xfc

/* There are sixteen TOPs (timeout periods) that can be set in the watchdog. */
#define DW_WDT_NUM_TOPS		16
#define DW_WDT_FIX_TOP(_idx)	(1U << (16 + _idx))
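/*
 * Illustrative arithmetic (hypothetical clock rate): each fixed TOP i counts
 * 2^(16 + i) clock cycles, so with a 25 MHz input clock TOP 0 expires after
 * 65536 / 25000000 ~= 2.6 ms and TOP 15 after 2^31 / 25000000 ~= 85.9 s.
 */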

#define DW_WDT_DEFAULT_SECONDS	30

static const u32 dw_wdt_fix_tops[DW_WDT_NUM_TOPS] = {
	DW_WDT_FIX_TOP(0), DW_WDT_FIX_TOP(1), DW_WDT_FIX_TOP(2),
	DW_WDT_FIX_TOP(3), DW_WDT_FIX_TOP(4), DW_WDT_FIX_TOP(5),
	DW_WDT_FIX_TOP(6), DW_WDT_FIX_TOP(7), DW_WDT_FIX_TOP(8),
	DW_WDT_FIX_TOP(9), DW_WDT_FIX_TOP(10), DW_WDT_FIX_TOP(11),
	DW_WDT_FIX_TOP(12), DW_WDT_FIX_TOP(13), DW_WDT_FIX_TOP(14),
	DW_WDT_FIX_TOP(15)
};

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
		 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

enum dw_wdt_rmod {
	DW_WDT_RMOD_RESET = 1,
	DW_WDT_RMOD_IRQ = 2
};

struct dw_wdt_timeout {
	u32 top_val;
	unsigned int sec;
	unsigned int msec;
};

struct dw_wdt {
	void __iomem		*regs;
	struct clk		*clk;
	struct clk		*pclk;
	unsigned long		rate;
	enum dw_wdt_rmod	rmod;
	struct dw_wdt_timeout	timeouts[DW_WDT_NUM_TOPS];
	struct watchdog_device	wdd;
	struct reset_control	*rst;
	/* Save/restore */
	u32			control;
	u32			timeout;

#ifdef CONFIG_DEBUG_FS
	struct dentry		*dbgfs_dir;
#endif
};

#define to_dw_wdt(wdd)	container_of(wdd, struct dw_wdt, wdd)

static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt)
{
	return readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET) &
		WDOG_CONTROL_REG_WDT_EN_MASK;
}

static void dw_wdt_update_mode(struct dw_wdt *dw_wdt, enum dw_wdt_rmod rmod)
{
	u32 val;

	val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
	if (rmod == DW_WDT_RMOD_IRQ)
		val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
	else
		val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
	writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);

	dw_wdt->rmod = rmod;
}

static unsigned int dw_wdt_find_best_top(struct dw_wdt *dw_wdt,
					 unsigned int timeout, u32 *top_val)
{
	int idx;

	/*
	 * Find a TOP with a timeout greater than or equal to the requested
	 * value. Note we'll select the TOP with the maximum timeout if the
	 * requested timeout couldn't be reached.
	 */
	for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
		if (dw_wdt->timeouts[idx].sec >= timeout)
			break;
	}

	if (idx == DW_WDT_NUM_TOPS)
		--idx;

	*top_val = dw_wdt->timeouts[idx].top_val;

	return dw_wdt->timeouts[idx].sec;
}

static unsigned int dw_wdt_get_min_timeout(struct dw_wdt *dw_wdt)
{
	int idx;

	/*
	 * We'll find a timeout greater than or equal to one second anyway,
	 * because the driver probe would have failed if there was none.
	 */
	for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
		if (dw_wdt->timeouts[idx].sec)
			break;
	}

	return dw_wdt->timeouts[idx].sec;
}

static unsigned int dw_wdt_get_max_timeout_ms(struct dw_wdt *dw_wdt)
{
	struct dw_wdt_timeout *timeout = &dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1];
	u64 msec;

	msec = (u64)timeout->sec * MSEC_PER_SEC + timeout->msec;

	return msec < UINT_MAX ? msec : UINT_MAX;
}

static unsigned int dw_wdt_get_timeout(struct dw_wdt *dw_wdt)
{
	int top_val = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
	int idx;

	for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
		if (dw_wdt->timeouts[idx].top_val == top_val)
			break;
	}

	/*
	 * In IRQ mode, due to the two-stage counter, the actual timeout is
	 * twice the TOP setting.
	 */
	return dw_wdt->timeouts[idx].sec * dw_wdt->rmod;
}

static int dw_wdt_ping(struct watchdog_device *wdd)
{
	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);

	writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt->regs +
	       WDOG_COUNTER_RESTART_REG_OFFSET);

	return 0;
}

static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
{
	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
	unsigned int timeout;
	u32 top_val;

	/*
	 * Note that having IRQ mode enabled means a non-zero pre-timeout has
	 * been set up. In this case we try to find a TOP as close to half of
	 * the requested timeout as possible, since the DW Watchdog IRQ mode
	 * is designed as a two-stage counter: the first timeout raises the
	 * pre-timeout interrupt, the second timeout performs the system
	 * reset. So the effective watchdog-caused reset happens after two
	 * watchdog TOPs have elapsed.
	 */
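	/*
	 * Illustrative example, assuming a hypothetical TOPs ladder of
	 * 1, 2, 4, 8, 16, ... seconds: a requested timeout of 30 s with IRQ
	 * mode enabled is halved to 15 s, the closest TOP of at least 15 s
	 * (16 s) is picked, so the pre-timeout IRQ fires after ~16 s and the
	 * reset roughly 16 s later.
	 */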
	timeout = dw_wdt_find_best_top(dw_wdt, DIV_ROUND_UP(top_s, dw_wdt->rmod),
				       &top_val);
	if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
		wdd->pretimeout = timeout;
	else
		wdd->pretimeout = 0;

	/*
	 * Set the new value in the watchdog. Some versions of dw_wdt
	 * have TOPINIT in the TIMEOUT_RANGE register (as per
	 * CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we
	 * effectively get a pat of the watchdog right here.
	 */
	writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
	       dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);

	/* Kick new TOP value into the watchdog counter if activated. */
	if (watchdog_active(wdd))
		dw_wdt_ping(wdd);

	/*
	 * If the user sets a timeout bigger than the hardware can support,
	 * the watchdog core (watchdog_dev.c) takes care of feeding the
	 * watchdog before wdd->max_hw_heartbeat_ms elapses.
	 */
	if (top_s * 1000 <= wdd->max_hw_heartbeat_ms)
		wdd->timeout = timeout * dw_wdt->rmod;
	else
		wdd->timeout = top_s;

	return 0;
}

static int dw_wdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
{
	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);

	/*
	 * We ignore the actual value of the timeout passed from user space
	 * and only use it as a flag indicating whether the pretimeout
	 * functionality is intended to be activated.
	 */
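	/*
	 * (User space typically reaches this path through the
	 * WDIOC_SETPRETIMEOUT ioctl handled by the watchdog core: any
	 * non-zero value switches the controller to IRQ response mode,
	 * zero switches it back to plain reset mode.)
	 */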
	dw_wdt_update_mode(dw_wdt, req ? DW_WDT_RMOD_IRQ : DW_WDT_RMOD_RESET);
	dw_wdt_set_timeout(wdd, wdd->timeout);

	return 0;
}

static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
{
	u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);

	/* Disable/enable interrupt mode depending on the RMOD flag. */
	if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
		val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
	else
		val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
	/* Enable watchdog. */
	val |= WDOG_CONTROL_REG_WDT_EN_MASK;
	writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
}

static int dw_wdt_start(struct watchdog_device *wdd)
{
	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);

	dw_wdt_set_timeout(wdd, wdd->timeout);
	dw_wdt_ping(&dw_wdt->wdd);
	dw_wdt_arm_system_reset(dw_wdt);

	return 0;
}

static int dw_wdt_stop(struct watchdog_device *wdd)
{
	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);

	if (!dw_wdt->rst) {
		set_bit(WDOG_HW_RUNNING, &wdd->status);
		return 0;
	}

	reset_control_assert(dw_wdt->rst);
	reset_control_deassert(dw_wdt->rst);

	return 0;
}

static int dw_wdt_restart(struct watchdog_device *wdd,
			  unsigned long action, void *data)
{
	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);

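	/*
	 * Program the smallest TOP and force plain reset mode, then either
	 * kick the already running counter or arm it, so that the watchdog
	 * expires (and resets the system) as soon as possible.
	 */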
	writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
	dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
	if (dw_wdt_is_enabled(dw_wdt))
		writel(WDOG_COUNTER_RESTART_KICK_VALUE,
		       dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
	else
		dw_wdt_arm_system_reset(dw_wdt);

	/* wait for reset to assert... */
	mdelay(500);

	return 0;
}

static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd)
{
	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
	unsigned int sec;
	u32 val;

	val = readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET);
	sec = val / dw_wdt->rate;

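	/*
	 * In IRQ mode the counter effectively runs through two stages: if
	 * the pre-timeout interrupt hasn't fired yet, the whole second stage
	 * (one more pre-timeout period) still lies ahead, so account for it.
	 */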
	if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) {
		val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
		if (!val)
			sec += wdd->pretimeout;
	}

	return sec;
}

static const struct watchdog_info dw_wdt_ident = {
	.options	= WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
			  WDIOF_MAGICCLOSE,
	.identity	= "Synopsys DesignWare Watchdog",
};

static const struct watchdog_info dw_wdt_pt_ident = {
	.options	= WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
			  WDIOF_PRETIMEOUT | WDIOF_MAGICCLOSE,
	.identity	= "Synopsys DesignWare Watchdog",
};

static const struct watchdog_ops dw_wdt_ops = {
	.owner		= THIS_MODULE,
	.start		= dw_wdt_start,
	.stop		= dw_wdt_stop,
	.ping		= dw_wdt_ping,
	.set_timeout	= dw_wdt_set_timeout,
	.set_pretimeout	= dw_wdt_set_pretimeout,
	.get_timeleft	= dw_wdt_get_timeleft,
	.restart	= dw_wdt_restart,
};

static irqreturn_t dw_wdt_irq(int irq, void *devid)
{
	struct dw_wdt *dw_wdt = devid;
	u32 val;

	/*
	 * We don't clear the IRQ status. It's supposed to be done by the
	 * following ping operations.
	 */
	val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
	if (!val)
		return IRQ_NONE;

	watchdog_notify_pretimeout(&dw_wdt->wdd);

	return IRQ_HANDLED;
}

static int dw_wdt_suspend(struct device *dev)
{
	struct dw_wdt *dw_wdt = dev_get_drvdata(dev);

	dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
	dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);

	clk_disable_unprepare(dw_wdt->pclk);
	clk_disable_unprepare(dw_wdt->clk);

	return 0;
}

static int dw_wdt_resume(struct device *dev)
{
	struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
	int err = clk_prepare_enable(dw_wdt->clk);

	if (err)
		return err;

	err = clk_prepare_enable(dw_wdt->pclk);
	if (err) {
		clk_disable_unprepare(dw_wdt->clk);
		return err;
	}

	writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
	writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);

	dw_wdt_ping(&dw_wdt->wdd);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);

/*
 * If the DW WDT IP core is synthesized with the fixed TOP feature disabled,
 * the TOPs array can be ordered arbitrarily, with nearly any sixteen unsigned
 * values chosen at the system engineer's discretion. The next method walks
 * over the passed TOPs array to pre-calculate the effective timeouts and to
 * sort the TOP entries in ascending order of timeout.
 */

static void dw_wdt_handle_tops(struct dw_wdt *dw_wdt, const u32 *tops)
{
	struct dw_wdt_timeout tout, *dst;
	int val, tidx;
	u64 msec;

	/*
	 * We walk over the passed TOPs array and calculate the corresponding
	 * timeouts in seconds and milliseconds. The millisecond granularity
	 * is needed to distinguish TOPs with very close timeouts and to set
	 * the watchdog max heartbeat value later on.
	 */
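	/*
	 * Worked example with hypothetical numbers: for rate = 25000000 Hz
	 * and tops[val] = 16777216 ticks, sec = 16777216 / 25000000 = 0 and
	 * msec = 16777216 * 1000 / 25000000 - 0 = 671, i.e. a ~0.671 s TOP.
	 */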
	for (val = 0; val < DW_WDT_NUM_TOPS; ++val) {
		tout.top_val = val;
		tout.sec = tops[val] / dw_wdt->rate;
		msec = (u64)tops[val] * MSEC_PER_SEC;
		do_div(msec, dw_wdt->rate);
		tout.msec = msec - ((u64)tout.sec * MSEC_PER_SEC);

		/*
		 * Find a suitable place for the current TOP in the timeouts
		 * array so that the list remains in ascending order.
		 */
		for (tidx = 0; tidx < val; ++tidx) {
			dst = &dw_wdt->timeouts[tidx];
			if (tout.sec > dst->sec || (tout.sec == dst->sec &&
			    tout.msec >= dst->msec))
				continue;
			else
				swap(*dst, tout);
		}

		dw_wdt->timeouts[val] = tout;
	}
}

static int dw_wdt_init_timeouts(struct dw_wdt *dw_wdt, struct device *dev)
{
	u32 data, of_tops[DW_WDT_NUM_TOPS];
	const u32 *tops;
	int ret;

	/*
	 * Retrieve custom or fixed counter values depending on the
	 * WDT_USE_FIX_TOP flag found in the component specific parameters
	 * #1 register.
	 */
	data = readl(dw_wdt->regs + WDOG_COMP_PARAMS_1_REG_OFFSET);
	if (data & WDOG_COMP_PARAMS_1_USE_FIX_TOP) {
		tops = dw_wdt_fix_tops;
	} else {
		ret = of_property_read_variable_u32_array(dev_of_node(dev),
			"snps,watchdog-tops", of_tops, DW_WDT_NUM_TOPS,
			DW_WDT_NUM_TOPS);
		if (ret < 0) {
			dev_warn(dev, "No valid TOPs array specified\n");
			tops = dw_wdt_fix_tops;
		} else {
			tops = of_tops;
		}
	}

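	/*
	 * A hypothetical devicetree fragment supplying a custom ladder of
	 * sixteen counter values (one per TOP index) could look like:
	 *
	 *   snps,watchdog-tops = <0x0000ffff 0x0001ffff 0x0003ffff 0x0007ffff
	 *                         0x000fffff 0x001fffff 0x003fffff 0x007fffff
	 *                         0x00ffffff 0x01ffffff 0x03ffffff 0x07ffffff
	 *                         0x0fffffff 0x1fffffff 0x3fffffff 0x7fffffff>;
	 */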
	/* Convert the specified TOPs into an array of watchdog timeouts. */
	dw_wdt_handle_tops(dw_wdt, tops);
	if (!dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1].sec) {
		dev_err(dev, "No valid TOP detected\n");
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS

#define DW_WDT_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off			\
}

static const struct debugfs_reg32 dw_wdt_dbgfs_regs[] = {
	DW_WDT_DBGFS_REG("cr",		WDOG_CONTROL_REG_OFFSET),
	DW_WDT_DBGFS_REG("torr",	WDOG_TIMEOUT_RANGE_REG_OFFSET),
	DW_WDT_DBGFS_REG("ccvr",	WDOG_CURRENT_COUNT_REG_OFFSET),
	DW_WDT_DBGFS_REG("crr",		WDOG_COUNTER_RESTART_REG_OFFSET),
	DW_WDT_DBGFS_REG("stat",	WDOG_INTERRUPT_STATUS_REG_OFFSET),
	DW_WDT_DBGFS_REG("param5",	WDOG_COMP_PARAMS_5_REG_OFFSET),
	DW_WDT_DBGFS_REG("param4",	WDOG_COMP_PARAMS_4_REG_OFFSET),
	DW_WDT_DBGFS_REG("param3",	WDOG_COMP_PARAMS_3_REG_OFFSET),
	DW_WDT_DBGFS_REG("param2",	WDOG_COMP_PARAMS_2_REG_OFFSET),
	DW_WDT_DBGFS_REG("param1",	WDOG_COMP_PARAMS_1_REG_OFFSET),
	DW_WDT_DBGFS_REG("version",	WDOG_COMP_VERSION_REG_OFFSET),
	DW_WDT_DBGFS_REG("type",	WDOG_COMP_TYPE_REG_OFFSET)
};

static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt)
{
	struct device *dev = dw_wdt->wdd.parent;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	regset->regs = dw_wdt_dbgfs_regs;
	regset->nregs = ARRAY_SIZE(dw_wdt_dbgfs_regs);
	regset->base = dw_wdt->regs;

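	/*
	 * Expose the register snapshot as a read-only "registers" file under
	 * a <debugfs>/<device name>/ directory (debugfs is typically mounted
	 * at /sys/kernel/debug).
	 */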
	dw_wdt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);

	debugfs_create_regset32("registers", 0444, dw_wdt->dbgfs_dir, regset);
}

static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt)
{
	debugfs_remove_recursive(dw_wdt->dbgfs_dir);
}

#else /* !CONFIG_DEBUG_FS */

static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) {}
static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) {}

#endif /* !CONFIG_DEBUG_FS */

static int dw_wdt_drv_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct watchdog_device *wdd;
	struct dw_wdt *dw_wdt;
	int ret;

	dw_wdt = devm_kzalloc(dev, sizeof(*dw_wdt), GFP_KERNEL);
	if (!dw_wdt)
		return -ENOMEM;

	dw_wdt->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dw_wdt->regs))
		return PTR_ERR(dw_wdt->regs);

	/*
	 * Try to request the watchdog dedicated timer clock source. It must
	 * be supplied if asynchronous mode is enabled. Otherwise fall back
	 * to the common timer/bus clocks configuration, in which case the
	 * very first clock found supplies both the timer and the APB signals.
	 */
	dw_wdt->clk = devm_clk_get_enabled(dev, "tclk");
	if (IS_ERR(dw_wdt->clk)) {
		dw_wdt->clk = devm_clk_get_enabled(dev, NULL);
		if (IS_ERR(dw_wdt->clk))
			return PTR_ERR(dw_wdt->clk);
	}

	dw_wdt->rate = clk_get_rate(dw_wdt->clk);
	if (dw_wdt->rate == 0)
		return -EINVAL;

	/*
	 * Request the APB clock if the device is configured in asynchronous
	 * clocks mode. In that case both the tclk and pclk clocks are
	 * supposed to be specified. Alas, we can't know for sure whether
	 * async mode was really activated, so the pclk phandle reference is
	 * left optional. If it can't be found, we consider the device to be
	 * configured in synchronous clocks mode.
	 */
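	/*
	 * A hypothetical devicetree fragment for the asynchronous clocks
	 * case, matching the "tclk"/"pclk" names requested here (the clock
	 * provider labels are made up for illustration):
	 *
	 *   clocks = <&wdt_tclk>, <&wdt_pclk>;
	 *   clock-names = "tclk", "pclk";
	 */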
	dw_wdt->pclk = devm_clk_get_optional_enabled(dev, "pclk");
	if (IS_ERR(dw_wdt->pclk))
		return PTR_ERR(dw_wdt->pclk);

	dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(dw_wdt->rst))
		return PTR_ERR(dw_wdt->rst);

	/* Enable normal reset without pre-timeout by default. */
	dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);

	/*
	 * The pre-timeout IRQ is optional, since some hardware may lack
	 * support for it. Note we must request a rising-edge IRQ, since the
	 * line is left pending either until the next watchdog kick event or
	 * until the system reset.
	 */
	ret = platform_get_irq_optional(pdev, 0);
	if (ret > 0) {
		ret = devm_request_irq(dev, ret, dw_wdt_irq,
				       IRQF_SHARED | IRQF_TRIGGER_RISING,
				       pdev->name, dw_wdt);
		if (ret)
			return ret;

		dw_wdt->wdd.info = &dw_wdt_pt_ident;
	} else {
		if (ret == -EPROBE_DEFER)
			return ret;

		dw_wdt->wdd.info = &dw_wdt_ident;
	}

	reset_control_deassert(dw_wdt->rst);

	ret = dw_wdt_init_timeouts(dw_wdt, dev);
	if (ret)
		goto out_assert_rst;

	wdd = &dw_wdt->wdd;
	wdd->ops = &dw_wdt_ops;
	wdd->min_timeout = dw_wdt_get_min_timeout(dw_wdt);
	wdd->max_hw_heartbeat_ms = dw_wdt_get_max_timeout_ms(dw_wdt);
	wdd->parent = dev;

	watchdog_set_drvdata(wdd, dw_wdt);
	watchdog_set_nowayout(wdd, nowayout);
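	/*
	 * Passing 0 as the timeout parameter makes the watchdog core fall
	 * back to the standard "timeout-sec" devicetree property (if any)
	 * when initializing wdd->timeout below.
	 */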
	watchdog_init_timeout(wdd, 0, dev);

	/*
	 * If the watchdog is already running, use its already configured
	 * timeout. Otherwise use the default or the value provided through
	 * devicetree.
	 */
	if (dw_wdt_is_enabled(dw_wdt)) {
		wdd->timeout = dw_wdt_get_timeout(dw_wdt);
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	} else {
		wdd->timeout = DW_WDT_DEFAULT_SECONDS;
		watchdog_init_timeout(wdd, 0, dev);
	}

	platform_set_drvdata(pdev, dw_wdt);

	watchdog_set_restart_priority(wdd, 128);
	watchdog_stop_on_reboot(wdd);

	ret = watchdog_register_device(wdd);
	if (ret)
		goto out_assert_rst;

	dw_wdt_dbgfs_init(dw_wdt);

	return 0;

out_assert_rst:
	reset_control_assert(dw_wdt->rst);
	return ret;
}

static void dw_wdt_drv_remove(struct platform_device *pdev)
{
	struct dw_wdt *dw_wdt = platform_get_drvdata(pdev);

	dw_wdt_dbgfs_clear(dw_wdt);

	watchdog_unregister_device(&dw_wdt->wdd);
	reset_control_assert(dw_wdt->rst);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_wdt_of_match[] = {
	{ .compatible = "snps,dw-wdt", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw_wdt_of_match);
#endif

static struct platform_driver dw_wdt_driver = {
	.probe		= dw_wdt_drv_probe,
	.remove_new	= dw_wdt_drv_remove,
	.driver		= {
		.name	= "dw_wdt",
		.of_match_table = of_match_ptr(dw_wdt_of_match),
		.pm	= pm_sleep_ptr(&dw_wdt_pm_ops),
	},
};

module_platform_driver(dw_wdt_driver);

MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
MODULE_LICENSE("GPL");