1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * arch/arm/mach-at91/pm.c
4 * AT91 Power Management
5 *
6 * Copyright (C) 2005 David Brownell
7 */
8
9 #include <linux/genalloc.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/platform_device.h>
16 #include <linux/parser.h>
17 #include <linux/suspend.h>
18
19 #include <linux/clk.h>
20 #include <linux/clk/at91_pmc.h>
21 #include <linux/platform_data/atmel.h>
22
23 #include <asm/cacheflush.h>
24 #include <asm/fncpy.h>
25 #include <asm/system_misc.h>
26 #include <asm/suspend.h>
27
28 #include "generic.h"
29 #include "pm.h"
30 #include "sam_secure.h"
31
32 #define BACKUP_DDR_PHY_CALIBRATION (9)
33
34 /**
35 * struct at91_pm_bu - AT91 power management backup unit data structure
36 * @suspended: true if suspended to backup mode
37 * @reserved: reserved
38 * @canary: canary data for memory checking after exit from backup mode
39 * @resume: resume API
40 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
41 * of the memory
42 */
43 struct at91_pm_bu {
44 int suspended;
45 unsigned long reserved;
46 phys_addr_t canary;
47 phys_addr_t resume;
48 unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
49 };
50
51 /**
52 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
53 * @pswbu: power switch BU control registers
54 */
55 struct at91_pm_sfrbu_regs {
56 struct {
57 u32 key;
58 u32 ctrl;
59 u32 state;
60 u32 softsw;
61 } pswbu;
62 };
63
64 /**
65 * enum at91_pm_eth_clk - Ethernet clock indexes
66 * @AT91_PM_ETH_PCLK: pclk index
67 * @AT91_PM_ETH_HCLK: hclk index
68 * @AT91_PM_ETH_MAX_CLK: max index
69 */
70 enum at91_pm_eth_clk {
71 AT91_PM_ETH_PCLK,
72 AT91_PM_ETH_HCLK,
73 AT91_PM_ETH_MAX_CLK,
74 };
75
76 /**
77 * enum at91_pm_eth - Ethernet controller indexes
78 * @AT91_PM_G_ETH: gigabit Ethernet controller index
79 * @AT91_PM_E_ETH: megabit Ethernet controller index
80 * @AT91_PM_MAX_ETH: max index
81 */
82 enum at91_pm_eth {
83 AT91_PM_G_ETH,
84 AT91_PM_E_ETH,
85 AT91_PM_MAX_ETH,
86 };
87
88 /**
89 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
90 * @dev: Ethernet device
91 * @np: Ethernet device node
92 * @clks: Ethernet clocks
93  * @modes: power management modes that this quirk applies to
94  * @dns_modes: "do not suspend" modes: stop suspending if the Ethernet
95  *	       interface is configured as a wakeup source but is buggy and no
96  *	       other wakeup source is available
97 */
98 struct at91_pm_quirk_eth {
99 struct device *dev;
100 struct device_node *np;
101 struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
102 u32 modes;
103 u32 dns_modes;
104 };
105
106 /**
107 * struct at91_pm_quirks - AT91 PM quirks
108 * @eth: Ethernet quirks
109 */
110 struct at91_pm_quirks {
111 struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
112 };
113
114 /**
115 * struct at91_soc_pm - AT91 SoC power management data structure
116 * @config_shdwc_ws: wakeup sources configuration function for SHDWC
117  * @config_pmc_ws: wakeup sources configuration function for PMC
118  * @ws_ids: wakeup sources of_device_id array
119 * @bu: backup unit mapped data (for backup mode)
120 * @quirks: PM quirks
121 * @data: PM data to be used on last phase of suspend
122 * @sfrbu_regs: SFRBU registers mapping
123 * @memcs: memory chip select
124 */
125 struct at91_soc_pm {
126 int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
127 int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
128 const struct of_device_id *ws_ids;
129 struct at91_pm_bu *bu;
130 struct at91_pm_quirks quirks;
131 struct at91_pm_data data;
132 struct at91_pm_sfrbu_regs sfrbu_regs;
133 void *memcs;
134 };
135
136 /**
137 * enum at91_pm_iomaps - IOs that needs to be mapped for different PM modes
138 * @AT91_PM_IOMAP_SHDWC: SHDWC controller
139 * @AT91_PM_IOMAP_SFRBU: SFRBU controller
140 * @AT91_PM_IOMAP_ETHC: Ethernet controller
141 */
142 enum at91_pm_iomaps {
143 AT91_PM_IOMAP_SHDWC,
144 AT91_PM_IOMAP_SFRBU,
145 AT91_PM_IOMAP_ETHC,
146 };
147
148 #define AT91_PM_IOMAP(name) BIT(AT91_PM_IOMAP_##name)
149
150 static struct at91_soc_pm soc_pm = {
151 .data = {
152 .standby_mode = AT91_PM_STANDBY,
153 .suspend_mode = AT91_PM_ULP0,
154 },
155 };
156
157 static const match_table_t pm_modes __initconst = {
158 { AT91_PM_STANDBY, "standby" },
159 { AT91_PM_ULP0, "ulp0" },
160 { AT91_PM_ULP0_FAST, "ulp0-fast" },
161 { AT91_PM_ULP1, "ulp1" },
162 { AT91_PM_BACKUP, "backup" },
163 { -1, NULL },
164 };
165
166 #define at91_ramc_read(id, field) \
167 __raw_readl(soc_pm.data.ramc[id] + field)
168
169 #define at91_ramc_write(id, field, value) \
170 __raw_writel(value, soc_pm.data.ramc[id] + field)
171
172 static int at91_pm_valid_state(suspend_state_t state)
173 {
174 switch (state) {
175 case PM_SUSPEND_ON:
176 case PM_SUSPEND_STANDBY:
177 case PM_SUSPEND_MEM:
178 return 1;
179
180 default:
181 return 0;
182 }
183 }
184
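/*
 * Known pattern whose physical address is passed to the bootloader through
 * soc_pm.bu->canary, used to check memory content after exiting backup mode.
 */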
185 static int canary = 0xA5A5A5A5;
186
187 struct wakeup_source_info {
188 unsigned int pmc_fsmr_bit;
189 unsigned int shdwc_mr_bit;
190 bool set_polarity;
191 };
192
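/* Wakeup source descriptors referenced by the per-SoC *_ws_ids tables below. */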
193 static const struct wakeup_source_info ws_info[] = {
194 { .pmc_fsmr_bit = AT91_PMC_FSTT(10), .set_polarity = true },
195 { .pmc_fsmr_bit = AT91_PMC_RTCAL, .shdwc_mr_bit = BIT(17) },
196 { .pmc_fsmr_bit = AT91_PMC_USBAL },
197 { .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
198 { .pmc_fsmr_bit = AT91_PMC_RTTAL },
199 { .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
200 };
201
202 static const struct of_device_id sama5d2_ws_ids[] = {
203 { .compatible = "atmel,sama5d2-gem", .data = &ws_info[0] },
204 { .compatible = "atmel,sama5d2-rtc", .data = &ws_info[1] },
205 { .compatible = "atmel,sama5d3-udc", .data = &ws_info[2] },
206 { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
207 { .compatible = "usb-ohci", .data = &ws_info[2] },
208 { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
209 { .compatible = "usb-ehci", .data = &ws_info[2] },
210 { .compatible = "atmel,sama5d2-sdhci", .data = &ws_info[3] },
211 { /* sentinel */ }
212 };
213
214 static const struct of_device_id sam9x60_ws_ids[] = {
215 { .compatible = "microchip,sam9x60-rtc", .data = &ws_info[1] },
216 { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
217 { .compatible = "usb-ohci", .data = &ws_info[2] },
218 { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
219 { .compatible = "usb-ehci", .data = &ws_info[2] },
220 { .compatible = "microchip,sam9x60-rtt", .data = &ws_info[4] },
221 { .compatible = "cdns,sam9x60-macb", .data = &ws_info[5] },
222 { /* sentinel */ }
223 };
224
225 static const struct of_device_id sama7_ws_ids[] = {
226 { .compatible = "microchip,sama7d65-rtc", .data = &ws_info[1] },
227 { .compatible = "microchip,sama7g5-rtc", .data = &ws_info[1] },
228 { .compatible = "microchip,sama7g5-ohci", .data = &ws_info[2] },
229 { .compatible = "usb-ohci", .data = &ws_info[2] },
230 { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
231 { .compatible = "usb-ehci", .data = &ws_info[2] },
232 { .compatible = "microchip,sama7d65-sdhci", .data = &ws_info[3] },
233 { .compatible = "microchip,sama7g5-sdhci", .data = &ws_info[3] },
234 { .compatible = "microchip,sama7d65-rtt", .data = &ws_info[4] },
235 { .compatible = "microchip,sama7g5-rtt", .data = &ws_info[4] },
236 { /* sentinel */ }
237 };
238
239 static const struct of_device_id sam9x7_ws_ids[] = {
240 { .compatible = "microchip,sam9x7-rtc", .data = &ws_info[1] },
241 { .compatible = "microchip,sam9x7-rtt", .data = &ws_info[4] },
242 { .compatible = "microchip,sam9x7-gem", .data = &ws_info[5] },
243 { /* sentinel */ }
244 };
245
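/*
 * Configure (set == true) or clear (set == false) the ULP1 wakeup sources
 * in the PMC, based on the devices marked as wakeup sources in DT and,
 * where applicable, enabled in SHDWC.MR.
 */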
246 static int at91_pm_config_ws(unsigned int pm_mode, bool set)
247 {
248 const struct wakeup_source_info *wsi;
249 const struct of_device_id *match;
250 struct platform_device *pdev;
251 struct device_node *np;
252 unsigned int mode = 0, polarity = 0, val = 0;
253
254 if (pm_mode != AT91_PM_ULP1)
255 return 0;
256
257 if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
258 return -EPERM;
259
260 if (!set) {
261 writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
262 return 0;
263 }
264
265 if (soc_pm.config_shdwc_ws)
266 soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
267
268 /* SHDWC.MR */
269 val = readl(soc_pm.data.shdwc + 0x04);
270
271 /* Loop through defined wakeup sources. */
272 for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
273 pdev = of_find_device_by_node(np);
274 if (!pdev)
275 continue;
276
277 if (device_may_wakeup(&pdev->dev)) {
278 wsi = match->data;
279
280 /* Check if enabled on SHDWC. */
281 if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
282 goto put_device;
283
284 mode |= wsi->pmc_fsmr_bit;
285 if (wsi->set_polarity)
286 polarity |= wsi->pmc_fsmr_bit;
287 }
288
289 put_device:
290 put_device(&pdev->dev);
291 }
292
293 if (mode) {
294 if (soc_pm.config_pmc_ws)
295 soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
296 } else {
297 pr_err("AT91: PM: no ULP1 wakeup sources found!\n");
298 }
299
300 return mode ? 0 : -EPERM;
301 }
302
303 static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
304 u32 *polarity)
305 {
306 u32 val;
307
308 /* SHDWC.WUIR */
309 val = readl(shdwc + 0x0c);
310 *mode |= (val & 0x3ff);
311 *polarity |= ((val >> 16) & 0x3ff);
312
313 return 0;
314 }
315
316 static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
317 {
318 writel(mode, pmc + AT91_PMC_FSMR);
319 writel(polarity, pmc + AT91_PMC_FSPR);
320
321 return 0;
322 }
323
324 static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
325 {
326 writel(mode, pmc + AT91_PMC_FSMR);
327
328 return 0;
329 }
330
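/*
 * Tell whether the Ethernet quirk must be applied for the current suspend
 * mode; takes a reference on the Ethernet device that is released later in
 * at91_pm_config_quirks().
 */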
331 static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
332 {
333 struct platform_device *pdev;
334
335 /* Interface NA in DT. */
336 if (!eth->np)
337 return false;
338
339 /* No quirks for this interface and current suspend mode. */
340 if (!(eth->modes & BIT(soc_pm.data.mode)))
341 return false;
342
343 if (!eth->dev) {
344 /* Driver not probed. */
345 pdev = of_find_device_by_node(eth->np);
346 if (!pdev)
347 return false;
348 /* put_device(eth->dev) is called at the end of suspend. */
349 eth->dev = &pdev->dev;
350 }
351
352 /* No quirks if device isn't a wakeup source. */
353 if (!device_may_wakeup(eth->dev))
354 return false;
355
356 return true;
357 }
358
359 static int at91_pm_config_quirks(bool suspend)
360 {
361 struct at91_pm_quirk_eth *eth;
362 int i, j, ret, tmp;
363
364 /*
365 	 * Ethernet IPs whose device_node pointers are stored in
366 	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0, ULP1
367 	 * or both due to a hardware bug. If they receive WoL packets while in
368 	 * ULP0 or ULP1, the IP or even the whole system could stop working.
369 	 * We cannot handle this scenario in the Ethernet driver itself, as the
370 	 * driver is shared by multiple vendors and only this file knows whether
371 	 * we suspend to ULP0 or ULP1 mode. Thus handle these scenarios here,
372 	 * as quirks.
373 */
374 for (i = 0; i < AT91_PM_MAX_ETH; i++) {
375 eth = &soc_pm.quirks.eth[i];
376
377 if (!at91_pm_eth_quirk_is_valid(eth))
378 continue;
379
380 /*
381 		 * For modes in the dns_modes mask the system hangs if the quirk
382 		 * is not applied, but with the quirk applied the interface no
383 		 * longer reacts to WoL events. Thus take care to avoid suspending
384 		 * if this interface is the only configured wakeup source.
385 */
386 if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
387 int ws_count = 0;
388 #ifdef CONFIG_PM_SLEEP
389 struct wakeup_source *ws;
390
391 for_each_wakeup_source(ws) {
392 if (ws->dev == eth->dev)
393 continue;
394
395 ws_count++;
396 break;
397 }
398 #endif
399
400 /*
401 			 * Checking !ws_count is good for all platforms with issues,
402 			 * even when both G_ETH and E_ETH are available, as dns_modes
403 			 * is populated only on the G_ETH interface.
404 */
405 if (!ws_count) {
406 pr_err("AT91: PM: Ethernet cannot resume from WoL!\n");
407 ret = -EPERM;
408 put_device(eth->dev);
409 eth->dev = NULL;
410 /* No need to revert clock settings for this eth. */
411 i--;
412 goto clk_unconfigure;
413 }
414 }
415
416 if (suspend) {
417 clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
418 } else {
419 ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
420 eth->clks);
421 if (ret)
422 goto clk_unconfigure;
423 /*
424 * Release the reference to eth->dev taken in
425 * at91_pm_eth_quirk_is_valid().
426 */
427 put_device(eth->dev);
428 eth->dev = NULL;
429 }
430 }
431
432 return 0;
433
434 clk_unconfigure:
435 /*
436 	 * In the resume case we reach this point only if clk_bulk_prepare_enable()
437 	 * failed; we don't want to revert the clocks already enabled for the
438 	 * other IP.
439 */
440 for (j = i; j >= 0; j--) {
441 eth = &soc_pm.quirks.eth[j];
442 if (suspend) {
443 if (!at91_pm_eth_quirk_is_valid(eth))
444 continue;
445
446 tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
447 if (tmp) {
448 pr_err("AT91: PM: failed to enable %s clocks\n",
449 j == AT91_PM_G_ETH ? "geth" : "eth");
450 }
451 }
452
453 /*
454 * Release the reference to eth->dev taken in
455 * at91_pm_eth_quirk_is_valid().
456 */
457 put_device(eth->dev);
458 eth->dev = NULL;
459 }
460
461 return ret;
462 }
463
464 /*
465 * Called after processes are frozen, but before we shutdown devices.
466 */
467 static int at91_pm_begin(suspend_state_t state)
468 {
469 int ret;
470
471 switch (state) {
472 case PM_SUSPEND_MEM:
473 soc_pm.data.mode = soc_pm.data.suspend_mode;
474 break;
475
476 case PM_SUSPEND_STANDBY:
477 soc_pm.data.mode = soc_pm.data.standby_mode;
478 break;
479
480 default:
481 soc_pm.data.mode = -1;
482 }
483
484 ret = at91_pm_config_ws(soc_pm.data.mode, true);
485 if (ret)
486 return ret;
487
488 if (soc_pm.data.mode == AT91_PM_BACKUP)
489 soc_pm.bu->suspended = 1;
490 else if (soc_pm.bu)
491 soc_pm.bu->suspended = 0;
492
493 return 0;
494 }
495
496 /*
497 * Verify that all the clocks are correct before entering
498 * slow-clock mode.
499 */
500 static int at91_pm_verify_clocks(void)
501 {
502 unsigned long scsr;
503 int i;
504
505 scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
506
507 /* USB must not be using PLLB */
508 if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
509 pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
510 return 0;
511 }
512
513 /* PCK0..PCK3 must be disabled, or configured to use clk32k */
514 for (i = 0; i < 4; i++) {
515 u32 css;
516
517 if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
518 continue;
519 css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
520 if (css != AT91_PMC_CSS_SLOW) {
521 pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
522 return 0;
523 }
524 }
525
526 return 1;
527 }
528
529 /*
530 * Call this from platform driver suspend() to see how deeply to suspend.
531 * For example, some controllers (like OHCI) need one of the PLL clocks
532 * in order to act as a wakeup source, and those are not available when
533 * going into slow clock mode.
534 *
535 * REVISIT: generalize as clk_will_be_available(clk)? Other platforms have
536 * the very same problem (but not using at91 main_clk), and it'd be better
537 * to add one generic API rather than lots of platform-specific ones.
538 */
539 int at91_suspend_entering_slow_clock(void)
540 {
541 return (soc_pm.data.mode >= AT91_PM_ULP0);
542 }
543 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
544
545 static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
546 extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
547 extern u32 at91_pm_suspend_in_sram_sz;
548
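/*
 * Last step of the suspend sequence, run through cpu_suspend(): save the
 * DDR PHY calibration data for backup mode, flush the caches and jump to
 * the suspend handler that was copied to SRAM.
 */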
549 static int at91_suspend_finish(unsigned long val)
550 {
551 /* SYNOPSYS workaround to fix a bug in the calibration logic */
552 unsigned char modified_fix_code[] = {
553 0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
554 0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
555 0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
556 0x1e, 0x1f,
557 };
558 unsigned int tmp, index;
559 int i;
560
561 if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
562 /*
563 		 * The bootloader will perform DDR recalibration and will try to
564 		 * restore the ZQ0SR0 with the value saved here. But the
565 		 * calibration is buggy and restoring some values from ZQ0SR0
566 		 * is forbidden and risky, thus we need to provide processed
567 * values for these.
568 */
569 tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
570
571 /* Store pull-down output impedance select. */
572 index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
573 soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
574
575 /* Store pull-up output impedance select. */
576 index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
577 soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
578
579 /* Store pull-down on-die termination impedance select. */
580 index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
581 soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
582
583 /* Store pull-up on-die termination impedance select. */
584 index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
585 soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
586
587 /*
588 		 * The first 8 words of memory might get corrupted in the process
589 		 * of DDR PHY recalibration; they are saved here in securam and
590 		 * will be restored later, after recalibration, by the bootloader.
591 */
592 for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
593 soc_pm.bu->ddr_phy_calibration[i] =
594 *((unsigned int *)soc_pm.memcs + (i - 1));
595 }
596
597 flush_cache_all();
598 outer_disable();
599
600 at91_suspend_sram_fn(&soc_pm.data);
601
602 return 0;
603 }
604
605 /**
606 * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
607 * to automatic/hardware mode.
608 *
609 * The Backup Unit Power Switch can be managed either by software or hardware.
610 * Enabling hardware mode allows the automatic transition of power between
611 * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
612 * availability of these power sources.
613 *
614 * If the Backup Unit Power Switch is already in automatic mode, no action is
615 * required. If it is in software-controlled mode, it is switched to automatic
616 * mode to enhance safety and eliminate the need for toggling between power
617 * sources.
618 */
619 static void at91_pm_switch_ba_to_auto(void)
620 {
621 unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
622 unsigned int val;
623
624 /* Just for safety. */
625 if (!soc_pm.data.sfrbu)
626 return;
627
628 val = readl(soc_pm.data.sfrbu + offset);
629
630 /* Already on auto/hardware. */
631 if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
632 return;
633
634 val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
635 val |= soc_pm.sfrbu_regs.pswbu.key;
636 writel(val, soc_pm.data.sfrbu + offset);
637 }
638
639 static void at91_pm_suspend(suspend_state_t state)
640 {
641 if (soc_pm.data.mode == AT91_PM_BACKUP) {
642 at91_pm_switch_ba_to_auto();
643
644 cpu_suspend(0, at91_suspend_finish);
645
646 /* The SRAM is lost between suspend cycles */
647 at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
648 &at91_pm_suspend_in_sram,
649 at91_pm_suspend_in_sram_sz);
650
651 if (IS_ENABLED(CONFIG_SOC_SAMA7D65)) {
652 /* SHDWC.SR */
653 readl(soc_pm.data.shdwc + 0x08);
654 }
655 } else {
656 at91_suspend_finish(0);
657 }
658
659 outer_resume();
660 }
661
662 /*
663 * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
664 * event sources; and reduces DRAM power. But otherwise it's identical to
665 * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
666 *
667  * AT91_PM_ULP0 is like STANDBY plus slow-clock mode, so drivers must
668  * suspend more deeply: the master clock switches to clk32k and the
669  * main oscillator is turned off.
670  *
671  * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self-refresh.
672 */
673 static int at91_pm_enter(suspend_state_t state)
674 {
675 int ret;
676
677 ret = at91_pm_config_quirks(true);
678 if (ret)
679 return ret;
680
681 switch (state) {
682 case PM_SUSPEND_MEM:
683 case PM_SUSPEND_STANDBY:
684 /*
685 * Ensure that clocks are in a valid state.
686 */
687 if (soc_pm.data.mode >= AT91_PM_ULP0 &&
688 !at91_pm_verify_clocks())
689 goto error;
690
691 at91_pm_suspend(state);
692
693 break;
694
695 case PM_SUSPEND_ON:
696 cpu_do_idle();
697 break;
698
699 default:
700 pr_debug("AT91: PM - bogus suspend state %d\n", state);
701 goto error;
702 }
703
704 error:
705 at91_pm_config_quirks(false);
706 return 0;
707 }
708
709 /*
710 * Called right prior to thawing processes.
711 */
712 static void at91_pm_end(void)
713 {
714 at91_pm_config_ws(soc_pm.data.mode, false);
715 }
716
717
718 static const struct platform_suspend_ops at91_pm_ops = {
719 .valid = at91_pm_valid_state,
720 .begin = at91_pm_begin,
721 .enter = at91_pm_enter,
722 .end = at91_pm_end,
723 };
724
725 static struct platform_device at91_cpuidle_device = {
726 .name = "cpuidle-at91",
727 };
728
729 /*
730 * The AT91RM9200 goes into self-refresh mode with this command, and will
731 * terminate self-refresh automatically on the next SDRAM access.
732 *
733 * Self-refresh mode is exited as soon as a memory access is made, but we don't
734 * know for sure when that happens. However, we need to restore the low-power
735 * mode if it was enabled before going idle. Restoring low-power mode while
736 * still in self-refresh is "not recommended", but seems to work.
737 */
738 static void at91rm9200_standby(void)
739 {
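/*
 * Drain the write buffer (CP15 c7, c10, 4), request SDRAM self-refresh by
 * writing the SRR register, then wait for interrupt (CP15 c7, c0, 4) so
 * the core idles until a wakeup event.
 */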
740 asm volatile(
741 "b 1f\n\t"
742 ".align 5\n\t"
743 "1: mcr p15, 0, %0, c7, c10, 4\n\t"
744 " str %2, [%1, %3]\n\t"
745 " mcr p15, 0, %0, c7, c0, 4\n\t"
746 :
747 : "r" (0), "r" (soc_pm.data.ramc[0]),
748 "r" (1), "r" (AT91_MC_SDRAMC_SRR));
749 }
750
751 /* We manage both DDRAM/SDRAM controllers, so we need more than one value to
752 * remember.
753 */
754 static void at91_ddr_standby(void)
755 {
756 /* Those two values allow us to delay self-refresh activation
757 * to the maximum. */
758 u32 lpr0, lpr1 = 0;
759 u32 mdr, saved_mdr0, saved_mdr1 = 0;
760 u32 saved_lpr0, saved_lpr1 = 0;
761
762 /* LPDDR1 --> force DDR2 mode during self-refresh */
763 saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
764 if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
765 mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
766 mdr |= AT91_DDRSDRC_MD_DDR2;
767 at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
768 }
769
770 if (soc_pm.data.ramc[1]) {
771 saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
772 lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
773 lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
774 saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
775 if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
776 mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
777 mdr |= AT91_DDRSDRC_MD_DDR2;
778 at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
779 }
780 }
781
782 saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
783 lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
784 lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
785
786 /* self-refresh mode now */
787 at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
788 if (soc_pm.data.ramc[1])
789 at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
790
791 cpu_do_idle();
792
793 at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
794 at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
795 if (soc_pm.data.ramc[1]) {
796 at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
797 at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
798 }
799 }
800
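/*
 * On SAMA5D3 the DDR controller is placed in power-down (rather than
 * self-refresh) while the CPU is idling.
 */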
801 static void sama5d3_ddr_standby(void)
802 {
803 u32 lpr0;
804 u32 saved_lpr0;
805
806 saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
807 lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
808 lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
809
810 at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
811
812 cpu_do_idle();
813
814 at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
815 }
816
817 /* We manage both DDRAM/SDRAM controllers, so we need more than one value to
818 * remember.
819 */
820 static void at91sam9_sdram_standby(void)
821 {
822 u32 lpr0, lpr1 = 0;
823 u32 saved_lpr0, saved_lpr1 = 0;
824
825 if (soc_pm.data.ramc[1]) {
826 saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
827 lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
828 lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
829 }
830
831 saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
832 lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
833 lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
834
835 /* self-refresh mode now */
836 at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
837 if (soc_pm.data.ramc[1])
838 at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
839
840 cpu_do_idle();
841
842 at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
843 if (soc_pm.data.ramc[1])
844 at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
845 }
846
847 static void sama7g5_standby(void)
848 {
849 int pwrtmg, ratio;
850
851 pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
852 ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
853
854 /*
855 	 * Place the RAM into self-refresh after a maximum number of idle clocks.
856 	 * The maximum number of idle clocks is configured by the bootloader in
857 	 * UDDRC_PWRMGT.SELFREF_TO_X32.
858 */
859 writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
860 soc_pm.data.ramc[0] + UDDRC_PWRCTL);
861 /* Divide CPU clock by 16. */
862 writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
863
864 cpu_do_idle();
865
866 /* Restore previous configuration. */
867 writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
868 writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
869 }
870
871 struct ramc_info {
872 void (*idle)(void);
873 unsigned int memctrl;
874 };
875
876 static const struct ramc_info ramc_infos[] __initconst = {
877 { .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
878 { .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
879 { .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
880 { .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
881 { .idle = sama7g5_standby, },
882 };
883
884 static const struct of_device_id ramc_ids[] __initconst = {
885 { .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
886 { .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
887 { .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
888 { .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
889 { .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
890 { /*sentinel*/ }
891 };
892
893 static const struct of_device_id ramc_phy_ids[] __initconst = {
894 { .compatible = "microchip,sama7g5-ddr3phy", },
895 { /* Sentinel. */ },
896 };
897
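/*
 * Map the RAM controller(s) described in DT (and the DDR PHY, if any) and
 * register the matching standby handler with the cpuidle driver.
 */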
898 static __init int at91_dt_ramc(bool phy_mandatory)
899 {
900 struct device_node *np;
901 const struct of_device_id *of_id;
902 int idx = 0;
903 void *standby = NULL;
904 const struct ramc_info *ramc;
905 int ret;
906
907 for_each_matching_node_and_match(np, ramc_ids, &of_id) {
908 soc_pm.data.ramc[idx] = of_iomap(np, 0);
909 if (!soc_pm.data.ramc[idx]) {
910 pr_err("unable to map ramc[%d] cpu registers\n", idx);
911 ret = -ENOMEM;
912 of_node_put(np);
913 goto unmap_ramc;
914 }
915
916 ramc = of_id->data;
917 if (ramc) {
918 if (!standby)
919 standby = ramc->idle;
920 soc_pm.data.memctrl = ramc->memctrl;
921 }
922
923 idx++;
924 }
925
926 if (!idx) {
927 pr_err("unable to find compatible ram controller node in dtb\n");
928 ret = -ENODEV;
929 goto unmap_ramc;
930 }
931
932 /* Lookup for DDR PHY node, if any. */
933 for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
934 soc_pm.data.ramc_phy = of_iomap(np, 0);
935 if (!soc_pm.data.ramc_phy) {
936 pr_err("unable to map ramc phy cpu registers\n");
937 ret = -ENOMEM;
938 of_node_put(np);
939 goto unmap_ramc;
940 }
941 }
942
943 if (phy_mandatory && !soc_pm.data.ramc_phy) {
944 pr_err("DDR PHY is mandatory!\n");
945 ret = -ENODEV;
946 goto unmap_ramc;
947 }
948
949 if (!standby) {
950 pr_warn("ramc no standby function available\n");
951 return 0;
952 }
953
954 at91_cpuidle_device.dev.platform_data = standby;
955
956 return 0;
957
958 unmap_ramc:
959 while (idx)
960 iounmap(soc_pm.data.ramc[--idx]);
961
962 return ret;
963 }
964
965 static void at91rm9200_idle(void)
966 {
967 /*
968 * Disable the processor clock. The processor will be automatically
969 * re-enabled by an interrupt or by a reset.
970 */
971 writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
972 }
973
974 static void at91sam9_idle(void)
975 {
976 writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
977 cpu_do_idle();
978 }
979
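/*
 * Allocate a chunk of SRAM and copy the low-level suspend handler
 * (at91_pm_suspend_in_sram) into it, as the handler cannot run from DDR
 * while the DDR itself is being put into self-refresh.
 */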
980 static void __init at91_pm_sram_init(void)
981 {
982 struct gen_pool *sram_pool;
983 phys_addr_t sram_pbase;
984 unsigned long sram_base;
985 struct device_node *node;
986 struct platform_device *pdev = NULL;
987
988 for_each_compatible_node(node, NULL, "mmio-sram") {
989 pdev = of_find_device_by_node(node);
990 if (pdev) {
991 of_node_put(node);
992 break;
993 }
994 }
995
996 if (!pdev) {
997 pr_warn("%s: failed to find sram device!\n", __func__);
998 return;
999 }
1000
1001 sram_pool = gen_pool_get(&pdev->dev, NULL);
1002 if (!sram_pool) {
1003 pr_warn("%s: sram pool unavailable!\n", __func__);
1004 goto out_put_device;
1005 }
1006
1007 sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
1008 if (!sram_base) {
1009 pr_warn("%s: unable to alloc sram!\n", __func__);
1010 goto out_put_device;
1011 }
1012
1013 sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
1014 at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
1015 at91_pm_suspend_in_sram_sz, false);
1016 if (!at91_suspend_sram_fn) {
1017 pr_warn("SRAM: Could not map\n");
1018 goto out_put_device;
1019 }
1020
1021 /* Copy the pm suspend handler to SRAM */
1022 at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
1023 &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
1024 return;
1025
1026 out_put_device:
1027 put_device(&pdev->dev);
1028 return;
1029 }
1030
1031 static bool __init at91_is_pm_mode_active(int pm_mode)
1032 {
1033 return (soc_pm.data.standby_mode == pm_mode ||
1034 soc_pm.data.suspend_mode == pm_mode);
1035 }
1036
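/*
 * of_scan_flat_dt() callback: record the virtual address of the first
 * "memory" node so that its first words can be saved before entering
 * backup mode.
 */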
1037 static int __init at91_pm_backup_scan_memcs(unsigned long node,
1038 const char *uname, int depth,
1039 void *data)
1040 {
1041 const char *type;
1042 const __be32 *reg;
1043 int *located = data;
1044 int size;
1045
1046 /* Memory node already located. */
1047 if (*located)
1048 return 0;
1049
1050 type = of_get_flat_dt_prop(node, "device_type", NULL);
1051
1052 /* We are scanning "memory" nodes only. */
1053 if (!type || strcmp(type, "memory"))
1054 return 0;
1055
1056 reg = of_get_flat_dt_prop(node, "reg", &size);
1057 if (reg) {
1058 soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1059 *located = 1;
1060 }
1061
1062 return 0;
1063 }
1064
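/*
 * Allocate the backup unit data in securam and fill in the information
 * (resume address, canary, memory chip select) shared with the bootloader
 * for backup mode.
 */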
1065 static int __init at91_pm_backup_init(void)
1066 {
1067 struct gen_pool *sram_pool;
1068 struct device_node *np;
1069 struct platform_device *pdev;
1070 int ret = -ENODEV, located = 0;
1071
1072 if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
1073 !IS_ENABLED(CONFIG_SOC_SAMA7G5) &&
1074 !IS_ENABLED(CONFIG_SOC_SAMA7D65))
1075 return -EPERM;
1076
1077 if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
1078 return 0;
1079
1080 np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
1081 if (!np)
1082 return ret;
1083
1084 pdev = of_find_device_by_node(np);
1085 of_node_put(np);
1086 if (!pdev) {
1087 pr_warn("%s: failed to find securam device!\n", __func__);
1088 return ret;
1089 }
1090
1091 sram_pool = gen_pool_get(&pdev->dev, NULL);
1092 if (!sram_pool) {
1093 pr_warn("%s: securam pool unavailable!\n", __func__);
1094 goto securam_fail;
1095 }
1096
1097 soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
1098 if (!soc_pm.bu) {
1099 pr_warn("%s: unable to alloc securam!\n", __func__);
1100 ret = -ENOMEM;
1101 goto securam_fail;
1102 }
1103
1104 soc_pm.bu->suspended = 0;
1105 soc_pm.bu->canary = __pa_symbol(&canary);
1106 soc_pm.bu->resume = __pa_symbol(cpu_resume);
1107 if (soc_pm.data.ramc_phy) {
1108 of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
1109 if (!located)
1110 goto securam_fail;
1111 }
1112
1113 return 0;
1114
1115 securam_fail:
1116 put_device(&pdev->dev);
1117 return ret;
1118 }
1119
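/*
 * With ATMEL_SECURE_PM the suspend mode is handled by the secure world:
 * ask it to use the requested mode, or fall back to the mode it reports
 * as its default.
 */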
1120 static void __init at91_pm_secure_init(void)
1121 {
1122 int suspend_mode;
1123 struct arm_smccc_res res;
1124
1125 suspend_mode = soc_pm.data.suspend_mode;
1126
1127 res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1128 suspend_mode, 0);
1129 if (res.a0 == 0) {
1130 pr_info("AT91: Secure PM: suspend mode set to %s\n",
1131 pm_modes[suspend_mode].pattern);
1132 soc_pm.data.mode = suspend_mode;
1133 return;
1134 }
1135
1136 pr_warn("AT91: Secure PM: %s mode not supported!\n",
1137 pm_modes[suspend_mode].pattern);
1138
1139 res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1140 if (res.a0 == 0) {
1141 pr_warn("AT91: Secure PM: failed to get default mode\n");
1142 soc_pm.data.mode = -1;
1143 return;
1144 }
1145
1146 pr_info("AT91: Secure PM: using default suspend mode %s\n",
1147 pm_modes[suspend_mode].pattern);
1148
1149 soc_pm.data.suspend_mode = res.a1;
1150 soc_pm.data.mode = soc_pm.data.suspend_mode;
1151 }
1152 static const struct of_device_id atmel_shdwc_ids[] = {
1153 { .compatible = "atmel,sama5d2-shdwc" },
1154 { .compatible = "microchip,sam9x60-shdwc" },
1155 { .compatible = "microchip,sama7g5-shdwc" },
1156 { /* sentinel. */ }
1157 };
1158
1159 static const struct of_device_id gmac_ids[] __initconst = {
1160 { .compatible = "atmel,sama5d3-gem" },
1161 { .compatible = "atmel,sama5d2-gem" },
1162 { .compatible = "atmel,sama5d29-gem" },
1163 { .compatible = "microchip,sama7g5-gem" },
1164 { },
1165 };
1166
1167 static const struct of_device_id emac_ids[] __initconst = {
1168 { .compatible = "atmel,sama5d3-macb" },
1169 { .compatible = "microchip,sama7g5-emac" },
1170 { },
1171 };
1172
1173 /*
1174  * Replaces _mode_to_replace with a supported mode that doesn't depend
1175  * on the controller pointed to by _map_bitmask.
1176 * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
1177 * PM mode
1178 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
1179 * controller represented by _map_bitmask, _mode_to_replace needs to be
1180 * updated
1181 * @_mode_to_replace: standby_mode or suspend_mode that need to be
1182 * updated
1183 * @_mode_to_check: standby_mode or suspend_mode; this is needed here
1184 * to avoid having standby_mode and suspend_mode set with the same AT91
1185 * PM mode
1186 */
1187 #define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace, \
1188 _mode_to_check) \
1189 do { \
1190 if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) { \
1191 int _mode_to_use, _mode_complementary; \
1192 /* Use ULP0 if it doesn't need _map_bitmask. */ \
1193 if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
1194 _mode_to_use = AT91_PM_ULP0; \
1195 _mode_complementary = AT91_PM_STANDBY; \
1196 } else { \
1197 _mode_to_use = AT91_PM_STANDBY; \
1198 _mode_complementary = AT91_PM_STANDBY; \
1199 } \
1200 \
1201 if ((_mode_to_check) != _mode_to_use) \
1202 (_mode_to_replace) = _mode_to_use; \
1203 else \
1204 (_mode_to_replace) = _mode_complementary;\
1205 } \
1206 } while (0)
1207
1208 /*
1209 * Replaces standby and suspend modes with default supported modes:
1210 * ULP0 and STANDBY.
1211 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
1212 * flags
1213 * @_map: controller specific name; standby and suspend mode need to be
1214 * replaced in order to not depend on this controller
1215 */
1216 #define AT91_PM_REPLACE_MODES(_maps, _map) \
1217 do { \
1218 AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1219 (soc_pm.data.standby_mode), \
1220 (soc_pm.data.suspend_mode)); \
1221 AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1222 (soc_pm.data.suspend_mode), \
1223 (soc_pm.data.standby_mode)); \
1224 } while (0)
1225
1226 static int __init at91_pm_get_eth_clks(struct device_node *np,
1227 struct clk_bulk_data *clks)
1228 {
1229 clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1230 if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1231 return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1232
1233 clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1234 if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1235 return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1236
1237 return 0;
1238 }
1239
1240 static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1241 {
1242 return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1243 IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1244 }
1245
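/*
 * Map the controllers (SHDWC, SFRBU, Ethernet clocks) required by the
 * selected standby/suspend modes, falling back to ULP0/STANDBY when a
 * required controller is not described in DT.
 */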
1246 static void __init at91_pm_modes_init(const u32 *maps, int len)
1247 {
1248 struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
1249 struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
1250 struct device_node *np;
1251 int ret;
1252
1253 ret = at91_pm_backup_init();
1254 if (ret) {
1255 if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
1256 soc_pm.data.standby_mode = AT91_PM_ULP0;
1257 if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
1258 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1259 }
1260
1261 if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1262 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
1263 np = of_find_matching_node(NULL, atmel_shdwc_ids);
1264 if (!np) {
1265 pr_warn("%s: failed to find shdwc!\n", __func__);
1266 AT91_PM_REPLACE_MODES(maps, SHDWC);
1267 } else {
1268 soc_pm.data.shdwc = of_iomap(np, 0);
1269 of_node_put(np);
1270 }
1271 }
1272
1273 if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1274 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
1275 np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
1276 if (!np) {
1277 pr_warn("%s: failed to find sfrbu!\n", __func__);
1278 AT91_PM_REPLACE_MODES(maps, SFRBU);
1279 } else {
1280 soc_pm.data.sfrbu = of_iomap(np, 0);
1281 of_node_put(np);
1282 }
1283 }
1284
1285 if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
1286 at91_is_pm_mode_active(AT91_PM_ULP0) ||
1287 at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
1288 (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
1289 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
1290 np = of_find_matching_node(NULL, gmac_ids);
1291 if (!np) {
1292 np = of_find_matching_node(NULL, emac_ids);
1293 if (np)
1294 goto get_emac_clks;
1295 AT91_PM_REPLACE_MODES(maps, ETHC);
1296 goto unmap_unused_nodes;
1297 } else {
1298 gmac->np = np;
1299 at91_pm_get_eth_clks(np, gmac->clks);
1300 }
1301
1302 np = of_find_matching_node(NULL, emac_ids);
1303 if (!np) {
1304 if (at91_pm_eth_clks_empty(gmac->clks))
1305 AT91_PM_REPLACE_MODES(maps, ETHC);
1306 } else {
1307 get_emac_clks:
1308 emac->np = np;
1309 ret = at91_pm_get_eth_clks(np, emac->clks);
1310 if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
1311 of_node_put(gmac->np);
1312 of_node_put(emac->np);
1313 gmac->np = NULL;
1314 emac->np = NULL;
1315 }
1316 }
1317 }
1318
1319 unmap_unused_nodes:
1320 	/* Unmap all mappings that are no longer needed. */
1321 if (soc_pm.data.shdwc &&
1322 !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1323 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
1324 iounmap(soc_pm.data.shdwc);
1325 soc_pm.data.shdwc = NULL;
1326 }
1327
1328 if (soc_pm.data.sfrbu &&
1329 !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1330 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
1331 iounmap(soc_pm.data.sfrbu);
1332 soc_pm.data.sfrbu = NULL;
1333 }
1334
1335 return;
1336 }
1337
1338 struct pmc_info {
1339 unsigned long uhp_udp_mask;
1340 unsigned long mckr;
1341 unsigned long version;
1342 unsigned long mcks;
1343 };
1344
1345 static const struct pmc_info pmc_infos[] __initconst = {
1346 {
1347 .uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
1348 .mckr = 0x30,
1349 .version = AT91_PMC_V1,
1350 },
1351
1352 {
1353 .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1354 .mckr = 0x30,
1355 .version = AT91_PMC_V1,
1356 },
1357 {
1358 .uhp_udp_mask = AT91SAM926x_PMC_UHP,
1359 .mckr = 0x30,
1360 .version = AT91_PMC_V1,
1361 },
1362 { .uhp_udp_mask = 0,
1363 .mckr = 0x30,
1364 .version = AT91_PMC_V1,
1365 },
1366 {
1367 .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1368 .mckr = 0x28,
1369 .version = AT91_PMC_V2,
1370 },
1371 {
1372 .mckr = 0x28,
1373 .version = AT91_PMC_V2,
1374 .mcks = 4,
1375 },
1376 {
1377 .uhp_udp_mask = AT91SAM926x_PMC_UHP,
1378 .mckr = 0x28,
1379 .version = AT91_PMC_V2,
1380 .mcks = 9,
1381 },
1382 };
1383
1384 static const struct of_device_id atmel_pmc_ids[] __initconst = {
1385 { .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
1386 { .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
1387 { .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
1388 { .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
1389 { .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
1390 { .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
1391 { .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
1392 { .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
1393 { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
1394 { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
1395 { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
1396 { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
1397 { .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] },
1398 { .compatible = "microchip,sama7d65-pmc", .data = &pmc_infos[6] },
1399 { .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
1400 { /* sentinel */ },
1401 };
1402
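/*
 * Check that the selected standby and suspend modes are part of the SoC's
 * supported list; if not, fall back to STANDBY or ULP0.
 */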
1403 static void __init at91_pm_modes_validate(const int *modes, int len)
1404 {
1405 u8 i, standby = 0, suspend = 0;
1406 int mode;
1407
1408 for (i = 0; i < len; i++) {
1409 if (standby && suspend)
1410 break;
1411
1412 if (modes[i] == soc_pm.data.standby_mode && !standby) {
1413 standby = 1;
1414 continue;
1415 }
1416
1417 if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1418 suspend = 1;
1419 continue;
1420 }
1421 }
1422
1423 if (!standby) {
1424 if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1425 mode = AT91_PM_ULP0;
1426 else
1427 mode = AT91_PM_STANDBY;
1428
1429 pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1430 pm_modes[soc_pm.data.standby_mode].pattern,
1431 pm_modes[mode].pattern);
1432 soc_pm.data.standby_mode = mode;
1433 }
1434
1435 if (!suspend) {
1436 if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1437 mode = AT91_PM_STANDBY;
1438 else
1439 mode = AT91_PM_ULP0;
1440
1441 pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1442 pm_modes[soc_pm.data.suspend_mode].pattern,
1443 pm_modes[mode].pattern);
1444 soc_pm.data.suspend_mode = mode;
1445 }
1446 }
1447
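/*
 * Common PM setup: register the cpuidle device, map the PMC, install the
 * platform idle hook and, if the SRAM handler was set up, the suspend ops.
 */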
1448 static void __init at91_pm_init(void (*pm_idle)(void))
1449 {
1450 struct device_node *pmc_np;
1451 const struct of_device_id *of_id;
1452 const struct pmc_info *pmc;
1453
1454 if (at91_cpuidle_device.dev.platform_data)
1455 platform_device_register(&at91_cpuidle_device);
1456
1457 pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1458 soc_pm.data.pmc = of_iomap(pmc_np, 0);
1459 of_node_put(pmc_np);
1460 if (!soc_pm.data.pmc) {
1461 pr_err("AT91: PM not supported, PMC not found\n");
1462 return;
1463 }
1464
1465 pmc = of_id->data;
1466 soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1467 soc_pm.data.pmc_mckr_offset = pmc->mckr;
1468 soc_pm.data.pmc_version = pmc->version;
1469 soc_pm.data.pmc_mcks = pmc->mcks;
1470
1471 if (pm_idle)
1472 arm_pm_idle = pm_idle;
1473
1474 at91_pm_sram_init();
1475
1476 if (at91_suspend_sram_fn) {
1477 suspend_set_ops(&at91_pm_ops);
1478 pr_info("AT91: PM: standby: %s, suspend: %s\n",
1479 pm_modes[soc_pm.data.standby_mode].pattern,
1480 pm_modes[soc_pm.data.suspend_mode].pattern);
1481 } else {
1482 pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1483 }
1484 }
1485
1486 void __init at91rm9200_pm_init(void)
1487 {
1488 int ret;
1489
1490 if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1491 return;
1492
1493 /*
1494 	 * Force STANDBY and ULP0 modes to avoid calling
1495 	 * at91_pm_modes_validate(), which may increase boot time.
1496 	 * The platform supports only STANDBY and ULP0 modes anyway.
1497 */
1498 soc_pm.data.standby_mode = AT91_PM_STANDBY;
1499 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1500
1501 ret = at91_dt_ramc(false);
1502 if (ret)
1503 return;
1504
1505 /*
1506 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1507 */
1508 at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1509
1510 at91_pm_init(at91rm9200_idle);
1511 }
1512
1513 void __init sam9x60_pm_init(void)
1514 {
1515 static const int modes[] __initconst = {
1516 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1517 };
1518 static const u32 iomaps[] __initconst = {
1519 [AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC),
1520 };
1521 int ret;
1522
1523 if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1524 return;
1525
1526 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1527 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1528 ret = at91_dt_ramc(false);
1529 if (ret)
1530 return;
1531
1532 at91_pm_init(NULL);
1533
1534 soc_pm.ws_ids = sam9x60_ws_ids;
1535 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1536 }
1537
1538 void __init sam9x7_pm_init(void)
1539 {
1540 static const int modes[] __initconst = {
1541 AT91_PM_STANDBY, AT91_PM_ULP0,
1542 };
1543 int ret;
1544
1545 if (!IS_ENABLED(CONFIG_SOC_SAM9X7))
1546 return;
1547
1548 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1549 ret = at91_dt_ramc(false);
1550 if (ret)
1551 return;
1552
1553 at91_pm_init(NULL);
1554
1555 soc_pm.ws_ids = sam9x7_ws_ids;
1556 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1557 }
1558
1559 void __init at91sam9_pm_init(void)
1560 {
1561 int ret;
1562
1563 if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1564 return;
1565
1566 /*
1567 	 * Force STANDBY and ULP0 modes to avoid calling
1568 	 * at91_pm_modes_validate(), which may increase boot time.
1569 	 * The platform supports only STANDBY and ULP0 modes anyway.
1570 */
1571 soc_pm.data.standby_mode = AT91_PM_STANDBY;
1572 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1573
1574 ret = at91_dt_ramc(false);
1575 if (ret)
1576 return;
1577
1578 at91_pm_init(at91sam9_idle);
1579 }
1580
1581 void __init sama5_pm_init(void)
1582 {
1583 static const int modes[] __initconst = {
1584 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1585 };
1586 static const u32 iomaps[] __initconst = {
1587 [AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
1588 [AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
1589 };
1590 int ret;
1591
1592 if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1593 return;
1594
1595 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1596 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1597 ret = at91_dt_ramc(false);
1598 if (ret)
1599 return;
1600
1601 at91_pm_init(NULL);
1602
1603 	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1604 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1605 BIT(AT91_PM_ULP0_FAST) |
1606 BIT(AT91_PM_ULP1);
1607 /* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1608 soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1609 BIT(AT91_PM_ULP0_FAST);
1610 }
1611
1612 void __init sama5d2_pm_init(void)
1613 {
1614 static const int modes[] __initconst = {
1615 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1616 AT91_PM_BACKUP,
1617 };
1618 static const u32 iomaps[] __initconst = {
1619 [AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
1620 [AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
1621 [AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC) |
1622 AT91_PM_IOMAP(ETHC),
1623 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SHDWC) |
1624 AT91_PM_IOMAP(SFRBU),
1625 };
1626 int ret;
1627
1628 if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1629 return;
1630
1631 if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1632 pr_warn("AT91: Secure PM: ignoring standby mode\n");
1633 at91_pm_secure_init();
1634 return;
1635 }
1636
1637 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1638 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1639 ret = at91_dt_ramc(false);
1640 if (ret)
1641 return;
1642
1643 at91_pm_init(NULL);
1644
1645 soc_pm.ws_ids = sama5d2_ws_ids;
1646 soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1647 soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1648
1649 soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1650 soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1651 soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1652 soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1653
1654 /* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1655 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1656 BIT(AT91_PM_ULP0_FAST) |
1657 BIT(AT91_PM_ULP1);
1658 /*
1659 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1660 * source.
1661 */
1662 soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1663 BIT(AT91_PM_ULP0_FAST);
1664 }
1665
1666 void __init sama7_pm_init(void)
1667 {
1668 static const int modes[] __initconst = {
1669 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1670 };
1671 static const u32 iomaps[] __initconst = {
1672 [AT91_PM_ULP0] = AT91_PM_IOMAP(SFRBU),
1673 [AT91_PM_ULP1] = AT91_PM_IOMAP(SFRBU) |
1674 AT91_PM_IOMAP(SHDWC) |
1675 AT91_PM_IOMAP(ETHC),
1676 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SFRBU) |
1677 AT91_PM_IOMAP(SHDWC),
1678 };
1679 int ret;
1680
1681 if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1682 return;
1683
1684 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1685
1686 ret = at91_dt_ramc(true);
1687 if (ret)
1688 return;
1689
1690 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1691 at91_pm_init(NULL);
1692
1693 soc_pm.ws_ids = sama7_ws_ids;
1694 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1695
1696 soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1697 soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1698 soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1699 soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1700
1701 	/* Quirks apply to ULP1 for both Ethernet interfaces. */
1702 soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1703 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1704 }
1705
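/*
 * Parse the "atmel.pm_modes=<standby>,<suspend>" early parameter used to
 * override the default standby and suspend modes.
 */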
1706 static int __init at91_pm_modes_select(char *str)
1707 {
1708 char *s;
1709 substring_t args[MAX_OPT_ARGS];
1710 int standby, suspend;
1711
1712 if (!str)
1713 return 0;
1714
1715 s = strsep(&str, ",");
1716 standby = match_token(s, pm_modes, args);
1717 if (standby < 0)
1718 return 0;
1719
1720 suspend = match_token(str, pm_modes, args);
1721 if (suspend < 0)
1722 return 0;
1723
1724 soc_pm.data.standby_mode = standby;
1725 soc_pm.data.suspend_mode = suspend;
1726
1727 return 0;
1728 }
1729 early_param("atmel.pm_modes", at91_pm_modes_select);
1730