1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * arch/arm/mach-at91/pm.c
4 * AT91 Power Management
5 *
6 * Copyright (C) 2005 David Brownell
7 */
8
9 #include <linux/genalloc.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/platform_device.h>
16 #include <linux/parser.h>
17 #include <linux/suspend.h>
18
19 #include <linux/clk.h>
20 #include <linux/clk/at91_pmc.h>
21 #include <linux/platform_data/atmel.h>
22
23 #include <asm/cacheflush.h>
24 #include <asm/fncpy.h>
25 #include <asm/system_misc.h>
26 #include <asm/suspend.h>
27
28 #include "generic.h"
29 #include "pm.h"
30 #include "sam_secure.h"
31
32 #define BACKUP_DDR_PHY_CALIBRATION (9)
33
34 /**
35 * struct at91_pm_bu - AT91 power management backup unit data structure
36 * @suspended: true if suspended to backup mode
37 * @reserved: reserved
38 * @canary: canary data for memory checking after exit from backup mode
39 * @resume: resume API
40 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
41 * of the memory
42 */
/* Lives in securam; allocated by at91_pm_backup_init(). */
struct at91_pm_bu {
	int suspended;				/* set to 1 when entering backup mode */
	unsigned long reserved;
	phys_addr_t canary;			/* physical address of 'canary' below */
	phys_addr_t resume;			/* physical address of cpu_resume() */
	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
};
50
51 /**
52 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
53 * @pswbu: power switch BU control registers
54 */
/* Layout of the SFRBU power-switch (PSWBU) register block. */
struct at91_pm_sfrbu_regs {
	struct {
		u32 key;	/* write-protect key, must accompany ctrl writes */
		u32 ctrl;	/* power switch control */
		u32 state;	/* current switch state (0 = on VBAT) */
		u32 softsw;	/* software-controlled switch */
	} pswbu;
};
63
64 /**
65 * enum at91_pm_eth_clk - Ethernet clock indexes
66 * @AT91_PM_ETH_PCLK: pclk index
67 * @AT91_PM_ETH_HCLK: hclk index
68 * @AT91_PM_ETH_MAX_CLK: max index
69 */
/* Indexes into at91_pm_quirk_eth.clks[]. */
enum at91_pm_eth_clk {
	AT91_PM_ETH_PCLK,
	AT91_PM_ETH_HCLK,
	AT91_PM_ETH_MAX_CLK,
};
75
76 /**
77 * enum at91_pm_eth - Ethernet controller indexes
78 * @AT91_PM_G_ETH: gigabit Ethernet controller index
79 * @AT91_PM_E_ETH: megabit Ethernet controller index
80 * @AT91_PM_MAX_ETH: max index
81 */
/* Indexes into at91_pm_quirks.eth[]. */
enum at91_pm_eth {
	AT91_PM_G_ETH,
	AT91_PM_E_ETH,
	AT91_PM_MAX_ETH,
};
87
88 /**
89 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
90 * @dev: Ethernet device
91 * @np: Ethernet device node
92 * @clks: Ethernet clocks
93 * @modes: power management mode that this quirk applies to
94 * @dns_modes: do not suspend modes: stop suspending if Ethernet is configured
95 * as wakeup source but buggy and no other wakeup source is
96 * available
97 */
/* Per-interface quirk state; dev is acquired/released around each suspend. */
struct at91_pm_quirk_eth {
	struct device *dev;		/* taken in at91_pm_eth_quirk_is_valid() */
	struct device_node *np;
	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
	u32 modes;			/* BIT(mode) mask the quirk applies to */
	u32 dns_modes;			/* BIT(mode) mask of do-not-suspend modes */
};
105
106 /**
107 * struct at91_pm_quirks - AT91 PM quirks
108 * @eth: Ethernet quirks
109 */
/* Container for all PM quirks; currently only Ethernet WoL quirks. */
struct at91_pm_quirks {
	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
};
113
114 /**
115 * struct at91_soc_pm - AT91 SoC power management data structure
116 * @config_shdwc_ws: wakeup sources configuration function for SHDWC
 * @config_pmc_ws: wakeup sources configuration function for PMC
 * @ws_ids: wakeup sources of_device_id array
119 * @bu: backup unit mapped data (for backup mode)
120 * @quirks: PM quirks
121 * @data: PM data to be used on last phase of suspend
122 * @sfrbu_regs: SFRBU registers mapping
123 * @memcs: memory chip select
124 */
/* Single global instance 'soc_pm' below holds all PM state for this SoC. */
struct at91_soc_pm {
	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
	const struct of_device_id *ws_ids;
	struct at91_pm_bu *bu;		/* NULL unless backup mode is set up */
	struct at91_pm_quirks quirks;
	struct at91_pm_data data;
	struct at91_pm_sfrbu_regs sfrbu_regs;
	void *memcs;			/* virtual address of start of DDR */
};
135
136 /**
137 * enum at91_pm_iomaps - IOs that needs to be mapped for different PM modes
138 * @AT91_PM_IOMAP_SHDWC: SHDWC controller
139 * @AT91_PM_IOMAP_SFRBU: SFRBU controller
140 * @AT91_PM_IOMAP_ETHC: Ethernet controller
141 */
/* Bit positions for the per-SoC iomap mask; use via AT91_PM_IOMAP(). */
enum at91_pm_iomaps {
	AT91_PM_IOMAP_SHDWC,
	AT91_PM_IOMAP_SFRBU,
	AT91_PM_IOMAP_ETHC,
};

/* Turn an at91_pm_iomaps index into its mask bit. */
#define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)
149
/* Global PM state; default modes: standby -> STANDBY, suspend -> ULP0. */
static struct at91_soc_pm soc_pm = {
	.data = {
		.standby_mode = AT91_PM_STANDBY,
		.suspend_mode = AT91_PM_ULP0,
	},
};
156
/* Textual names of the PM modes, for command-line parsing and messages. */
static const match_table_t pm_modes __initconst = {
	{ AT91_PM_STANDBY,	"standby" },
	{ AT91_PM_ULP0,		"ulp0" },
	{ AT91_PM_ULP0_FAST,	"ulp0-fast" },
	{ AT91_PM_ULP1,		"ulp1" },
	{ AT91_PM_BACKUP,	"backup" },
	{ -1, NULL },
};
165
/* Raw accessors for RAM controller 'id' (0 or 1) registers. */
#define at91_ramc_read(id, field) \
	__raw_readl(soc_pm.data.ramc[id] + field)

#define at91_ramc_write(id, field, value) \
	__raw_writel(value, soc_pm.data.ramc[id] + field)
171
/* Report which suspend states this platform can enter. */
static int at91_pm_valid_state(suspend_state_t state)
{
	/* Only ON, STANDBY and MEM are supported. */
	return state == PM_SUSPEND_ON ||
	       state == PM_SUSPEND_STANDBY ||
	       state == PM_SUSPEND_MEM;
}
184
/* Known pattern whose physical address is stored in the backup unit so the
 * bootloader can check memory integrity after resuming from backup mode. */
static int canary = 0xA5A5A5A5;
186
/* How one wakeup source maps onto PMC FSMR and (optionally) SHDWC MR bits. */
struct wakeup_source_info {
	unsigned int pmc_fsmr_bit;	/* fast start-up enable bit */
	unsigned int shdwc_mr_bit;	/* 0 if not gated by SHDWC.MR */
	bool set_polarity;		/* also set the FSPR polarity bit */
};
192
/* Wakeup source descriptors, referenced by index from the *_ws_ids tables. */
static const struct wakeup_source_info ws_info[] = {
	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10),	.set_polarity = true },
	{ .pmc_fsmr_bit = AT91_PMC_RTCAL,	.shdwc_mr_bit = BIT(17) },
	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
};
201
/* SAMA5D2: devices that may act as ULP1 wakeup sources. */
static const struct of_device_id sama5d2_ws_ids[] = {
	{ .compatible = "atmel,sama5d2-gem",		.data = &ws_info[0] },
	{ .compatible = "atmel,sama5d2-rtc",		.data = &ws_info[1] },
	{ .compatible = "atmel,sama5d3-udc",		.data = &ws_info[2] },
	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
	{ .compatible = "atmel,sama5d2-sdhci",		.data = &ws_info[3] },
	{ /* sentinel */ }
};
213
/* SAM9X60: devices that may act as ULP1 wakeup sources. */
static const struct of_device_id sam9x60_ws_ids[] = {
	{ .compatible = "microchip,sam9x60-rtc",	.data = &ws_info[1] },
	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
	{ .compatible = "microchip,sam9x60-rtt",	.data = &ws_info[4] },
	{ .compatible = "cdns,sam9x60-macb",		.data = &ws_info[5] },
	{ /* sentinel */ }
};
224
/* SAMA7G5: devices that may act as ULP1 wakeup sources. */
static const struct of_device_id sama7g5_ws_ids[] = {
	{ .compatible = "microchip,sama7g5-rtc",	.data = &ws_info[1] },
	{ .compatible = "microchip,sama7g5-ohci",	.data = &ws_info[2] },
	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
	{ .compatible = "microchip,sama7g5-sdhci",	.data = &ws_info[3] },
	{ .compatible = "microchip,sama7g5-rtt",	.data = &ws_info[4] },
	{ /* sentinel */ }
};
235
/* SAM9X7: devices that may act as ULP1 wakeup sources. */
static const struct of_device_id sam9x7_ws_ids[] = {
	{ .compatible = "microchip,sam9x7-rtc",		.data = &ws_info[1] },
	{ .compatible = "microchip,sam9x7-rtt",		.data = &ws_info[4] },
	{ .compatible = "microchip,sam9x7-gem",		.data = &ws_info[5] },
	{ /* sentinel */ }
};
242
/*
 * at91_pm_config_ws() - configure PMC fast start-up wakeup sources for ULP1.
 * @pm_mode: mode being entered; anything other than AT91_PM_ULP1 is a no-op
 * @set: true to program wakeup sources from wakeup-enabled devices,
 *       false to clear the PMC fast start-up register
 *
 * Returns 0 on success, -EPERM if the PMC/SHDWC are not mapped or if no
 * usable wakeup source could be found (suspend must then be refused, as
 * the system could never wake from ULP1).
 */
static int at91_pm_config_ws(unsigned int pm_mode, bool set)
{
	const struct wakeup_source_info *wsi;
	const struct of_device_id *match;
	struct platform_device *pdev;
	struct device_node *np;
	unsigned int mode = 0, polarity = 0, val = 0;

	if (pm_mode != AT91_PM_ULP1)
		return 0;

	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
		return -EPERM;

	/* Clearing: write 0 (mode is still 0 here) to disable all sources. */
	if (!set) {
		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
		return 0;
	}

	/* Let the SoC hook seed mode/polarity from the SHDWC wakeup inputs. */
	if (soc_pm.config_shdwc_ws)
		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);

	/* SHDWC.MR */
	val = readl(soc_pm.data.shdwc + 0x04);

	/* Loop through defined wakeup sources. */
	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
		pdev = of_find_device_by_node(np);
		if (!pdev)
			continue;

		if (device_may_wakeup(&pdev->dev)) {
			wsi = match->data;

			/* Check if enabled on SHDWC. */
			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
				goto put_device;

			mode |= wsi->pmc_fsmr_bit;
			if (wsi->set_polarity)
				polarity |= wsi->pmc_fsmr_bit;
		}

put_device:
		/* Balance of_find_device_by_node() for every iteration. */
		put_device(&pdev->dev);
	}

	if (mode) {
		if (soc_pm.config_pmc_ws)
			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
	} else {
		pr_err("AT91: PM: no ULP1 wakeup sources found!");
	}

	return mode ? 0 : -EPERM;
}
299
/*
 * Seed the PMC fast start-up mode/polarity masks from the SAMA5D2 SHDWC
 * wakeup input register (WUIR): enable bits live in [9:0], polarity bits
 * in [25:16].
 */
static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
					u32 *polarity)
{
	/* SHDWC.WUIR */
	u32 wuir = readl(shdwc + 0x0c);

	*mode |= wuir & 0x3ff;
	*polarity |= (wuir >> 16) & 0x3ff;

	return 0;
}
312
/* SAMA5D2: program both fast start-up mode and polarity registers. */
static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
{
	writel(mode, pmc + AT91_PMC_FSMR);
	writel(polarity, pmc + AT91_PMC_FSPR);

	return 0;
}
320
/* SAM9X60: only the fast start-up mode register exists; polarity is unused. */
static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
{
	writel(mode, pmc + AT91_PMC_FSMR);

	return 0;
}
327
at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth * eth)328 static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
329 {
330 struct platform_device *pdev;
331
332 /* Interface NA in DT. */
333 if (!eth->np)
334 return false;
335
336 /* No quirks for this interface and current suspend mode. */
337 if (!(eth->modes & BIT(soc_pm.data.mode)))
338 return false;
339
340 if (!eth->dev) {
341 /* Driver not probed. */
342 pdev = of_find_device_by_node(eth->np);
343 if (!pdev)
344 return false;
345 /* put_device(eth->dev) is called at the end of suspend. */
346 eth->dev = &pdev->dev;
347 }
348
349 /* No quirks if device isn't a wakeup source. */
350 if (!device_may_wakeup(eth->dev))
351 return false;
352
353 return true;
354 }
355
/*
 * at91_pm_config_quirks() - apply/revert Ethernet hardware quirks around
 * suspend.
 * @suspend: true on the way into suspend (gate the MAC clocks), false on
 *           resume (re-enable them and drop the device references)
 *
 * Returns 0 on success; -EPERM when a buggy interface is the sole wakeup
 * source (suspend must be aborted), or a clock error code on resume.
 */
static int at91_pm_config_quirks(bool suspend)
{
	struct at91_pm_quirk_eth *eth;
	int i, j, ret, tmp;

	/*
	 * Ethernet IPs whose device_node pointers are stored into
	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0, ULP1
	 * or both due to a hardware bug. If they receive WoL packets while in
	 * ULP0 or ULP1 IPs could stop working or the whole system could stop
	 * working. We cannot handle this scenario in the ethernet driver itself
	 * as the driver is common to multiple vendors and also we only know
	 * here, in this file, if we suspend to ULP0 or ULP1 mode. Thus handle
	 * these scenarios here, as quirks.
	 */
	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
		eth = &soc_pm.quirks.eth[i];

		if (!at91_pm_eth_quirk_is_valid(eth))
			continue;

		/*
		 * For modes in dns_modes mask the system blocks if quirk is not
		 * applied but if applied the interface doesn't act at WoL
		 * events. Thus take care to avoid suspending if this interface
		 * is the only configured wakeup source.
		 */
		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
			int ws_count = 0;
#ifdef CONFIG_PM_SLEEP
			struct wakeup_source *ws;

			/* Count one wakeup source other than this MAC. */
			for_each_wakeup_source(ws) {
				if (ws->dev == eth->dev)
					continue;

				ws_count++;
				break;
			}
#endif

			/*
			 * Checking !ws_count is good for all platforms with
			 * issues even when both G_ETH and E_ETH are available
			 * as dns_modes is populated only on G_ETH interface.
			 */
			if (!ws_count) {
				pr_err("AT91: PM: Ethernet cannot resume from WoL!");
				ret = -EPERM;
				put_device(eth->dev);
				eth->dev = NULL;
				/* No need to revert clock settings for this eth. */
				i--;
				goto clk_unconfigure;
			}
		}

		if (suspend) {
			/* Gate the MAC clocks so it cannot see WoL packets. */
			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
		} else {
			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
						      eth->clks);
			if (ret)
				goto clk_unconfigure;
			/*
			 * Release the reference to eth->dev taken in
			 * at91_pm_eth_quirk_is_valid().
			 */
			put_device(eth->dev);
			eth->dev = NULL;
		}
	}

	return 0;

clk_unconfigure:
	/*
	 * In case of resume we reach this point if clk_prepare_enable() failed.
	 * we don't want to revert the previous clk_prepare_enable() for the
	 * other IP.
	 */
	for (j = i; j >= 0; j--) {
		eth = &soc_pm.quirks.eth[j];
		if (suspend) {
			if (!at91_pm_eth_quirk_is_valid(eth))
				continue;

			/* Roll back: re-enable clocks gated earlier. */
			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
			if (tmp) {
				pr_err("AT91: PM: failed to enable %s clocks\n",
				       j == AT91_PM_G_ETH ? "geth" : "eth");
			}
		}

		/*
		 * Release the reference to eth->dev taken in
		 * at91_pm_eth_quirk_is_valid().
		 */
		put_device(eth->dev);
		eth->dev = NULL;
	}

	return ret;
}
460
461 /*
462 * Called after processes are frozen, but before we shutdown devices.
463 */
/*
 * Called after processes are frozen, but before we shutdown devices.
 * Selects the platform PM mode for @state, programs the ULP1 wakeup
 * sources and primes the backup-unit suspended flag.
 */
static int at91_pm_begin(suspend_state_t state)
{
	int ret;

	/* Map the generic suspend state onto the configured AT91 mode. */
	if (state == PM_SUSPEND_MEM)
		soc_pm.data.mode = soc_pm.data.suspend_mode;
	else if (state == PM_SUSPEND_STANDBY)
		soc_pm.data.mode = soc_pm.data.standby_mode;
	else
		soc_pm.data.mode = -1;

	ret = at91_pm_config_ws(soc_pm.data.mode, true);
	if (ret)
		return ret;

	/* Tell the bootloader (via securam) whether this is a backup cycle. */
	if (soc_pm.data.mode == AT91_PM_BACKUP)
		soc_pm.bu->suspended = 1;
	else if (soc_pm.bu)
		soc_pm.bu->suspended = 0;

	return 0;
}
492
493 /*
494 * Verify that all the clocks are correct before entering
495 * slow-clock mode.
496 */
/*
 * Verify that all the clocks are correct before entering
 * slow-clock mode.
 *
 * Returns 1 when it is safe to enter slow clock, 0 otherwise.
 */
static int at91_pm_verify_clocks(void)
{
	unsigned long scsr;
	int i;

	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);

	/* USB must not be using PLLB */
	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
		return 0;
	}

	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
	for (i = 0; i < 4; i++) {
		u32 css;

		/* Skip programmable clocks that are not enabled. */
		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
			continue;
		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
		if (css != AT91_PMC_CSS_SLOW) {
			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
			return 0;
		}
	}

	return 1;
}
525
526 /*
527 * Call this from platform driver suspend() to see how deeply to suspend.
528 * For example, some controllers (like OHCI) need one of the PLL clocks
529 * in order to act as a wakeup source, and those are not available when
530 * going into slow clock mode.
531 *
532 * REVISIT: generalize as clk_will_be_available(clk)? Other platforms have
533 * the very same problem (but not using at91 main_clk), and it'd be better
534 * to add one generic API rather than lots of platform-specific ones.
535 */
int at91_suspend_entering_slow_clock(void)
{
	/* ULP0 and deeper modes run from the slow clock. */
	return (soc_pm.data.mode >= AT91_PM_ULP0);
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
541
/* Copy of at91_pm_suspend_in_sram() relocated into SRAM (see fncpy() calls);
 * the original and its size are provided by the assembly suspend code. */
static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
extern u32 at91_pm_suspend_in_sram_sz;
545
/*
 * Final suspend step, run via cpu_suspend(): save the DDR PHY calibration
 * data and the first memory words for backup mode, flush caches, then jump
 * into the SRAM-resident suspend routine.
 */
static int at91_suspend_finish(unsigned long val)
{
	/* Lookup table mapping raw ZQ0SR0 field values to safe equivalents. */
	unsigned char modified_gray_code[] = {
		0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
		0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
		0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
		0x10, 0x11,
	};
	unsigned int tmp, index;
	int i;

	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
		/*
		 * Bootloader will perform DDR recalibration and will try to
		 * restore the ZQ0SR0 with the value saved here. But the
		 * calibration is buggy and restoring some values from ZQ0SR0
		 * is forbidden and risky thus we need to provide processed
		 * values for these (modified gray code values).
		 */
		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);

		/* Store pull-down output impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];

		/*
		 * NOTE(review): the three values below are OR-ed into word 0
		 * without being shifted back to their register field offsets,
		 * so they overlap the PDO bits — confirm this packing is what
		 * the bootloader expects.
		 */

		/* Store pull-up output impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];

		/* Store pull-down on-die termination impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];

		/* Store pull-up on-die termination impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];

		/*
		 * The 1st 8 words of memory might get corrupted in the process
		 * of DDR PHY recalibration; it is saved here in securam and it
		 * will be restored later, after recalibration, by bootloader
		 */
		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
			soc_pm.bu->ddr_phy_calibration[i] =
				*((unsigned int *)soc_pm.memcs + (i - 1));
	}

	flush_cache_all();
	outer_disable();

	at91_suspend_sram_fn(&soc_pm.data);

	return 0;
}
600
/*
 * Switch the backup area power source to VBAT before entering backup mode,
 * busy-polling the SFRBU state bit until the hardware reports the switch.
 */
static void at91_pm_switch_ba_to_vbat(void)
{
	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
	unsigned int val;

	/* Just for safety. */
	if (!soc_pm.data.sfrbu)
		return;

	val = readl(soc_pm.data.sfrbu + offset);

	/* Already on VBAT. */
	if (!(val & soc_pm.sfrbu_regs.pswbu.state))
		return;

	/* Hand control to hardware (clear softsw) and request the switch. */
	val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
	val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
	writel(val, soc_pm.data.sfrbu + offset);

	/* Wait for update. */
	val = readl(soc_pm.data.sfrbu + offset);
	while (val & soc_pm.sfrbu_regs.pswbu.state)
		val = readl(soc_pm.data.sfrbu + offset);
}
625
/*
 * Execute the suspend: backup mode goes through cpu_suspend() (the core
 * context is lost and restored via cpu_resume); all other modes call the
 * SRAM routine directly on the live CPU.
 */
static void at91_pm_suspend(suspend_state_t state)
{
	if (soc_pm.data.mode == AT91_PM_BACKUP) {
		at91_pm_switch_ba_to_vbat();

		cpu_suspend(0, at91_suspend_finish);

		/* The SRAM is lost between suspend cycles */
		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
					     &at91_pm_suspend_in_sram,
					     at91_pm_suspend_in_sram_sz);
	} else {
		at91_suspend_finish(0);
	}

	/* Re-enable the outer (L2) cache disabled in at91_suspend_finish(). */
	outer_resume();
}
643
644 /*
645 * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
646 * event sources; and reduces DRAM power. But otherwise it's identical to
647 * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
648 *
649 * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must
650 * suspend more deeply, the master clock switches to the clk32k and turns off
651 * the main oscillator
652 *
653 * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self refresh
654 */
/*
 * Enter the selected suspend state. Applies the Ethernet quirks around the
 * actual suspend and always reverts them before returning.
 *
 * Note: the success path deliberately falls through into the "error" label;
 * at91_pm_config_quirks(false) must run in every case.
 */
static int at91_pm_enter(suspend_state_t state)
{
	int ret;

	ret = at91_pm_config_quirks(true);
	if (ret)
		return ret;

	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		/*
		 * Ensure that clocks are in a valid state.
		 */
		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
		    !at91_pm_verify_clocks())
			goto error;

		at91_pm_suspend(state);

		break;

	case PM_SUSPEND_ON:
		cpu_do_idle();
		break;

	default:
		pr_debug("AT91: PM - bogus suspend state %d\n", state);
		goto error;
	}

error:
	at91_pm_config_quirks(false);
	return 0;
}
690
691 /*
692 * Called right prior to thawing processes.
693 */
/*
 * Called right prior to thawing processes.
 * Clears the ULP1 wakeup sources programmed in at91_pm_begin().
 */
static void at91_pm_end(void)
{
	at91_pm_config_ws(soc_pm.data.mode, false);
}
698
699
/* Hooks registered with the suspend core. */
static const struct platform_suspend_ops at91_pm_ops = {
	.valid	= at91_pm_valid_state,
	.begin	= at91_pm_begin,
	.enter	= at91_pm_enter,
	.end	= at91_pm_end,
};

/* Placeholder device; its platform_data carries the standby callback. */
static struct platform_device at91_cpuidle_device = {
	.name = "cpuidle-at91",
};
710
711 /*
712 * The AT91RM9200 goes into self-refresh mode with this command, and will
713 * terminate self-refresh automatically on the next SDRAM access.
714 *
715 * Self-refresh mode is exited as soon as a memory access is made, but we don't
716 * know for sure when that happens. However, we need to restore the low-power
717 * mode if it was enabled before going idle. Restoring low-power mode while
718 * still in self-refresh is "not recommended", but seems to work.
719 */
/*
 * The AT91RM9200 goes into self-refresh mode with this command, and will
 * terminate self-refresh automatically on the next SDRAM access.
 *
 * The sequence is cache-line aligned so the SDRAM is not touched between
 * issuing the self-refresh request (SRR write) and stopping the CPU:
 * drain the write buffer, write 1 to the SDRAMC SRR register, then
 * wait-for-interrupt (both via CP15 c7 operations).
 */
static void at91rm9200_standby(void)
{
	asm volatile(
		"b    1f\n\t"
		".align 5\n\t"
		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"
		"    str    %2, [%1, %3]\n\t"
		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
		:
		: "r" (0), "r" (soc_pm.data.ramc[0]),
		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
}
732
733 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
734 * remember.
735 */
at91_ddr_standby(void)736 static void at91_ddr_standby(void)
737 {
738 /* Those two values allow us to delay self-refresh activation
739 * to the maximum. */
740 u32 lpr0, lpr1 = 0;
741 u32 mdr, saved_mdr0, saved_mdr1 = 0;
742 u32 saved_lpr0, saved_lpr1 = 0;
743
744 /* LPDDR1 --> force DDR2 mode during self-refresh */
745 saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
746 if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
747 mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
748 mdr |= AT91_DDRSDRC_MD_DDR2;
749 at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
750 }
751
752 if (soc_pm.data.ramc[1]) {
753 saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
754 lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
755 lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
756 saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
757 if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
758 mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
759 mdr |= AT91_DDRSDRC_MD_DDR2;
760 at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
761 }
762 }
763
764 saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
765 lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
766 lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
767
768 /* self-refresh mode now */
769 at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
770 if (soc_pm.data.ramc[1])
771 at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
772
773 cpu_do_idle();
774
775 at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
776 at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
777 if (soc_pm.data.ramc[1]) {
778 at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr1);
779 at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
780 }
781 }
782
/*
 * SAMA5D3 standby: place the DDR controller in power-down (not self-refresh)
 * around the idle period, then restore the previous low-power configuration.
 */
static void sama5d3_ddr_standby(void)
{
	u32 lpr0;
	u32 saved_lpr0;

	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;

	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);

	cpu_do_idle();

	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
}
798
799 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
800 * remember.
801 */
/*
 * AT91SAM9 standby: put the SDRAM controller(s) in self-refresh around the
 * idle period, restoring the saved low-power configuration afterwards.
 */
static void at91sam9_sdram_standby(void)
{
	u32 lpr0, lpr1 = 0;
	u32 saved_lpr0, saved_lpr1 = 0;

	/* Second controller is optional. */
	if (soc_pm.data.ramc[1]) {
		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
	}

	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;

	/* self-refresh mode now */
	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
	if (soc_pm.data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);

	cpu_do_idle();

	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
	if (soc_pm.data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
828
/*
 * SAMA7G5 standby: enable UDDRC hardware self-refresh and slow the CPU
 * clock while idle, then restore both settings.
 */
static void sama7g5_standby(void)
{
	int pwrtmg, ratio;

	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);

	/*
	 * Place RAM into self-refresh after a maximum idle clocks. The maximum
	 * idle clocks is configured by bootloader in
	 * UDDRC_PWRMGT.SELFREF_TO_X32.
	 */
	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
	/* Divide CPU clock by 16. */
	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);

	cpu_do_idle();

	/* Restore previous configuration. */
	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
}
852
/* Per-RAM-controller data matched from the device tree. */
struct ramc_info {
	void (*idle)(void);		/* standby callback for cpuidle */
	unsigned int memctrl;		/* AT91_MEMCTRL_* identifier */
};

/* Indexed by the ramc_ids[] table below. */
static const struct ramc_info ramc_infos[] __initconst = {
	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
	{ .idle = sama7g5_standby, },
};
865
/* RAM controller compatibles handled by at91_dt_ramc(). */
static const struct of_device_id ramc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
	{ /*sentinel*/ }
};

/* Optional DDR PHY (mandatory on SAMA7G5, see at91_dt_ramc() caller). */
static const struct of_device_id ramc_phy_ids[] __initconst = {
	{ .compatible = "microchip,sama7g5-ddr3phy", },
	{ /* Sentinel. */ },
};
879
/*
 * at91_dt_ramc() - locate and map the RAM controller(s) and optional DDR PHY
 * from the device tree, and register the matching standby callback with the
 * cpuidle placeholder device.
 * @phy_mandatory: fail with -ENODEV if no DDR PHY node is found
 *
 * Returns 0 on success or a negative errno; already-mapped controllers are
 * unmapped on failure.
 */
static __init int at91_dt_ramc(bool phy_mandatory)
{
	struct device_node *np;
	const struct of_device_id *of_id;
	int idx = 0;
	void *standby = NULL;	/* idle callback of the first matching ramc */
	const struct ramc_info *ramc;
	int ret;

	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
		soc_pm.data.ramc[idx] = of_iomap(np, 0);
		if (!soc_pm.data.ramc[idx]) {
			pr_err("unable to map ramc[%d] cpu registers\n", idx);
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}

		ramc = of_id->data;
		if (ramc) {
			if (!standby)
				standby = ramc->idle;
			soc_pm.data.memctrl = ramc->memctrl;
		}

		idx++;
	}

	if (!idx) {
		pr_err("unable to find compatible ram controller node in dtb\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	/* Lookup for DDR PHY node, if any. */
	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
		soc_pm.data.ramc_phy = of_iomap(np, 0);
		if (!soc_pm.data.ramc_phy) {
			pr_err("unable to map ramc phy cpu registers\n");
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}
	}

	if (phy_mandatory && !soc_pm.data.ramc_phy) {
		pr_err("DDR PHY is mandatory!\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	/* Missing standby support is not fatal; cpuidle just won't use it. */
	if (!standby) {
		pr_warn("ramc no standby function available\n");
		return 0;
	}

	at91_cpuidle_device.dev.platform_data = standby;

	return 0;

unmap_ramc:
	while (idx)
		iounmap(soc_pm.data.ramc[--idx]);

	return ret;
}
946
static void at91rm9200_idle(void)
{
	/*
	 * Disable the processor clock. The processor will be automatically
	 * re-enabled by an interrupt or by a reset.
	 */
	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
}
955
/* SAM9: disable the processor clock, then wait for an interrupt. */
static void at91sam9_idle(void)
{
	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
	cpu_do_idle();
}
961
/*
 * Find an "mmio-sram" pool, carve out room for the low-level suspend code
 * and copy at91_pm_suspend_in_sram() there as executable memory. On any
 * failure a warning is printed and at91_suspend_sram_fn stays NULL.
 */
static void __init at91_pm_sram_init(void)
{
	struct gen_pool *sram_pool;
	phys_addr_t sram_pbase;
	unsigned long sram_base;
	struct device_node *node;
	struct platform_device *pdev = NULL;

	/* Use the first mmio-sram node that has a probed platform device. */
	for_each_compatible_node(node, NULL, "mmio-sram") {
		pdev = of_find_device_by_node(node);
		if (pdev) {
			of_node_put(node);
			break;
		}
	}

	if (!pdev) {
		pr_warn("%s: failed to find sram device!\n", __func__);
		return;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: sram pool unavailable!\n", __func__);
		goto out_put_device;
	}

	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
	if (!sram_base) {
		pr_warn("%s: unable to alloc sram!\n", __func__);
		goto out_put_device;
	}

	/* Map the allocation executable so the copied code can run. */
	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
					at91_pm_suspend_in_sram_sz, false);
	if (!at91_suspend_sram_fn) {
		pr_warn("SRAM: Could not map\n");
		goto out_put_device;
	}

	/* Copy the pm suspend handler to SRAM */
	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
	return;

out_put_device:
	put_device(&pdev->dev);
	return;
}
1012
at91_is_pm_mode_active(int pm_mode)1013 static bool __init at91_is_pm_mode_active(int pm_mode)
1014 {
1015 return (soc_pm.data.standby_mode == pm_mode ||
1016 soc_pm.data.suspend_mode == pm_mode);
1017 }
1018
/*
 * Flat-DT scan callback: record the virtual address of the first "memory"
 * node's base so at91_suspend_finish() can save its first words.
 * @data points to an int flag set to 1 once a node has been found.
 */
static int __init at91_pm_backup_scan_memcs(unsigned long node,
					    const char *uname, int depth,
					    void *data)
{
	const char *type;
	const __be32 *reg;
	int *located = data;
	int size;

	/* Memory node already located. */
	if (*located)
		return 0;

	type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "memory" nodes only. */
	if (!type || strcmp(type, "memory"))
		return 0;

	/* First cell of "reg" is the physical base of the memory bank. */
	reg = of_get_flat_dt_prop(node, "reg", &size);
	if (reg) {
		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
		*located = 1;
	}

	return 0;
}
1046
/*
 * Prepare backup-mode support: allocate the backup unit in securam, record
 * the canary and resume addresses for the bootloader, and locate the memory
 * base when a DDR PHY is present.
 *
 * Returns 0 on success (or when backup mode is not configured), -EPERM on
 * unsupported SoCs, or a negative errno on failure.
 */
static int __init at91_pm_backup_init(void)
{
	struct gen_pool *sram_pool;
	struct device_node *np;
	struct platform_device *pdev;
	int ret = -ENODEV, located = 0;

	/* Backup mode only exists on SAMA5D2 and SAMA7G5. */
	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
	    !IS_ENABLED(CONFIG_SOC_SAMA7G5))
		return -EPERM;

	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
		return 0;

	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
	if (!np)
		return ret;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		pr_warn("%s: failed to find securam device!\n", __func__);
		return ret;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: securam pool unavailable!\n", __func__);
		goto securam_fail;
	}

	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
	if (!soc_pm.bu) {
		pr_warn("%s: unable to alloc securam!\n", __func__);
		ret = -ENOMEM;
		goto securam_fail;
	}

	/* Physical addresses are consumed by the bootloader at resume. */
	soc_pm.bu->suspended = 0;
	soc_pm.bu->canary = __pa_symbol(&canary);
	soc_pm.bu->resume = __pa_symbol(cpu_resume);
	if (soc_pm.data.ramc_phy) {
		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
		if (!located)
			goto securam_fail;
	}

	return 0;

securam_fail:
	put_device(&pdev->dev);
	return ret;
}
1100
at91_pm_secure_init(void)1101 static void __init at91_pm_secure_init(void)
1102 {
1103 int suspend_mode;
1104 struct arm_smccc_res res;
1105
1106 suspend_mode = soc_pm.data.suspend_mode;
1107
1108 res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1109 suspend_mode, 0);
1110 if (res.a0 == 0) {
1111 pr_info("AT91: Secure PM: suspend mode set to %s\n",
1112 pm_modes[suspend_mode].pattern);
1113 soc_pm.data.mode = suspend_mode;
1114 return;
1115 }
1116
1117 pr_warn("AT91: Secure PM: %s mode not supported !\n",
1118 pm_modes[suspend_mode].pattern);
1119
1120 res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1121 if (res.a0 == 0) {
1122 pr_warn("AT91: Secure PM: failed to get default mode\n");
1123 soc_pm.data.mode = -1;
1124 return;
1125 }
1126
1127 pr_info("AT91: Secure PM: using default suspend mode %s\n",
1128 pm_modes[suspend_mode].pattern);
1129
1130 soc_pm.data.suspend_mode = res.a1;
1131 soc_pm.data.mode = soc_pm.data.suspend_mode;
1132 }
/* Shutdown controller (SHDWC) device tree match table. */
static const struct of_device_id atmel_shdwc_ids[] = {
	{ .compatible = "atmel,sama5d2-shdwc" },
	{ .compatible = "microchip,sam9x60-shdwc" },
	{ .compatible = "microchip,sama7g5-shdwc" },
	{ /* sentinel. */ }
};
1139
/* Gigabit Ethernet (GEM) controllers checked by the wakeup quirks. */
static const struct of_device_id gmac_ids[] __initconst = {
	{ .compatible = "atmel,sama5d3-gem" },
	{ .compatible = "atmel,sama5d2-gem" },
	{ .compatible = "atmel,sama5d29-gem" },
	{ .compatible = "microchip,sama7g5-gem" },
	{ },
};
1147
/* 10/100 Ethernet (MACB/EMAC) controllers checked by the wakeup quirks. */
static const struct of_device_id emac_ids[] __initconst = {
	{ .compatible = "atmel,sama5d3-macb" },
	{ .compatible = "microchip,sama7g5-emac" },
	{ },
};
1153
/*
 * AT91_PM_REPLACE_MODE() - replace _mode_to_replace with a supported
 * mode that does not depend on the controller selected by _map_bitmask.
 * @_maps: u32 array containing AT91_PM_IOMAP() flags, indexed by AT91
 * PM mode
 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends
 * on the controller represented by _map_bitmask, _mode_to_replace is
 * updated
 * @_mode_to_replace: standby_mode or suspend_mode that needs to be
 * updated
 * @_mode_to_check: the other of standby_mode/suspend_mode; passed so
 * that standby_mode and suspend_mode never end up set to the same AT91
 * PM mode (STANDBY is the fallback when ULP0 is already taken or also
 * depends on the controller)
 */
#define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
			     _mode_to_check)				\
	do {								\
		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
			int _mode_to_use, _mode_complementary;		\
			/* Use ULP0 if it doesn't need _map_bitmask. */	\
			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
				_mode_to_use = AT91_PM_ULP0;		\
				_mode_complementary = AT91_PM_STANDBY;	\
			} else {					\
				_mode_to_use = AT91_PM_STANDBY;		\
				_mode_complementary = AT91_PM_STANDBY;	\
			}						\
									\
			if ((_mode_to_check) != _mode_to_use)		\
				(_mode_to_replace) = _mode_to_use;	\
			else						\
				(_mode_to_replace) = _mode_complementary;\
		}							\
	} while (0)
1188
/*
 * AT91_PM_REPLACE_MODES() - replace both the standby and the suspend
 * modes with default supported modes (ULP0 and STANDBY) when they
 * depend on an unavailable controller.
 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
 * flags
 * @_map: controller-specific name (SHDWC, SFRBU, ETHC); standby and
 * suspend modes are replaced so that they no longer depend on this
 * controller
 */
#define AT91_PM_REPLACE_MODES(_maps, _map)				\
	do {								\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.standby_mode),	\
				     (soc_pm.data.suspend_mode));	\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.suspend_mode),	\
				     (soc_pm.data.standby_mode));	\
	} while (0)
1206
at91_pm_get_eth_clks(struct device_node * np,struct clk_bulk_data * clks)1207 static int __init at91_pm_get_eth_clks(struct device_node *np,
1208 struct clk_bulk_data *clks)
1209 {
1210 clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1211 if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1212 return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1213
1214 clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1215 if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1216 return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1217
1218 return 0;
1219 }
1220
at91_pm_eth_clks_empty(struct clk_bulk_data * clks)1221 static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1222 {
1223 return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1224 IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1225 }
1226
/*
 * at91_pm_modes_init() - map the controllers needed by the selected PM
 * modes and demote those modes when a controller is unavailable.
 * @maps: array indexed by AT91 PM mode holding AT91_PM_IOMAP() flags
 * @len:  number of entries in @maps (not referenced in this body)
 *
 * Depending on the chosen standby/suspend modes this maps the SHDWC and
 * SFRBU, resolves the Ethernet nodes and clocks used by the ULP wakeup
 * quirks, and falls back via AT91_PM_REPLACE_MODES() whenever a needed
 * controller cannot be found.  Mappings no longer required after a
 * fallback are undone at the end.
 */
static void __init at91_pm_modes_init(const u32 *maps, int len)
{
	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
	struct device_node *np;
	int ret;

	/* Backup mode needs securam; fall back to ULP0 if unavailable. */
	ret = at91_pm_backup_init();
	if (ret) {
		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
			soc_pm.data.standby_mode = AT91_PM_ULP0;
		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
			soc_pm.data.suspend_mode = AT91_PM_ULP0;
	}

	/* Map the shutdown controller if either mode requires it. */
	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
		np = of_find_matching_node(NULL, atmel_shdwc_ids);
		if (!np) {
			pr_warn("%s: failed to find shdwc!\n", __func__);
			AT91_PM_REPLACE_MODES(maps, SHDWC);
		} else {
			soc_pm.data.shdwc = of_iomap(np, 0);
			of_node_put(np);
		}
	}

	/* Map the special function register backup unit, if required. */
	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
		if (!np) {
			pr_warn("%s: failed to find sfrbu!\n", __func__);
			AT91_PM_REPLACE_MODES(maps, SFRBU);
		} else {
			soc_pm.data.sfrbu = of_iomap(np, 0);
			of_node_put(np);
		}
	}

	/*
	 * Resolve Ethernet nodes and their clocks for the wakeup quirks.
	 * Flow: try GEM first; if absent, try EMAC directly (jumping to
	 * get_emac_clks); if neither controller exists, replace the
	 * modes and skip to the unmap stage.
	 */
	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
		np = of_find_matching_node(NULL, gmac_ids);
		if (!np) {
			np = of_find_matching_node(NULL, emac_ids);
			if (np)
				goto get_emac_clks;
			AT91_PM_REPLACE_MODES(maps, ETHC);
			goto unmap_unused_nodes;
		} else {
			gmac->np = np;
			at91_pm_get_eth_clks(np, gmac->clks);
		}

		np = of_find_matching_node(NULL, emac_ids);
		if (!np) {
			/* No EMAC: modes are usable only if GEM clocks are. */
			if (at91_pm_eth_clks_empty(gmac->clks))
				AT91_PM_REPLACE_MODES(maps, ETHC);
		} else {
get_emac_clks:
			emac->np = np;
			ret = at91_pm_get_eth_clks(np, emac->clks);
			/* Neither controller usable: drop both node refs. */
			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
				of_node_put(gmac->np);
				of_node_put(emac->np);
				gmac->np = NULL;
				emac->np = NULL;
			}
		}
	}

unmap_unused_nodes:
	/* Unmap all unnecessary. */
	if (soc_pm.data.shdwc &&
	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
		iounmap(soc_pm.data.shdwc);
		soc_pm.data.shdwc = NULL;
	}

	if (soc_pm.data.sfrbu &&
	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
		iounmap(soc_pm.data.sfrbu);
		soc_pm.data.sfrbu = NULL;
	}

	return;
}
1318
/**
 * struct pmc_info - per-SoC PMC layout description
 * @uhp_udp_mask: mask of the USB host (UHP) and USB device (UDP) bits in
 *                the PMC system clock register
 * @mckr: offset of the master clock register within the PMC
 * @version: PMC version (AT91_PMC_V1 or AT91_PMC_V2)
 */
struct pmc_info {
	unsigned long uhp_udp_mask;
	unsigned long mckr;
	unsigned long version;
};
1324
/*
 * PMC descriptions, referenced by index from atmel_pmc_ids[] below.
 * Entries 0-3 are PMC v1 variants differing in USB clock bits; entries
 * 4-5 are PMC v2 (sam9x60/sam9x7 and sama7g5 class respectively).
 */
static const struct pmc_info pmc_infos[] __initconst = {
	{
		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},

	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{	/* No USB clocks in the system clock register. */
		.uhp_udp_mask = 0,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
		.mckr = 0x28,
		.version = AT91_PMC_V2,
	},
	{	/* .uhp_udp_mask left zero-initialized. */
		.mckr = 0x28,
		.version = AT91_PMC_V2,
	},

};
1357
/* Maps each PMC compatible string to its pmc_infos[] entry. */
static const struct of_device_id atmel_pmc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
	{ .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] },
	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
	{ /* sentinel */ },
};
1375
at91_pm_modes_validate(const int * modes,int len)1376 static void __init at91_pm_modes_validate(const int *modes, int len)
1377 {
1378 u8 i, standby = 0, suspend = 0;
1379 int mode;
1380
1381 for (i = 0; i < len; i++) {
1382 if (standby && suspend)
1383 break;
1384
1385 if (modes[i] == soc_pm.data.standby_mode && !standby) {
1386 standby = 1;
1387 continue;
1388 }
1389
1390 if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1391 suspend = 1;
1392 continue;
1393 }
1394 }
1395
1396 if (!standby) {
1397 if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1398 mode = AT91_PM_ULP0;
1399 else
1400 mode = AT91_PM_STANDBY;
1401
1402 pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1403 pm_modes[soc_pm.data.standby_mode].pattern,
1404 pm_modes[mode].pattern);
1405 soc_pm.data.standby_mode = mode;
1406 }
1407
1408 if (!suspend) {
1409 if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1410 mode = AT91_PM_STANDBY;
1411 else
1412 mode = AT91_PM_ULP0;
1413
1414 pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1415 pm_modes[soc_pm.data.suspend_mode].pattern,
1416 pm_modes[mode].pattern);
1417 soc_pm.data.suspend_mode = mode;
1418 }
1419 }
1420
at91_pm_init(void (* pm_idle)(void))1421 static void __init at91_pm_init(void (*pm_idle)(void))
1422 {
1423 struct device_node *pmc_np;
1424 const struct of_device_id *of_id;
1425 const struct pmc_info *pmc;
1426
1427 if (at91_cpuidle_device.dev.platform_data)
1428 platform_device_register(&at91_cpuidle_device);
1429
1430 pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1431 soc_pm.data.pmc = of_iomap(pmc_np, 0);
1432 of_node_put(pmc_np);
1433 if (!soc_pm.data.pmc) {
1434 pr_err("AT91: PM not supported, PMC not found\n");
1435 return;
1436 }
1437
1438 pmc = of_id->data;
1439 soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1440 soc_pm.data.pmc_mckr_offset = pmc->mckr;
1441 soc_pm.data.pmc_version = pmc->version;
1442
1443 if (pm_idle)
1444 arm_pm_idle = pm_idle;
1445
1446 at91_pm_sram_init();
1447
1448 if (at91_suspend_sram_fn) {
1449 suspend_set_ops(&at91_pm_ops);
1450 pr_info("AT91: PM: standby: %s, suspend: %s\n",
1451 pm_modes[soc_pm.data.standby_mode].pattern,
1452 pm_modes[soc_pm.data.suspend_mode].pattern);
1453 } else {
1454 pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1455 }
1456 }
1457
at91rm9200_pm_init(void)1458 void __init at91rm9200_pm_init(void)
1459 {
1460 int ret;
1461
1462 if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1463 return;
1464
1465 /*
1466 * Force STANDBY and ULP0 mode to avoid calling
1467 * at91_pm_modes_validate() which may increase booting time.
1468 * Platform supports anyway only STANDBY and ULP0 modes.
1469 */
1470 soc_pm.data.standby_mode = AT91_PM_STANDBY;
1471 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1472
1473 ret = at91_dt_ramc(false);
1474 if (ret)
1475 return;
1476
1477 /*
1478 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1479 */
1480 at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1481
1482 at91_pm_init(at91rm9200_idle);
1483 }
1484
sam9x60_pm_init(void)1485 void __init sam9x60_pm_init(void)
1486 {
1487 static const int modes[] __initconst = {
1488 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1489 };
1490 static const int iomaps[] __initconst = {
1491 [AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC),
1492 };
1493 int ret;
1494
1495 if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1496 return;
1497
1498 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1499 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1500 ret = at91_dt_ramc(false);
1501 if (ret)
1502 return;
1503
1504 at91_pm_init(NULL);
1505
1506 soc_pm.ws_ids = sam9x60_ws_ids;
1507 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1508 }
1509
sam9x7_pm_init(void)1510 void __init sam9x7_pm_init(void)
1511 {
1512 static const int modes[] __initconst = {
1513 AT91_PM_STANDBY, AT91_PM_ULP0,
1514 };
1515 int ret;
1516
1517 if (!IS_ENABLED(CONFIG_SOC_SAM9X7))
1518 return;
1519
1520 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1521 ret = at91_dt_ramc(false);
1522 if (ret)
1523 return;
1524
1525 at91_pm_init(NULL);
1526
1527 soc_pm.ws_ids = sam9x7_ws_ids;
1528 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1529 }
1530
at91sam9_pm_init(void)1531 void __init at91sam9_pm_init(void)
1532 {
1533 int ret;
1534
1535 if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1536 return;
1537
1538 /*
1539 * Force STANDBY and ULP0 mode to avoid calling
1540 * at91_pm_modes_validate() which may increase booting time.
1541 * Platform supports anyway only STANDBY and ULP0 modes.
1542 */
1543 soc_pm.data.standby_mode = AT91_PM_STANDBY;
1544 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1545
1546 ret = at91_dt_ramc(false);
1547 if (ret)
1548 return;
1549
1550 at91_pm_init(at91sam9_idle);
1551 }
1552
sama5_pm_init(void)1553 void __init sama5_pm_init(void)
1554 {
1555 static const int modes[] __initconst = {
1556 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1557 };
1558 static const u32 iomaps[] __initconst = {
1559 [AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
1560 [AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
1561 };
1562 int ret;
1563
1564 if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1565 return;
1566
1567 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1568 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1569 ret = at91_dt_ramc(false);
1570 if (ret)
1571 return;
1572
1573 at91_pm_init(NULL);
1574
1575 /* Quirks applies to ULP0, ULP0 fast and ULP1 modes. */
1576 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1577 BIT(AT91_PM_ULP0_FAST) |
1578 BIT(AT91_PM_ULP1);
1579 /* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1580 soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1581 BIT(AT91_PM_ULP0_FAST);
1582 }
1583
sama5d2_pm_init(void)1584 void __init sama5d2_pm_init(void)
1585 {
1586 static const int modes[] __initconst = {
1587 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1588 AT91_PM_BACKUP,
1589 };
1590 static const u32 iomaps[] __initconst = {
1591 [AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
1592 [AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
1593 [AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC) |
1594 AT91_PM_IOMAP(ETHC),
1595 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SHDWC) |
1596 AT91_PM_IOMAP(SFRBU),
1597 };
1598 int ret;
1599
1600 if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1601 return;
1602
1603 if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1604 pr_warn("AT91: Secure PM: ignoring standby mode\n");
1605 at91_pm_secure_init();
1606 return;
1607 }
1608
1609 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1610 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1611 ret = at91_dt_ramc(false);
1612 if (ret)
1613 return;
1614
1615 at91_pm_init(NULL);
1616
1617 soc_pm.ws_ids = sama5d2_ws_ids;
1618 soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1619 soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1620
1621 soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1622 soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1623 soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1624 soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1625
1626 /* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1627 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1628 BIT(AT91_PM_ULP0_FAST) |
1629 BIT(AT91_PM_ULP1);
1630 /*
1631 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1632 * source.
1633 */
1634 soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1635 BIT(AT91_PM_ULP0_FAST);
1636 }
1637
sama7_pm_init(void)1638 void __init sama7_pm_init(void)
1639 {
1640 static const int modes[] __initconst = {
1641 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1642 };
1643 static const u32 iomaps[] __initconst = {
1644 [AT91_PM_ULP0] = AT91_PM_IOMAP(SFRBU),
1645 [AT91_PM_ULP1] = AT91_PM_IOMAP(SFRBU) |
1646 AT91_PM_IOMAP(SHDWC) |
1647 AT91_PM_IOMAP(ETHC),
1648 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SFRBU) |
1649 AT91_PM_IOMAP(SHDWC),
1650 };
1651 int ret;
1652
1653 if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1654 return;
1655
1656 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1657
1658 ret = at91_dt_ramc(true);
1659 if (ret)
1660 return;
1661
1662 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1663 at91_pm_init(NULL);
1664
1665 soc_pm.ws_ids = sama7g5_ws_ids;
1666 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1667
1668 soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1669 soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1670 soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1671 soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1672
1673 /* Quirks applies to ULP1 for both Ethernet interfaces. */
1674 soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1675 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1676 }
1677
at91_pm_modes_select(char * str)1678 static int __init at91_pm_modes_select(char *str)
1679 {
1680 char *s;
1681 substring_t args[MAX_OPT_ARGS];
1682 int standby, suspend;
1683
1684 if (!str)
1685 return 0;
1686
1687 s = strsep(&str, ",");
1688 standby = match_token(s, pm_modes, args);
1689 if (standby < 0)
1690 return 0;
1691
1692 suspend = match_token(str, pm_modes, args);
1693 if (suspend < 0)
1694 return 0;
1695
1696 soc_pm.data.standby_mode = standby;
1697 soc_pm.data.suspend_mode = suspend;
1698
1699 return 0;
1700 }
1701 early_param("atmel.pm_modes", at91_pm_modes_select);
1702