1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * arch/arm/mach-at91/pm.c
4  * AT91 Power Management
5  *
6  * Copyright (C) 2005 David Brownell
7  */
8 
9 #include <linux/genalloc.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/parser.h>
16 #include <linux/suspend.h>
17 
18 #include <linux/clk.h>
19 #include <linux/clk/at91_pmc.h>
20 #include <linux/platform_data/atmel.h>
21 
22 #include <asm/cacheflush.h>
23 #include <asm/fncpy.h>
24 #include <asm/system_misc.h>
25 #include <asm/suspend.h>
26 
27 #include "generic.h"
28 #include "pm.h"
29 #include "sam_secure.h"
30 
31 #define BACKUP_DDR_PHY_CALIBRATION	(9)
32 
33 /**
34  * struct at91_pm_bu - AT91 power management backup unit data structure
35  * @suspended: true if suspended to backup mode
36  * @reserved: reserved
37  * @canary: canary data for memory checking after exit from backup mode
38  * @resume: resume API
39  * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
40  * of the memory
41  */
42 struct at91_pm_bu {
43 	int suspended;
44 	unsigned long reserved;
45 	phys_addr_t canary;
46 	phys_addr_t resume;
47 	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
48 };
49 
50 /**
51  * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
52  * @pswbu: power switch BU control registers
53  */
54 struct at91_pm_sfrbu_regs {
55 	struct {
56 		u32 key;
57 		u32 ctrl;
58 		u32 state;
59 		u32 softsw;
60 	} pswbu;
61 };
62 
63 /**
64  * enum at91_pm_eth_clk - Ethernet clock indexes
65  * @AT91_PM_ETH_PCLK: pclk index
66  * @AT91_PM_ETH_HCLK: hclk index
67  * @AT91_PM_ETH_MAX_CLK: max index
68  */
69 enum at91_pm_eth_clk {
70 	AT91_PM_ETH_PCLK,
71 	AT91_PM_ETH_HCLK,
72 	AT91_PM_ETH_MAX_CLK,
73 };
74 
75 /**
76  * enum at91_pm_eth - Ethernet controller indexes
77  * @AT91_PM_G_ETH: gigabit Ethernet controller index
78  * @AT91_PM_E_ETH: megabit Ethernet controller index
79  * @AT91_PM_MAX_ETH: max index
80  */
81 enum at91_pm_eth {
82 	AT91_PM_G_ETH,
83 	AT91_PM_E_ETH,
84 	AT91_PM_MAX_ETH,
85 };
86 
87 /**
88  * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
89  * @dev: Ethernet device
90  * @np: Ethernet device node
91  * @clks: Ethernet clocks
92  * @modes: power management modes that this quirk applies to
93  * @dns_modes: do-not-suspend modes: abort the suspend if the Ethernet
94  *	       interface is configured as a wakeup source but is buggy in
95  *	       these modes and no other wakeup source is available
96  */
97 struct at91_pm_quirk_eth {
98 	struct device *dev;
99 	struct device_node *np;
100 	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
101 	u32 modes;
102 	u32 dns_modes;
103 };
104 
105 /**
106  * struct at91_pm_quirks - AT91 PM quirks
107  * @eth: Ethernet quirks
108  */
109 struct at91_pm_quirks {
110 	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
111 };
112 
113 /**
114  * struct at91_soc_pm - AT91 SoC power management data structure
115  * @config_shdwc_ws: wakeup sources configuration function for SHDWC
116  * @config_pmc_ws: wakeup sources configuration function for PMC
117  * @ws_ids: wakeup sources of_device_id array
118  * @bu: backup unit mapped data (for backup mode)
119  * @quirks: PM quirks
120  * @data: PM data to be used in the last phase of suspend
121  * @sfrbu_regs: SFRBU registers mapping
122  * @memcs: memory chip select
123  */
124 struct at91_soc_pm {
125 	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
126 	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
127 	const struct of_device_id *ws_ids;
128 	struct at91_pm_bu *bu;
129 	struct at91_pm_quirks quirks;
130 	struct at91_pm_data data;
131 	struct at91_pm_sfrbu_regs sfrbu_regs;
132 	void *memcs;
133 };
134 
135 /**
136  * enum at91_pm_iomaps - IOs that need to be mapped for different PM modes
137  * @AT91_PM_IOMAP_SHDWC:	SHDWC controller
138  * @AT91_PM_IOMAP_SFRBU:	SFRBU controller
139  * @AT91_PM_IOMAP_ETHC:		Ethernet controller
140  */
141 enum at91_pm_iomaps {
142 	AT91_PM_IOMAP_SHDWC,
143 	AT91_PM_IOMAP_SFRBU,
144 	AT91_PM_IOMAP_ETHC,
145 };
146 
147 #define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)
148 
149 static struct at91_soc_pm soc_pm = {
150 	.data = {
151 		.standby_mode = AT91_PM_STANDBY,
152 		.suspend_mode = AT91_PM_ULP0,
153 	},
154 };
155 
156 static const match_table_t pm_modes __initconst = {
157 	{ AT91_PM_STANDBY,	"standby" },
158 	{ AT91_PM_ULP0,		"ulp0" },
159 	{ AT91_PM_ULP0_FAST,    "ulp0-fast" },
160 	{ AT91_PM_ULP1,		"ulp1" },
161 	{ AT91_PM_BACKUP,	"backup" },
162 	{ -1, NULL },
163 };
164 
165 #define at91_ramc_read(id, field) \
166 	__raw_readl(soc_pm.data.ramc[id] + field)
167 
168 #define at91_ramc_write(id, field, value) \
169 	__raw_writel(value, soc_pm.data.ramc[id] + field)
170 
171 static int at91_pm_valid_state(suspend_state_t state)
172 {
173 	switch (state) {
174 	case PM_SUSPEND_ON:
175 	case PM_SUSPEND_STANDBY:
176 	case PM_SUSPEND_MEM:
177 		return 1;
178 
179 	default:
180 		return 0;
181 	}
182 }
183 
184 static int canary = 0xA5A5A5A5;
185 
186 struct wakeup_source_info {
187 	unsigned int pmc_fsmr_bit;
188 	unsigned int shdwc_mr_bit;
189 	bool set_polarity;
190 };
191 
192 static const struct wakeup_source_info ws_info[] = {
193 	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10),	.set_polarity = true },
194 	{ .pmc_fsmr_bit = AT91_PMC_RTCAL,	.shdwc_mr_bit = BIT(17) },
195 	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
196 	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
197 	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
198 	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
199 };
200 
201 static const struct of_device_id sama5d2_ws_ids[] = {
202 	{ .compatible = "atmel,sama5d2-gem",		.data = &ws_info[0] },
203 	{ .compatible = "atmel,sama5d2-rtc",		.data = &ws_info[1] },
204 	{ .compatible = "atmel,sama5d3-udc",		.data = &ws_info[2] },
205 	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
206 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
207 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
208 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
209 	{ .compatible = "atmel,sama5d2-sdhci",		.data = &ws_info[3] },
210 	{ /* sentinel */ }
211 };
212 
213 static const struct of_device_id sam9x60_ws_ids[] = {
214 	{ .compatible = "microchip,sam9x60-rtc",	.data = &ws_info[1] },
215 	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
216 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
217 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
218 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
219 	{ .compatible = "microchip,sam9x60-rtt",	.data = &ws_info[4] },
220 	{ .compatible = "cdns,sam9x60-macb",		.data = &ws_info[5] },
221 	{ /* sentinel */ }
222 };
223 
224 static const struct of_device_id sama7g5_ws_ids[] = {
225 	{ .compatible = "microchip,sama7g5-rtc",	.data = &ws_info[1] },
226 	{ .compatible = "microchip,sama7g5-ohci",	.data = &ws_info[2] },
227 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
228 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
229 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
230 	{ .compatible = "microchip,sama7g5-sdhci",	.data = &ws_info[3] },
231 	{ .compatible = "microchip,sama7g5-rtt",	.data = &ws_info[4] },
232 	{ /* sentinel */ }
233 };
234 
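/*
 * Enable or disable the ULP1 wakeup sources: walk the DT nodes listed in
 * soc_pm.ws_ids and, for every device allowed to wake up the system (and,
 * where relevant, enabled in SHDWC.MR), set the matching bit in PMC_FSMR.
 * Returns -EPERM if no usable wakeup source is found.
 */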
235 static int at91_pm_config_ws(unsigned int pm_mode, bool set)
236 {
237 	const struct wakeup_source_info *wsi;
238 	const struct of_device_id *match;
239 	struct platform_device *pdev;
240 	struct device_node *np;
241 	unsigned int mode = 0, polarity = 0, val = 0;
242 
243 	if (pm_mode != AT91_PM_ULP1)
244 		return 0;
245 
246 	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
247 		return -EPERM;
248 
249 	if (!set) {
250 		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
251 		return 0;
252 	}
253 
254 	if (soc_pm.config_shdwc_ws)
255 		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
256 
257 	/* SHDWC.MR */
258 	val = readl(soc_pm.data.shdwc + 0x04);
259 
260 	/* Loop through defined wakeup sources. */
261 	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
262 		pdev = of_find_device_by_node(np);
263 		if (!pdev)
264 			continue;
265 
266 		if (device_may_wakeup(&pdev->dev)) {
267 			wsi = match->data;
268 
269 			/* Check if enabled on SHDWC. */
270 			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
271 				goto put_device;
272 
273 			mode |= wsi->pmc_fsmr_bit;
274 			if (wsi->set_polarity)
275 				polarity |= wsi->pmc_fsmr_bit;
276 		}
277 
278 put_device:
279 		put_device(&pdev->dev);
280 	}
281 
282 	if (mode) {
283 		if (soc_pm.config_pmc_ws)
284 			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
285 	} else {
286 		pr_err("AT91: PM: no ULP1 wakeup sources found!");
287 	}
288 
289 	return mode ? 0 : -EPERM;
290 }
291 
292 static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
293 					u32 *polarity)
294 {
295 	u32 val;
296 
297 	/* SHDWC.WUIR */
298 	val = readl(shdwc + 0x0c);
299 	*mode |= (val & 0x3ff);
300 	*polarity |= ((val >> 16) & 0x3ff);
301 
302 	return 0;
303 }
304 
305 static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
306 {
307 	writel(mode, pmc + AT91_PMC_FSMR);
308 	writel(polarity, pmc + AT91_PMC_FSPR);
309 
310 	return 0;
311 }
312 
313 static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
314 {
315 	writel(mode, pmc + AT91_PMC_FSMR);
316 
317 	return 0;
318 }
319 
320 static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
321 {
322 	struct platform_device *pdev;
323 
324 	/* Interface not available in DT. */
325 	if (!eth->np)
326 		return false;
327 
328 	/* No quirks for this interface and current suspend mode. */
329 	if (!(eth->modes & BIT(soc_pm.data.mode)))
330 		return false;
331 
332 	if (!eth->dev) {
333 		/* Driver not probed. */
334 		pdev = of_find_device_by_node(eth->np);
335 		if (!pdev)
336 			return false;
337 		eth->dev = &pdev->dev;
338 	}
339 
340 	/* No quirks if device isn't a wakeup source. */
341 	if (!device_may_wakeup(eth->dev)) {
342 		put_device(eth->dev);
343 		return false;
344 	}
345 
346 	/* put_device(eth->dev) is called at the end of suspend. */
347 	return true;
348 }
349 
350 static int at91_pm_config_quirks(bool suspend)
351 {
352 	struct at91_pm_quirk_eth *eth;
353 	int i, j, ret, tmp;
354 
355 	/*
356 	 * Ethernet IPs whose device_node pointers are stored in
357 	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0,
358 	 * ULP1 or both due to a hardware bug. If they receive WoL packets
359 	 * while in ULP0 or ULP1, the IPs or even the whole system could stop
360 	 * working. We cannot handle this scenario in the Ethernet driver
361 	 * itself, as the driver is shared across multiple vendors, and only
362 	 * here, in this file, do we know whether we suspend to ULP0 or ULP1.
363 	 * Thus handle these scenarios here, as quirks.
364 	 */
365 	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
366 		eth = &soc_pm.quirks.eth[i];
367 
368 		if (!at91_pm_eth_quirk_is_valid(eth))
369 			continue;
370 
371 		/*
372 		 * For modes in the dns_modes mask the system hangs if the quirk
373 		 * is not applied, but with the quirk applied the interface does
374 		 * not react to WoL events. Thus take care to avoid suspending
375 		 * if this interface is the only configured wakeup source.
376 		 */
377 		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
378 			int ws_count = 0;
379 #ifdef CONFIG_PM_SLEEP
380 			struct wakeup_source *ws;
381 
382 			for_each_wakeup_source(ws) {
383 				if (ws->dev == eth->dev)
384 					continue;
385 
386 				ws_count++;
387 				break;
388 			}
389 #endif
390 
391 			/*
392 			 * Checking !ws_count covers all affected platforms,
393 			 * even when both G_ETH and E_ETH are available, as
394 			 * dns_modes is populated only for the G_ETH interface.
395 			 */
396 			if (!ws_count) {
397 				pr_err("AT91: PM: Ethernet cannot resume from WoL!");
398 				ret = -EPERM;
399 				put_device(eth->dev);
400 				eth->dev = NULL;
401 				/* No need to revert clock settings for this eth. */
402 				i--;
403 				goto clk_unconfigure;
404 			}
405 		}
406 
407 		if (suspend) {
408 			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
409 		} else {
410 			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
411 						      eth->clks);
412 			if (ret)
413 				goto clk_unconfigure;
414 			/*
415 			 * Release the reference to eth->dev taken in
416 			 * at91_pm_eth_quirk_is_valid().
417 			 */
418 			put_device(eth->dev);
419 			eth->dev = NULL;
420 		}
421 	}
422 
423 	return 0;
424 
425 clk_unconfigure:
426 	/*
427 	 * On resume we reach this point if clk_bulk_prepare_enable() failed;
428 	 * we don't want to revert the previous clk_bulk_prepare_enable() for
429 	 * the other IP.
430 	 */
431 	for (j = i; j >= 0; j--) {
432 		eth = &soc_pm.quirks.eth[j];
433 		if (suspend) {
434 			if (!at91_pm_eth_quirk_is_valid(eth))
435 				continue;
436 
437 			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
438 			if (tmp) {
439 				pr_err("AT91: PM: failed to enable %s clocks\n",
440 				       j == AT91_PM_G_ETH ? "geth" : "eth");
441 			}
442 		} else {
443 			/*
444 			 * Release the reference to eth->dev taken in
445 			 * at91_pm_eth_quirk_is_valid().
446 			 */
447 			put_device(eth->dev);
448 			eth->dev = NULL;
449 		}
450 	}
451 
452 	return ret;
453 }
454 
455 /*
456  * Called after processes are frozen, but before we shut down devices.
457  */
458 static int at91_pm_begin(suspend_state_t state)
459 {
460 	int ret;
461 
462 	switch (state) {
463 	case PM_SUSPEND_MEM:
464 		soc_pm.data.mode = soc_pm.data.suspend_mode;
465 		break;
466 
467 	case PM_SUSPEND_STANDBY:
468 		soc_pm.data.mode = soc_pm.data.standby_mode;
469 		break;
470 
471 	default:
472 		soc_pm.data.mode = -1;
473 	}
474 
475 	ret = at91_pm_config_ws(soc_pm.data.mode, true);
476 	if (ret)
477 		return ret;
478 
479 	if (soc_pm.data.mode == AT91_PM_BACKUP)
480 		soc_pm.bu->suspended = 1;
481 	else if (soc_pm.bu)
482 		soc_pm.bu->suspended = 0;
483 
484 	return 0;
485 }
486 
487 /*
488  * Verify that all the clocks are correct before entering
489  * slow-clock mode.
490  */
491 static int at91_pm_verify_clocks(void)
492 {
493 	unsigned long scsr;
494 	int i;
495 
496 	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
497 
498 	/* USB must not be using PLLB */
499 	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
500 		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
501 		return 0;
502 	}
503 
504 	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
505 	for (i = 0; i < 4; i++) {
506 		u32 css;
507 
508 		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
509 			continue;
510 		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
511 		if (css != AT91_PMC_CSS_SLOW) {
512 			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
513 			return 0;
514 		}
515 	}
516 
517 	return 1;
518 }
519 
520 /*
521  * Call this from platform driver suspend() to see how deeply to suspend.
522  * For example, some controllers (like OHCI) need one of the PLL clocks
523  * in order to act as a wakeup source, and those are not available when
524  * going into slow clock mode.
525  *
526  * REVISIT: generalize as clk_will_be_available(clk)?  Other platforms have
527  * the very same problem (but not using at91 main_clk), and it'd be better
528  * to add one generic API rather than lots of platform-specific ones.
529  */
530 int at91_suspend_entering_slow_clock(void)
531 {
532 	return (soc_pm.data.mode >= AT91_PM_ULP0);
533 }
534 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
535 
536 static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
537 extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
538 extern u32 at91_pm_suspend_in_sram_sz;
539 
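/*
 * Last step of suspend (called via cpu_suspend() for backup mode, directly
 * otherwise): for backup mode with a DDR PHY, save the processed ZQ
 * calibration value and the first memory words into securam for the
 * bootloader, then flush the caches and branch to the suspend routine
 * previously copied into SRAM.
 */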
540 static int at91_suspend_finish(unsigned long val)
541 {
542 	unsigned char modified_gray_code[] = {
543 		0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
544 		0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
545 		0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
546 		0x10, 0x11,
547 	};
548 	unsigned int tmp, index;
549 	int i;
550 
551 	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
552 		/*
553 		 * The bootloader will perform DDR recalibration and will try
554 		 * to restore ZQ0SR0 with the value saved here. But the
555 		 * calibration is buggy, and restoring some values from ZQ0SR0
556 		 * is forbidden and risky, thus we need to provide processed
557 		 * (modified gray code) values for these.
558 		 */
559 		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
560 
561 		/* Store pull-down output impedance select. */
562 		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
563 		soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
564 
565 		/* Store pull-up output impedance select. */
566 		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
567 		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
568 
569 		/* Store pull-down on-die termination impedance select. */
570 		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
571 		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
572 
573 		/* Store pull-up on-die termination impedance select. */
574 		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
575 		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
576 
577 		/*
578 		 * The first 8 words of memory might get corrupted during DDR
579 		 * PHY recalibration; they are saved here in securam and will
580 		 * be restored later, after recalibration, by the bootloader.
581 		 */
582 		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
583 			soc_pm.bu->ddr_phy_calibration[i] =
584 				*((unsigned int *)soc_pm.memcs + (i - 1));
585 	}
586 
587 	flush_cache_all();
588 	outer_disable();
589 
590 	at91_suspend_sram_fn(&soc_pm.data);
591 
592 	return 0;
593 }
594 
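/*
 * Ask the SFRBU power switch to supply the backup area from VBAT (if it is
 * not already) and busy-wait until the switch is reported as complete.
 */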
595 static void at91_pm_switch_ba_to_vbat(void)
596 {
597 	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
598 	unsigned int val;
599 
600 	/* Just for safety. */
601 	if (!soc_pm.data.sfrbu)
602 		return;
603 
604 	val = readl(soc_pm.data.sfrbu + offset);
605 
606 	/* Already on VBAT. */
607 	if (!(val & soc_pm.sfrbu_regs.pswbu.state))
608 		return;
609 
610 	val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
611 	val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
612 	writel(val, soc_pm.data.sfrbu + offset);
613 
614 	/* Wait for update. */
615 	val = readl(soc_pm.data.sfrbu + offset);
616 	while (val & soc_pm.sfrbu_regs.pswbu.state)
617 		val = readl(soc_pm.data.sfrbu + offset);
618 }
619 
620 static void at91_pm_suspend(suspend_state_t state)
621 {
622 	if (soc_pm.data.mode == AT91_PM_BACKUP) {
623 		at91_pm_switch_ba_to_vbat();
624 
625 		cpu_suspend(0, at91_suspend_finish);
626 
627 		/* The SRAM is lost between suspend cycles */
628 		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
629 					     &at91_pm_suspend_in_sram,
630 					     at91_pm_suspend_in_sram_sz);
631 	} else {
632 		at91_suspend_finish(0);
633 	}
634 
635 	outer_resume();
636 }
637 
638 /*
639  * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
640  * event sources; and reduces DRAM power.  But otherwise it's identical to
641  * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
642  *
643  * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must
644  * suspend more deeply: the master clock switches to clk32k and the main
645  * oscillator is turned off.
646  *
647  * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self-refresh.
648  */
649 static int at91_pm_enter(suspend_state_t state)
650 {
651 	int ret;
652 
653 	ret = at91_pm_config_quirks(true);
654 	if (ret)
655 		return ret;
656 
657 	switch (state) {
658 	case PM_SUSPEND_MEM:
659 	case PM_SUSPEND_STANDBY:
660 		/*
661 		 * Ensure that clocks are in a valid state.
662 		 */
663 		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
664 		    !at91_pm_verify_clocks())
665 			goto error;
666 
667 		at91_pm_suspend(state);
668 
669 		break;
670 
671 	case PM_SUSPEND_ON:
672 		cpu_do_idle();
673 		break;
674 
675 	default:
676 		pr_debug("AT91: PM - bogus suspend state %d\n", state);
677 		goto error;
678 	}
679 
680 error:
681 	at91_pm_config_quirks(false);
682 	return 0;
683 }
684 
685 /*
686  * Called right before thawing processes.
687  */
688 static void at91_pm_end(void)
689 {
690 	at91_pm_config_ws(soc_pm.data.mode, false);
691 }
692 
693 
694 static const struct platform_suspend_ops at91_pm_ops = {
695 	.valid	= at91_pm_valid_state,
696 	.begin	= at91_pm_begin,
697 	.enter	= at91_pm_enter,
698 	.end	= at91_pm_end,
699 };
700 
701 static struct platform_device at91_cpuidle_device = {
702 	.name = "cpuidle-at91",
703 };
704 
705 /*
706  * The AT91RM9200 goes into self-refresh mode with this command, and will
707  * terminate self-refresh automatically on the next SDRAM access.
708  *
709  * Self-refresh mode is exited as soon as a memory access is made, but we don't
710  * know for sure when that happens. However, we need to restore the low-power
711  * mode if it was enabled before going idle. Restoring low-power mode while
712  * still in self-refresh is "not recommended", but seems to work.
713  */
714 static void at91rm9200_standby(void)
715 {
716 	asm volatile(
717 		"b    1f\n\t"
718 		".align    5\n\t"
719 		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"
720 		"    str    %2, [%1, %3]\n\t"
721 		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
722 		:
723 		: "r" (0), "r" (soc_pm.data.ramc[0]),
724 		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
725 }
726 
727 /* We manage both DDRAM/SDRAM controllers, so we need more than one value to
728  * remember.
729  */
730 static void at91_ddr_standby(void)
731 {
732 	/* Those two values allow us to delay self-refresh activation
733 	 * to the maximum. */
734 	u32 lpr0, lpr1 = 0;
735 	u32 mdr, saved_mdr0, saved_mdr1 = 0;
736 	u32 saved_lpr0, saved_lpr1 = 0;
737 
738 	/* LPDDR1 --> force DDR2 mode during self-refresh */
739 	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
740 	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
741 		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
742 		mdr |= AT91_DDRSDRC_MD_DDR2;
743 		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
744 	}
745 
746 	if (soc_pm.data.ramc[1]) {
747 		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
748 		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
749 		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
750 		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
751 		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
752 			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
753 			mdr |= AT91_DDRSDRC_MD_DDR2;
754 			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
755 		}
756 	}
757 
758 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
759 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
760 	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
761 
762 	/* self-refresh mode now */
763 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
764 	if (soc_pm.data.ramc[1])
765 		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
766 
767 	cpu_do_idle();
768 
769 	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
770 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
771 	if (soc_pm.data.ramc[1]) {
772 		at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
773 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
774 	}
775 }
776 
777 static void sama5d3_ddr_standby(void)
778 {
779 	u32 lpr0;
780 	u32 saved_lpr0;
781 
782 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
783 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
784 	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
785 
786 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
787 
788 	cpu_do_idle();
789 
790 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
791 }
792 
793 /* We manage both DDRAM/SDRAM controllers, so we need more than one value to
794  * remember.
795  */
796 static void at91sam9_sdram_standby(void)
797 {
798 	u32 lpr0, lpr1 = 0;
799 	u32 saved_lpr0, saved_lpr1 = 0;
800 
801 	if (soc_pm.data.ramc[1]) {
802 		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
803 		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
804 		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
805 	}
806 
807 	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
808 	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
809 	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
810 
811 	/* self-refresh mode now */
812 	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
813 	if (soc_pm.data.ramc[1])
814 		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
815 
816 	cpu_do_idle();
817 
818 	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
819 	if (soc_pm.data.ramc[1])
820 		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
821 }
822 
823 static void sama7g5_standby(void)
824 {
825 	int pwrtmg, ratio;
826 
827 	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
828 	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
829 
830 	/*
831 	 * Place the RAM into self-refresh after a maximum number of idle
832 	 * clock cycles. That maximum is configured by the bootloader in
833 	 * UDDRC_PWRMGT.SELFREF_TO_X32.
834 	 */
835 	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
836 	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
837 	/* Divide CPU clock by 16. */
838 	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
839 
840 	cpu_do_idle();
841 
842 	/* Restore previous configuration. */
843 	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
844 	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
845 }
846 
847 struct ramc_info {
848 	void (*idle)(void);
849 	unsigned int memctrl;
850 };
851 
852 static const struct ramc_info ramc_infos[] __initconst = {
853 	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
854 	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
855 	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
856 	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
857 	{ .idle = sama7g5_standby, },
858 };
859 
860 static const struct of_device_id ramc_ids[] __initconst = {
861 	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
862 	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
863 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
864 	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
865 	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
866 	{ /*sentinel*/ }
867 };
868 
869 static const struct of_device_id ramc_phy_ids[] __initconst = {
870 	{ .compatible = "microchip,sama7g5-ddr3phy", },
871 	{ /* Sentinel. */ },
872 };
873 
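/*
 * Map the RAM controller(s) described in the device tree (plus the DDR PHY,
 * when present or mandatory) and register the matching standby handler as
 * platform data for the cpuidle device.
 */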
874 static __init int at91_dt_ramc(bool phy_mandatory)
875 {
876 	struct device_node *np;
877 	const struct of_device_id *of_id;
878 	int idx = 0;
879 	void *standby = NULL;
880 	const struct ramc_info *ramc;
881 	int ret;
882 
883 	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
884 		soc_pm.data.ramc[idx] = of_iomap(np, 0);
885 		if (!soc_pm.data.ramc[idx]) {
886 			pr_err("unable to map ramc[%d] cpu registers\n", idx);
887 			ret = -ENOMEM;
888 			of_node_put(np);
889 			goto unmap_ramc;
890 		}
891 
892 		ramc = of_id->data;
893 		if (ramc) {
894 			if (!standby)
895 				standby = ramc->idle;
896 			soc_pm.data.memctrl = ramc->memctrl;
897 		}
898 
899 		idx++;
900 	}
901 
902 	if (!idx) {
903 		pr_err("unable to find compatible ram controller node in dtb\n");
904 		ret = -ENODEV;
905 		goto unmap_ramc;
906 	}
907 
908 	/* Look up the DDR PHY node, if any. */
909 	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
910 		soc_pm.data.ramc_phy = of_iomap(np, 0);
911 		if (!soc_pm.data.ramc_phy) {
912 			pr_err("unable to map ramc phy cpu registers\n");
913 			ret = -ENOMEM;
914 			of_node_put(np);
915 			goto unmap_ramc;
916 		}
917 	}
918 
919 	if (phy_mandatory && !soc_pm.data.ramc_phy) {
920 		pr_err("DDR PHY is mandatory!\n");
921 		ret = -ENODEV;
922 		goto unmap_ramc;
923 	}
924 
925 	if (!standby) {
926 		pr_warn("ramc no standby function available\n");
927 		return 0;
928 	}
929 
930 	at91_cpuidle_device.dev.platform_data = standby;
931 
932 	return 0;
933 
934 unmap_ramc:
935 	while (idx)
936 		iounmap(soc_pm.data.ramc[--idx]);
937 
938 	return ret;
939 }
940 
941 static void at91rm9200_idle(void)
942 {
943 	/*
944 	 * Disable the processor clock.  The processor will be automatically
945 	 * re-enabled by an interrupt or by a reset.
946 	 */
947 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
948 }
949 
950 static void at91sam9_idle(void)
951 {
952 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
953 	cpu_do_idle();
954 }
955 
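/*
 * Allocate a chunk of the on-chip SRAM, map it as executable and copy the
 * low-level suspend handler (at91_pm_suspend_in_sram) into it.
 */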
956 static void __init at91_pm_sram_init(void)
957 {
958 	struct gen_pool *sram_pool;
959 	phys_addr_t sram_pbase;
960 	unsigned long sram_base;
961 	struct device_node *node;
962 	struct platform_device *pdev = NULL;
963 
964 	for_each_compatible_node(node, NULL, "mmio-sram") {
965 		pdev = of_find_device_by_node(node);
966 		if (pdev) {
967 			of_node_put(node);
968 			break;
969 		}
970 	}
971 
972 	if (!pdev) {
973 		pr_warn("%s: failed to find sram device!\n", __func__);
974 		return;
975 	}
976 
977 	sram_pool = gen_pool_get(&pdev->dev, NULL);
978 	if (!sram_pool) {
979 		pr_warn("%s: sram pool unavailable!\n", __func__);
980 		goto out_put_device;
981 	}
982 
983 	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
984 	if (!sram_base) {
985 		pr_warn("%s: unable to alloc sram!\n", __func__);
986 		goto out_put_device;
987 	}
988 
989 	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
990 	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
991 					at91_pm_suspend_in_sram_sz, false);
992 	if (!at91_suspend_sram_fn) {
993 		pr_warn("SRAM: Could not map\n");
994 		goto out_put_device;
995 	}
996 
997 	/* Copy the pm suspend handler to SRAM */
998 	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
999 			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
1000 	return;
1001 
1002 out_put_device:
1003 	put_device(&pdev->dev);
1004 	return;
1005 }
1006 
1007 static bool __init at91_is_pm_mode_active(int pm_mode)
1008 {
1009 	return (soc_pm.data.standby_mode == pm_mode ||
1010 		soc_pm.data.suspend_mode == pm_mode);
1011 }
1012 
1013 static int __init at91_pm_backup_scan_memcs(unsigned long node,
1014 					    const char *uname, int depth,
1015 					    void *data)
1016 {
1017 	const char *type;
1018 	const __be32 *reg;
1019 	int *located = data;
1020 	int size;
1021 
1022 	/* Memory node already located. */
1023 	if (*located)
1024 		return 0;
1025 
1026 	type = of_get_flat_dt_prop(node, "device_type", NULL);
1027 
1028 	/* We are scanning "memory" nodes only. */
1029 	if (!type || strcmp(type, "memory"))
1030 		return 0;
1031 
1032 	reg = of_get_flat_dt_prop(node, "reg", &size);
1033 	if (reg) {
1034 		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1035 		*located = 1;
1036 	}
1037 
1038 	return 0;
1039 }
1040 
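/*
 * Prepare backup mode: allocate the backup unit structure in securam and
 * fill in the canary and resume addresses used by the bootloader on wakeup;
 * for SoCs with a DDR PHY, also locate the start of system memory.
 */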
1041 static int __init at91_pm_backup_init(void)
1042 {
1043 	struct gen_pool *sram_pool;
1044 	struct device_node *np;
1045 	struct platform_device *pdev;
1046 	int ret = -ENODEV, located = 0;
1047 
1048 	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
1049 	    !IS_ENABLED(CONFIG_SOC_SAMA7G5))
1050 		return -EPERM;
1051 
1052 	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
1053 		return 0;
1054 
1055 	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
1056 	if (!np)
1057 		return ret;
1058 
1059 	pdev = of_find_device_by_node(np);
1060 	of_node_put(np);
1061 	if (!pdev) {
1062 		pr_warn("%s: failed to find securam device!\n", __func__);
1063 		return ret;
1064 	}
1065 
1066 	sram_pool = gen_pool_get(&pdev->dev, NULL);
1067 	if (!sram_pool) {
1068 		pr_warn("%s: securam pool unavailable!\n", __func__);
1069 		goto securam_fail;
1070 	}
1071 
1072 	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
1073 	if (!soc_pm.bu) {
1074 		pr_warn("%s: unable to alloc securam!\n", __func__);
1075 		ret = -ENOMEM;
1076 		goto securam_fail;
1077 	}
1078 
1079 	soc_pm.bu->suspended = 0;
1080 	soc_pm.bu->canary = __pa_symbol(&canary);
1081 	soc_pm.bu->resume = __pa_symbol(cpu_resume);
1082 	if (soc_pm.data.ramc_phy) {
1083 		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
1084 		if (!located)
1085 			goto securam_fail;
1086 	}
1087 
1088 	return 0;
1089 
1090 securam_fail:
1091 	put_device(&pdev->dev);
1092 	return ret;
1093 }
1094 
1095 static void __init at91_pm_secure_init(void)
1096 {
1097 	int suspend_mode;
1098 	struct arm_smccc_res res;
1099 
1100 	suspend_mode = soc_pm.data.suspend_mode;
1101 
1102 	res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1103 			     suspend_mode, 0);
1104 	if (res.a0 == 0) {
1105 		pr_info("AT91: Secure PM: suspend mode set to %s\n",
1106 			pm_modes[suspend_mode].pattern);
1107 		return;
1108 	}
1109 
1110 	pr_warn("AT91: Secure PM: %s mode not supported !\n",
1111 		pm_modes[suspend_mode].pattern);
1112 
1113 	res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1114 	if (res.a0 == 0) {
1115 		pr_warn("AT91: Secure PM: failed to get default mode\n");
1116 		return;
1117 	}
1118 
1119 	pr_info("AT91: Secure PM: using default suspend mode %s\n",
1120 		pm_modes[suspend_mode].pattern);
1121 
1122 	soc_pm.data.suspend_mode = res.a1;
1123 }

1124 static const struct of_device_id atmel_shdwc_ids[] = {
1125 	{ .compatible = "atmel,sama5d2-shdwc" },
1126 	{ .compatible = "microchip,sam9x60-shdwc" },
1127 	{ .compatible = "microchip,sama7g5-shdwc" },
1128 	{ /* sentinel. */ }
1129 };
1130 
1131 static const struct of_device_id gmac_ids[] __initconst = {
1132 	{ .compatible = "atmel,sama5d3-gem" },
1133 	{ .compatible = "atmel,sama5d2-gem" },
1134 	{ .compatible = "atmel,sama5d29-gem" },
1135 	{ .compatible = "microchip,sama7g5-gem" },
1136 	{ },
1137 };
1138 
1139 static const struct of_device_id emac_ids[] __initconst = {
1140 	{ .compatible = "atmel,sama5d3-macb" },
1141 	{ .compatible = "microchip,sama7g5-emac" },
1142 	{ },
1143 };
1144 
1145 /*
1146  * Replaces _mode_to_replace with a supported mode that doesn't depend
1147  * on the controller pointed to by _map_bitmask.
1148  * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
1149  * PM mode
1150  * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
1151  * controller represented by _map_bitmask, _mode_to_replace needs to be
1152  * updated
1153  * @_mode_to_replace: standby_mode or suspend_mode that needs to be
1154  * updated
1155  * @_mode_to_check: standby_mode or suspend_mode; this is needed here
1156  * to avoid having standby_mode and suspend_mode set with the same AT91
1157  * PM mode
1158  */
1159 #define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
1160 			     _mode_to_check)				\
1161 	do {								\
1162 		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
1163 			int _mode_to_use, _mode_complementary;		\
1164 			/* Use ULP0 if it doesn't need _map_bitmask. */	\
1165 			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
1166 				_mode_to_use = AT91_PM_ULP0;		\
1167 				_mode_complementary = AT91_PM_STANDBY;	\
1168 			} else {					\
1169 				_mode_to_use = AT91_PM_STANDBY;		\
1170 				_mode_complementary = AT91_PM_STANDBY;	\
1171 			}						\
1172 									\
1173 			if ((_mode_to_check) != _mode_to_use)		\
1174 				(_mode_to_replace) = _mode_to_use;	\
1175 			else						\
1176 				(_mode_to_replace) = _mode_complementary;\
1177 		}							\
1178 	} while (0)
1179 
1180 /*
1181  * Replaces standby and suspend modes with default supported modes:
1182  * ULP0 and STANDBY.
1183  * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
1184  * flags
1185  * @_map: controller specific name; standby and suspend mode need to be
1186  * replaced in order to not depend on this controller
1187  */
1188 #define AT91_PM_REPLACE_MODES(_maps, _map)				\
1189 	do {								\
1190 		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1191 				     (soc_pm.data.standby_mode),	\
1192 				     (soc_pm.data.suspend_mode));	\
1193 		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1194 				     (soc_pm.data.suspend_mode),	\
1195 				     (soc_pm.data.standby_mode));	\
1196 	} while (0)
1197 
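/*
 * Grab the "pclk" and "hclk" clocks of an Ethernet controller, needed by the
 * Ethernet wakeup quirks.
 */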
1198 static int __init at91_pm_get_eth_clks(struct device_node *np,
1199 				       struct clk_bulk_data *clks)
1200 {
1201 	clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1202 	if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1203 		return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1204 
1205 	clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1206 	if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1207 		return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1208 
1209 	return 0;
1210 }
1211 
1212 static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1213 {
1214 	return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1215 	       IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1216 }
1217 
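/*
 * Map the controllers (SHDWC, SFRBU, Ethernet) required by the selected
 * standby and suspend modes, falling back to STANDBY/ULP0 whenever a
 * required controller is missing, and unmap whatever ends up unused.
 */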
1218 static void __init at91_pm_modes_init(const u32 *maps, int len)
1219 {
1220 	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
1221 	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
1222 	struct device_node *np;
1223 	int ret;
1224 
1225 	ret = at91_pm_backup_init();
1226 	if (ret) {
1227 		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
1228 			soc_pm.data.standby_mode = AT91_PM_ULP0;
1229 		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
1230 			soc_pm.data.suspend_mode = AT91_PM_ULP0;
1231 	}
1232 
1233 	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1234 	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
1235 		np = of_find_matching_node(NULL, atmel_shdwc_ids);
1236 		if (!np) {
1237 			pr_warn("%s: failed to find shdwc!\n", __func__);
1238 			AT91_PM_REPLACE_MODES(maps, SHDWC);
1239 		} else {
1240 			soc_pm.data.shdwc = of_iomap(np, 0);
1241 			of_node_put(np);
1242 		}
1243 	}
1244 
1245 	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1246 	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
1247 		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
1248 		if (!np) {
1249 			pr_warn("%s: failed to find sfrbu!\n", __func__);
1250 			AT91_PM_REPLACE_MODES(maps, SFRBU);
1251 		} else {
1252 			soc_pm.data.sfrbu = of_iomap(np, 0);
1253 			of_node_put(np);
1254 		}
1255 	}
1256 
1257 	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
1258 	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
1259 	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
1260 	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
1261 	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
1262 		np = of_find_matching_node(NULL, gmac_ids);
1263 		if (!np) {
1264 			np = of_find_matching_node(NULL, emac_ids);
1265 			if (np)
1266 				goto get_emac_clks;
1267 			AT91_PM_REPLACE_MODES(maps, ETHC);
1268 			goto unmap_unused_nodes;
1269 		} else {
1270 			gmac->np = np;
1271 			at91_pm_get_eth_clks(np, gmac->clks);
1272 		}
1273 
1274 		np = of_find_matching_node(NULL, emac_ids);
1275 		if (!np) {
1276 			if (at91_pm_eth_clks_empty(gmac->clks))
1277 				AT91_PM_REPLACE_MODES(maps, ETHC);
1278 		} else {
1279 get_emac_clks:
1280 			emac->np = np;
1281 			ret = at91_pm_get_eth_clks(np, emac->clks);
1282 			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
1283 				of_node_put(gmac->np);
1284 				of_node_put(emac->np);
1285 				gmac->np = NULL;
1286 				emac->np = NULL;
1287 			}
1288 		}
1289 	}
1290 
1291 unmap_unused_nodes:
1292 	/* Unmap all controllers that are no longer needed. */
1293 	if (soc_pm.data.shdwc &&
1294 	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1295 	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
1296 		iounmap(soc_pm.data.shdwc);
1297 		soc_pm.data.shdwc = NULL;
1298 	}
1299 
1300 	if (soc_pm.data.sfrbu &&
1301 	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1302 	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
1303 		iounmap(soc_pm.data.sfrbu);
1304 		soc_pm.data.sfrbu = NULL;
1305 	}
1306 
1307 	return;
1308 }
1309 
1310 struct pmc_info {
1311 	unsigned long uhp_udp_mask;
1312 	unsigned long mckr;
1313 	unsigned long version;
1314 };
1315 
1316 static const struct pmc_info pmc_infos[] __initconst = {
1317 	{
1318 		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
1319 		.mckr = 0x30,
1320 		.version = AT91_PMC_V1,
1321 	},
1322 
1323 	{
1324 		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1325 		.mckr = 0x30,
1326 		.version = AT91_PMC_V1,
1327 	},
1328 	{
1329 		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
1330 		.mckr = 0x30,
1331 		.version = AT91_PMC_V1,
1332 	},
1333 	{	.uhp_udp_mask = 0,
1334 		.mckr = 0x30,
1335 		.version = AT91_PMC_V1,
1336 	},
1337 	{
1338 		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1339 		.mckr = 0x28,
1340 		.version = AT91_PMC_V2,
1341 	},
1342 	{
1343 		.mckr = 0x28,
1344 		.version = AT91_PMC_V2,
1345 	},
1346 
1347 };
1348 
1349 static const struct of_device_id atmel_pmc_ids[] __initconst = {
1350 	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
1351 	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
1352 	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
1353 	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
1354 	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
1355 	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
1356 	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
1357 	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
1358 	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
1359 	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
1360 	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
1361 	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
1362 	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
1363 	{ /* sentinel */ },
1364 };
1365 
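/*
 * Check that the requested standby and suspend modes are in the SoC's list
 * of supported modes; replace any unsupported mode with STANDBY or ULP0.
 */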
1366 static void __init at91_pm_modes_validate(const int *modes, int len)
1367 {
1368 	u8 i, standby = 0, suspend = 0;
1369 	int mode;
1370 
1371 	for (i = 0; i < len; i++) {
1372 		if (standby && suspend)
1373 			break;
1374 
1375 		if (modes[i] == soc_pm.data.standby_mode && !standby) {
1376 			standby = 1;
1377 			continue;
1378 		}
1379 
1380 		if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1381 			suspend = 1;
1382 			continue;
1383 		}
1384 	}
1385 
1386 	if (!standby) {
1387 		if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1388 			mode = AT91_PM_ULP0;
1389 		else
1390 			mode = AT91_PM_STANDBY;
1391 
1392 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1393 			pm_modes[soc_pm.data.standby_mode].pattern,
1394 			pm_modes[mode].pattern);
1395 		soc_pm.data.standby_mode = mode;
1396 	}
1397 
1398 	if (!suspend) {
1399 		if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1400 			mode = AT91_PM_STANDBY;
1401 		else
1402 			mode = AT91_PM_ULP0;
1403 
1404 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1405 			pm_modes[soc_pm.data.suspend_mode].pattern,
1406 			pm_modes[mode].pattern);
1407 		soc_pm.data.suspend_mode = mode;
1408 	}
1409 }
1410 
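/*
 * Common PM setup: register the cpuidle device, map the PMC, copy the
 * suspend handler to SRAM and, if that succeeded, register the suspend ops.
 */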
1411 static void __init at91_pm_init(void (*pm_idle)(void))
1412 {
1413 	struct device_node *pmc_np;
1414 	const struct of_device_id *of_id;
1415 	const struct pmc_info *pmc;
1416 
1417 	if (at91_cpuidle_device.dev.platform_data)
1418 		platform_device_register(&at91_cpuidle_device);
1419 
1420 	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1421 	soc_pm.data.pmc = of_iomap(pmc_np, 0);
1422 	of_node_put(pmc_np);
1423 	if (!soc_pm.data.pmc) {
1424 		pr_err("AT91: PM not supported, PMC not found\n");
1425 		return;
1426 	}
1427 
1428 	pmc = of_id->data;
1429 	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1430 	soc_pm.data.pmc_mckr_offset = pmc->mckr;
1431 	soc_pm.data.pmc_version = pmc->version;
1432 
1433 	if (pm_idle)
1434 		arm_pm_idle = pm_idle;
1435 
1436 	at91_pm_sram_init();
1437 
1438 	if (at91_suspend_sram_fn) {
1439 		suspend_set_ops(&at91_pm_ops);
1440 		pr_info("AT91: PM: standby: %s, suspend: %s\n",
1441 			pm_modes[soc_pm.data.standby_mode].pattern,
1442 			pm_modes[soc_pm.data.suspend_mode].pattern);
1443 	} else {
1444 		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1445 	}
1446 }
1447 
1448 void __init at91rm9200_pm_init(void)
1449 {
1450 	int ret;
1451 
1452 	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1453 		return;
1454 
1455 	/*
1456 	 * Force STANDBY and ULP0 modes to avoid calling
1457 	 * at91_pm_modes_validate(), which may increase boot time.
1458 	 * The platform only supports STANDBY and ULP0 modes anyway.
1459 	 */
1460 	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1461 	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1462 
1463 	ret = at91_dt_ramc(false);
1464 	if (ret)
1465 		return;
1466 
1467 	/*
1468 	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1469 	 */
1470 	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1471 
1472 	at91_pm_init(at91rm9200_idle);
1473 }
1474 
1475 void __init sam9x60_pm_init(void)
1476 {
1477 	static const int modes[] __initconst = {
1478 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1479 	};
1480 	static const int iomaps[] __initconst = {
1481 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC),
1482 	};
1483 	int ret;
1484 
1485 	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1486 		return;
1487 
1488 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1489 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1490 	ret = at91_dt_ramc(false);
1491 	if (ret)
1492 		return;
1493 
1494 	at91_pm_init(NULL);
1495 
1496 	soc_pm.ws_ids = sam9x60_ws_ids;
1497 	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1498 }
1499 
1500 void __init at91sam9_pm_init(void)
1501 {
1502 	int ret;
1503 
1504 	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1505 		return;
1506 
1507 	/*
1508 	 * Force STANDBY and ULP0 modes to avoid calling
1509 	 * at91_pm_modes_validate(), which may increase boot time.
1510 	 * The platform only supports STANDBY and ULP0 modes anyway.
1511 	 */
1512 	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1513 	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1514 
1515 	ret = at91_dt_ramc(false);
1516 	if (ret)
1517 		return;
1518 
1519 	at91_pm_init(at91sam9_idle);
1520 }
1521 
1522 void __init sama5_pm_init(void)
1523 {
1524 	static const int modes[] __initconst = {
1525 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1526 	};
1527 	static const u32 iomaps[] __initconst = {
1528 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1529 		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1530 	};
1531 	int ret;
1532 
1533 	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1534 		return;
1535 
1536 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1537 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1538 	ret = at91_dt_ramc(false);
1539 	if (ret)
1540 		return;
1541 
1542 	at91_pm_init(NULL);
1543 
1544 	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1545 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1546 						 BIT(AT91_PM_ULP0_FAST) |
1547 						 BIT(AT91_PM_ULP1);
1548 	/* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1549 	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1550 						     BIT(AT91_PM_ULP0_FAST);
1551 }
1552 
1553 void __init sama5d2_pm_init(void)
1554 {
1555 	static const int modes[] __initconst = {
1556 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1557 		AT91_PM_BACKUP,
1558 	};
1559 	static const u32 iomaps[] __initconst = {
1560 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1561 		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1562 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC) |
1563 					  AT91_PM_IOMAP(ETHC),
1564 		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SHDWC) |
1565 					  AT91_PM_IOMAP(SFRBU),
1566 	};
1567 	int ret;
1568 
1569 	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1570 		return;
1571 
1572 	if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1573 		pr_warn("AT91: Secure PM: ignoring standby mode\n");
1574 		at91_pm_secure_init();
1575 		return;
1576 	}
1577 
1578 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1579 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1580 	ret = at91_dt_ramc(false);
1581 	if (ret)
1582 		return;
1583 
1584 	at91_pm_init(NULL);
1585 
1586 	soc_pm.ws_ids = sama5d2_ws_ids;
1587 	soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1588 	soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1589 
1590 	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1591 	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1592 	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1593 	soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1594 
1595 	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1596 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1597 						 BIT(AT91_PM_ULP0_FAST) |
1598 						 BIT(AT91_PM_ULP1);
1599 	/*
1600 	 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1601 	 * source.
1602 	 */
1603 	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1604 						     BIT(AT91_PM_ULP0_FAST);
1605 }
1606 
1607 void __init sama7_pm_init(void)
1608 {
1609 	static const int modes[] __initconst = {
1610 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1611 	};
1612 	static const u32 iomaps[] __initconst = {
1613 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(SFRBU),
1614 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SFRBU) |
1615 					  AT91_PM_IOMAP(SHDWC) |
1616 					  AT91_PM_IOMAP(ETHC),
1617 		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SFRBU) |
1618 					  AT91_PM_IOMAP(SHDWC),
1619 	};
1620 	int ret;
1621 
1622 	if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1623 		return;
1624 
1625 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1626 
1627 	ret = at91_dt_ramc(true);
1628 	if (ret)
1629 		return;
1630 
1631 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1632 	at91_pm_init(NULL);
1633 
1634 	soc_pm.ws_ids = sama7g5_ws_ids;
1635 	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1636 
1637 	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1638 	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1639 	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1640 	soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1641 
1642 	/* Quirk applies to ULP1 for both Ethernet interfaces. */
1643 	soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1644 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1645 }
1646 
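/*
 * Parse the "atmel.pm_modes=<standby>,<suspend>" command line parameter and
 * override the default standby and suspend modes accordingly.
 */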
1647 static int __init at91_pm_modes_select(char *str)
1648 {
1649 	char *s;
1650 	substring_t args[MAX_OPT_ARGS];
1651 	int standby, suspend;
1652 
1653 	if (!str)
1654 		return 0;
1655 
1656 	s = strsep(&str, ",");
1657 	standby = match_token(s, pm_modes, args);
1658 	if (standby < 0)
1659 		return 0;
1660 
1661 	suspend = match_token(str, pm_modes, args);
1662 	if (suspend < 0)
1663 		return 0;
1664 
1665 	soc_pm.data.standby_mode = standby;
1666 	soc_pm.data.suspend_mode = suspend;
1667 
1668 	return 0;
1669 }
1670 early_param("atmel.pm_modes", at91_pm_modes_select);
1671