xref: /linux/drivers/pmdomain/mediatek/mtk-pm-domains.c (revision 68a052239fc4b351e961f698b824f7654a346091)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2020 Collabora Ltd.
4  */
5 #include <linux/clk.h>
6 #include <linux/clk-provider.h>
7 #include <linux/init.h>
8 #include <linux/io.h>
9 #include <linux/iopoll.h>
10 #include <linux/mfd/syscon.h>
11 #include <linux/of.h>
12 #include <linux/of_clk.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_domain.h>
15 #include <linux/regmap.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/soc/mediatek/infracfg.h>
18 
19 #include "mt6735-pm-domains.h"
20 #include "mt6795-pm-domains.h"
21 #include "mt6893-pm-domains.h"
22 #include "mt8167-pm-domains.h"
23 #include "mt8173-pm-domains.h"
24 #include "mt8183-pm-domains.h"
25 #include "mt8186-pm-domains.h"
26 #include "mt8188-pm-domains.h"
27 #include "mt8192-pm-domains.h"
28 #include "mt8195-pm-domains.h"
29 #include "mt8365-pm-domains.h"
30 
31 #define MTK_POLL_DELAY_US		10
32 #define MTK_POLL_TIMEOUT		USEC_PER_SEC
33 
34 #define PWR_RST_B_BIT			BIT(0)
35 #define PWR_ISO_BIT			BIT(1)
36 #define PWR_ON_BIT			BIT(2)
37 #define PWR_ON_2ND_BIT			BIT(3)
38 #define PWR_CLK_DIS_BIT			BIT(4)
39 #define PWR_SRAM_CLKISO_BIT		BIT(5)
40 #define PWR_SRAM_ISOINT_B_BIT		BIT(6)
41 
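/*
 * Power state retention control bits. RTFF is assumed here to refer to the
 * retention flip-flops used to save and restore a domain's hardware state
 * across a power collapse (see the save/restore handling in the power
 * sequences below).
 */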
42 #define PWR_RTFF_SAVE			BIT(24)
43 #define PWR_RTFF_NRESTORE		BIT(25)
44 #define PWR_RTFF_CLK_DIS		BIT(26)
45 #define PWR_RTFF_SAVE_FLAG		BIT(27)
46 #define PWR_RTFF_UFS_CLK_DIS		BIT(28)
47 
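/*
 * struct scpsys_domain - runtime state of one SCPSYS power domain
 * @genpd:           generic PM domain
 * @data:            SoC-specific description of this domain
 * @scpsys:          backpointer to the owning power controller
 * @num_clks:        number of domain clocks
 * @clks:            bulk handles for the domain clocks
 * @num_subsys_clks: number of subsystem clocks
 * @subsys_clks:     bulk handles for the subsystem clocks
 * @supply:          optional regulator powering the domain
 */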
48 struct scpsys_domain {
49 	struct generic_pm_domain genpd;
50 	const struct scpsys_domain_data *data;
51 	struct scpsys *scpsys;
52 	int num_clks;
53 	struct clk_bulk_data *clks;
54 	int num_subsys_clks;
55 	struct clk_bulk_data *subsys_clks;
56 	struct regulator *supply;
57 };
58 
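/*
 * struct scpsys - power controller driver state
 * @dev:            controller device
 * @base:           regmap of the parent syscon providing the power control registers
 * @soc_data:       SoC-specific controller description
 * @bus_prot_index: maps a bus protection block type to its slot in @bus_prot
 * @bus_prot:       regmaps of the bus protection blocks (e.g. infracfg, SMI, infracfg-nao)
 * @pd_data:        data handed to the genpd onecell provider
 * @domains:        generic PM domains, indexed by domain id
 */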
59 struct scpsys {
60 	struct device *dev;
61 	struct regmap *base;
62 	const struct scpsys_soc_data *soc_data;
63 	u8 bus_prot_index[BUS_PROT_BLOCK_COUNT];
64 	struct regmap **bus_prot;
65 	struct genpd_onecell_data pd_data;
66 	struct generic_pm_domain *domains[];
67 };
68 
69 #define to_scpsys_domain(gpd) container_of(gpd, struct scpsys_domain, genpd)
70 
71 static bool scpsys_domain_is_on(struct scpsys_domain *pd)
72 {
73 	struct scpsys *scpsys = pd->scpsys;
74 	u32 status, status2;
75 
76 	regmap_read(scpsys->base, pd->data->pwr_sta_offs, &status);
77 	status &= pd->data->sta_mask;
78 
79 	regmap_read(scpsys->base, pd->data->pwr_sta2nd_offs, &status2);
80 	status2 &= pd->data->sta_mask;
81 
82 	/* A domain is on when both status bits are set. */
83 	return status && status2;
84 }
85 
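/*
 * Power up the SRAM banks of a power domain: program the SRAM_PDN bits
 * (inverted on some domains), wait until the hardware acknowledges through
 * the SRAM_PDN_ACK bits, then lift the SRAM isolation where required.
 */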
86 static int scpsys_sram_enable(struct scpsys_domain *pd)
87 {
88 	u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
89 	struct scpsys *scpsys = pd->scpsys;
90 	unsigned int tmp;
91 	int ret;
92 
93 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
94 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
95 		expected_ack = pdn_ack;
96 	} else {
97 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
98 		expected_ack = 0;
99 	}
100 
101 	/* Wait until the SRAM_PDN_ACK bits all reach the expected state (all 1 or all 0) */
102 	ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
103 				       (tmp & pdn_ack) == expected_ack,
104 				       MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
105 	if (ret < 0)
106 		return ret;
107 
108 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
109 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
110 		udelay(1);
111 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
112 	}
113 
114 	return 0;
115 }
116 
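/* Reverse of scpsys_sram_enable(): isolate and power down the domain SRAM. */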
117 static int scpsys_sram_disable(struct scpsys_domain *pd)
118 {
119 	u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
120 	struct scpsys *scpsys = pd->scpsys;
121 	unsigned int tmp;
122 
123 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
124 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
125 		udelay(1);
126 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
127 	}
128 
129 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
130 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
131 		expected_ack = 0;
132 	} else {
133 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
134 		expected_ack = pdn_ack;
135 	}
136 
137 	/* Wait until the SRAM_PDN_ACK bits all reach the expected state (all 1 or all 0) */
138 	return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
139 					(tmp & pdn_ack) == expected_ack,
140 					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
141 }
142 
143 static struct regmap *scpsys_bus_protect_get_regmap(struct scpsys_domain *pd,
144 						    const struct scpsys_bus_prot_data *bpd)
145 {
146 	struct scpsys *scpsys = pd->scpsys;
147 	unsigned short block_idx = scpsys->bus_prot_index[bpd->bus_prot_block];
148 
149 	return scpsys->bus_prot[block_idx];
150 }
151 
152 static struct regmap *scpsys_bus_protect_get_sta_regmap(struct scpsys_domain *pd,
153 							const struct scpsys_bus_prot_data *bpd)
154 {
155 	struct scpsys *scpsys = pd->scpsys;
156 	unsigned short block_idx = scpsys->bus_prot_index[bpd->bus_prot_sta_block];
157 
158 	return scpsys->bus_prot[block_idx];
159 }
160 
161 static int scpsys_bus_protect_clear(struct scpsys_domain *pd,
162 				    const struct scpsys_bus_prot_data *bpd)
163 {
164 	struct regmap *sta_regmap = scpsys_bus_protect_get_sta_regmap(pd, bpd);
165 	struct regmap *regmap = scpsys_bus_protect_get_regmap(pd, bpd);
166 	u32 sta_mask = bpd->bus_prot_sta_mask;
167 	u32 expected_ack;
168 	u32 val;
169 
170 	expected_ack = (bpd->bus_prot_sta_block == BUS_PROT_BLOCK_INFRA_NAO ? sta_mask : 0);
171 
172 	if (bpd->flags & BUS_PROT_REG_UPDATE)
173 		regmap_clear_bits(regmap, bpd->bus_prot_clr, bpd->bus_prot_set_clr_mask);
174 	else
175 		regmap_write(regmap, bpd->bus_prot_clr, bpd->bus_prot_set_clr_mask);
176 
177 	if (bpd->flags & BUS_PROT_IGNORE_CLR_ACK)
178 		return 0;
179 
180 	return regmap_read_poll_timeout(sta_regmap, bpd->bus_prot_sta,
181 					val, (val & sta_mask) == expected_ack,
182 					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
183 }
184 
185 static int scpsys_bus_protect_set(struct scpsys_domain *pd,
186 				  const struct scpsys_bus_prot_data *bpd)
187 {
188 	struct regmap *sta_regmap = scpsys_bus_protect_get_sta_regmap(pd, bpd);
189 	struct regmap *regmap = scpsys_bus_protect_get_regmap(pd, bpd);
190 	u32 sta_mask = bpd->bus_prot_sta_mask;
191 	u32 val;
192 
193 	if (bpd->flags & BUS_PROT_REG_UPDATE)
194 		regmap_set_bits(regmap, bpd->bus_prot_set, bpd->bus_prot_set_clr_mask);
195 	else
196 		regmap_write(regmap, bpd->bus_prot_set, bpd->bus_prot_set_clr_mask);
197 
198 	return regmap_read_poll_timeout(sta_regmap, bpd->bus_prot_sta,
199 					val, (val & sta_mask) == sta_mask,
200 					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
201 }
202 
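/*
 * Engage bus protection for the domain: apply the configured entries in
 * order, stopping at the first empty one. Entries flagged BUS_PROT_INVERTED
 * have reversed set/clear semantics.
 */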
203 static int scpsys_bus_protect_enable(struct scpsys_domain *pd)
204 {
205 	for (int i = 0; i < SPM_MAX_BUS_PROT_DATA; i++) {
206 		const struct scpsys_bus_prot_data *bpd = &pd->data->bp_cfg[i];
207 		int ret;
208 
209 		if (!bpd->bus_prot_set_clr_mask)
210 			break;
211 
212 		if (bpd->flags & BUS_PROT_INVERTED)
213 			ret = scpsys_bus_protect_clear(pd, bpd);
214 		else
215 			ret = scpsys_bus_protect_set(pd, bpd);
216 		if (ret)
217 			return ret;
218 	}
219 
220 	return 0;
221 }
222 
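/* Release bus protection, walking the configured entries in reverse order. */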
223 static int scpsys_bus_protect_disable(struct scpsys_domain *pd)
224 {
225 	for (int i = SPM_MAX_BUS_PROT_DATA - 1; i >= 0; i--) {
226 		const struct scpsys_bus_prot_data *bpd = &pd->data->bp_cfg[i];
227 		int ret;
228 
229 		if (!bpd->bus_prot_set_clr_mask)
230 			continue;
231 
232 		if (bpd->flags & BUS_PROT_INVERTED)
233 			ret = scpsys_bus_protect_set(pd, bpd);
234 		else
235 			ret = scpsys_bus_protect_clear(pd, bpd);
236 		if (ret)
237 			return ret;
238 	}
239 
240 	return 0;
241 }
242 
243 static int scpsys_regulator_enable(struct regulator *supply)
244 {
245 	return supply ? regulator_enable(supply) : 0;
246 }
247 
248 static int scpsys_regulator_disable(struct regulator *supply)
249 {
250 	return supply ? regulator_disable(supply) : 0;
251 }
252 
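/*
 * Standard power-on sequence driven through the domain control register:
 * request power (PWR_ON/PWR_ON_2ND), wait for the acknowledgment, ungate the
 * clock, drop the isolation, deassert the reset and finally, for domains
 * with RTFF, restore the previously saved hardware state (NRESTORE).
 */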
253 static int scpsys_ctl_pwrseq_on(struct scpsys_domain *pd)
254 {
255 	struct scpsys *scpsys = pd->scpsys;
256 	bool do_rtff_nrestore, tmp;
257 	int ret;
258 
259 	/* subsys power on */
260 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
261 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
262 
263 	/* wait until PWR_ACK = 1 */
264 	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
265 				 MTK_POLL_TIMEOUT);
266 	if (ret < 0)
267 		return ret;
268 
269 	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
270 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
271 
272 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
273 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
274 
275 	/* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
276 	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
277 		udelay(5);
278 
279 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
280 
281 	/*
282 	 * RTFF HW state may be modified by secure world or remote processors.
283 	 *
284 	 * With the sole exception of STOR_UFS, which always needs save/restore,
285 	 * check whether this power domain's RTFF state was actually saved before
286 	 * trying to do the NRESTORE procedure, otherwise the system will lock up.
287 	 */
288 	switch (pd->data->rtff_type) {
289 	case SCPSYS_RTFF_TYPE_GENERIC:
290 	case SCPSYS_RTFF_TYPE_PCIE_PHY:
291 	{
292 		u32 ctl_status;
293 
294 		regmap_read(scpsys->base, pd->data->ctl_offs, &ctl_status);
295 		do_rtff_nrestore = ctl_status & PWR_RTFF_SAVE_FLAG;
296 		break;
297 	}
298 	case SCPSYS_RTFF_TYPE_STOR_UFS:
299 		/* STOR_UFS always needs NRESTORE */
300 		do_rtff_nrestore = true;
301 		break;
302 	default:
303 		do_rtff_nrestore = false;
304 		break;
305 	}
306 
307 	/* Return early if RTFF NRESTORE shall not be done */
308 	/* Return early if RTFF NRESTORE is not needed */
309 		return 0;
310 
311 	switch (pd->data->rtff_type) {
312 	case SCPSYS_RTFF_TYPE_GENERIC:
313 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
314 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
315 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
316 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
317 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
318 		break;
319 	case SCPSYS_RTFF_TYPE_PCIE_PHY:
320 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
321 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
322 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
323 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
324 		break;
325 	case SCPSYS_RTFF_TYPE_STOR_UFS:
326 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
327 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
328 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
329 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
330 		break;
331 	default:
332 		break;
333 	}
334 
335 	return 0;
336 }
337 
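/*
 * Standard power-off sequence: save the hardware state through RTFF where
 * applicable, then isolate the domain, gate its clock, assert the reset and
 * drop both power-on requests.
 */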
338 static void scpsys_ctl_pwrseq_off(struct scpsys_domain *pd)
339 {
340 	struct scpsys *scpsys = pd->scpsys;
341 
342 	switch (pd->data->rtff_type) {
343 	case SCPSYS_RTFF_TYPE_GENERIC:
344 	case SCPSYS_RTFF_TYPE_PCIE_PHY:
345 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
346 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
347 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
348 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
349 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
350 		break;
351 	case SCPSYS_RTFF_TYPE_STOR_UFS:
352 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
353 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
354 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
355 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
356 		break;
357 	default:
358 		break;
359 	}
360 
361 	/* subsys power off */
362 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
363 
364 	/* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
365 	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
366 		udelay(1);
367 
368 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
369 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
370 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
371 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
372 }
373 
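/*
 * Simplified power-on sequence used by modem domains: only the reset and the
 * main power-on request are handled before waiting for the power ack.
 */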
374 static int scpsys_modem_pwrseq_on(struct scpsys_domain *pd)
375 {
376 	struct scpsys *scpsys = pd->scpsys;
377 	bool tmp;
378 	int ret;
379 
380 	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
381 		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
382 
383 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
384 
385 	/* wait until PWR_ACK = 1 */
386 	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
387 				 MTK_POLL_TIMEOUT);
388 	if (ret < 0)
389 		return ret;
390 
391 	return 0;
392 }
393 
394 static void scpsys_modem_pwrseq_off(struct scpsys_domain *pd)
395 {
396 	struct scpsys *scpsys = pd->scpsys;
397 
398 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
399 
400 	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
401 		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
402 }
403 
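/*
 * genpd .power_on callback: enable the optional supply and the domain
 * clocks, release the external buck isolation if needed, run the power-on
 * sequence, then enable the subsystem clocks, power up the SRAM and release
 * bus protection. On platforms with the strict bus protection policy the
 * subsystem clocks are only enabled once bus protection has been released.
 */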
404 static int scpsys_power_on(struct generic_pm_domain *genpd)
405 {
406 	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
407 	struct scpsys *scpsys = pd->scpsys;
408 	int ret;
409 
410 	ret = scpsys_regulator_enable(pd->supply);
411 	if (ret)
412 		return ret;
413 
414 	ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
415 	if (ret)
416 		goto err_reg;
417 
418 	if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
419 		regmap_clear_bits(scpsys->base, pd->data->ext_buck_iso_offs,
420 				  pd->data->ext_buck_iso_mask);
421 
422 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
423 		ret = scpsys_modem_pwrseq_on(pd);
424 	else
425 		ret = scpsys_ctl_pwrseq_on(pd);
426 
427 	if (ret)
428 		goto err_pwr_ack;
429 
430 	/*
431 	 * On a few MediaTek platforms (e.g. MT6779) the bus protection policy
432 	 * is stricter: bus protection must be released before any bus access,
433 	 * so the subsys clocks can only be enabled afterwards.
434 	 */
435 	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION)) {
436 		ret = clk_bulk_prepare_enable(pd->num_subsys_clks,
437 					      pd->subsys_clks);
438 		if (ret)
439 			goto err_pwr_ack;
440 	}
441 
442 	ret = scpsys_sram_enable(pd);
443 	if (ret < 0)
444 		goto err_disable_subsys_clks;
445 
446 	ret = scpsys_bus_protect_disable(pd);
447 	if (ret < 0)
448 		goto err_disable_sram;
449 
450 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION)) {
451 		ret = clk_bulk_prepare_enable(pd->num_subsys_clks,
452 					      pd->subsys_clks);
453 		if (ret)
454 			goto err_enable_bus_protect;
455 	}
456 
457 	return 0;
458 
459 err_enable_bus_protect:
460 	scpsys_bus_protect_enable(pd);
461 err_disable_sram:
462 	scpsys_sram_disable(pd);
463 err_disable_subsys_clks:
464 	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION))
465 		clk_bulk_disable_unprepare(pd->num_subsys_clks,
466 					   pd->subsys_clks);
467 err_pwr_ack:
468 	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
469 err_reg:
470 	scpsys_regulator_disable(pd->supply);
471 	return ret;
472 }
473 
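/*
 * genpd .power_off callback: mirror of scpsys_power_on(), engaging bus
 * protection and powering down the SRAM before running the power-off
 * sequence and releasing clocks and supply.
 */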
474 static int scpsys_power_off(struct generic_pm_domain *genpd)
475 {
476 	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
477 	struct scpsys *scpsys = pd->scpsys;
478 	bool tmp;
479 	int ret;
480 
481 	ret = scpsys_bus_protect_enable(pd);
482 	if (ret < 0)
483 		return ret;
484 
485 	ret = scpsys_sram_disable(pd);
486 	if (ret < 0)
487 		return ret;
488 
489 	if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
490 		regmap_set_bits(scpsys->base, pd->data->ext_buck_iso_offs,
491 				pd->data->ext_buck_iso_mask);
492 
493 	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
494 
495 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
496 		scpsys_modem_pwrseq_off(pd);
497 	else
498 		scpsys_ctl_pwrseq_off(pd);
499 
500 	/* wait until PWR_ACK = 0 */
501 	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
502 				 MTK_POLL_TIMEOUT);
503 	if (ret < 0)
504 		return ret;
505 
506 	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
507 
508 	scpsys_regulator_disable(pd->supply);
509 
510 	return 0;
511 }
512 
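/*
 * Parse one power domain node from the devicetree: look up the matching SoC
 * data using the node's "reg" value as domain id, grab the optional supply
 * and clocks, power the domain on unless it is flagged to stay off by
 * default, and register it as a generic PM domain.
 */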
513 static struct
514 generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_node *node)
515 {
516 	const struct scpsys_domain_data *domain_data;
517 	struct scpsys_domain *pd;
518 	struct property *prop;
519 	const char *clk_name;
520 	int i, ret, num_clks;
521 	struct clk *clk;
522 	int clk_ind = 0;
523 	u32 id;
524 
525 	ret = of_property_read_u32(node, "reg", &id);
526 	if (ret) {
527 		dev_err(scpsys->dev, "%pOF: failed to retrieve domain id from reg: %d\n",
528 			node, ret);
529 		return ERR_PTR(-EINVAL);
530 	}
531 
532 	if (id >= scpsys->soc_data->num_domains) {
533 		dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
534 		return ERR_PTR(-EINVAL);
535 	}
536 
537 	domain_data = &scpsys->soc_data->domains_data[id];
538 	if (domain_data->sta_mask == 0) {
539 		dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
540 		return ERR_PTR(-EINVAL);
541 	}
542 
543 	pd = devm_kzalloc(scpsys->dev, sizeof(*pd), GFP_KERNEL);
544 	if (!pd)
545 		return ERR_PTR(-ENOMEM);
546 
547 	pd->data = domain_data;
548 	pd->scpsys = scpsys;
549 
550 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_DOMAIN_SUPPLY)) {
551 		pd->supply = devm_of_regulator_get_optional(scpsys->dev, node, "domain");
552 		if (IS_ERR(pd->supply))
553 			return dev_err_cast_probe(scpsys->dev, pd->supply,
554 				      "%pOF: failed to get power supply.\n",
555 				      node);
556 	}
557 
558 	num_clks = of_clk_get_parent_count(node);
559 	if (num_clks > 0) {
560 		/* Calculate number of subsys_clks */
561 		of_property_for_each_string(node, "clock-names", prop, clk_name) {
562 			char *subsys;
563 
564 			subsys = strchr(clk_name, '-');
565 			if (subsys)
566 				pd->num_subsys_clks++;
567 			else
568 				pd->num_clks++;
569 		}
570 
571 		pd->clks = devm_kcalloc(scpsys->dev, pd->num_clks, sizeof(*pd->clks), GFP_KERNEL);
572 		if (!pd->clks)
573 			return ERR_PTR(-ENOMEM);
574 
575 		pd->subsys_clks = devm_kcalloc(scpsys->dev, pd->num_subsys_clks,
576 					       sizeof(*pd->subsys_clks), GFP_KERNEL);
577 		if (!pd->subsys_clks)
578 			return ERR_PTR(-ENOMEM);
579 
580 	}
581 
582 	for (i = 0; i < pd->num_clks; i++) {
583 		clk = of_clk_get(node, i);
584 		if (IS_ERR(clk)) {
585 			ret = PTR_ERR(clk);
586 			dev_err_probe(scpsys->dev, ret,
587 				      "%pOF: failed to get clk at index %d\n", node, i);
588 			goto err_put_clocks;
589 		}
590 
591 		pd->clks[clk_ind++].clk = clk;
592 	}
593 
594 	for (i = 0; i < pd->num_subsys_clks; i++) {
595 		clk = of_clk_get(node, i + clk_ind);
596 		if (IS_ERR(clk)) {
597 			ret = PTR_ERR(clk);
598 			dev_err_probe(scpsys->dev, ret,
599 				      "%pOF: failed to get clk at index %d\n", node,
600 				      i + clk_ind);
601 			goto err_put_subsys_clocks;
602 		}
603 
604 		pd->subsys_clks[i].clk = clk;
605 	}
606 
607 	/*
608 	 * Initially turn on all domains to make the domains usable
609 	 * with !CONFIG_PM and to get the hardware in sync with the
610 	 * software.  The unused domains will be switched off at
611 	 * late_init time.
612 	 */
613 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) {
614 		if (scpsys_domain_is_on(pd))
615 			dev_warn(scpsys->dev,
616 				 "%pOF: default-off power domain was already powered on\n", node);
617 	} else {
618 		ret = scpsys_power_on(&pd->genpd);
619 		if (ret < 0) {
620 			dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
621 			goto err_put_subsys_clocks;
622 		}
623 
624 		if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON))
625 			pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
626 	}
627 
628 	if (scpsys->domains[id]) {
629 		ret = -EINVAL;
630 		dev_err(scpsys->dev,
631 			"power domain with id %d already exists, check your device-tree\n", id);
632 		goto err_put_subsys_clocks;
633 	}
634 
635 	if (!pd->data->name)
636 		pd->genpd.name = node->name;
637 	else
638 		pd->genpd.name = pd->data->name;
639 
640 	pd->genpd.power_off = scpsys_power_off;
641 	pd->genpd.power_on = scpsys_power_on;
642 
643 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP))
644 		pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
645 
646 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
647 		pm_genpd_init(&pd->genpd, NULL, true);
648 	else
649 		pm_genpd_init(&pd->genpd, NULL, false);
650 
651 	scpsys->domains[id] = &pd->genpd;
652 
653 	return scpsys->pd_data.domains[id];
654 
655 err_put_subsys_clocks:
656 	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
657 err_put_clocks:
658 	clk_bulk_put(pd->num_clks, pd->clks);
659 	return ERR_PTR(ret);
660 }
661 
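/*
 * Register all child power domains of @parent and link them below their
 * parent domain, recursing into each child to handle deeper hierarchies.
 */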
662 static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *parent)
663 {
664 	struct generic_pm_domain *child_pd, *parent_pd;
665 	struct device_node *child;
666 	int ret;
667 
668 	for_each_child_of_node(parent, child) {
669 		u32 id;
670 
671 		ret = of_property_read_u32(parent, "reg", &id);
672 		if (ret) {
673 			dev_err(scpsys->dev, "%pOF: failed to get parent domain id\n", child);
674 			goto err_put_node;
675 		}
676 
677 		if (!scpsys->pd_data.domains[id]) {
678 			ret = -EINVAL;
679 			dev_err(scpsys->dev, "power domain with id %d does not exist\n", id);
680 			goto err_put_node;
681 		}
682 
683 		parent_pd = scpsys->pd_data.domains[id];
684 
685 		child_pd = scpsys_add_one_domain(scpsys, child);
686 		if (IS_ERR(child_pd)) {
687 			ret = PTR_ERR(child_pd);
688 			dev_err_probe(scpsys->dev, ret, "%pOF: failed to add child domain\n",
689 				      child);
690 			goto err_put_node;
691 		}
692 
693 		/* recursive call to add all subdomains */
694 		ret = scpsys_add_subdomain(scpsys, child);
695 		if (ret)
696 			goto err_put_node;
697 
698 		ret = pm_genpd_add_subdomain(parent_pd, child_pd);
699 		if (ret) {
700 			dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
701 				child_pd->name, parent_pd->name);
702 			goto err_put_node;
703 		} else {
704 			dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
705 				child_pd->name);
706 		}
707 	}
708 
709 	return 0;
710 
711 err_put_node:
712 	of_node_put(child);
713 	return ret;
714 }
715 
716 static void scpsys_remove_one_domain(struct scpsys_domain *pd)
717 {
718 	int ret;
719 
720 	/*
721 	 * We're in the error cleanup already, so we only complain,
722 	 * but won't emit another error on top of the original one.
723 	 */
724 	ret = pm_genpd_remove(&pd->genpd);
725 	if (ret < 0)
726 		dev_err(pd->scpsys->dev,
727 			"failed to remove domain '%s' : %d - state may be inconsistent\n",
728 			pd->genpd.name, ret);
729 	if (scpsys_domain_is_on(pd))
730 		scpsys_power_off(&pd->genpd);
731 
732 	clk_bulk_put(pd->num_clks, pd->clks);
733 	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
734 }
735 
736 static void scpsys_domain_cleanup(struct scpsys *scpsys)
737 {
738 	struct generic_pm_domain *genpd;
739 	struct scpsys_domain *pd;
740 	int i;
741 
742 	for (i = scpsys->pd_data.num_domains - 1; i >= 0; i--) {
743 		genpd = scpsys->pd_data.domains[i];
744 		if (genpd) {
745 			pd = to_scpsys_domain(genpd);
746 			scpsys_remove_one_domain(pd);
747 		}
748 	}
749 }
750 
751 static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *scpsys)
752 {
753 	const u8 bp_blocks[3] = {
754 		BUS_PROT_BLOCK_INFRA, BUS_PROT_BLOCK_SMI, BUS_PROT_BLOCK_INFRA_NAO
755 	};
756 	struct device_node *np = dev->of_node;
757 	struct device_node *node, *smi_np;
758 	int num_regmaps = 0, i, j;
759 	struct regmap *regmap[3];
760 
761 	/*
762 	 * Legacy code retrieves a maximum of three bus protection handles,
763 	 * some of which may be optional, so the array of bp blocks that is
764 	 * normally passed in as platform data must be built dynamically
765 	 * in this case.
766 	 *
767 	 * Here, try to retrieve all of the regmaps that the legacy code
768 	 * supported and count how many of them are present; this then makes
769 	 * it possible to allocate the array of bus_prot regmaps and convert
770 	 * everything to the new style handling.
771 	 */
772 	node = of_find_node_with_property(np, "mediatek,infracfg");
773 	if (node) {
774 		regmap[0] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg");
775 		of_node_put(node);
776 		num_regmaps++;
777 		if (IS_ERR(regmap[0]))
778 			return dev_err_probe(dev, PTR_ERR(regmap[0]),
779 					     "%pOF: failed to get infracfg regmap\n",
780 					     node);
781 	} else {
782 		regmap[0] = NULL;
783 	}
784 
785 	node = of_find_node_with_property(np, "mediatek,smi");
786 	if (node) {
787 		smi_np = of_parse_phandle(node, "mediatek,smi", 0);
788 		of_node_put(node);
789 		if (!smi_np)
790 			return -ENODEV;
791 
792 		regmap[1] = device_node_to_regmap(smi_np);
793 		num_regmaps++;
794 		of_node_put(smi_np);
795 		if (IS_ERR(regmap[1]))
796 			return dev_err_probe(dev, PTR_ERR(regmap[1]),
797 					     "%pOF: failed to get SMI regmap\n",
798 					     node);
799 	} else {
800 		regmap[1] = NULL;
801 	}
802 
803 	node = of_find_node_with_property(np, "mediatek,infracfg-nao");
804 	if (node) {
805 		regmap[2] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
806 		num_regmaps++;
807 		of_node_put(node);
808 		if (IS_ERR(regmap[2]))
809 			return dev_err_probe(dev, PTR_ERR(regmap[2]),
810 					     "%pOF: failed to get infracfg regmap\n",
811 					     node);
812 	} else {
813 		regmap[2] = NULL;
814 	}
815 
816 	scpsys->bus_prot = devm_kmalloc_array(dev, num_regmaps,
817 					      sizeof(*scpsys->bus_prot), GFP_KERNEL);
818 	if (!scpsys->bus_prot)
819 		return -ENOMEM;
820 
821 	for (i = 0, j = 0; i < ARRAY_SIZE(bp_blocks); i++) {
822 		enum scpsys_bus_prot_block bp_type;
823 
824 		if (!regmap[i])
825 			continue;
826 
827 		bp_type = bp_blocks[i];
828 		scpsys->bus_prot_index[bp_type] = j;
829 		scpsys->bus_prot[j] = regmap[i];
830 
831 		j++;
832 	}
833 
834 	return 0;
835 }
836 
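/*
 * Look up the bus protection regmaps through the "access-controllers"
 * phandles and index them by block type, so that the power sequences can
 * find the right regmap without iterating.
 */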
837 static int scpsys_get_bus_protection(struct device *dev, struct scpsys *scpsys)
838 {
839 	const struct scpsys_soc_data *soc = scpsys->soc_data;
840 	struct device_node *np = dev->of_node;
841 	int i, num_handles;
842 
843 	num_handles = of_count_phandle_with_args(np, "access-controllers", NULL);
844 	if (num_handles < 0 || num_handles != soc->num_bus_prot_blocks)
845 		return dev_err_probe(dev, -EINVAL,
846 				     "Cannot get access controllers: expected %u, got %d\n",
847 				     soc->num_bus_prot_blocks, num_handles);
848 
849 	scpsys->bus_prot = devm_kmalloc_array(dev, soc->num_bus_prot_blocks,
850 					      sizeof(*scpsys->bus_prot), GFP_KERNEL);
851 	if (!scpsys->bus_prot)
852 		return -ENOMEM;
853 
854 	for (i = 0; i < soc->num_bus_prot_blocks; i++) {
855 		enum scpsys_bus_prot_block bp_type;
856 		struct device_node *node;
857 
858 		node = of_parse_phandle(np, "access-controllers", i);
859 		if (!node)
860 			return -EINVAL;
861 
862 		/*
863 		 * Index the bus protection regmaps so that we don't have to
864 		 * find the right one by type with a loop at every execution
865 		 * of power sequence(s).
866 		 */
867 		bp_type = soc->bus_prot_blocks[i];
868 		scpsys->bus_prot_index[bp_type] = i;
869 
870 		scpsys->bus_prot[i] = device_node_to_regmap(node);
871 		of_node_put(node);
872 		if (IS_ERR_OR_NULL(scpsys->bus_prot[i]))
873 			return dev_err_probe(dev, scpsys->bus_prot[i] ?
874 					     PTR_ERR(scpsys->bus_prot[i]) : -ENXIO,
875 					     "Cannot get regmap for access controller %d\n", i);
876 	}
877 
878 	return 0;
879 }
880 
881 static const struct of_device_id scpsys_of_match[] = {
882 	{
883 		.compatible = "mediatek,mt6735-power-controller",
884 		.data = &mt6735_scpsys_data,
885 	},
886 	{
887 		.compatible = "mediatek,mt6795-power-controller",
888 		.data = &mt6795_scpsys_data,
889 	},
890 	{
891 		.compatible = "mediatek,mt6893-power-controller",
892 		.data = &mt6893_scpsys_data,
893 	},
894 	{
895 		.compatible = "mediatek,mt8167-power-controller",
896 		.data = &mt8167_scpsys_data,
897 	},
898 	{
899 		.compatible = "mediatek,mt8173-power-controller",
900 		.data = &mt8173_scpsys_data,
901 	},
902 	{
903 		.compatible = "mediatek,mt8183-power-controller",
904 		.data = &mt8183_scpsys_data,
905 	},
906 	{
907 		.compatible = "mediatek,mt8186-power-controller",
908 		.data = &mt8186_scpsys_data,
909 	},
910 	{
911 		.compatible = "mediatek,mt8188-power-controller",
912 		.data = &mt8188_scpsys_data,
913 	},
914 	{
915 		.compatible = "mediatek,mt8192-power-controller",
916 		.data = &mt8192_scpsys_data,
917 	},
918 	{
919 		.compatible = "mediatek,mt8195-power-controller",
920 		.data = &mt8195_scpsys_data,
921 	},
922 	{
923 		.compatible = "mediatek,mt8365-power-controller",
924 		.data = &mt8365_scpsys_data,
925 	},
926 	{ }
927 };
928 
929 static int scpsys_probe(struct platform_device *pdev)
930 {
931 	struct device *dev = &pdev->dev;
932 	struct device_node *np = dev->of_node;
933 	const struct scpsys_soc_data *soc;
934 	struct device_node *node;
935 	struct device *parent;
936 	struct scpsys *scpsys;
937 	int ret;
938 
939 	soc = of_device_get_match_data(&pdev->dev);
940 	if (!soc) {
941 		dev_err(&pdev->dev, "no power controller data\n");
942 		return -EINVAL;
943 	}
944 
945 	scpsys = devm_kzalloc(dev, struct_size(scpsys, domains, soc->num_domains), GFP_KERNEL);
946 	if (!scpsys)
947 		return -ENOMEM;
948 
949 	scpsys->dev = dev;
950 	scpsys->soc_data = soc;
951 
952 	scpsys->pd_data.domains = scpsys->domains;
953 	scpsys->pd_data.num_domains = soc->num_domains;
954 
955 	parent = dev->parent;
956 	if (!parent) {
957 		dev_err(dev, "no parent for syscon devices\n");
958 		return -ENODEV;
959 	}
960 
961 	scpsys->base = syscon_node_to_regmap(parent->of_node);
962 	if (IS_ERR(scpsys->base)) {
963 		dev_err(dev, "no regmap available\n");
964 		return PTR_ERR(scpsys->base);
965 	}
966 
967 	if (of_find_property(np, "access-controllers", NULL))
968 		ret = scpsys_get_bus_protection(dev, scpsys);
969 	else
970 		ret = scpsys_get_bus_protection_legacy(dev, scpsys);
971 
972 	if (ret)
973 		return ret;
974 
975 	ret = -ENODEV;
976 	for_each_available_child_of_node(np, node) {
977 		struct generic_pm_domain *domain;
978 
979 		domain = scpsys_add_one_domain(scpsys, node);
980 		if (IS_ERR(domain)) {
981 			ret = PTR_ERR(domain);
982 			of_node_put(node);
983 			goto err_cleanup_domains;
984 		}
985 
986 		ret = scpsys_add_subdomain(scpsys, node);
987 		if (ret) {
988 			of_node_put(node);
989 			goto err_cleanup_domains;
990 		}
991 	}
992 
993 	if (ret) {
994 		dev_dbg(dev, "no power domains present\n");
995 		return ret;
996 	}
997 
998 	ret = of_genpd_add_provider_onecell(np, &scpsys->pd_data);
999 	if (ret) {
1000 		dev_err(dev, "failed to add provider: %d\n", ret);
1001 		goto err_cleanup_domains;
1002 	}
1003 
1004 	return 0;
1005 
1006 err_cleanup_domains:
1007 	scpsys_domain_cleanup(scpsys);
1008 	return ret;
1009 }
1010 
1011 static struct platform_driver scpsys_pm_domain_driver = {
1012 	.probe = scpsys_probe,
1013 	.driver = {
1014 		.name = "mtk-power-controller",
1015 		.suppress_bind_attrs = true,
1016 		.of_match_table = scpsys_of_match,
1017 	},
1018 };
1019 builtin_platform_driver(scpsys_pm_domain_driver);
1020