xref: /linux/drivers/pmdomain/mediatek/mtk-pm-domains.c (revision 6734a5e86b57fb6f500aea08626bb9b42b6d5d05)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2020 Collabora Ltd.
4  */
5 #include <linux/arm-smccc.h>
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/init.h>
9 #include <linux/io.h>
10 #include <linux/iopoll.h>
11 #include <linux/mfd/syscon.h>
12 #include <linux/of.h>
13 #include <linux/of_clk.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_domain.h>
16 #include <linux/regmap.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/soc/mediatek/infracfg.h>
19 #include <linux/soc/mediatek/mtk_sip_svc.h>
20 
21 #include "mt6735-pm-domains.h"
22 #include "mt6795-pm-domains.h"
23 #include "mt6893-pm-domains.h"
24 #include "mt8167-pm-domains.h"
25 #include "mt8173-pm-domains.h"
26 #include "mt8183-pm-domains.h"
27 #include "mt8186-pm-domains.h"
28 #include "mt8188-pm-domains.h"
29 #include "mt8189-pm-domains.h"
30 #include "mt8192-pm-domains.h"
31 #include "mt8195-pm-domains.h"
32 #include "mt8196-pm-domains.h"
33 #include "mt8365-pm-domains.h"
34 
/* Generic power-status polling interval and timeout */
#define MTK_POLL_DELAY_US		10
#define MTK_POLL_TIMEOUT		USEC_PER_SEC

/* Polling parameters used while waiting on HW Voter DONE/STA registers */
#define MTK_HWV_POLL_DELAY_US		5
#define MTK_HWV_POLL_TIMEOUT		(300 * USEC_PER_MSEC)

/* Polling parameters used while waiting for the HW Voter to take a vote */
#define MTK_HWV_PREPARE_DELAY_US	1
#define MTK_HWV_PREPARE_TIMEOUT		(3 * USEC_PER_MSEC)

/* Bits in the per-domain power control register (pd->data->ctl_offs) */
#define PWR_RST_B_BIT			BIT(0)
#define PWR_ISO_BIT			BIT(1)
#define PWR_ON_BIT			BIT(2)
#define PWR_ON_2ND_BIT			BIT(3)
#define PWR_CLK_DIS_BIT			BIT(4)
#define PWR_SRAM_CLKISO_BIT		BIT(5)
#define PWR_SRAM_ISOINT_B_BIT		BIT(6)

/* RTFF (retention flip-flop) save/restore control bits in the same register */
#define PWR_RTFF_SAVE			BIT(24)
#define PWR_RTFF_NRESTORE		BIT(25)
#define PWR_RTFF_CLK_DIS		BIT(26)
#define PWR_RTFF_SAVE_FLAG		BIT(27)
#define PWR_RTFF_UFS_CLK_DIS		BIT(28)

/* SiP SMC command used to request infra power control from secure firmware */
#define MTK_SIP_KERNEL_HWCCF_CONTROL	MTK_SIP_SMC_CMD(0x540)
59 
/*
 * struct scpsys_domain - runtime state of one SCPSYS power domain
 * @genpd:           generic PM domain exposed to the genpd framework
 * @data:            static register description (direct-control domains,
 *                   NULL for HW-Voter domains)
 * @hwv_data:        static register description for HW-Voter domains
 *                   (NULL for direct-control domains)
 * @scpsys:          back-pointer to the owning controller instance
 * @num_clks:        number of entries in @clks
 * @clks:            domain clocks (clock-names entries without a '-')
 * @num_subsys_clks: number of entries in @subsys_clks
 * @subsys_clks:     subsystem clocks (clock-names entries containing a '-')
 * @supply:          optional "domain" regulator, NULL when not present
 */
struct scpsys_domain {
	struct generic_pm_domain genpd;
	const struct scpsys_domain_data *data;
	const struct scpsys_hwv_domain_data *hwv_data;
	struct scpsys *scpsys;
	int num_clks;
	struct clk_bulk_data *clks;
	int num_subsys_clks;
	struct clk_bulk_data *subsys_clks;
	struct regulator *supply;
};
71 
/*
 * struct scpsys - one SCPSYS power controller
 * @dev:            backing device
 * @base:           regmap over the SCPSYS register space
 * @soc_data:       per-SoC domain tables and controller type
 * @bus_prot_index: maps a BUS_PROT_BLOCK_* id to an index into @bus_prot
 * @bus_prot:       regmaps of the bus protection blocks
 * @pd_data:        genpd onecell provider data handed to the PM core
 * @domains:        flexible array of registered domains, indexed by DT id
 */
struct scpsys {
	struct device *dev;
	struct regmap *base;
	const struct scpsys_soc_data *soc_data;
	u8 bus_prot_index[BUS_PROT_BLOCK_COUNT];
	struct regmap **bus_prot;
	struct genpd_onecell_data pd_data;
	struct generic_pm_domain *domains[];
};

/* Recover the scpsys_domain wrapper from an embedded genpd */
#define to_scpsys_domain(gpd) container_of(gpd, struct scpsys_domain, genpd)
83 
84 static bool scpsys_domain_is_on(struct scpsys_domain *pd)
85 {
86 	struct scpsys *scpsys = pd->scpsys;
87 	u32 mask = pd->data->sta_mask;
88 	u32 status, status2, mask2;
89 
90 	mask2 = pd->data->sta2nd_mask ? pd->data->sta2nd_mask : mask;
91 
92 	regmap_read(scpsys->base, pd->data->pwr_sta_offs, &status);
93 	status &= mask;
94 
95 	regmap_read(scpsys->base, pd->data->pwr_sta2nd_offs, &status2);
96 	status2 &= mask2;
97 
98 	/* A domain is on when both status bits are set. */
99 	return status && status2;
100 }
101 
102 static bool scpsys_hwv_domain_is_disable_done(struct scpsys_domain *pd)
103 {
104 	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
105 	u32 regs[2] = { hwv->done, hwv->clr_sta };
106 	u32 val[2];
107 	u32 mask = BIT(hwv->setclr_bit);
108 
109 	regmap_multi_reg_read(pd->scpsys->base, regs, val, 2);
110 
111 	/* Disable is done when the bit is set in DONE, cleared in CLR_STA */
112 	return (val[0] & mask) && !(val[1] & mask);
113 }
114 
115 static bool scpsys_hwv_domain_is_enable_done(struct scpsys_domain *pd)
116 {
117 	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
118 	u32 regs[3] = { hwv->done, hwv->en, hwv->set_sta };
119 	u32 val[3];
120 	u32 mask = BIT(hwv->setclr_bit);
121 
122 	regmap_multi_reg_read(pd->scpsys->base, regs, val, 3);
123 
124 	/* Enable is done when the bit is set in DONE and EN, cleared in SET_STA */
125 	return (val[0] & mask) && (val[1] & mask) && !(val[2] & mask);
126 }
127 
128 static int scpsys_sec_infra_power_on(bool on)
129 {
130 	struct arm_smccc_res res;
131 	unsigned long cmd = on ? 1 : 0;
132 
133 	arm_smccc_smc(MTK_SIP_KERNEL_HWCCF_CONTROL, cmd, 0, 0, 0, 0, 0, 0, &res);
134 	return res.a0;
135 }
136 
/*
 * scpsys_sram_enable - Power up a domain's SRAM banks
 * @pd: SCPSYS power domain
 *
 * Drives the SRAM_PDN control bits (polarity depends on
 * MTK_SCPD_SRAM_PDN_INVERTED), waits for the matching acknowledge bits,
 * then lifts SRAM isolation for domains with MTK_SCPD_SRAM_ISO.
 *
 * Return: 0 on success, negative errno on acknowledge timeout.
 */
static int scpsys_sram_enable(struct scpsys_domain *pd)
{
	u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
	struct scpsys *scpsys = pd->scpsys;
	unsigned int tmp;
	int ret;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = pdn_ack;
	} else {
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = 0;
	}

	/* Either wait until SRAM_PDN_ACK all 1 or 0 */
	ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
				       (tmp & pdn_ack) == expected_ack,
				       MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
		/* Release isolation before ungating the SRAM clock */
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
		udelay(1);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
	}

	return 0;
}
167 
/*
 * scpsys_sram_disable - Power down a domain's SRAM banks
 * @pd: SCPSYS power domain
 *
 * Mirror of scpsys_sram_enable(): re-asserts SRAM isolation first (for
 * MTK_SCPD_SRAM_ISO domains), then drives the SRAM_PDN bits in the
 * direction selected by MTK_SCPD_SRAM_PDN_INVERTED and waits for the
 * acknowledge bits.
 *
 * Return: 0 on success, negative errno on acknowledge timeout.
 */
static int scpsys_sram_disable(struct scpsys_domain *pd)
{
	u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
	struct scpsys *scpsys = pd->scpsys;
	unsigned int tmp;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
		/* Gate the SRAM clock before re-asserting isolation */
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
		udelay(1);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
	}

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = 0;
	} else {
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = pdn_ack;
	}

	/* Either wait until SRAM_PDN_ACK all 1 or 0 */
	return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
					(tmp & pdn_ack) == expected_ack,
					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}
193 
194 static struct regmap *scpsys_bus_protect_get_regmap(struct scpsys_domain *pd,
195 						    const struct scpsys_bus_prot_data *bpd)
196 {
197 	struct scpsys *scpsys = pd->scpsys;
198 	unsigned short block_idx = scpsys->bus_prot_index[bpd->bus_prot_block];
199 
200 	return scpsys->bus_prot[block_idx];
201 }
202 
203 static struct regmap *scpsys_bus_protect_get_sta_regmap(struct scpsys_domain *pd,
204 							const struct scpsys_bus_prot_data *bpd)
205 {
206 	struct scpsys *scpsys = pd->scpsys;
207 	int block_idx = scpsys->bus_prot_index[bpd->bus_prot_sta_block];
208 
209 	return scpsys->bus_prot[block_idx];
210 }
211 
/*
 * scpsys_bus_protect_clear - Release one bus protection entry
 * @pd:  SCPSYS power domain
 * @bpd: bus protection entry to release
 *
 * Writes the entry's clear register (read-modify-write when the entry is
 * flagged BUS_PROT_REG_UPDATE, plain write otherwise) and polls the status
 * register for the release acknowledge.  The INFRA_NAO status block reports
 * the bits as set when released, so the expected value differs per block.
 *
 * Return: 0 on success (or immediately when BUS_PROT_IGNORE_CLR_ACK),
 * negative errno on acknowledge timeout.
 */
static int scpsys_bus_protect_clear(struct scpsys_domain *pd,
				    const struct scpsys_bus_prot_data *bpd)
{
	struct regmap *sta_regmap = scpsys_bus_protect_get_sta_regmap(pd, bpd);
	struct regmap *regmap = scpsys_bus_protect_get_regmap(pd, bpd);
	u32 sta_mask = bpd->bus_prot_sta_mask;
	u32 expected_ack;
	u32 val;

	expected_ack = (bpd->bus_prot_sta_block == BUS_PROT_BLOCK_INFRA_NAO ? sta_mask : 0);

	if (bpd->flags & BUS_PROT_REG_UPDATE)
		regmap_clear_bits(regmap, bpd->bus_prot_clr, bpd->bus_prot_set_clr_mask);
	else
		regmap_write(regmap, bpd->bus_prot_clr, bpd->bus_prot_set_clr_mask);

	if (bpd->flags & BUS_PROT_IGNORE_CLR_ACK)
		return 0;

	return regmap_read_poll_timeout(sta_regmap, bpd->bus_prot_sta,
					val, (val & sta_mask) == expected_ack,
					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}
235 
/*
 * scpsys_bus_protect_set - Engage one bus protection entry
 * @pd:  SCPSYS power domain
 * @bpd: bus protection entry to engage
 *
 * Writes the entry's set register (read-modify-write when flagged
 * BUS_PROT_REG_UPDATE, plain write otherwise) and polls until all status
 * mask bits read back as set.
 *
 * Return: 0 on success, negative errno on acknowledge timeout.
 */
static int scpsys_bus_protect_set(struct scpsys_domain *pd,
				  const struct scpsys_bus_prot_data *bpd)
{
	struct regmap *sta_regmap = scpsys_bus_protect_get_sta_regmap(pd, bpd);
	struct regmap *regmap = scpsys_bus_protect_get_regmap(pd, bpd);
	u32 sta_mask = bpd->bus_prot_sta_mask;
	u32 val;

	if (bpd->flags & BUS_PROT_REG_UPDATE)
		regmap_set_bits(regmap, bpd->bus_prot_set, bpd->bus_prot_set_clr_mask);
	else
		regmap_write(regmap, bpd->bus_prot_set, bpd->bus_prot_set_clr_mask);

	return regmap_read_poll_timeout(sta_regmap, bpd->bus_prot_sta,
					val, (val & sta_mask) == sta_mask,
					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}
253 
/*
 * scpsys_bus_protect_enable - Engage a domain's bus protection entries
 * @pd:    SCPSYS power domain
 * @flags: pass BUS_PROT_IGNORE_SUBCLK to process only the entries carrying
 *         that flag, 0 to process only the entries without it
 *
 * Walks the bp_cfg table in forward order, stopping at the first empty
 * entry (entries are packed from the front).  Entries flagged
 * BUS_PROT_INVERTED are released instead of engaged.
 *
 * Return: 0 on success, negative errno from the first failing entry.
 */
static int scpsys_bus_protect_enable(struct scpsys_domain *pd, u8 flags)
{
	for (int i = 0; i < SPM_MAX_BUS_PROT_DATA; i++) {
		const struct scpsys_bus_prot_data *bpd = &pd->data->bp_cfg[i];
		int ret;

		if (!bpd->bus_prot_set_clr_mask)
			break;

		/* Only handle entries matching the requested SUBCLK phase */
		if ((bpd->flags & BUS_PROT_IGNORE_SUBCLK) !=
		    (flags & BUS_PROT_IGNORE_SUBCLK))
			continue;

		if (bpd->flags & BUS_PROT_INVERTED)
			ret = scpsys_bus_protect_clear(pd, bpd);
		else
			ret = scpsys_bus_protect_set(pd, bpd);
		if (ret)
			return ret;
	}

	return 0;
}
277 
/*
 * scpsys_bus_protect_disable - Release a domain's bus protection entries
 * @pd:    SCPSYS power domain
 * @flags: pass BUS_PROT_IGNORE_SUBCLK to process only the entries carrying
 *         that flag, 0 to process only the entries without it
 *
 * Walks the bp_cfg table in reverse order (mirroring enable) and, unlike
 * scpsys_bus_protect_enable(), skips empty entries rather than stopping,
 * since the scan starts from the possibly-unused tail of the table.
 * Entries flagged BUS_PROT_INVERTED are engaged instead of released.
 *
 * Return: 0 on success, negative errno from the first failing entry.
 */
static int scpsys_bus_protect_disable(struct scpsys_domain *pd, u8 flags)
{
	for (int i = SPM_MAX_BUS_PROT_DATA - 1; i >= 0; i--) {
		const struct scpsys_bus_prot_data *bpd = &pd->data->bp_cfg[i];
		int ret;

		if (!bpd->bus_prot_set_clr_mask)
			continue;

		/* Only handle entries matching the requested SUBCLK phase */
		if ((bpd->flags & BUS_PROT_IGNORE_SUBCLK) !=
		    (flags & BUS_PROT_IGNORE_SUBCLK))
			continue;

		if (bpd->flags & BUS_PROT_INVERTED)
			ret = scpsys_bus_protect_set(pd, bpd);
		else
			ret = scpsys_bus_protect_clear(pd, bpd);
		if (ret)
			return ret;
	}

	return 0;
}
301 
/* Enable the optional domain supply; a missing (NULL) supply is a no-op. */
static int scpsys_regulator_enable(struct regulator *supply)
{
	if (!supply)
		return 0;

	return regulator_enable(supply);
}
306 
/* Disable the optional domain supply; a missing (NULL) supply is a no-op. */
static int scpsys_regulator_disable(struct regulator *supply)
{
	if (!supply)
		return 0;

	return regulator_disable(supply);
}
311 
312 static int scpsys_hwv_power_on(struct generic_pm_domain *genpd)
313 {
314 	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
315 	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
316 	struct scpsys *scpsys = pd->scpsys;
317 	u32 val;
318 	int ret;
319 
320 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL)) {
321 		ret = scpsys_sec_infra_power_on(true);
322 		if (ret)
323 			return ret;
324 	}
325 
326 	ret = scpsys_regulator_enable(pd->supply);
327 	if (ret)
328 		goto err_infra;
329 
330 	ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
331 	if (ret)
332 		goto err_reg;
333 
334 	/* For HWV the subsys clocks refer to the HWV low power subsystem */
335 	ret = clk_bulk_prepare_enable(pd->num_subsys_clks, pd->subsys_clks);
336 	if (ret)
337 		goto err_disable_clks;
338 
339 	/* Make sure the HW Voter is idle and able to accept commands */
340 	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->done, val,
341 					      val & BIT(hwv->setclr_bit),
342 					      MTK_HWV_POLL_DELAY_US,
343 					      MTK_HWV_POLL_TIMEOUT);
344 	if (ret) {
345 		dev_err(scpsys->dev, "Failed to power on: HW Voter busy.\n");
346 		goto err_disable_subsys_clks;
347 	}
348 
349 	/*
350 	 * Instruct the HWV to power on the MTCMOS (power domain): after that,
351 	 * the same bit will be unset immediately by the hardware.
352 	 */
353 	regmap_write(scpsys->base, hwv->set, BIT(hwv->setclr_bit));
354 
355 	/*
356 	 * Wait until the HWV sets the bit again, signalling that its internal
357 	 * state machine was started and it now processing the vote command.
358 	 */
359 	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->set, val,
360 					      val & BIT(hwv->setclr_bit),
361 					      MTK_HWV_PREPARE_DELAY_US,
362 					      MTK_HWV_PREPARE_TIMEOUT);
363 	if (ret) {
364 		dev_err(scpsys->dev, "Failed to power on: HW Voter not starting.\n");
365 		goto err_disable_subsys_clks;
366 	}
367 
368 	/* Wait for ACK, signalling that the MTCMOS was enabled */
369 	ret = readx_poll_timeout_atomic(scpsys_hwv_domain_is_enable_done, pd, val, val,
370 					MTK_HWV_POLL_DELAY_US, MTK_HWV_POLL_TIMEOUT);
371 	if (ret) {
372 		dev_err(scpsys->dev, "Failed to power on: HW Voter ACK timeout.\n");
373 		goto err_disable_subsys_clks;
374 	}
375 
376 	/* It's done! Disable the HWV low power subsystem clocks */
377 	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
378 
379 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
380 		scpsys_sec_infra_power_on(false);
381 
382 	return 0;
383 
384 err_disable_subsys_clks:
385 	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
386 err_disable_clks:
387 	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
388 err_reg:
389 	scpsys_regulator_disable(pd->supply);
390 err_infra:
391 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
392 		scpsys_sec_infra_power_on(false);
393 	return ret;
394 };
395 
396 static int scpsys_hwv_power_off(struct generic_pm_domain *genpd)
397 {
398 	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
399 	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
400 	struct scpsys *scpsys = pd->scpsys;
401 	u32 val;
402 	int ret;
403 
404 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL)) {
405 		ret = scpsys_sec_infra_power_on(true);
406 		if (ret)
407 			return ret;
408 	}
409 
410 	ret = clk_bulk_prepare_enable(pd->num_subsys_clks, pd->subsys_clks);
411 	if (ret)
412 		goto err_infra;
413 
414 	/* Make sure the HW Voter is idle and able to accept commands */
415 	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->done, val,
416 					      val & BIT(hwv->setclr_bit),
417 					      MTK_HWV_POLL_DELAY_US,
418 					      MTK_HWV_POLL_TIMEOUT);
419 	if (ret)
420 		goto err_disable_subsys_clks;
421 
422 
423 	/*
424 	 * Instruct the HWV to power off the MTCMOS (power domain): differently
425 	 * from poweron, the bit will be kept set.
426 	 */
427 	regmap_write(scpsys->base, hwv->clr, BIT(hwv->setclr_bit));
428 
429 	/*
430 	 * Wait until the HWV clears the bit, signalling that its internal
431 	 * state machine was started and it now processing the clear command.
432 	 */
433 	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->clr, val,
434 					      !(val & BIT(hwv->setclr_bit)),
435 					      MTK_HWV_PREPARE_DELAY_US,
436 					      MTK_HWV_PREPARE_TIMEOUT);
437 	if (ret)
438 		goto err_disable_subsys_clks;
439 
440 	/* Poweroff needs 100us for the HW to stabilize */
441 	udelay(100);
442 
443 	/* Wait for ACK, signalling that the MTCMOS was disabled */
444 	ret = readx_poll_timeout_atomic(scpsys_hwv_domain_is_disable_done, pd, val, val,
445 					MTK_HWV_POLL_DELAY_US, MTK_HWV_POLL_TIMEOUT);
446 	if (ret)
447 		goto err_disable_subsys_clks;
448 
449 	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
450 	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
451 
452 	scpsys_regulator_disable(pd->supply);
453 
454 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
455 		scpsys_sec_infra_power_on(false);
456 
457 	return 0;
458 
459 err_disable_subsys_clks:
460 	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
461 err_infra:
462 	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
463 		scpsys_sec_infra_power_on(false);
464 	return ret;
465 };
466 
/*
 * scpsys_ctl_pwrseq_on - Run the direct-control MTCMOS power-on sequence
 * @pd: SCPSYS power domain
 *
 * Asserts PWR_ON/PWR_ON_2ND, waits for the power acknowledge, drops clock
 * gating and isolation, deasserts reset and finally runs the RTFF NRESTORE
 * procedure for domains with an RTFF type that requires it.  The register
 * write order below follows the hardware sequence and must not be changed.
 *
 * Return: 0 on success, negative errno on power-ack timeout.
 */
static int scpsys_ctl_pwrseq_on(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;
	bool do_rtff_nrestore, tmp;
	int ret;

	/* subsys power on */
	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);

	/* wait until PWR_ACK = 1 */
	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
				 MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);

	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);

	/* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
		udelay(5);

	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);

	/*
	 * RTFF HW state may be modified by secure world or remote processors.
	 *
	 * With the only exception of STOR_UFS, which always needs save/restore,
	 * check if this power domain's RTFF is already on before trying to do
	 * the NRESTORE procedure, otherwise the system will lock up.
	 */
	switch (pd->data->rtff_type) {
	case SCPSYS_RTFF_TYPE_GENERIC:
	case SCPSYS_RTFF_TYPE_PCIE_PHY:
	{
		u32 ctl_status;

		/* SAVE_FLAG records that a previous poweroff saved RTFF state */
		regmap_read(scpsys->base, pd->data->ctl_offs, &ctl_status);
		do_rtff_nrestore = ctl_status & PWR_RTFF_SAVE_FLAG;
		break;
	}
	case SCPSYS_RTFF_TYPE_STOR_UFS:
		/* STOR_UFS always needs NRESTORE */
		do_rtff_nrestore = true;
		break;
	default:
		do_rtff_nrestore = false;
		break;
	}

	/* Return early if RTFF NRESTORE shall not be done */
	if (!do_rtff_nrestore)
		return 0;

	/* Pulse NRESTORE with the RTFF clock gated per RTFF type */
	switch (pd->data->rtff_type) {
	case SCPSYS_RTFF_TYPE_GENERIC:
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		break;
	case SCPSYS_RTFF_TYPE_PCIE_PHY:
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		break;
	case SCPSYS_RTFF_TYPE_STOR_UFS:
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		break;
	default:
		break;
	}

	return 0;
}
551 
/*
 * scpsys_ctl_pwrseq_off - Run the direct-control MTCMOS power-off sequence
 * @pd: SCPSYS power domain
 *
 * For RTFF-capable domains, first pulses SAVE (with the appropriate RTFF
 * clock gated) to retain state; GENERIC/PCIE_PHY types additionally set
 * SAVE_FLAG so the next power-on knows a restore is needed.  Then asserts
 * isolation, gates the clock, asserts reset and drops the power-on bits.
 * The register write order follows the hardware sequence.
 */
static void scpsys_ctl_pwrseq_off(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;

	switch (pd->data->rtff_type) {
	case SCPSYS_RTFF_TYPE_GENERIC:
	case SCPSYS_RTFF_TYPE_PCIE_PHY:
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
		break;
	case SCPSYS_RTFF_TYPE_STOR_UFS:
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		break;
	default:
		break;
	}

	/* subsys power off */
	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);

	/* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
		udelay(1);

	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
}
587 
/*
 * scpsys_modem_pwrseq_on - Run the modem-specific power-on sequence
 * @pd: SCPSYS power domain
 *
 * Unlike scpsys_ctl_pwrseq_on(), the modem sequence deasserts reset
 * before powering on (unless MTK_SCPD_SKIP_RESET_B) and only uses the
 * primary PWR_ON bit.
 *
 * Return: 0 on success, negative errno on power-ack timeout.
 */
static int scpsys_modem_pwrseq_on(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;
	bool tmp;
	int ret;

	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);

	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);

	/* wait until PWR_ACK = 1 */
	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
				 MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}
607 
/*
 * scpsys_modem_pwrseq_off - Run the modem-specific power-off sequence
 * @pd: SCPSYS power domain
 *
 * Mirror of scpsys_modem_pwrseq_on(): drop PWR_ON first, then assert
 * reset unless the domain has MTK_SCPD_SKIP_RESET_B.
 */
static void scpsys_modem_pwrseq_off(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;

	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);

	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
}
617 
/*
 * scpsys_power_on - genpd .power_on callback for direct-control domains
 * @genpd: generic PM domain to power on
 *
 * Sequence: regulator -> domain clocks -> external buck isolation release
 * -> MTCMOS power sequence (modem or generic) -> bus protection release
 * and subsystem clocks, with the ordering of the last two depending on
 * MTK_SCPD_STRICT_BUS_PROTECTION.  Errors unwind via the goto ladder in
 * reverse acquisition order.
 *
 * Return: 0 on success or a negative errno on failure.
 */
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
	struct scpsys *scpsys = pd->scpsys;
	int ret;

	ret = scpsys_regulator_enable(pd->supply);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
	if (ret)
		goto err_reg;

	if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
		regmap_clear_bits(scpsys->base, pd->data->ext_buck_iso_offs,
				  pd->data->ext_buck_iso_mask);

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
		ret = scpsys_modem_pwrseq_on(pd);
	else
		ret = scpsys_ctl_pwrseq_on(pd);

	if (ret)
		goto err_pwr_ack;

	/*
	 * In MT8189 mminfra power domain, the bus protect policy separates
	 * into two parts, one is set before subsys clocks enabled, and another
	 * need to enable after subsys clocks enable.
	 */
	ret = scpsys_bus_protect_disable(pd, BUS_PROT_IGNORE_SUBCLK);
	if (ret < 0)
		goto err_pwr_ack;

	/*
	 * In few Mediatek platforms(e.g. MT6779), the bus protect policy is
	 * stricter, which leads to bus protect release must be prior to bus
	 * access.
	 */
	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION)) {
		ret = clk_bulk_prepare_enable(pd->num_subsys_clks,
					      pd->subsys_clks);
		if (ret)
			goto err_pwr_ack;
	}

	ret = scpsys_sram_enable(pd);
	if (ret < 0)
		goto err_disable_subsys_clks;

	ret = scpsys_bus_protect_disable(pd, 0);
	if (ret < 0)
		goto err_disable_sram;

	/* Strict domains may only enable subsys clocks after the release */
	if (MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION)) {
		ret = clk_bulk_prepare_enable(pd->num_subsys_clks,
					      pd->subsys_clks);
		if (ret)
			goto err_enable_bus_protect;
	}

	return 0;

err_enable_bus_protect:
	scpsys_bus_protect_enable(pd, 0);
err_disable_sram:
	scpsys_sram_disable(pd);
err_disable_subsys_clks:
	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION))
		clk_bulk_disable_unprepare(pd->num_subsys_clks,
					   pd->subsys_clks);
err_pwr_ack:
	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
err_reg:
	scpsys_regulator_disable(pd->supply);
	return ret;
}
696 
/*
 * scpsys_power_off - genpd .power_off callback for direct-control domains
 * @genpd: generic PM domain to power off
 *
 * Reverse of scpsys_power_on(): re-engage bus protection, power down SRAM,
 * re-assert external buck isolation, drop subsystem clocks and the second
 * bus protection phase, run the MTCMOS power-off sequence, wait for the
 * power acknowledge to clear, then release domain clocks and the regulator.
 *
 * Return: 0 on success or a negative errno; on error the domain is left in
 * a partially powered-down state.
 */
static int scpsys_power_off(struct generic_pm_domain *genpd)
{
	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
	struct scpsys *scpsys = pd->scpsys;
	bool tmp;
	int ret;

	ret = scpsys_bus_protect_enable(pd, 0);
	if (ret < 0)
		return ret;

	ret = scpsys_sram_disable(pd);
	if (ret < 0)
		return ret;

	if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
		regmap_set_bits(scpsys->base, pd->data->ext_buck_iso_offs,
				pd->data->ext_buck_iso_mask);

	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);

	ret = scpsys_bus_protect_enable(pd, BUS_PROT_IGNORE_SUBCLK);
	if (ret < 0)
		return ret;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
		scpsys_modem_pwrseq_off(pd);
	else
		scpsys_ctl_pwrseq_off(pd);

	/* wait until PWR_ACK = 0 */
	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
				 MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);

	scpsys_regulator_disable(pd->supply);

	return 0;
}
739 
/*
 * scpsys_add_one_domain - Parse one DT node and register its power domain
 * @scpsys: SCPSYS controller instance
 * @node:   device tree node describing the domain ("reg" = domain id)
 *
 * Looks up the domain's static data by id and controller type, grabs the
 * optional supply and the clocks listed in the node (clock-names entries
 * containing a '-' are treated as subsystem clocks), installs the
 * direct-control or HW-Voter callbacks, powers the domain on unless it is
 * flagged MTK_SCPD_KEEP_DEFAULT_OFF and registers it with genpd.
 *
 * Return: pointer to the registered generic_pm_domain, or an ERR_PTR.
 */
static struct
generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_node *node)
{
	const struct scpsys_domain_data *domain_data;
	const struct scpsys_hwv_domain_data *hwv_domain_data;
	struct scpsys_domain *pd;
	struct property *prop;
	const char *clk_name;
	int i, ret, num_clks;
	struct clk *clk;
	int clk_ind = 0;
	u32 id;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(scpsys->dev, "%pOF: failed to retrieve domain id from reg: %d\n",
			node, ret);
		return ERR_PTR(-EINVAL);
	}

	/* Validate the id and select the static data matching the type */
	switch (scpsys->soc_data->type) {
	case SCPSYS_MTCMOS_TYPE_DIRECT_CTL:
		if (id >= scpsys->soc_data->num_domains) {
			dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
			return ERR_PTR(-EINVAL);
		}

		domain_data = &scpsys->soc_data->domains_data[id];
		hwv_domain_data = NULL;

		/* A zero sta_mask marks a hole in the domain table */
		if (domain_data->sta_mask == 0) {
			dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
			return ERR_PTR(-EINVAL);
		}

		break;
	case SCPSYS_MTCMOS_TYPE_HW_VOTER:
		if (id >= scpsys->soc_data->num_hwv_domains) {
			dev_err(scpsys->dev, "%pOF: invalid HWV domain id %d\n", node, id);
			return ERR_PTR(-EINVAL);
		}

		domain_data = NULL;
		hwv_domain_data = &scpsys->soc_data->hwv_domains_data[id];

		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	pd = devm_kzalloc(scpsys->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->data = domain_data;
	pd->hwv_data = hwv_domain_data;
	pd->scpsys = scpsys;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_DOMAIN_SUPPLY)) {
		pd->supply = devm_of_regulator_get_optional(scpsys->dev, node, "domain");
		if (IS_ERR(pd->supply))
			return dev_err_cast_probe(scpsys->dev, pd->supply,
				      "%pOF: failed to get power supply.\n",
				      node);
	}

	num_clks = of_clk_get_parent_count(node);
	if (num_clks > 0) {
		/* Calculate number of subsys_clks */
		of_property_for_each_string(node, "clock-names", prop, clk_name) {
			char *subsys;

			/* A '-' in the clock name marks a subsystem clock */
			subsys = strchr(clk_name, '-');
			if (subsys)
				pd->num_subsys_clks++;
			else
				pd->num_clks++;
		}

		pd->clks = devm_kcalloc(scpsys->dev, pd->num_clks, sizeof(*pd->clks), GFP_KERNEL);
		if (!pd->clks)
			return ERR_PTR(-ENOMEM);

		pd->subsys_clks = devm_kcalloc(scpsys->dev, pd->num_subsys_clks,
					       sizeof(*pd->subsys_clks), GFP_KERNEL);
		if (!pd->subsys_clks)
			return ERR_PTR(-ENOMEM);

	}

	/* Domain clocks come first in the node's clocks list... */
	for (i = 0; i < pd->num_clks; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err_probe(scpsys->dev, ret,
				      "%pOF: failed to get clk at index %d\n", node, i);
			goto err_put_clocks;
		}

		pd->clks[clk_ind++].clk = clk;
	}

	/* ...followed by the subsystem clocks */
	for (i = 0; i < pd->num_subsys_clks; i++) {
		clk = of_clk_get(node, i + clk_ind);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err_probe(scpsys->dev, ret,
				      "%pOF: failed to get clk at index %d\n", node,
				      i + clk_ind);
			goto err_put_subsys_clocks;
		}

		pd->subsys_clks[i].clk = clk;
	}

	if (scpsys->domains[id]) {
		ret = -EINVAL;
		dev_err(scpsys->dev,
			"power domain with id %d already exists, check your device-tree\n", id);
		goto err_put_subsys_clocks;
	}

	if (pd->data && pd->data->name)
		pd->genpd.name = pd->data->name;
	else if (pd->hwv_data && pd->hwv_data->name)
		pd->genpd.name = pd->hwv_data->name;
	else
		pd->genpd.name = node->name;

	if (scpsys->soc_data->type == SCPSYS_MTCMOS_TYPE_DIRECT_CTL) {
		pd->genpd.power_off = scpsys_power_off;
		pd->genpd.power_on = scpsys_power_on;
	} else {
		pd->genpd.power_off = scpsys_hwv_power_off;
		pd->genpd.power_on = scpsys_hwv_power_on;

		/* HW-Voter code can be invoked in atomic context */
		pd->genpd.flags |= GENPD_FLAG_IRQ_SAFE;
	}

	/*
	 * Initially turn on all domains to make the domains usable
	 * with !CONFIG_PM and to get the hardware in sync with the
	 * software.  The unused domains will be switched off during
	 * late_init time.
	 */
	if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) {
		if (scpsys_domain_is_on(pd))
			dev_warn(scpsys->dev,
				 "%pOF: A default off power domain has been ON\n", node);
	} else {
		ret = pd->genpd.power_on(&pd->genpd);
		if (ret < 0) {
			dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
			goto err_put_subsys_clocks;
		}

		if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON))
			pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
	}

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP))
		pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
		pm_genpd_init(&pd->genpd, NULL, true);
	else
		pm_genpd_init(&pd->genpd, NULL, false);

	scpsys->domains[id] = &pd->genpd;

	/* presumably pd_data.domains aliases scpsys->domains — set up at probe, not visible here; verify */
	return scpsys->pd_data.domains[id];

err_put_subsys_clocks:
	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
err_put_clocks:
	clk_bulk_put(pd->num_clks, pd->clks);
	return ERR_PTR(ret);
}
919 
/*
 * scpsys_add_subdomain - Register child power domains below @parent
 * @scpsys: SCPSYS controller instance
 * @parent: device tree node of the already-registered parent domain
 *
 * For each child node of @parent, registers the child domain, recurses
 * into its own children and links it to the parent via genpd.  Note the
 * "reg" property is read from @parent to find the parent domain id.
 *
 * Return: 0 on success or a negative errno; the current child node
 * reference is dropped on error.
 */
static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *parent)
{
	struct generic_pm_domain *child_pd, *parent_pd;
	struct device_node *child;
	int ret;

	for_each_child_of_node(parent, child) {
		u32 id;

		ret = of_property_read_u32(parent, "reg", &id);
		if (ret) {
			dev_err(scpsys->dev, "%pOF: failed to get parent domain id\n", child);
			goto err_put_node;
		}

		if (!scpsys->pd_data.domains[id]) {
			ret = -EINVAL;
			dev_err(scpsys->dev, "power domain with id %d does not exist\n", id);
			goto err_put_node;
		}

		parent_pd = scpsys->pd_data.domains[id];

		child_pd = scpsys_add_one_domain(scpsys, child);
		if (IS_ERR(child_pd)) {
			ret = PTR_ERR(child_pd);
			dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
				      child);
			goto err_put_node;
		}

		/* recursive call to add all subdomains */
		ret = scpsys_add_subdomain(scpsys, child);
		if (ret)
			goto err_put_node;

		ret = pm_genpd_add_subdomain(parent_pd, child_pd);
		if (ret) {
			dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
				child_pd->name, parent_pd->name);
			goto err_put_node;
		} else {
			dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
				child_pd->name);
		}
	}

	return 0;

err_put_node:
	of_node_put(child);
	return ret;
}
973 
/*
 * scpsys_remove_one_domain - Unregister one domain during error cleanup
 * @pd: SCPSYS power domain to tear down
 *
 * Removes the domain from genpd, powers it off if the hardware still
 * reports it on, and releases its clock references.
 */
static void scpsys_remove_one_domain(struct scpsys_domain *pd)
{
	int ret;

	/*
	 * We're in the error cleanup already, so we only complain,
	 * but won't emit another error on top of the original one.
	 */
	ret = pm_genpd_remove(&pd->genpd);
	if (ret < 0)
		dev_err(pd->scpsys->dev,
			"failed to remove domain '%s' : %d - state may be inconsistent\n",
			pd->genpd.name, ret);
	if (scpsys_domain_is_on(pd))
		scpsys_power_off(&pd->genpd);

	clk_bulk_put(pd->num_clks, pd->clks);
	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
}
993 
994 static void scpsys_domain_cleanup(struct scpsys *scpsys)
995 {
996 	struct generic_pm_domain *genpd;
997 	struct scpsys_domain *pd;
998 	int i;
999 
1000 	for (i = scpsys->pd_data.num_domains - 1; i >= 0; i--) {
1001 		genpd = scpsys->pd_data.domains[i];
1002 		if (genpd) {
1003 			pd = to_scpsys_domain(genpd);
1004 			scpsys_remove_one_domain(pd);
1005 		}
1006 	}
1007 }
1008 
1009 static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *scpsys)
1010 {
1011 	const u8 bp_blocks[3] = {
1012 		BUS_PROT_BLOCK_INFRA, BUS_PROT_BLOCK_SMI, BUS_PROT_BLOCK_INFRA_NAO
1013 	};
1014 	struct device_node *np = dev->of_node;
1015 	struct device_node *node, *smi_np;
1016 	int num_regmaps = 0, i, j;
1017 	struct regmap *regmap[3];
1018 
1019 	/*
1020 	 * Legacy code retrieves a maximum of three bus protection handles:
1021 	 * some may be optional, or may not be, so the array of bp blocks
1022 	 * that is normally passed in as platform data must be dynamically
1023 	 * built in this case.
1024 	 *
1025 	 * Here, try to retrieve all of the regmaps that the legacy code
1026 	 * supported and then count the number of the ones that are present,
1027 	 * this makes it then possible to allocate the array of bus_prot
1028 	 * regmaps and convert all to the new style handling.
1029 	 */
1030 	of_node_get(np);
1031 	node = of_find_node_with_property(np, "mediatek,infracfg");
1032 	if (node) {
1033 		regmap[0] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg");
1034 		of_node_put(node);
1035 		num_regmaps++;
1036 		if (IS_ERR(regmap[0]))
1037 			return dev_err_probe(dev, PTR_ERR(regmap[0]),
1038 					     "%pOF: failed to get infracfg regmap\n",
1039 					     node);
1040 	} else {
1041 		regmap[0] = NULL;
1042 	}
1043 
1044 	of_node_get(np);
1045 	node = of_find_node_with_property(np, "mediatek,smi");
1046 	if (node) {
1047 		smi_np = of_parse_phandle(node, "mediatek,smi", 0);
1048 		of_node_put(node);
1049 		if (!smi_np)
1050 			return -ENODEV;
1051 
1052 		regmap[1] = device_node_to_regmap(smi_np);
1053 		num_regmaps++;
1054 		of_node_put(smi_np);
1055 		if (IS_ERR(regmap[1]))
1056 			return dev_err_probe(dev, PTR_ERR(regmap[1]),
1057 					     "%pOF: failed to get SMI regmap\n",
1058 					     node);
1059 	} else {
1060 		regmap[1] = NULL;
1061 	}
1062 
1063 	of_node_get(np);
1064 	node = of_find_node_with_property(np, "mediatek,infracfg-nao");
1065 	if (node) {
1066 		regmap[2] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
1067 		num_regmaps++;
1068 		of_node_put(node);
1069 		if (IS_ERR(regmap[2]))
1070 			return dev_err_probe(dev, PTR_ERR(regmap[2]),
1071 					     "%pOF: failed to get infracfg regmap\n",
1072 					     node);
1073 	} else {
1074 		regmap[2] = NULL;
1075 	}
1076 
1077 	scpsys->bus_prot = devm_kmalloc_array(dev, num_regmaps,
1078 					      sizeof(*scpsys->bus_prot), GFP_KERNEL);
1079 	if (!scpsys->bus_prot)
1080 		return -ENOMEM;
1081 
1082 	for (i = 0, j = 0; i < ARRAY_SIZE(bp_blocks); i++) {
1083 		enum scpsys_bus_prot_block bp_type;
1084 
1085 		if (!regmap[i])
1086 			continue;
1087 
1088 		bp_type = bp_blocks[i];
1089 		scpsys->bus_prot_index[bp_type] = j;
1090 		scpsys->bus_prot[j] = regmap[i];
1091 
1092 		j++;
1093 	}
1094 
1095 	return 0;
1096 }
1097 
1098 static int scpsys_get_bus_protection(struct device *dev, struct scpsys *scpsys)
1099 {
1100 	const struct scpsys_soc_data *soc = scpsys->soc_data;
1101 	struct device_node *np = dev->of_node;
1102 	int i, num_handles;
1103 
1104 	num_handles = of_count_phandle_with_args(np, "access-controllers", NULL);
1105 	if (num_handles < 0 || num_handles != soc->num_bus_prot_blocks)
1106 		return dev_err_probe(dev, -EINVAL,
1107 				     "Cannot get access controllers: expected %u, got %d\n",
1108 				     soc->num_bus_prot_blocks, num_handles);
1109 
1110 	scpsys->bus_prot = devm_kmalloc_array(dev, soc->num_bus_prot_blocks,
1111 					      sizeof(*scpsys->bus_prot), GFP_KERNEL);
1112 	if (!scpsys->bus_prot)
1113 		return -ENOMEM;
1114 
1115 	for (i = 0; i < soc->num_bus_prot_blocks; i++) {
1116 		enum scpsys_bus_prot_block bp_type;
1117 		struct device_node *node;
1118 
1119 		node = of_parse_phandle(np, "access-controllers", i);
1120 		if (!node)
1121 			return -EINVAL;
1122 
1123 		/*
1124 		 * Index the bus protection regmaps so that we don't have to
1125 		 * find the right one by type with a loop at every execution
1126 		 * of power sequence(s).
1127 		 */
1128 		bp_type = soc->bus_prot_blocks[i];
1129 		scpsys->bus_prot_index[bp_type] = i;
1130 
1131 		scpsys->bus_prot[i] = device_node_to_regmap(node);
1132 		of_node_put(node);
1133 		if (IS_ERR_OR_NULL(scpsys->bus_prot[i]))
1134 			return dev_err_probe(dev, scpsys->bus_prot[i] ?
1135 					     PTR_ERR(scpsys->bus_prot[i]) : -ENXIO,
1136 					     "Cannot get regmap for access controller %d\n", i);
1137 	}
1138 
1139 	return 0;
1140 }
1141 
/* Match table: maps each supported SoC compatible to its per-SoC domain data */
static const struct of_device_id scpsys_of_match[] = {
	{
		.compatible = "mediatek,mt6735-power-controller",
		.data = &mt6735_scpsys_data,
	},
	{
		.compatible = "mediatek,mt6795-power-controller",
		.data = &mt6795_scpsys_data,
	},
	{
		.compatible = "mediatek,mt6893-power-controller",
		.data = &mt6893_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8167-power-controller",
		.data = &mt8167_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8173-power-controller",
		.data = &mt8173_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8183-power-controller",
		.data = &mt8183_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8186-power-controller",
		.data = &mt8186_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8188-power-controller",
		.data = &mt8188_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8189-power-controller",
		.data = &mt8189_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8192-power-controller",
		.data = &mt8192_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8195-power-controller",
		.data = &mt8195_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8196-power-controller",
		.data = &mt8196_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8196-hwv-hfrp-power-controller",
		.data = &mt8196_hfrpsys_hwv_data,
	},
	{
		.compatible = "mediatek,mt8196-hwv-scp-power-controller",
		.data = &mt8196_scpsys_hwv_data,
	},
	{
		.compatible = "mediatek,mt8365-power-controller",
		.data = &mt8365_scpsys_data,
	},
	{ }
};
1205 
1206 static int scpsys_probe(struct platform_device *pdev)
1207 {
1208 	struct device *dev = &pdev->dev;
1209 	struct device_node *np = dev->of_node;
1210 	const struct scpsys_soc_data *soc;
1211 	struct device *parent;
1212 	struct scpsys *scpsys;
1213 	int num_domains, ret;
1214 
1215 	soc = of_device_get_match_data(&pdev->dev);
1216 	if (!soc) {
1217 		dev_err(&pdev->dev, "no power controller data\n");
1218 		return -EINVAL;
1219 	}
1220 
1221 	num_domains = soc->num_domains + soc->num_hwv_domains;
1222 
1223 	scpsys = devm_kzalloc(dev, struct_size(scpsys, domains, num_domains), GFP_KERNEL);
1224 	if (!scpsys)
1225 		return -ENOMEM;
1226 
1227 	scpsys->dev = dev;
1228 	scpsys->soc_data = soc;
1229 
1230 	scpsys->pd_data.domains = scpsys->domains;
1231 	scpsys->pd_data.num_domains = num_domains;
1232 
1233 	parent = dev->parent;
1234 	if (!parent) {
1235 		dev_err(dev, "no parent for syscon devices\n");
1236 		return -ENODEV;
1237 	}
1238 
1239 	scpsys->base = syscon_node_to_regmap(parent->of_node);
1240 	if (IS_ERR(scpsys->base)) {
1241 		dev_err(dev, "no regmap available\n");
1242 		return PTR_ERR(scpsys->base);
1243 	}
1244 
1245 	if (of_find_property(np, "access-controllers", NULL))
1246 		ret = scpsys_get_bus_protection(dev, scpsys);
1247 	else
1248 		ret = scpsys_get_bus_protection_legacy(dev, scpsys);
1249 
1250 	if (ret)
1251 		return ret;
1252 
1253 	ret = -ENODEV;
1254 	for_each_available_child_of_node_scoped(np, node) {
1255 		struct generic_pm_domain *domain;
1256 
1257 		domain = scpsys_add_one_domain(scpsys, node);
1258 		if (IS_ERR(domain)) {
1259 			ret = PTR_ERR(domain);
1260 			goto err_cleanup_domains;
1261 		}
1262 
1263 		ret = scpsys_add_subdomain(scpsys, node);
1264 		if (ret)
1265 			goto err_cleanup_domains;
1266 	}
1267 
1268 	if (ret) {
1269 		dev_dbg(dev, "no power domains present\n");
1270 		return ret;
1271 	}
1272 
1273 	ret = of_genpd_add_provider_onecell(np, &scpsys->pd_data);
1274 	if (ret) {
1275 		dev_err(dev, "failed to add provider: %d\n", ret);
1276 		goto err_cleanup_domains;
1277 	}
1278 
1279 	return 0;
1280 
1281 err_cleanup_domains:
1282 	scpsys_domain_cleanup(scpsys);
1283 	return ret;
1284 }
1285 
/* Built-in only (no module unload): power domains must outlive consumers */
static struct platform_driver scpsys_pm_domain_driver = {
	.probe = scpsys_probe,
	.driver = {
		.name = "mtk-power-controller",
		.suppress_bind_attrs = true,
		.of_match_table = scpsys_of_match,
	},
};
builtin_platform_driver(scpsys_pm_domain_driver);
1295