xref: /linux/drivers/remoteproc/qcom_q6v5_mss.c (revision b7019ac550eb3916f34d79db583e9b7ea2524afa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include <linux/reset.h>
26 #include <linux/soc/qcom/mdt_loader.h>
27 #include <linux/iopoll.h>
28 
29 #include "remoteproc_internal.h"
30 #include "qcom_common.h"
31 #include "qcom_q6v5.h"
32 
33 #include <linux/qcom_scm.h>
34 
35 #define MPSS_CRASH_REASON_SMEM		421
36 
37 /* RMB Status Register Values */
38 #define RMB_PBL_SUCCESS			0x1
39 
40 #define RMB_MBA_XPU_UNLOCKED		0x1
41 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
42 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
43 #define RMB_MBA_AUTH_COMPLETE		0x4
44 
45 /* PBL/MBA interface registers */
46 #define RMB_MBA_IMAGE_REG		0x00
47 #define RMB_PBL_STATUS_REG		0x04
48 #define RMB_MBA_COMMAND_REG		0x08
49 #define RMB_MBA_STATUS_REG		0x0C
50 #define RMB_PMI_META_DATA_REG		0x10
51 #define RMB_PMI_CODE_START_REG		0x14
52 #define RMB_PMI_CODE_LENGTH_REG		0x18
53 #define RMB_MBA_MSS_STATUS		0x40
54 #define RMB_MBA_ALT_RESET		0x44
55 
56 #define RMB_CMD_META_DATA_READY		0x1
57 #define RMB_CMD_LOAD_READY		0x2
58 
59 /* QDSP6SS Register Offsets */
60 #define QDSP6SS_RESET_REG		0x014
61 #define QDSP6SS_GFMUX_CTL_REG		0x020
62 #define QDSP6SS_PWR_CTL_REG		0x030
63 #define QDSP6SS_MEM_PWR_CTL		0x0B0
64 #define QDSP6SS_STRAP_ACC		0x110
65 
66 /* AXI Halt Register Offsets */
67 #define AXI_HALTREQ_REG			0x0
68 #define AXI_HALTACK_REG			0x4
69 #define AXI_IDLE_REG			0x8
70 
71 #define HALT_ACK_TIMEOUT_MS		100
72 
73 /* QDSP6SS_RESET */
74 #define Q6SS_STOP_CORE			BIT(0)
75 #define Q6SS_CORE_ARES			BIT(1)
76 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
77 
78 /* QDSP6SS_GFMUX_CTL */
79 #define Q6SS_CLK_ENABLE			BIT(1)
80 
81 /* QDSP6SS_PWR_CTL */
82 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
83 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
84 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
85 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
86 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
87 #define Q6SS_L2DATA_STBY_N		BIT(18)
88 #define Q6SS_SLP_RET_N			BIT(19)
89 #define Q6SS_CLAMP_IO			BIT(20)
90 #define QDSS_BHS_ON			BIT(21)
91 #define QDSS_LDO_BYP			BIT(22)
92 
93 /* QDSP6v56 parameters */
94 #define QDSP6v56_LDO_BYP		BIT(25)
95 #define QDSP6v56_BHS_ON		BIT(24)
96 #define QDSP6v56_CLAMP_WL		BIT(21)
97 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
98 #define HALT_CHECK_MAX_LOOPS		200
99 #define QDSP6SS_XO_CBCR		0x0038
100 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
101 
102 /* QDSP6v65 parameters */
103 #define QDSP6SS_SLEEP                   0x3C
104 #define QDSP6SS_BOOT_CORE_START         0x400
105 #define QDSP6SS_BOOT_CMD                0x404
106 #define SLEEP_CHECK_MAX_LOOPS           200
107 #define BOOT_FSM_TIMEOUT                10000
108 
/* A regulator handle bundled with the voltage/load votes to place on it */
struct reg_info {
	struct regulator *reg;
	int uV;		/* minimum voltage to request; <= 0 means no voltage vote */
	int uA;		/* load to request; <= 0 means no load vote */
};
114 
/* Static description of one supply: regulator name plus the votes for it */
struct qcom_mss_reg_res {
	const char *supply;	/* regulator name; a NULL entry terminates tables */
	int uV;
	int uA;
};
120 
/*
 * Per-SoC match data: MBA firmware name, the supplies, clocks and power
 * domains the subsystem needs, plus feature flags controlling the boot
 * sequence.  Supply tables end with an entry whose .supply is NULL (see
 * q6v5_regulator_init()).
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;			/* one of the MSS_* enum values */
	bool need_mem_protection;	/* hand carveouts to Q6 via SCM assign calls */
	bool has_alt_reset;		/* use PDC + RMB alt-reset sequence */
};
134 
/* Driver state for one modem (MSS/Hexagon Q6) remoteproc instance */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register block */
	void __iomem *rmb_base;		/* RMB (PBL/MBA mailbox) registers */

	/* syscon and per-port offsets used to halt the AXI ports */
	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	/* resource handles; the *_count fields give the used array entries */
	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;		/* set once the mpss firmware is up */

	/* coredump bookkeeping; the MBA must be loaded to read segments */
	bool dump_mba_loaded;
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA (modem boot authenticator) carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* mpss (modem firmware) carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	int mpss_perm;		/* current SCM ownership state of mpss region */
	int mba_perm;		/* current SCM ownership state of mba region */
	const char *hexagon_mdt_image;
	int version;		/* one of the MSS_* enum values */
};
194 
/* Hexagon/MSS hardware generations handled by this driver */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_SDM845,
};
201 
202 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
203 			       const struct qcom_mss_reg_res *reg_res)
204 {
205 	int rc;
206 	int i;
207 
208 	if (!reg_res)
209 		return 0;
210 
211 	for (i = 0; reg_res[i].supply; i++) {
212 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
213 		if (IS_ERR(regs[i].reg)) {
214 			rc = PTR_ERR(regs[i].reg);
215 			if (rc != -EPROBE_DEFER)
216 				dev_err(dev, "Failed to get %s\n regulator",
217 					reg_res[i].supply);
218 			return rc;
219 		}
220 
221 		regs[i].uV = reg_res[i].uV;
222 		regs[i].uA = reg_res[i].uA;
223 	}
224 
225 	return i;
226 }
227 
228 static int q6v5_regulator_enable(struct q6v5 *qproc,
229 				 struct reg_info *regs, int count)
230 {
231 	int ret;
232 	int i;
233 
234 	for (i = 0; i < count; i++) {
235 		if (regs[i].uV > 0) {
236 			ret = regulator_set_voltage(regs[i].reg,
237 					regs[i].uV, INT_MAX);
238 			if (ret) {
239 				dev_err(qproc->dev,
240 					"Failed to request voltage for %d.\n",
241 						i);
242 				goto err;
243 			}
244 		}
245 
246 		if (regs[i].uA > 0) {
247 			ret = regulator_set_load(regs[i].reg,
248 						 regs[i].uA);
249 			if (ret < 0) {
250 				dev_err(qproc->dev,
251 					"Failed to set regulator mode\n");
252 				goto err;
253 			}
254 		}
255 
256 		ret = regulator_enable(regs[i].reg);
257 		if (ret) {
258 			dev_err(qproc->dev, "Regulator enable failed\n");
259 			goto err;
260 		}
261 	}
262 
263 	return 0;
264 err:
265 	for (; i >= 0; i--) {
266 		if (regs[i].uV > 0)
267 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
268 
269 		if (regs[i].uA > 0)
270 			regulator_set_load(regs[i].reg, 0);
271 
272 		regulator_disable(regs[i].reg);
273 	}
274 
275 	return ret;
276 }
277 
278 static void q6v5_regulator_disable(struct q6v5 *qproc,
279 				   struct reg_info *regs, int count)
280 {
281 	int i;
282 
283 	for (i = 0; i < count; i++) {
284 		if (regs[i].uV > 0)
285 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
286 
287 		if (regs[i].uA > 0)
288 			regulator_set_load(regs[i].reg, 0);
289 
290 		regulator_disable(regs[i].reg);
291 	}
292 }
293 
/*
 * Prepare and enable each clock in the array; on the first failure the
 * clocks already enabled are torn down again in reverse order.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int ret = 0;
	int n;

	for (n = 0; n < count; n++) {
		ret = clk_prepare_enable(clks[n]);
		if (ret)
			break;
	}

	if (!ret)
		return 0;

	dev_err(dev, "Clock enable failed\n");
	while (--n >= 0)
		clk_disable_unprepare(clks[n]);

	return ret;
}
315 
/* Disable and unprepare every clock in the array */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int n = 0;

	while (n < count)
		clk_disable_unprepare(clks[n++]);
}
324 
325 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
326 			   size_t pd_count)
327 {
328 	int ret;
329 	int i;
330 
331 	for (i = 0; i < pd_count; i++) {
332 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
333 		ret = pm_runtime_get_sync(pds[i]);
334 		if (ret < 0)
335 			goto unroll_pd_votes;
336 	}
337 
338 	return 0;
339 
340 unroll_pd_votes:
341 	for (i--; i >= 0; i--) {
342 		dev_pm_genpd_set_performance_state(pds[i], 0);
343 		pm_runtime_put(pds[i]);
344 	}
345 
346 	return ret;
347 };
348 
349 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
350 			     size_t pd_count)
351 {
352 	int i;
353 
354 	for (i = 0; i < pd_count; i++) {
355 		dev_pm_genpd_set_performance_state(pds[i], 0);
356 		pm_runtime_put(pds[i]);
357 	}
358 }
359 
360 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
361 				   bool remote_owner, phys_addr_t addr,
362 				   size_t size)
363 {
364 	struct qcom_scm_vmperm next;
365 
366 	if (!qproc->need_mem_protection)
367 		return 0;
368 	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
369 		return 0;
370 	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
371 		return 0;
372 
373 	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
374 	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
375 
376 	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
377 				   current_perm, &next, 1);
378 }
379 
380 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
381 {
382 	struct q6v5 *qproc = rproc->priv;
383 
384 	memcpy(qproc->mba_region, fw->data, fw->size);
385 
386 	return 0;
387 }
388 
389 static int q6v5_reset_assert(struct q6v5 *qproc)
390 {
391 	int ret;
392 
393 	if (qproc->has_alt_reset) {
394 		reset_control_assert(qproc->pdc_reset);
395 		ret = reset_control_reset(qproc->mss_restart);
396 		reset_control_deassert(qproc->pdc_reset);
397 	} else {
398 		ret = reset_control_assert(qproc->mss_restart);
399 	}
400 
401 	return ret;
402 }
403 
404 static int q6v5_reset_deassert(struct q6v5 *qproc)
405 {
406 	int ret;
407 
408 	if (qproc->has_alt_reset) {
409 		reset_control_assert(qproc->pdc_reset);
410 		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
411 		ret = reset_control_reset(qproc->mss_restart);
412 		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
413 		reset_control_deassert(qproc->pdc_reset);
414 	} else {
415 		ret = reset_control_deassert(qproc->mss_restart);
416 	}
417 
418 	return ret;
419 }
420 
421 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
422 {
423 	unsigned long timeout;
424 	s32 val;
425 
426 	timeout = jiffies + msecs_to_jiffies(ms);
427 	for (;;) {
428 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
429 		if (val)
430 			break;
431 
432 		if (time_after(jiffies, timeout))
433 			return -ETIMEDOUT;
434 
435 		msleep(1);
436 	}
437 
438 	return val;
439 }
440 
441 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
442 {
443 
444 	unsigned long timeout;
445 	s32 val;
446 
447 	timeout = jiffies + msecs_to_jiffies(ms);
448 	for (;;) {
449 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
450 		if (val < 0)
451 			break;
452 
453 		if (!status && val)
454 			break;
455 		else if (status && val == status)
456 			break;
457 
458 		if (time_after(jiffies, timeout))
459 			return -ETIMEDOUT;
460 
461 		msleep(1);
462 	}
463 
464 	return val;
465 }
466 
/*
 * q6v5proc_reset() - power up the Hexagon core and wait for the PBL
 *
 * Runs the SoC-generation specific power-on sequence:
 *  - MSS_SDM845: enable the QDSP6SS sleep clock, release the stop-core bit
 *    and let the hardware boot FSM do the power-on handshake.
 *  - MSS_MSM8996: manual BHS/LDO power-up with per-bank memory power-on.
 *  - others (8916/8974-class): manual power-up via QDSP6SS_PWR_CTL bits.
 * All paths end by waiting for the PBL to report status through the RMB.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		/* Wait for the CLKOFF bit (31) to clear: clock is running */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & BIT(31)), 1,
					 SLEEP_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll RMB MSS status bit 0 for boot FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
623 
624 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
625 				   struct regmap *halt_map,
626 				   u32 offset)
627 {
628 	unsigned long timeout;
629 	unsigned int val;
630 	int ret;
631 
632 	/* Check if we're already idle */
633 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
634 	if (!ret && val)
635 		return;
636 
637 	/* Assert halt request */
638 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
639 
640 	/* Wait for halt */
641 	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
642 	for (;;) {
643 		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
644 		if (ret || val || time_after(jiffies, timeout))
645 			break;
646 
647 		msleep(1);
648 	}
649 
650 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
651 	if (ret || !val)
652 		dev_err(qproc->dev, "port failed halt\n");
653 
654 	/* Clear halt request (port will remain halted until reset) */
655 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
656 }
657 
/*
 * q6v5_mpss_init_image() - let the MBA authenticate the mpss metadata
 *
 * Copies the firmware header blob (mdt) into a contiguous DMA buffer,
 * temporarily assigns that buffer to the modem via an SCM call, points the
 * RMB at it and waits for the MBA to report metadata authentication, then
 * reclaims the buffer regardless of the outcome.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Hand the metadata address to the MBA and ask it to authenticate */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	/* q6v5_rmb_mba_wait() returns a positive status on success */
	return ret < 0 ? ret : 0;
}
707 
708 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
709 {
710 	if (phdr->p_type != PT_LOAD)
711 		return false;
712 
713 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
714 		return false;
715 
716 	if (!phdr->p_memsz)
717 		return false;
718 
719 	return true;
720 }
721 
/*
 * q6v5_mba_load() - power up the subsystem and boot the MBA image
 *
 * Enables power domains, regulators, clocks and resets in order, hands the
 * MBA carveout to the modem via SCM, releases the Q6 from reset and waits
 * for the MBA to report a successful boot.  On failure every resource
 * acquired so far is released in reverse order through the goto chain.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the PBL where to find the MBA image, then start the core */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* Any non-zero MBA status ends the wait; check it is a good one */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
851 
/*
 * q6v5_mba_reclaim() - undo q6v5_mba_load()
 *
 * Halts the AXI ports, reclaims the mpss and mba carveouts from the modem,
 * asserts reset and releases the active clocks, regulators and power
 * domains.  Proxy resources are released only when qcom_q6v5_unprepare()
 * returns non-zero — presumably when the remote never signalled handover;
 * confirm against qcom_q6v5.c.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
				      false, qproc->mpss_phys,
				      qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
905 
/*
 * q6v5_mpss_load() - load and authenticate the modem (mpss) firmware
 *
 * Requests the <name>.mdt header, has the MBA authenticate its metadata,
 * copies each loadable <name>.bNN segment blob into the mpss carveout,
 * transfers ownership of the region to the modem and waits for the MBA to
 * report full image authentication.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	/* Name must be long enough for the ".bNN" rewrite performed below */
	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	/* Mutable copy: the extension is rewritten in place per segment */
	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the image bounds and whether it is relocatable */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			/* NOTE(review): assumes a three-character extension
			 * (e.g. "mdt"); confirm for all firmware names. */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero the bss-like tail of the segment */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	/* Tell the MBA where the image was physically loaded and how big it is */
	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	/* q6v5_rmb_mba_wait() returns a positive status on success */
	return ret < 0 ? ret : 0;
}
1033 
/*
 * qcom_q6v5_dump_segment() - coredump callback copying one mpss segment
 *
 * The MBA must be running for the carveout to be readable, so it is loaded
 * before the first segment is copied and reclaimed once every segment in
 * dump_complete_mask has been handled.  Segments that cannot be read are
 * filled with 0xff.  segment->priv carries the segment index used as the
 * bit position in the masks.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded)
		ret = q6v5_mba_load(qproc);

	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded)
			q6v5_mba_reclaim(qproc);
	}
}
1060 
/*
 * q6v5_start() - rproc .start handler
 *
 * Boots the MBA, loads and authenticates the mpss firmware, waits for the
 * remote to signal start, then reclaims the MBA carveout from the modem.
 * On failure after the mpss load, both carveouts are reclaimed and the
 * subsystem is powered back down.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* The modem is up; take the MBA region back for HLOS */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);
	q6v5_mba_reclaim(qproc);

	return ret;
}
1105 
1106 static int q6v5_stop(struct rproc *rproc)
1107 {
1108 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1109 	int ret;
1110 
1111 	qproc->running = false;
1112 
1113 	ret = qcom_q6v5_request_stop(&qproc->q6v5);
1114 	if (ret == -ETIMEDOUT)
1115 		dev_err(qproc->dev, "timed out on wait\n");
1116 
1117 	q6v5_mba_reclaim(qproc);
1118 
1119 	return 0;
1120 }
1121 
1122 static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
1123 {
1124 	struct q6v5 *qproc = rproc->priv;
1125 	int offset;
1126 
1127 	offset = da - qproc->mpss_reloc;
1128 	if (offset < 0 || offset + len > qproc->mpss_size)
1129 		return NULL;
1130 
1131 	return qproc->mpss_region + offset;
1132 }
1133 
1134 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1135 					    const struct firmware *mba_fw)
1136 {
1137 	const struct firmware *fw;
1138 	const struct elf32_phdr *phdrs;
1139 	const struct elf32_phdr *phdr;
1140 	const struct elf32_hdr *ehdr;
1141 	struct q6v5 *qproc = rproc->priv;
1142 	unsigned long i;
1143 	int ret;
1144 
1145 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1146 	if (ret < 0) {
1147 		dev_err(qproc->dev, "unable to load %s\n",
1148 			qproc->hexagon_mdt_image);
1149 		return ret;
1150 	}
1151 
1152 	ehdr = (struct elf32_hdr *)fw->data;
1153 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1154 	qproc->dump_complete_mask = 0;
1155 
1156 	for (i = 0; i < ehdr->e_phnum; i++) {
1157 		phdr = &phdrs[i];
1158 
1159 		if (!q6v5_phdr_valid(phdr))
1160 			continue;
1161 
1162 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1163 							phdr->p_memsz,
1164 							qcom_q6v5_dump_segment,
1165 							(void *)i);
1166 		if (ret)
1167 			break;
1168 
1169 		qproc->dump_complete_mask |= BIT(i);
1170 	}
1171 
1172 	release_firmware(fw);
1173 	return ret;
1174 }
1175 
/* remoteproc core callbacks for the modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};
1183 
/*
 * MSA handover callback: once the modem takes ownership of its resources the
 * proxy votes (clocks, regulators, power domains) held on its behalf during
 * boot can be dropped.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
1194 
/*
 * Map the QDSP6 and RMB register blocks and resolve the qcom,halt-regs
 * syscon phandle, whose three args are the q6/modem/nc halt register
 * offsets inside that regmap.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	/* devm_ioremap_resource() handles a NULL res by returning ERR_PTR */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	/* Register offsets within the halt syscon, in phandle-arg order */
	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}
1229 
1230 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1231 		char **clk_names)
1232 {
1233 	int i;
1234 
1235 	if (!clk_names)
1236 		return 0;
1237 
1238 	for (i = 0; clk_names[i]; i++) {
1239 		clks[i] = devm_clk_get(dev, clk_names[i]);
1240 		if (IS_ERR(clks[i])) {
1241 			int rc = PTR_ERR(clks[i]);
1242 
1243 			if (rc != -EPROBE_DEFER)
1244 				dev_err(dev, "Failed to get %s clock\n",
1245 					clk_names[i]);
1246 			return rc;
1247 		}
1248 	}
1249 
1250 	return i;
1251 }
1252 
1253 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1254 			   char **pd_names)
1255 {
1256 	size_t num_pds = 0;
1257 	int ret;
1258 	int i;
1259 
1260 	if (!pd_names)
1261 		return 0;
1262 
1263 	while (pd_names[num_pds])
1264 		num_pds++;
1265 
1266 	for (i = 0; i < num_pds; i++) {
1267 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1268 		if (IS_ERR(devs[i])) {
1269 			ret = PTR_ERR(devs[i]);
1270 			goto unroll_attach;
1271 		}
1272 	}
1273 
1274 	return num_pds;
1275 
1276 unroll_attach:
1277 	for (i--; i >= 0; i--)
1278 		dev_pm_domain_detach(devs[i], false);
1279 
1280 	return ret;
1281 };
1282 
1283 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1284 			    size_t pd_count)
1285 {
1286 	int i;
1287 
1288 	for (i = 0; i < pd_count; i++)
1289 		dev_pm_domain_detach(pds[i], false);
1290 }
1291 
1292 static int q6v5_init_reset(struct q6v5 *qproc)
1293 {
1294 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1295 							      "mss_restart");
1296 	if (IS_ERR(qproc->mss_restart)) {
1297 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1298 		return PTR_ERR(qproc->mss_restart);
1299 	}
1300 
1301 	if (qproc->has_alt_reset) {
1302 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1303 								    "pdc_reset");
1304 		if (IS_ERR(qproc->pdc_reset)) {
1305 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1306 			return PTR_ERR(qproc->pdc_reset);
1307 		}
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1314 {
1315 	struct device_node *child;
1316 	struct device_node *node;
1317 	struct resource r;
1318 	int ret;
1319 
1320 	child = of_get_child_by_name(qproc->dev->of_node, "mba");
1321 	node = of_parse_phandle(child, "memory-region", 0);
1322 	ret = of_address_to_resource(node, 0, &r);
1323 	if (ret) {
1324 		dev_err(qproc->dev, "unable to resolve mba region\n");
1325 		return ret;
1326 	}
1327 	of_node_put(node);
1328 
1329 	qproc->mba_phys = r.start;
1330 	qproc->mba_size = resource_size(&r);
1331 	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1332 	if (!qproc->mba_region) {
1333 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1334 			&r.start, qproc->mba_size);
1335 		return -EBUSY;
1336 	}
1337 
1338 	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1339 	node = of_parse_phandle(child, "memory-region", 0);
1340 	ret = of_address_to_resource(node, 0, &r);
1341 	if (ret) {
1342 		dev_err(qproc->dev, "unable to resolve mpss region\n");
1343 		return ret;
1344 	}
1345 	of_node_put(node);
1346 
1347 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
1348 	qproc->mpss_size = resource_size(&r);
1349 	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1350 	if (!qproc->mpss_region) {
1351 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1352 			&r.start, qproc->mpss_size);
1353 		return -EBUSY;
1354 	}
1355 
1356 	return 0;
1357 }
1358 
1359 static int q6v5_probe(struct platform_device *pdev)
1360 {
1361 	const struct rproc_hexagon_res *desc;
1362 	struct q6v5 *qproc;
1363 	struct rproc *rproc;
1364 	const char *mba_image;
1365 	int ret;
1366 
1367 	desc = of_device_get_match_data(&pdev->dev);
1368 	if (!desc)
1369 		return -EINVAL;
1370 
1371 	if (desc->need_mem_protection && !qcom_scm_is_available())
1372 		return -EPROBE_DEFER;
1373 
1374 	mba_image = desc->hexagon_mba_image;
1375 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1376 					    0, &mba_image);
1377 	if (ret < 0 && ret != -EINVAL)
1378 		return ret;
1379 
1380 	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1381 			    mba_image, sizeof(*qproc));
1382 	if (!rproc) {
1383 		dev_err(&pdev->dev, "failed to allocate rproc\n");
1384 		return -ENOMEM;
1385 	}
1386 
1387 	rproc->auto_boot = false;
1388 
1389 	qproc = (struct q6v5 *)rproc->priv;
1390 	qproc->dev = &pdev->dev;
1391 	qproc->rproc = rproc;
1392 	qproc->hexagon_mdt_image = "modem.mdt";
1393 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1394 					    1, &qproc->hexagon_mdt_image);
1395 	if (ret < 0 && ret != -EINVAL)
1396 		return ret;
1397 
1398 	platform_set_drvdata(pdev, qproc);
1399 
1400 	ret = q6v5_init_mem(qproc, pdev);
1401 	if (ret)
1402 		goto free_rproc;
1403 
1404 	ret = q6v5_alloc_memory_region(qproc);
1405 	if (ret)
1406 		goto free_rproc;
1407 
1408 	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1409 			       desc->proxy_clk_names);
1410 	if (ret < 0) {
1411 		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1412 		goto free_rproc;
1413 	}
1414 	qproc->proxy_clk_count = ret;
1415 
1416 	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1417 			       desc->reset_clk_names);
1418 	if (ret < 0) {
1419 		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1420 		goto free_rproc;
1421 	}
1422 	qproc->reset_clk_count = ret;
1423 
1424 	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1425 			       desc->active_clk_names);
1426 	if (ret < 0) {
1427 		dev_err(&pdev->dev, "Failed to get active clocks.\n");
1428 		goto free_rproc;
1429 	}
1430 	qproc->active_clk_count = ret;
1431 
1432 	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1433 				  desc->proxy_supply);
1434 	if (ret < 0) {
1435 		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1436 		goto free_rproc;
1437 	}
1438 	qproc->proxy_reg_count = ret;
1439 
1440 	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
1441 				  desc->active_supply);
1442 	if (ret < 0) {
1443 		dev_err(&pdev->dev, "Failed to get active regulators.\n");
1444 		goto free_rproc;
1445 	}
1446 	qproc->active_reg_count = ret;
1447 
1448 	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1449 			      desc->active_pd_names);
1450 	if (ret < 0) {
1451 		dev_err(&pdev->dev, "Failed to attach active power domains\n");
1452 		goto free_rproc;
1453 	}
1454 	qproc->active_pd_count = ret;
1455 
1456 	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1457 			      desc->proxy_pd_names);
1458 	if (ret < 0) {
1459 		dev_err(&pdev->dev, "Failed to init power domains\n");
1460 		goto detach_active_pds;
1461 	}
1462 	qproc->proxy_pd_count = ret;
1463 
1464 	qproc->has_alt_reset = desc->has_alt_reset;
1465 	ret = q6v5_init_reset(qproc);
1466 	if (ret)
1467 		goto detach_proxy_pds;
1468 
1469 	qproc->version = desc->version;
1470 	qproc->need_mem_protection = desc->need_mem_protection;
1471 
1472 	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1473 			     qcom_msa_handover);
1474 	if (ret)
1475 		goto detach_proxy_pds;
1476 
1477 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1478 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
1479 	qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
1480 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
1481 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
1482 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
1483 	if (IS_ERR(qproc->sysmon)) {
1484 		ret = PTR_ERR(qproc->sysmon);
1485 		goto detach_proxy_pds;
1486 	}
1487 
1488 	ret = rproc_add(rproc);
1489 	if (ret)
1490 		goto detach_proxy_pds;
1491 
1492 	return 0;
1493 
1494 detach_proxy_pds:
1495 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1496 detach_active_pds:
1497 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1498 free_rproc:
1499 	rproc_free(rproc);
1500 
1501 	return ret;
1502 }
1503 
1504 static int q6v5_remove(struct platform_device *pdev)
1505 {
1506 	struct q6v5 *qproc = platform_get_drvdata(pdev);
1507 
1508 	rproc_del(qproc->rproc);
1509 
1510 	qcom_remove_sysmon_subdev(qproc->sysmon);
1511 	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
1512 	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
1513 	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
1514 
1515 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1516 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1517 
1518 	rproc_free(qproc->rproc);
1519 
1520 	return 0;
1521 }
1522 
/* Per-SoC resource description for SDM845 (alt-reset, SCM mem protection) */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"prng",
			NULL
	},
	.reset_clk_names = (char*[]){
			"iface",
			"snoc_axi",
			NULL
	},
	.active_clk_names = (char*[]){
			"bus",
			"mem",
			"gpll0_mss",
			"mnoc_axi",
			NULL
	},
	.active_pd_names = (char*[]){
			"load_state",
			NULL
	},
	.proxy_pd_names = (char*[]){
			"cx",
			"mx",
			"mss",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.version = MSS_SDM845,
};
1556 
/* Per-SoC resource description for MSM8996 (SCM mem protection, no alt-reset) */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
			"xo",
			"pnoc",
			"qdss",
			NULL
	},
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"mem",
			"gpll0_mss",
			"snoc_axi",
			"mnoc_axi",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.version = MSS_MSM8996,
};
1585 
/* Per-SoC resource description for MSM8916 (no SCM mem protection) */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8916,
};
1617 
/* Per-SoC resource description for MSM8974 (split .b00 MBA, active supply) */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8974,
};
1657 
/* DT match table; "qcom,q6v5-pil" is the legacy compatible for MSM8916 */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
1667 
/* Platform driver glue; module init/exit generated by module_platform_driver */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");