xref: /linux/drivers/remoteproc/qcom_q6v5_mss.c (revision 3503d56cc7233ced602e38a4c13caa64f00ab2aa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
26 #include <linux/reset.h>
27 #include <linux/soc/qcom/mdt_loader.h>
28 #include <linux/iopoll.h>
29 
30 #include "remoteproc_internal.h"
31 #include "qcom_common.h"
32 #include "qcom_q6v5.h"
33 
34 #include <linux/qcom_scm.h>
35 
36 #define MPSS_CRASH_REASON_SMEM		421
37 
38 /* RMB Status Register Values */
39 #define RMB_PBL_SUCCESS			0x1
40 
41 #define RMB_MBA_XPU_UNLOCKED		0x1
42 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
43 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
44 #define RMB_MBA_AUTH_COMPLETE		0x4
45 
46 /* PBL/MBA interface registers */
47 #define RMB_MBA_IMAGE_REG		0x00
48 #define RMB_PBL_STATUS_REG		0x04
49 #define RMB_MBA_COMMAND_REG		0x08
50 #define RMB_MBA_STATUS_REG		0x0C
51 #define RMB_PMI_META_DATA_REG		0x10
52 #define RMB_PMI_CODE_START_REG		0x14
53 #define RMB_PMI_CODE_LENGTH_REG		0x18
54 #define RMB_MBA_MSS_STATUS		0x40
55 #define RMB_MBA_ALT_RESET		0x44
56 
57 #define RMB_CMD_META_DATA_READY		0x1
58 #define RMB_CMD_LOAD_READY		0x2
59 
60 /* QDSP6SS Register Offsets */
61 #define QDSP6SS_RESET_REG		0x014
62 #define QDSP6SS_GFMUX_CTL_REG		0x020
63 #define QDSP6SS_PWR_CTL_REG		0x030
64 #define QDSP6SS_MEM_PWR_CTL		0x0B0
65 #define QDSP6V6SS_MEM_PWR_CTL		0x034
66 #define QDSP6SS_STRAP_ACC		0x110
67 
68 /* AXI Halt Register Offsets */
69 #define AXI_HALTREQ_REG			0x0
70 #define AXI_HALTACK_REG			0x4
71 #define AXI_IDLE_REG			0x8
72 #define AXI_GATING_VALID_OVERRIDE	BIT(0)
73 
74 #define HALT_ACK_TIMEOUT_US		100000
75 
76 /* QDSP6SS_RESET */
77 #define Q6SS_STOP_CORE			BIT(0)
78 #define Q6SS_CORE_ARES			BIT(1)
79 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
80 
81 /* QDSP6SS CBCR */
82 #define Q6SS_CBCR_CLKEN			BIT(0)
83 #define Q6SS_CBCR_CLKOFF		BIT(31)
84 #define Q6SS_CBCR_TIMEOUT_US		200
85 
86 /* QDSP6SS_GFMUX_CTL */
87 #define Q6SS_CLK_ENABLE			BIT(1)
88 
89 /* QDSP6SS_PWR_CTL */
90 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
91 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
92 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
93 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
94 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
95 #define Q6SS_L2DATA_STBY_N		BIT(18)
96 #define Q6SS_SLP_RET_N			BIT(19)
97 #define Q6SS_CLAMP_IO			BIT(20)
98 #define QDSS_BHS_ON			BIT(21)
99 #define QDSS_LDO_BYP			BIT(22)
100 
101 /* QDSP6v56 parameters */
102 #define QDSP6v56_LDO_BYP		BIT(25)
103 #define QDSP6v56_BHS_ON		BIT(24)
104 #define QDSP6v56_CLAMP_WL		BIT(21)
105 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
106 #define QDSP6SS_XO_CBCR		0x0038
107 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
108 
109 /* QDSP6v65 parameters */
110 #define QDSP6SS_CORE_CBCR		0x20
111 #define QDSP6SS_SLEEP                   0x3C
112 #define QDSP6SS_BOOT_CORE_START         0x400
113 #define QDSP6SS_BOOT_CMD                0x404
114 #define QDSP6SS_BOOT_STATUS		0x408
115 #define BOOT_STATUS_TIMEOUT_US		200
116 #define BOOT_FSM_TIMEOUT                10000
117 
118 struct reg_info {
119 	struct regulator *reg;
120 	int uV;
121 	int uA;
122 };
123 
124 struct qcom_mss_reg_res {
125 	const char *supply;
126 	int uV;
127 	int uA;
128 };
129 
130 struct rproc_hexagon_res {
131 	const char *hexagon_mba_image;
132 	struct qcom_mss_reg_res *proxy_supply;
133 	struct qcom_mss_reg_res *active_supply;
134 	char **proxy_clk_names;
135 	char **reset_clk_names;
136 	char **active_clk_names;
137 	char **active_pd_names;
138 	char **proxy_pd_names;
139 	int version;
140 	bool need_mem_protection;
141 	bool has_alt_reset;
142 	bool has_spare_reg;
143 };
144 
145 struct q6v5 {
146 	struct device *dev;
147 	struct rproc *rproc;
148 
149 	void __iomem *reg_base;
150 	void __iomem *rmb_base;
151 
152 	struct regmap *halt_map;
153 	struct regmap *conn_map;
154 
155 	u32 halt_q6;
156 	u32 halt_modem;
157 	u32 halt_nc;
158 	u32 conn_box;
159 
160 	struct reset_control *mss_restart;
161 	struct reset_control *pdc_reset;
162 
163 	struct qcom_q6v5 q6v5;
164 
165 	struct clk *active_clks[8];
166 	struct clk *reset_clks[4];
167 	struct clk *proxy_clks[4];
168 	struct device *active_pds[1];
169 	struct device *proxy_pds[3];
170 	int active_clk_count;
171 	int reset_clk_count;
172 	int proxy_clk_count;
173 	int active_pd_count;
174 	int proxy_pd_count;
175 
176 	struct reg_info active_regs[1];
177 	struct reg_info proxy_regs[3];
178 	int active_reg_count;
179 	int proxy_reg_count;
180 
181 	bool running;
182 
183 	bool dump_mba_loaded;
184 	unsigned long dump_segment_mask;
185 	unsigned long dump_complete_mask;
186 
187 	phys_addr_t mba_phys;
188 	void *mba_region;
189 	size_t mba_size;
190 
191 	phys_addr_t mpss_phys;
192 	phys_addr_t mpss_reloc;
193 	size_t mpss_size;
194 
195 	struct qcom_rproc_glink glink_subdev;
196 	struct qcom_rproc_subdev smd_subdev;
197 	struct qcom_rproc_ssr ssr_subdev;
198 	struct qcom_rproc_ipa_notify ipa_notify_subdev;
199 	struct qcom_sysmon *sysmon;
200 	bool need_mem_protection;
201 	bool has_alt_reset;
202 	bool has_spare_reg;
203 	int mpss_perm;
204 	int mba_perm;
205 	const char *hexagon_mdt_image;
206 	int version;
207 };
208 
209 enum {
210 	MSS_MSM8916,
211 	MSS_MSM8974,
212 	MSS_MSM8996,
213 	MSS_MSM8998,
214 	MSS_SC7180,
215 	MSS_SDM845,
216 };
217 
218 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
219 			       const struct qcom_mss_reg_res *reg_res)
220 {
221 	int rc;
222 	int i;
223 
224 	if (!reg_res)
225 		return 0;
226 
227 	for (i = 0; reg_res[i].supply; i++) {
228 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
229 		if (IS_ERR(regs[i].reg)) {
230 			rc = PTR_ERR(regs[i].reg);
231 			if (rc != -EPROBE_DEFER)
232 				dev_err(dev, "Failed to get %s regulator\n",
233 					reg_res[i].supply);
234 			return rc;
235 		}
236 
237 		regs[i].uV = reg_res[i].uV;
238 		regs[i].uA = reg_res[i].uA;
239 	}
240 
241 	return i;
242 }
243 
244 static int q6v5_regulator_enable(struct q6v5 *qproc,
245 				 struct reg_info *regs, int count)
246 {
247 	int ret;
248 	int i;
249 
250 	for (i = 0; i < count; i++) {
251 		if (regs[i].uV > 0) {
252 			ret = regulator_set_voltage(regs[i].reg,
253 					regs[i].uV, INT_MAX);
254 			if (ret) {
255 				dev_err(qproc->dev,
256 					"Failed to request voltage for %d.\n",
257 						i);
258 				goto err;
259 			}
260 		}
261 
262 		if (regs[i].uA > 0) {
263 			ret = regulator_set_load(regs[i].reg,
264 						 regs[i].uA);
265 			if (ret < 0) {
266 				dev_err(qproc->dev,
267 					"Failed to set regulator mode\n");
268 				goto err;
269 			}
270 		}
271 
272 		ret = regulator_enable(regs[i].reg);
273 		if (ret) {
274 			dev_err(qproc->dev, "Regulator enable failed\n");
275 			goto err;
276 		}
277 	}
278 
279 	return 0;
280 err:
281 	for (; i >= 0; i--) {
282 		if (regs[i].uV > 0)
283 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
284 
285 		if (regs[i].uA > 0)
286 			regulator_set_load(regs[i].reg, 0);
287 
288 		regulator_disable(regs[i].reg);
289 	}
290 
291 	return ret;
292 }
293 
294 static void q6v5_regulator_disable(struct q6v5 *qproc,
295 				   struct reg_info *regs, int count)
296 {
297 	int i;
298 
299 	for (i = 0; i < count; i++) {
300 		if (regs[i].uV > 0)
301 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
302 
303 		if (regs[i].uA > 0)
304 			regulator_set_load(regs[i].reg, 0);
305 
306 		regulator_disable(regs[i].reg);
307 	}
308 }
309 
310 static int q6v5_clk_enable(struct device *dev,
311 			   struct clk **clks, int count)
312 {
313 	int rc;
314 	int i;
315 
316 	for (i = 0; i < count; i++) {
317 		rc = clk_prepare_enable(clks[i]);
318 		if (rc) {
319 			dev_err(dev, "Clock enable failed\n");
320 			goto err;
321 		}
322 	}
323 
324 	return 0;
325 err:
326 	for (i--; i >= 0; i--)
327 		clk_disable_unprepare(clks[i]);
328 
329 	return rc;
330 }
331 
332 static void q6v5_clk_disable(struct device *dev,
333 			     struct clk **clks, int count)
334 {
335 	int i;
336 
337 	for (i = 0; i < count; i++)
338 		clk_disable_unprepare(clks[i]);
339 }
340 
341 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
342 			   size_t pd_count)
343 {
344 	int ret;
345 	int i;
346 
347 	for (i = 0; i < pd_count; i++) {
348 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
349 		ret = pm_runtime_get_sync(pds[i]);
350 		if (ret < 0)
351 			goto unroll_pd_votes;
352 	}
353 
354 	return 0;
355 
356 unroll_pd_votes:
357 	for (i--; i >= 0; i--) {
358 		dev_pm_genpd_set_performance_state(pds[i], 0);
359 		pm_runtime_put(pds[i]);
360 	}
361 
362 	return ret;
363 }
364 
365 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
366 			     size_t pd_count)
367 {
368 	int i;
369 
370 	for (i = 0; i < pd_count; i++) {
371 		dev_pm_genpd_set_performance_state(pds[i], 0);
372 		pm_runtime_put(pds[i]);
373 	}
374 }
375 
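/*
 * Reassign ownership of a physical memory range between Linux (HLOS) and the
 * modem (MSS MSA) using the SCM assign-memory call. This is a no-op on
 * targets without memory protection; *current_perm tracks the VMs currently
 * holding the region so redundant assignments are skipped.
 */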
376 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
377 				   bool local, bool remote, phys_addr_t addr,
378 				   size_t size)
379 {
380 	struct qcom_scm_vmperm next[2];
381 	int perms = 0;
382 
383 	if (!qproc->need_mem_protection)
384 		return 0;
385 
386 	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
387 	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
388 		return 0;
389 
390 	if (local) {
391 		next[perms].vmid = QCOM_SCM_VMID_HLOS;
392 		next[perms].perm = QCOM_SCM_PERM_RWX;
393 		perms++;
394 	}
395 
396 	if (remote) {
397 		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
398 		next[perms].perm = QCOM_SCM_PERM_RW;
399 		perms++;
400 	}
401 
402 	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
403 				   current_perm, next, perms);
404 }
405 
406 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
407 {
408 	struct q6v5 *qproc = rproc->priv;
409 
410 	memcpy(qproc->mba_region, fw->data, fw->size);
411 
412 	return 0;
413 }
414 
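/*
 * Put the modem subsystem into reset. Depending on the platform this is a
 * plain assert of mss_restart, a pulse of mss_restart while the PDC reset is
 * held (has_alt_reset), or a sequence that toggles AXI_GATING_VALID_OVERRIDE
 * around the MSS reset to avoid AXI glitches (has_spare_reg).
 */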
415 static int q6v5_reset_assert(struct q6v5 *qproc)
416 {
417 	int ret;
418 
419 	if (qproc->has_alt_reset) {
420 		reset_control_assert(qproc->pdc_reset);
421 		ret = reset_control_reset(qproc->mss_restart);
422 		reset_control_deassert(qproc->pdc_reset);
423 	} else if (qproc->has_spare_reg) {
424 		/*
425 		 * When the AXI pipeline is being reset with the Q6 modem partly
426 		 * operational, the AXI valid signal may glitch, leading to
427 		 * spurious transactions and Q6 hangs. Work around this by
428 		 * asserting the AXI_GATING_VALID_OVERRIDE bit before triggering
429 		 * the Q6 MSS reset. AXI_GATING_VALID_OVERRIDE is withdrawn post
430 		 * MSS assert, followed by an MSS deassert, while holding the
431 		 * PDC reset.
432 		 */
433 		reset_control_assert(qproc->pdc_reset);
434 		regmap_update_bits(qproc->conn_map, qproc->conn_box,
435 				   AXI_GATING_VALID_OVERRIDE, 1);
436 		reset_control_assert(qproc->mss_restart);
437 		reset_control_deassert(qproc->pdc_reset);
438 		regmap_update_bits(qproc->conn_map, qproc->conn_box,
439 				   AXI_GATING_VALID_OVERRIDE, 0);
440 		ret = reset_control_deassert(qproc->mss_restart);
441 	} else {
442 		ret = reset_control_assert(qproc->mss_restart);
443 	}
444 
445 	return ret;
446 }
447 
448 static int q6v5_reset_deassert(struct q6v5 *qproc)
449 {
450 	int ret;
451 
452 	if (qproc->has_alt_reset) {
453 		reset_control_assert(qproc->pdc_reset);
454 		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
455 		ret = reset_control_reset(qproc->mss_restart);
456 		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
457 		reset_control_deassert(qproc->pdc_reset);
458 	} else if (qproc->has_spare_reg) {
459 		ret = reset_control_reset(qproc->mss_restart);
460 	} else {
461 		ret = reset_control_deassert(qproc->mss_restart);
462 	}
463 
464 	return ret;
465 }
466 
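/*
 * Poll the RMB PBL status register until the primary boot loader reports a
 * non-zero status or the timeout (in milliseconds) expires. Returns the
 * status value read, or -ETIMEDOUT.
 */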
467 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
468 {
469 	unsigned long timeout;
470 	s32 val;
471 
472 	timeout = jiffies + msecs_to_jiffies(ms);
473 	for (;;) {
474 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
475 		if (val)
476 			break;
477 
478 		if (time_after(jiffies, timeout))
479 			return -ETIMEDOUT;
480 
481 		msleep(1);
482 	}
483 
484 	return val;
485 }
486 
487 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
488 {
490 	unsigned long timeout;
491 	s32 val;
492 
493 	timeout = jiffies + msecs_to_jiffies(ms);
494 	for (;;) {
495 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
496 		if (val < 0)
497 			break;
498 
499 		if (!status && val)
500 			break;
501 		else if (status && val == status)
502 			break;
503 
504 		if (time_after(jiffies, timeout))
505 			return -ETIMEDOUT;
506 
507 		msleep(1);
508 	}
509 
510 	return val;
511 }
512 
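/*
 * Bring the Hexagon core out of reset. SDM845 and SC7180 delegate the
 * power-up sequence to an internal boot FSM, while MSM8996/MSM8998 and older
 * parts require the driver to enable the head switch, memories and clocks by
 * hand before releasing the core. All paths then wait for the PBL to report
 * its status over the RMB.
 */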
513 static int q6v5proc_reset(struct q6v5 *qproc)
514 {
515 	u32 val;
516 	int ret;
517 	int i;
518 
519 	if (qproc->version == MSS_SDM845) {
520 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
521 		val |= Q6SS_CBCR_CLKEN;
522 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
523 
524 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
525 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
526 					 Q6SS_CBCR_TIMEOUT_US);
527 		if (ret) {
528 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
529 			return -ETIMEDOUT;
530 		}
531 
532 		/* De-assert QDSP6 stop core */
533 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
534 		/* Trigger boot FSM */
535 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
536 
537 		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
538 				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
539 		if (ret) {
540 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
541 			/* Reset the modem so that boot FSM is in reset state */
542 			q6v5_reset_deassert(qproc);
543 			return ret;
544 		}
545 
546 		goto pbl_wait;
547 	} else if (qproc->version == MSS_SC7180) {
548 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
549 		val |= Q6SS_CBCR_CLKEN;
550 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
551 
552 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
553 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
554 					 Q6SS_CBCR_TIMEOUT_US);
555 		if (ret) {
556 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
557 			return -ETIMEDOUT;
558 		}
559 
560 		/* Turn on the XO clock needed for PLL setup */
561 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
562 		val |= Q6SS_CBCR_CLKEN;
563 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
564 
565 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
566 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
567 					 Q6SS_CBCR_TIMEOUT_US);
568 		if (ret) {
569 			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
570 			return -ETIMEDOUT;
571 		}
572 
573 		/* Configure Q6 core CBCR to auto-enable after reset sequence */
574 		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
575 		val |= Q6SS_CBCR_CLKEN;
576 		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
577 
578 		/* De-assert the Q6 stop core signal */
579 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
580 
581 		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
582 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
583 
584 		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
585 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
586 					 val, (val & BIT(0)) != 0, 1,
587 					 BOOT_STATUS_TIMEOUT_US);
588 		if (ret) {
589 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
590 			/* Reset the modem so that boot FSM is in reset state */
591 			q6v5_reset_deassert(qproc);
592 			return ret;
593 		}
594 		goto pbl_wait;
595 	} else if (qproc->version == MSS_MSM8996 ||
596 		   qproc->version == MSS_MSM8998) {
597 		int mem_pwr_ctl;
598 
599 		/* Override the ACC value if required */
600 		writel(QDSP6SS_ACC_OVERRIDE_VAL,
601 		       qproc->reg_base + QDSP6SS_STRAP_ACC);
602 
603 		/* Assert resets, stop core */
604 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
605 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
606 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
607 
608 		/* BHS require xo cbcr to be enabled */
609 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
610 		val |= Q6SS_CBCR_CLKEN;
611 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
612 
613 		/* Read CLKOFF bit to go low indicating CLK is enabled */
614 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
615 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
616 					 Q6SS_CBCR_TIMEOUT_US);
617 		if (ret) {
618 			dev_err(qproc->dev,
619 				"xo cbcr enabling timed out (rc:%d)\n", ret);
620 			return ret;
621 		}
622 		/* Enable power block headswitch and wait for it to stabilize */
623 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
624 		val |= QDSP6v56_BHS_ON;
625 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
626 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
627 		udelay(1);
628 
629 		/* Put LDO in bypass mode */
630 		val |= QDSP6v56_LDO_BYP;
631 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
632 
633 		/* Deassert QDSP6 compiler memory clamp */
634 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
635 		val &= ~QDSP6v56_CLAMP_QMC_MEM;
636 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
637 
638 		/* Deassert memory peripheral sleep and L2 memory standby */
639 		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
640 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
641 
642 		/* Turn on L1, L2, ETB and JU memories 1 at a time */
643 		if (qproc->version == MSS_MSM8996) {
644 			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
645 			i = 19;
646 		} else {
647 			/* MSS_MSM8998 */
648 			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
649 			i = 28;
650 		}
651 		val = readl(qproc->reg_base + mem_pwr_ctl);
652 		for (; i >= 0; i--) {
653 			val |= BIT(i);
654 			writel(val, qproc->reg_base + mem_pwr_ctl);
655 			/*
656 			 * Read back value to ensure the write is done then
657 			 * wait for 1us for both memory peripheral and data
658 			 * array to turn on.
659 			 */
660 			val |= readl(qproc->reg_base + mem_pwr_ctl);
661 			udelay(1);
662 		}
663 		/* Remove word line clamp */
664 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
665 		val &= ~QDSP6v56_CLAMP_WL;
666 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
667 	} else {
668 		/* Assert resets, stop core */
669 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
670 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
671 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
672 
673 		/* Enable power block headswitch and wait for it to stabilize */
674 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
675 		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
676 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
677 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
678 		udelay(1);
679 		/*
680 		 * Turn on memories. L2 banks should be done individually
681 		 * to minimize inrush current.
682 		 */
683 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
684 		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
685 			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
686 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
687 		val |= Q6SS_L2DATA_SLP_NRET_N_2;
688 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
689 		val |= Q6SS_L2DATA_SLP_NRET_N_1;
690 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
691 		val |= Q6SS_L2DATA_SLP_NRET_N_0;
692 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
693 	}
694 	/* Remove IO clamp */
695 	val &= ~Q6SS_CLAMP_IO;
696 	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
697 
698 	/* Bring core out of reset */
699 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
700 	val &= ~Q6SS_CORE_ARES;
701 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
702 
703 	/* Turn on core clock */
704 	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
705 	val |= Q6SS_CLK_ENABLE;
706 	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
707 
708 	/* Start core execution */
709 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
710 	val &= ~Q6SS_STOP_CORE;
711 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
712 
713 pbl_wait:
714 	/* Wait for PBL status */
715 	ret = q6v5_rmb_pbl_wait(qproc, 1000);
716 	if (ret == -ETIMEDOUT) {
717 		dev_err(qproc->dev, "PBL boot timed out\n");
718 	} else if (ret != RMB_PBL_SUCCESS) {
719 		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
720 		ret = -EINVAL;
721 	} else {
722 		ret = 0;
723 	}
724 
725 	return ret;
726 }
727 
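/*
 * Halt the given AXI port: request a halt through the syscon halt register
 * block, wait for the acknowledgment and verify that the port reports idle.
 * The port remains halted until the subsystem is reset.
 */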
728 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
729 				   struct regmap *halt_map,
730 				   u32 offset)
731 {
732 	unsigned int val;
733 	int ret;
734 
735 	/* Check if we're already idle */
736 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
737 	if (!ret && val)
738 		return;
739 
740 	/* Assert halt request */
741 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
742 
743 	/* Wait for halt */
744 	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
745 				 val, 1000, HALT_ACK_TIMEOUT_US);
746 
747 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
748 	if (ret || !val)
749 		dev_err(qproc->dev, "port failed halt\n");
750 
751 	/* Clear halt request (port will remain halted until reset) */
752 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
753 }
754 
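/*
 * Hand the MPSS firmware metadata (the headers read from the mdt file) to the
 * MBA for authentication: copy it into a DMA buffer, grant the modem access,
 * signal RMB_CMD_META_DATA_READY and wait for RMB_MBA_META_DATA_AUTH_SUCCESS,
 * then reclaim and free the buffer.
 */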
755 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
756 {
757 	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
758 	dma_addr_t phys;
759 	void *metadata;
760 	int mdata_perm;
761 	int xferop_ret;
762 	size_t size;
763 	void *ptr;
764 	int ret;
765 
766 	metadata = qcom_mdt_read_metadata(fw, &size);
767 	if (IS_ERR(metadata))
768 		return PTR_ERR(metadata);
769 
770 	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
771 	if (!ptr) {
772 		kfree(metadata);
773 		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
774 		return -ENOMEM;
775 	}
776 
777 	memcpy(ptr, metadata, size);
778 
779 	/* Hypervisor mapping to access metadata by modem */
780 	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
781 	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
782 				      phys, size);
783 	if (ret) {
784 		dev_err(qproc->dev,
785 			"assigning Q6 access to metadata failed: %d\n", ret);
786 		ret = -EAGAIN;
787 		goto free_dma_attrs;
788 	}
789 
790 	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
791 	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
792 
793 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
794 	if (ret == -ETIMEDOUT)
795 		dev_err(qproc->dev, "MPSS header authentication timed out\n");
796 	else if (ret < 0)
797 		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
798 
799 	/* Metadata authentication done, remove modem access */
800 	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
801 					     phys, size);
802 	if (xferop_ret)
803 		dev_warn(qproc->dev,
804 			 "mdt buffer not reclaimed, system may become unstable\n");
805 
806 free_dma_attrs:
807 	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
808 	kfree(metadata);
809 
810 	return ret < 0 ? ret : 0;
811 }
812 
813 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
814 {
815 	if (phdr->p_type != PT_LOAD)
816 		return false;
817 
818 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
819 		return false;
820 
821 	if (!phdr->p_memsz)
822 		return false;
823 
824 	return true;
825 }
826 
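/*
 * Power up the modem subsystem and boot the MBA: enable power domains,
 * regulators, clocks and resets, give the modem access to the MBA image in
 * DDR and wait for the MBA to report that it is up. On failure the sequence
 * is unwound in reverse order.
 */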
827 static int q6v5_mba_load(struct q6v5 *qproc)
828 {
829 	int ret;
830 	int xfermemop_ret;
831 
832 	qcom_q6v5_prepare(&qproc->q6v5);
833 
834 	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
835 	if (ret < 0) {
836 		dev_err(qproc->dev, "failed to enable active power domains\n");
837 		goto disable_irqs;
838 	}
839 
840 	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
841 	if (ret < 0) {
842 		dev_err(qproc->dev, "failed to enable proxy power domains\n");
843 		goto disable_active_pds;
844 	}
845 
846 	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
847 				    qproc->proxy_reg_count);
848 	if (ret) {
849 		dev_err(qproc->dev, "failed to enable proxy supplies\n");
850 		goto disable_proxy_pds;
851 	}
852 
853 	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
854 			      qproc->proxy_clk_count);
855 	if (ret) {
856 		dev_err(qproc->dev, "failed to enable proxy clocks\n");
857 		goto disable_proxy_reg;
858 	}
859 
860 	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
861 				    qproc->active_reg_count);
862 	if (ret) {
863 		dev_err(qproc->dev, "failed to enable supplies\n");
864 		goto disable_proxy_clk;
865 	}
866 
867 	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
868 			      qproc->reset_clk_count);
869 	if (ret) {
870 		dev_err(qproc->dev, "failed to enable reset clocks\n");
871 		goto disable_vdd;
872 	}
873 
874 	ret = q6v5_reset_deassert(qproc);
875 	if (ret) {
876 		dev_err(qproc->dev, "failed to deassert mss restart\n");
877 		goto disable_reset_clks;
878 	}
879 
880 	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
881 			      qproc->active_clk_count);
882 	if (ret) {
883 		dev_err(qproc->dev, "failed to enable clocks\n");
884 		goto assert_reset;
885 	}
886 
887 	/* Assign MBA image access in DDR to q6 */
888 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
889 				      qproc->mba_phys, qproc->mba_size);
890 	if (ret) {
891 		dev_err(qproc->dev,
892 			"assigning Q6 access to mba memory failed: %d\n", ret);
893 		goto disable_active_clks;
894 	}
895 
896 	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
897 
898 	ret = q6v5proc_reset(qproc);
899 	if (ret)
900 		goto reclaim_mba;
901 
902 	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
903 	if (ret == -ETIMEDOUT) {
904 		dev_err(qproc->dev, "MBA boot timed out\n");
905 		goto halt_axi_ports;
906 	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
907 		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
908 		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
909 		ret = -EINVAL;
910 		goto halt_axi_ports;
911 	}
912 
913 	qproc->dump_mba_loaded = true;
914 	return 0;
915 
916 halt_axi_ports:
917 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
918 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
919 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
920 
921 reclaim_mba:
922 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
923 						false, qproc->mba_phys,
924 						qproc->mba_size);
925 	if (xfermemop_ret) {
926 		dev_err(qproc->dev,
927 			"Failed to reclaim mba buffer, system may become unstable\n");
928 	}
929 
930 disable_active_clks:
931 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
932 			 qproc->active_clk_count);
933 assert_reset:
934 	q6v5_reset_assert(qproc);
935 disable_reset_clks:
936 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
937 			 qproc->reset_clk_count);
938 disable_vdd:
939 	q6v5_regulator_disable(qproc, qproc->active_regs,
940 			       qproc->active_reg_count);
941 disable_proxy_clk:
942 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
943 			 qproc->proxy_clk_count);
944 disable_proxy_reg:
945 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
946 			       qproc->proxy_reg_count);
947 disable_proxy_pds:
948 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
949 disable_active_pds:
950 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
951 disable_irqs:
952 	qcom_q6v5_unprepare(&qproc->q6v5);
953 
954 	return ret;
955 }
956 
957 static void q6v5_mba_reclaim(struct q6v5 *qproc)
958 {
959 	int ret;
960 	u32 val;
961 
962 	qproc->dump_mba_loaded = false;
963 
964 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
965 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
966 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
967 	if (qproc->version == MSS_MSM8996) {
968 		/*
969 		 * Assert clamps to avoid high MX current during LPASS/MSS restart.
970 		 */
971 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
972 		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
973 			QDSP6v56_CLAMP_QMC_MEM;
974 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
975 	}
976 
977 	q6v5_reset_assert(qproc);
978 
979 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
980 			 qproc->reset_clk_count);
981 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
982 			 qproc->active_clk_count);
983 	q6v5_regulator_disable(qproc, qproc->active_regs,
984 			       qproc->active_reg_count);
985 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
986 
987 	/* In case of a failure or coredump scenario where reclaiming MBA
988 	 * memory could not happen earlier, reclaim it here.
989 	 */
990 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
991 				      qproc->mba_phys,
992 				      qproc->mba_size);
993 	WARN_ON(ret);
994 
995 	ret = qcom_q6v5_unprepare(&qproc->q6v5);
996 	if (ret) {
997 		q6v5_pds_disable(qproc, qproc->proxy_pds,
998 				 qproc->proxy_pd_count);
999 		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1000 				 qproc->proxy_clk_count);
1001 		q6v5_regulator_disable(qproc, qproc->proxy_regs,
1002 				       qproc->proxy_reg_count);
1003 	}
1004 }
1005 
1006 static int q6v5_reload_mba(struct rproc *rproc)
1007 {
1008 	struct q6v5 *qproc = rproc->priv;
1009 	const struct firmware *fw;
1010 	int ret;
1011 
1012 	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1013 	if (ret < 0)
1014 		return ret;
1015 
1016 	q6v5_load(rproc, fw);
1017 	ret = q6v5_mba_load(qproc);
1018 	release_firmware(fw);
1019 
1020 	return ret;
1021 }
1022 
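/*
 * Load and authenticate the MPSS firmware: authenticate the metadata, copy
 * each valid ELF segment into the modem DDR region (requesting split "bNN"
 * files when the image is split), update the code length over the RMB as
 * segments are loaded, then hand the region to the modem and wait for
 * RMB_MBA_AUTH_COMPLETE.
 */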
1023 static int q6v5_mpss_load(struct q6v5 *qproc)
1024 {
1025 	const struct elf32_phdr *phdrs;
1026 	const struct elf32_phdr *phdr;
1027 	const struct firmware *seg_fw;
1028 	const struct firmware *fw;
1029 	struct elf32_hdr *ehdr;
1030 	phys_addr_t mpss_reloc;
1031 	phys_addr_t boot_addr;
1032 	phys_addr_t min_addr = PHYS_ADDR_MAX;
1033 	phys_addr_t max_addr = 0;
1034 	u32 code_length;
1035 	bool relocate = false;
1036 	char *fw_name;
1037 	size_t fw_name_len;
1038 	ssize_t offset;
1039 	size_t size = 0;
1040 	void *ptr;
1041 	int ret;
1042 	int i;
1043 
1044 	fw_name_len = strlen(qproc->hexagon_mdt_image);
1045 	if (fw_name_len <= 4)
1046 		return -EINVAL;
1047 
1048 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1049 	if (!fw_name)
1050 		return -ENOMEM;
1051 
1052 	ret = request_firmware(&fw, fw_name, qproc->dev);
1053 	if (ret < 0) {
1054 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
1055 		goto out;
1056 	}
1057 
1058 	/* Initialize the RMB validator */
1059 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1060 
1061 	ret = q6v5_mpss_init_image(qproc, fw);
1062 	if (ret)
1063 		goto release_firmware;
1064 
1065 	ehdr = (struct elf32_hdr *)fw->data;
1066 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1067 
1068 	for (i = 0; i < ehdr->e_phnum; i++) {
1069 		phdr = &phdrs[i];
1070 
1071 		if (!q6v5_phdr_valid(phdr))
1072 			continue;
1073 
1074 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1075 			relocate = true;
1076 
1077 		if (phdr->p_paddr < min_addr)
1078 			min_addr = phdr->p_paddr;
1079 
1080 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
1081 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1082 	}
1083 
1084 	/*
1085 	 * In case of a modem subsystem restart on secure devices, the modem
1086 	 * memory can be reclaimed only after the MBA is loaded. For a modem
1087 	 * cold boot this is a no-op.
1088 	 */
1089 	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
1090 				qproc->mpss_phys, qproc->mpss_size);
1091 
1092 	/* Share ownership between Linux and MSS, during segment loading */
1093 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1094 				      qproc->mpss_phys, qproc->mpss_size);
1095 	if (ret) {
1096 		dev_err(qproc->dev,
1097 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1098 		ret = -EAGAIN;
1099 		goto release_firmware;
1100 	}
1101 
1102 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
1103 	qproc->mpss_reloc = mpss_reloc;
1104 	/* Load firmware segments */
1105 	for (i = 0; i < ehdr->e_phnum; i++) {
1106 		phdr = &phdrs[i];
1107 
1108 		if (!q6v5_phdr_valid(phdr))
1109 			continue;
1110 
1111 		offset = phdr->p_paddr - mpss_reloc;
1112 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1113 			dev_err(qproc->dev, "segment outside memory range\n");
1114 			ret = -EINVAL;
1115 			goto release_firmware;
1116 		}
1117 
1118 		ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
1119 		if (!ptr) {
1120 			dev_err(qproc->dev,
1121 				"unable to map memory region: %pa+%zx-%x\n",
1122 				&qproc->mpss_phys, offset, phdr->p_memsz);
			ret = -EBUSY;
1123 			goto release_firmware;
1124 		}
1125 
1126 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
1127 			/* Firmware is large enough to be non-split */
1128 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
1129 				dev_err(qproc->dev,
1130 					"failed to load segment %d from truncated file %s\n",
1131 					i, fw_name);
1132 				ret = -EINVAL;
1133 				iounmap(ptr);
1134 				goto release_firmware;
1135 			}
1136 
1137 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1138 		} else if (phdr->p_filesz) {
1139 			/* Replace "xxx.xxx" with "xxx.bxx" */
1140 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1141 			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
1142 			if (ret) {
1143 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1144 				iounmap(ptr);
1145 				goto release_firmware;
1146 			}
1147 
1148 			memcpy(ptr, seg_fw->data, seg_fw->size);
1149 
1150 			release_firmware(seg_fw);
1151 		}
1152 
1153 		if (phdr->p_memsz > phdr->p_filesz) {
1154 			memset(ptr + phdr->p_filesz, 0,
1155 			       phdr->p_memsz - phdr->p_filesz);
1156 		}
1157 		iounmap(ptr);
1158 		size += phdr->p_memsz;
1159 
1160 		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1161 		if (!code_length) {
1162 			boot_addr = relocate ? qproc->mpss_phys : min_addr;
1163 			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1164 			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1165 		}
1166 		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1167 
1168 		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1169 		if (ret < 0) {
1170 			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1171 				ret);
1172 			goto release_firmware;
1173 		}
1174 	}
1175 
1176 	/* Transfer ownership of modem ddr region to q6 */
1177 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1178 				      qproc->mpss_phys, qproc->mpss_size);
1179 	if (ret) {
1180 		dev_err(qproc->dev,
1181 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1182 		ret = -EAGAIN;
1183 		goto release_firmware;
1184 	}
1185 
1186 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1187 	if (ret == -ETIMEDOUT)
1188 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1189 	else if (ret < 0)
1190 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1191 
1192 release_firmware:
1193 	release_firmware(fw);
1194 out:
1195 	kfree(fw_name);
1196 
1197 	return ret < 0 ? ret : 0;
1198 }
1199 
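/*
 * Coredump callback for a single MPSS segment. If the MBA is no longer
 * loaded it is rebooted first so that ownership of the modem memory can be
 * handed back to Linux; segments that cannot be mapped are filled with 0xff.
 * Once all registered segments have been copied, ownership is returned to
 * the modem and the MBA is shut down again.
 */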
1200 static void qcom_q6v5_dump_segment(struct rproc *rproc,
1201 				   struct rproc_dump_segment *segment,
1202 				   void *dest)
1203 {
1204 	int ret = 0;
1205 	struct q6v5 *qproc = rproc->priv;
1206 	unsigned long mask = BIT((unsigned long)segment->priv);
1207 	int offset = segment->da - qproc->mpss_reloc;
1208 	void *ptr = NULL;
1209 
1210 	/* Unlock mba before copying segments */
1211 	if (!qproc->dump_mba_loaded) {
1212 		ret = q6v5_reload_mba(rproc);
1213 		if (!ret) {
1214 			/* Reset ownership back to Linux to copy segments */
1215 			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1216 						      true, false,
1217 						      qproc->mpss_phys,
1218 						      qproc->mpss_size);
1219 		}
1220 	}
1221 
1222 	if (!ret)
1223 		ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);
1224 
1225 	if (ptr) {
1226 		memcpy(dest, ptr, segment->size);
1227 		iounmap(ptr);
1228 	} else {
1229 		memset(dest, 0xff, segment->size);
1230 	}
1231 
1232 	qproc->dump_segment_mask |= mask;
1233 
1234 	/* Reclaim mba after copying segments */
1235 	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
1236 		if (qproc->dump_mba_loaded) {
1237 			/* Try to reset ownership back to Q6 */
1238 			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1239 						false, true,
1240 						qproc->mpss_phys,
1241 						qproc->mpss_size);
1242 			q6v5_mba_reclaim(qproc);
1243 		}
1244 	}
1245 }
1246 
1247 static int q6v5_start(struct rproc *rproc)
1248 {
1249 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1250 	int xfermemop_ret;
1251 	int ret;
1252 
1253 	ret = q6v5_mba_load(qproc);
1254 	if (ret)
1255 		return ret;
1256 
1257 	dev_info(qproc->dev, "MBA booted, loading mpss\n");
1258 
1259 	ret = q6v5_mpss_load(qproc);
1260 	if (ret)
1261 		goto reclaim_mpss;
1262 
1263 	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1264 	if (ret == -ETIMEDOUT) {
1265 		dev_err(qproc->dev, "start timed out\n");
1266 		goto reclaim_mpss;
1267 	}
1268 
1269 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
1270 						false, qproc->mba_phys,
1271 						qproc->mba_size);
1272 	if (xfermemop_ret)
1273 		dev_err(qproc->dev,
1274 			"Failed to reclaim mba buffer, system may become unstable\n");
1275 
1276 	/* Reset Dump Segment Mask */
1277 	qproc->dump_segment_mask = 0;
1278 	qproc->running = true;
1279 
1280 	return 0;
1281 
1282 reclaim_mpss:
1283 	q6v5_mba_reclaim(qproc);
1284 
1285 	return ret;
1286 }
1287 
1288 static int q6v5_stop(struct rproc *rproc)
1289 {
1290 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1291 	int ret;
1292 
1293 	qproc->running = false;
1294 
1295 	ret = qcom_q6v5_request_stop(&qproc->q6v5);
1296 	if (ret == -ETIMEDOUT)
1297 		dev_err(qproc->dev, "timed out waiting for stop acknowledgment\n");
1298 
1299 	q6v5_mba_reclaim(qproc);
1300 
1301 	return 0;
1302 }
1303 
1304 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1305 					    const struct firmware *mba_fw)
1306 {
1307 	const struct firmware *fw;
1308 	const struct elf32_phdr *phdrs;
1309 	const struct elf32_phdr *phdr;
1310 	const struct elf32_hdr *ehdr;
1311 	struct q6v5 *qproc = rproc->priv;
1312 	unsigned long i;
1313 	int ret;
1314 
1315 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1316 	if (ret < 0) {
1317 		dev_err(qproc->dev, "unable to load %s\n",
1318 			qproc->hexagon_mdt_image);
1319 		return ret;
1320 	}
1321 
1322 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1323 
1324 	ehdr = (struct elf32_hdr *)fw->data;
1325 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1326 	qproc->dump_complete_mask = 0;
1327 
1328 	for (i = 0; i < ehdr->e_phnum; i++) {
1329 		phdr = &phdrs[i];
1330 
1331 		if (!q6v5_phdr_valid(phdr))
1332 			continue;
1333 
1334 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1335 							phdr->p_memsz,
1336 							qcom_q6v5_dump_segment,
1337 							(void *)i);
1338 		if (ret)
1339 			break;
1340 
1341 		qproc->dump_complete_mask |= BIT(i);
1342 	}
1343 
1344 	release_firmware(fw);
1345 	return ret;
1346 }
1347 
1348 static const struct rproc_ops q6v5_ops = {
1349 	.start = q6v5_start,
1350 	.stop = q6v5_stop,
1351 	.parse_fw = qcom_q6v5_register_dump_segments,
1352 	.load = q6v5_load,
1353 };
1354 
1355 static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
1356 {
1357 	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
1358 
1359 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1360 			 qproc->proxy_clk_count);
1361 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
1362 			       qproc->proxy_reg_count);
1363 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1364 }
1365 
1366 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1367 {
1368 	struct of_phandle_args args;
1369 	struct resource *res;
1370 	int ret;
1371 
1372 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1373 	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
1374 	if (IS_ERR(qproc->reg_base))
1375 		return PTR_ERR(qproc->reg_base);
1376 
1377 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1378 	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
1379 	if (IS_ERR(qproc->rmb_base))
1380 		return PTR_ERR(qproc->rmb_base);
1381 
1382 	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1383 					       "qcom,halt-regs", 3, 0, &args);
1384 	if (ret < 0) {
1385 		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1386 		return -EINVAL;
1387 	}
1388 
1389 	qproc->halt_map = syscon_node_to_regmap(args.np);
1390 	of_node_put(args.np);
1391 	if (IS_ERR(qproc->halt_map))
1392 		return PTR_ERR(qproc->halt_map);
1393 
1394 	qproc->halt_q6 = args.args[0];
1395 	qproc->halt_modem = args.args[1];
1396 	qproc->halt_nc = args.args[2];
1397 
1398 	if (qproc->has_spare_reg) {
1399 		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1400 						       "qcom,spare-regs",
1401 						       1, 0, &args);
1402 		if (ret < 0) {
1403 			dev_err(&pdev->dev, "failed to parse spare-regs\n");
1404 			return -EINVAL;
1405 		}
1406 
1407 		qproc->conn_map = syscon_node_to_regmap(args.np);
1408 		of_node_put(args.np);
1409 		if (IS_ERR(qproc->conn_map))
1410 			return PTR_ERR(qproc->conn_map);
1411 
1412 		qproc->conn_box = args.args[0];
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1419 		char **clk_names)
1420 {
1421 	int i;
1422 
1423 	if (!clk_names)
1424 		return 0;
1425 
1426 	for (i = 0; clk_names[i]; i++) {
1427 		clks[i] = devm_clk_get(dev, clk_names[i]);
1428 		if (IS_ERR(clks[i])) {
1429 			int rc = PTR_ERR(clks[i]);
1430 
1431 			if (rc != -EPROBE_DEFER)
1432 				dev_err(dev, "Failed to get %s clock\n",
1433 					clk_names[i]);
1434 			return rc;
1435 		}
1436 	}
1437 
1438 	return i;
1439 }
1440 
1441 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1442 			   char **pd_names)
1443 {
1444 	size_t num_pds = 0;
1445 	int ret;
1446 	int i;
1447 
1448 	if (!pd_names)
1449 		return 0;
1450 
1451 	while (pd_names[num_pds])
1452 		num_pds++;
1453 
1454 	for (i = 0; i < num_pds; i++) {
1455 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1456 		if (IS_ERR_OR_NULL(devs[i])) {
1457 			ret = PTR_ERR(devs[i]) ? : -ENODATA;
1458 			goto unroll_attach;
1459 		}
1460 	}
1461 
1462 	return num_pds;
1463 
1464 unroll_attach:
1465 	for (i--; i >= 0; i--)
1466 		dev_pm_domain_detach(devs[i], false);
1467 
1468 	return ret;
1469 }
1470 
1471 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1472 			    size_t pd_count)
1473 {
1474 	int i;
1475 
1476 	for (i = 0; i < pd_count; i++)
1477 		dev_pm_domain_detach(pds[i], false);
1478 }
1479 
1480 static int q6v5_init_reset(struct q6v5 *qproc)
1481 {
1482 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1483 							      "mss_restart");
1484 	if (IS_ERR(qproc->mss_restart)) {
1485 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1486 		return PTR_ERR(qproc->mss_restart);
1487 	}
1488 
1489 	if (qproc->has_alt_reset || qproc->has_spare_reg) {
1490 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1491 								    "pdc_reset");
1492 		if (IS_ERR(qproc->pdc_reset)) {
1493 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1494 			return PTR_ERR(qproc->pdc_reset);
1495 		}
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1502 {
1503 	struct device_node *child;
1504 	struct device_node *node;
1505 	struct resource r;
1506 	int ret;
1507 
1508 	/*
1509 	 * In the absence of mba/mpss sub-child, extract the mba and mpss
1510 	 * reserved memory regions from device's memory-region property.
1511 	 */
1512 	child = of_get_child_by_name(qproc->dev->of_node, "mba");
1513 	if (!child)
1514 		node = of_parse_phandle(qproc->dev->of_node,
1515 					"memory-region", 0);
1516 	else
1517 		node = of_parse_phandle(child, "memory-region", 0);
1518 
1519 	ret = of_address_to_resource(node, 0, &r);
1520 	if (ret) {
1521 		dev_err(qproc->dev, "unable to resolve mba region\n");
1522 		return ret;
1523 	}
1524 	of_node_put(node);
1525 
1526 	qproc->mba_phys = r.start;
1527 	qproc->mba_size = resource_size(&r);
1528 	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1529 	if (!qproc->mba_region) {
1530 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1531 			&r.start, qproc->mba_size);
1532 		return -EBUSY;
1533 	}
1534 
1535 	if (!child) {
1536 		node = of_parse_phandle(qproc->dev->of_node,
1537 					"memory-region", 1);
1538 	} else {
1539 		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1540 		node = of_parse_phandle(child, "memory-region", 0);
1541 	}
1542 
1543 	ret = of_address_to_resource(node, 0, &r);
1544 	if (ret) {
1545 		dev_err(qproc->dev, "unable to resolve mpss region\n");
1546 		return ret;
1547 	}
1548 	of_node_put(node);
1549 
1550 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
1551 	qproc->mpss_size = resource_size(&r);
1552 
1553 	return 0;
1554 }
1555 
1556 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
1557 
1558 /* Register IPA notification function */
1559 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
1560 			     void *data)
1561 {
1562 	struct qcom_rproc_ipa_notify *ipa_notify;
1563 	struct q6v5 *qproc = rproc->priv;
1564 
1565 	if (!notify)
1566 		return -EINVAL;
1567 
1568 	ipa_notify = &qproc->ipa_notify_subdev;
1569 	if (ipa_notify->notify)
1570 		return -EBUSY;
1571 
1572 	ipa_notify->notify = notify;
1573 	ipa_notify->data = data;
1574 
1575 	return 0;
1576 }
1577 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
1578 
1579 /* Deregister IPA notification function */
1580 void qcom_deregister_ipa_notify(struct rproc *rproc)
1581 {
1582 	struct q6v5 *qproc = rproc->priv;
1583 
1584 	qproc->ipa_notify_subdev.notify = NULL;
1585 }
1586 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
1587 #endif /* IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
1588 
1589 static int q6v5_probe(struct platform_device *pdev)
1590 {
1591 	const struct rproc_hexagon_res *desc;
1592 	struct q6v5 *qproc;
1593 	struct rproc *rproc;
1594 	const char *mba_image;
1595 	int ret;
1596 
1597 	desc = of_device_get_match_data(&pdev->dev);
1598 	if (!desc)
1599 		return -EINVAL;
1600 
1601 	if (desc->need_mem_protection && !qcom_scm_is_available())
1602 		return -EPROBE_DEFER;
1603 
1604 	mba_image = desc->hexagon_mba_image;
1605 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1606 					    0, &mba_image);
1607 	if (ret < 0 && ret != -EINVAL)
1608 		return ret;
1609 
1610 	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1611 			    mba_image, sizeof(*qproc));
1612 	if (!rproc) {
1613 		dev_err(&pdev->dev, "failed to allocate rproc\n");
1614 		return -ENOMEM;
1615 	}
1616 
1617 	rproc->auto_boot = false;
1618 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1619 
1620 	qproc = (struct q6v5 *)rproc->priv;
1621 	qproc->dev = &pdev->dev;
1622 	qproc->rproc = rproc;
1623 	qproc->hexagon_mdt_image = "modem.mdt";
1624 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1625 					    1, &qproc->hexagon_mdt_image);
1626 	if (ret < 0 && ret != -EINVAL)
1627 		goto free_rproc;
1628 
1629 	platform_set_drvdata(pdev, qproc);
1630 
1631 	qproc->has_spare_reg = desc->has_spare_reg;
1632 	ret = q6v5_init_mem(qproc, pdev);
1633 	if (ret)
1634 		goto free_rproc;
1635 
1636 	ret = q6v5_alloc_memory_region(qproc);
1637 	if (ret)
1638 		goto free_rproc;
1639 
1640 	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1641 			       desc->proxy_clk_names);
1642 	if (ret < 0) {
1643 		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1644 		goto free_rproc;
1645 	}
1646 	qproc->proxy_clk_count = ret;
1647 
1648 	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1649 			       desc->reset_clk_names);
1650 	if (ret < 0) {
1651 		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1652 		goto free_rproc;
1653 	}
1654 	qproc->reset_clk_count = ret;
1655 
1656 	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1657 			       desc->active_clk_names);
1658 	if (ret < 0) {
1659 		dev_err(&pdev->dev, "Failed to get active clocks.\n");
1660 		goto free_rproc;
1661 	}
1662 	qproc->active_clk_count = ret;
1663 
1664 	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1665 				  desc->proxy_supply);
1666 	if (ret < 0) {
1667 		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1668 		goto free_rproc;
1669 	}
1670 	qproc->proxy_reg_count = ret;
1671 
1672 	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1673 				  desc->active_supply);
1674 	if (ret < 0) {
1675 		dev_err(&pdev->dev, "Failed to get active regulators.\n");
1676 		goto free_rproc;
1677 	}
1678 	qproc->active_reg_count = ret;
1679 
1680 	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1681 			      desc->active_pd_names);
1682 	if (ret < 0) {
1683 		dev_err(&pdev->dev, "Failed to attach active power domains\n");
1684 		goto free_rproc;
1685 	}
1686 	qproc->active_pd_count = ret;
1687 
1688 	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1689 			      desc->proxy_pd_names);
1690 	if (ret < 0) {
1691 		dev_err(&pdev->dev, "Failed to init power domains\n");
1692 		goto detach_active_pds;
1693 	}
1694 	qproc->proxy_pd_count = ret;
1695 
1696 	qproc->has_alt_reset = desc->has_alt_reset;
1697 	ret = q6v5_init_reset(qproc);
1698 	if (ret)
1699 		goto detach_proxy_pds;
1700 
1701 	qproc->version = desc->version;
1702 	qproc->need_mem_protection = desc->need_mem_protection;
1703 
1704 	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1705 			     qcom_msa_handover);
1706 	if (ret)
1707 		goto detach_proxy_pds;
1708 
1709 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1710 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
1711 	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
1712 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
1713 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
1714 	qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
1715 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
1716 	if (IS_ERR(qproc->sysmon)) {
1717 		ret = PTR_ERR(qproc->sysmon);
1718 		goto remove_subdevs;
1719 	}
1720 
1721 	ret = rproc_add(rproc);
1722 	if (ret)
1723 		goto remove_sysmon_subdev;
1724 
1725 	return 0;
1726 
1727 remove_sysmon_subdev:
1728 	qcom_remove_sysmon_subdev(qproc->sysmon);
1729 remove_subdevs:
1730 	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
1731 	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
1732 	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
1733 	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
1734 detach_proxy_pds:
1735 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1736 detach_active_pds:
1737 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1738 free_rproc:
1739 	rproc_free(rproc);
1740 
1741 	return ret;
1742 }
1743 
1744 static int q6v5_remove(struct platform_device *pdev)
1745 {
1746 	struct q6v5 *qproc = platform_get_drvdata(pdev);
1747 	struct rproc *rproc = qproc->rproc;
1748 
1749 	rproc_del(rproc);
1750 
1751 	qcom_remove_sysmon_subdev(qproc->sysmon);
1752 	qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
1753 	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
1754 	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
1755 	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
1756 
1757 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1758 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1759 
1760 	rproc_free(rproc);
1761 
1762 	return 0;
1763 }
1764 
1765 static const struct rproc_hexagon_res sc7180_mss = {
1766 	.hexagon_mba_image = "mba.mbn",
1767 	.proxy_clk_names = (char*[]){
1768 		"xo",
1769 		NULL
1770 	},
1771 	.reset_clk_names = (char*[]){
1772 		"iface",
1773 		"bus",
1774 		"snoc_axi",
1775 		NULL
1776 	},
1777 	.active_clk_names = (char*[]){
1778 		"mnoc_axi",
1779 		"nav",
1780 		NULL
1781 	},
1782 	.active_pd_names = (char*[]){
1783 		"load_state",
1784 		NULL
1785 	},
1786 	.proxy_pd_names = (char*[]){
1787 		"cx",
1788 		"mx",
1789 		"mss",
1790 		NULL
1791 	},
1792 	.need_mem_protection = true,
1793 	.has_alt_reset = false,
1794 	.has_spare_reg = true,
1795 	.version = MSS_SC7180,
1796 };
1797 
1798 static const struct rproc_hexagon_res sdm845_mss = {
1799 	.hexagon_mba_image = "mba.mbn",
1800 	.proxy_clk_names = (char*[]){
1801 			"xo",
1802 			"prng",
1803 			NULL
1804 	},
1805 	.reset_clk_names = (char*[]){
1806 			"iface",
1807 			"snoc_axi",
1808 			NULL
1809 	},
1810 	.active_clk_names = (char*[]){
1811 			"bus",
1812 			"mem",
1813 			"gpll0_mss",
1814 			"mnoc_axi",
1815 			NULL
1816 	},
1817 	.active_pd_names = (char*[]){
1818 			"load_state",
1819 			NULL
1820 	},
1821 	.proxy_pd_names = (char*[]){
1822 			"cx",
1823 			"mx",
1824 			"mss",
1825 			NULL
1826 	},
1827 	.need_mem_protection = true,
1828 	.has_alt_reset = true,
1829 	.has_spare_reg = false,
1830 	.version = MSS_SDM845,
1831 };
1832 
1833 static const struct rproc_hexagon_res msm8998_mss = {
1834 	.hexagon_mba_image = "mba.mbn",
1835 	.proxy_clk_names = (char*[]){
1836 			"xo",
1837 			"qdss",
1838 			"mem",
1839 			NULL
1840 	},
1841 	.active_clk_names = (char*[]){
1842 			"iface",
1843 			"bus",
1844 			"gpll0_mss",
1845 			"mnoc_axi",
1846 			"snoc_axi",
1847 			NULL
1848 	},
1849 	.proxy_pd_names = (char*[]){
1850 			"cx",
1851 			"mx",
1852 			NULL
1853 	},
1854 	.need_mem_protection = true,
1855 	.has_alt_reset = false,
1856 	.has_spare_reg = false,
1857 	.version = MSS_MSM8998,
1858 };
1859 
1860 static const struct rproc_hexagon_res msm8996_mss = {
1861 	.hexagon_mba_image = "mba.mbn",
1862 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1863 		{
1864 			.supply = "pll",
1865 			.uA = 100000,
1866 		},
1867 		{}
1868 	},
1869 	.proxy_clk_names = (char*[]){
1870 			"xo",
1871 			"pnoc",
1872 			"qdss",
1873 			NULL
1874 	},
1875 	.active_clk_names = (char*[]){
1876 			"iface",
1877 			"bus",
1878 			"mem",
1879 			"gpll0_mss",
1880 			"snoc_axi",
1881 			"mnoc_axi",
1882 			NULL
1883 	},
1884 	.need_mem_protection = true,
1885 	.has_alt_reset = false,
1886 	.has_spare_reg = false,
1887 	.version = MSS_MSM8996,
1888 };
1889 
1890 static const struct rproc_hexagon_res msm8916_mss = {
1891 	.hexagon_mba_image = "mba.mbn",
1892 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1893 		{
1894 			.supply = "mx",
1895 			.uV = 1050000,
1896 		},
1897 		{
1898 			.supply = "cx",
1899 			.uA = 100000,
1900 		},
1901 		{
1902 			.supply = "pll",
1903 			.uA = 100000,
1904 		},
1905 		{}
1906 	},
1907 	.proxy_clk_names = (char*[]){
1908 		"xo",
1909 		NULL
1910 	},
1911 	.active_clk_names = (char*[]){
1912 		"iface",
1913 		"bus",
1914 		"mem",
1915 		NULL
1916 	},
1917 	.need_mem_protection = false,
1918 	.has_alt_reset = false,
1919 	.has_spare_reg = false,
1920 	.version = MSS_MSM8916,
1921 };
1922 
1923 static const struct rproc_hexagon_res msm8974_mss = {
1924 	.hexagon_mba_image = "mba.b00",
1925 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1926 		{
1927 			.supply = "mx",
1928 			.uV = 1050000,
1929 		},
1930 		{
1931 			.supply = "cx",
1932 			.uA = 100000,
1933 		},
1934 		{
1935 			.supply = "pll",
1936 			.uA = 100000,
1937 		},
1938 		{}
1939 	},
1940 	.active_supply = (struct qcom_mss_reg_res[]) {
1941 		{
1942 			.supply = "mss",
1943 			.uV = 1050000,
1944 			.uA = 100000,
1945 		},
1946 		{}
1947 	},
1948 	.proxy_clk_names = (char*[]){
1949 		"xo",
1950 		NULL
1951 	},
1952 	.active_clk_names = (char*[]){
1953 		"iface",
1954 		"bus",
1955 		"mem",
1956 		NULL
1957 	},
1958 	.need_mem_protection = false,
1959 	.has_alt_reset = false,
1960 	.has_spare_reg = false,
1961 	.version = MSS_MSM8974,
1962 };
1963 
1964 static const struct of_device_id q6v5_of_match[] = {
1965 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1966 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1967 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
1968 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
1969 	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
1970 	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
1971 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
1972 	{ },
1973 };
1974 MODULE_DEVICE_TABLE(of, q6v5_of_match);
1975 
1976 static struct platform_driver q6v5_driver = {
1977 	.probe = q6v5_probe,
1978 	.remove = q6v5_remove,
1979 	.driver = {
1980 		.name = "qcom-q6v5-mss",
1981 		.of_match_table = q6v5_of_match,
1982 	},
1983 };
1984 module_platform_driver(q6v5_driver);
1985 
1986 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
1987 MODULE_LICENSE("GPL v2");
1988