xref: /linux/drivers/remoteproc/qcom_q6v5_mss.c (revision e3966940559d52aa1800a008dcfeec218dd31f88)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/devcoredump.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/mfd/syscon.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_reserved_mem.h>
20 #include <linux/of_platform.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_domain.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/regmap.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/remoteproc.h>
27 #include <linux/reset.h>
28 #include <linux/soc/qcom/mdt_loader.h>
29 #include <linux/iopoll.h>
30 #include <linux/slab.h>
31 
32 #include "remoteproc_internal.h"
33 #include "qcom_common.h"
34 #include "qcom_pil_info.h"
35 #include "qcom_q6v5.h"
36 
37 #include <linux/firmware/qcom/qcom_scm.h>
38 
39 #define MPSS_CRASH_REASON_SMEM		421
40 
41 #define MBA_LOG_SIZE			SZ_4K
42 
43 #define MPSS_PAS_ID			5
44 
45 /* RMB Status Register Values */
46 #define RMB_PBL_SUCCESS			0x1
47 
48 #define RMB_MBA_XPU_UNLOCKED		0x1
49 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
50 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
51 #define RMB_MBA_AUTH_COMPLETE		0x4
52 
53 /* PBL/MBA interface registers */
54 #define RMB_MBA_IMAGE_REG		0x00
55 #define RMB_PBL_STATUS_REG		0x04
56 #define RMB_MBA_COMMAND_REG		0x08
57 #define RMB_MBA_STATUS_REG		0x0C
58 #define RMB_PMI_META_DATA_REG		0x10
59 #define RMB_PMI_CODE_START_REG		0x14
60 #define RMB_PMI_CODE_LENGTH_REG		0x18
61 #define RMB_MBA_MSS_STATUS		0x40
62 #define RMB_MBA_ALT_RESET		0x44
63 
64 #define RMB_CMD_META_DATA_READY		0x1
65 #define RMB_CMD_LOAD_READY		0x2
66 
67 /* QDSP6SS Register Offsets */
68 #define QDSP6SS_RESET_REG		0x014
69 #define QDSP6SS_GFMUX_CTL_REG		0x020
70 #define QDSP6SS_PWR_CTL_REG		0x030
71 #define QDSP6SS_MEM_PWR_CTL		0x0B0
72 #define QDSP6V6SS_MEM_PWR_CTL		0x034
73 #define QDSP6SS_STRAP_ACC		0x110
74 #define QDSP6V62SS_BHS_STATUS		0x0C4
75 
76 /* AXI Halt Register Offsets */
77 #define AXI_HALTREQ_REG			0x0
78 #define AXI_HALTACK_REG			0x4
79 #define AXI_IDLE_REG			0x8
80 #define AXI_GATING_VALID_OVERRIDE	BIT(0)
81 
82 #define HALT_ACK_TIMEOUT_US		100000
83 
84 /* QACCEPT Register Offsets */
85 #define QACCEPT_ACCEPT_REG		0x0
86 #define QACCEPT_ACTIVE_REG		0x4
87 #define QACCEPT_DENY_REG		0x8
88 #define QACCEPT_REQ_REG			0xC
89 
90 #define QACCEPT_TIMEOUT_US		50
91 
92 /* QDSP6SS_RESET */
93 #define Q6SS_STOP_CORE			BIT(0)
94 #define Q6SS_CORE_ARES			BIT(1)
95 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
96 
97 /* QDSP6SS CBCR */
98 #define Q6SS_CBCR_CLKEN			BIT(0)
99 #define Q6SS_CBCR_CLKOFF		BIT(31)
100 #define Q6SS_CBCR_TIMEOUT_US		200
101 
102 /* QDSP6SS_GFMUX_CTL */
103 #define Q6SS_CLK_ENABLE			BIT(1)
104 
105 /* QDSP6SS_PWR_CTL */
106 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
107 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
108 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
109 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
110 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
111 #define Q6SS_L2DATA_STBY_N		BIT(18)
112 #define Q6SS_SLP_RET_N			BIT(19)
113 #define Q6SS_CLAMP_IO			BIT(20)
114 #define QDSS_BHS_ON			BIT(21)
115 #define QDSS_LDO_BYP			BIT(22)
116 
117 /* QDSP6v55 parameters */
118 #define QDSP6V55_MEM_BITS		GENMASK(16, 8)
119 
120 /* QDSP6v56 parameters */
121 #define QDSP6v56_LDO_BYP		BIT(25)
122 #define QDSP6v56_BHS_ON		BIT(24)
123 #define QDSP6v56_CLAMP_WL		BIT(21)
124 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
125 #define QDSP6SS_XO_CBCR		0x0038
126 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
127 #define QDSP6v55_BHS_EN_REST_ACK	BIT(0)
128 
129 /* QDSP6v65 parameters */
130 #define QDSP6SS_CORE_CBCR		0x20
131 #define QDSP6SS_SLEEP                   0x3C
132 #define QDSP6SS_BOOT_CORE_START         0x400
133 #define QDSP6SS_BOOT_CMD                0x404
134 #define BOOT_FSM_TIMEOUT                10000
135 #define BHS_CHECK_MAX_LOOPS             200
136 
137 /* External power block headswitch */
138 #define EXTERNAL_BHS_ON			BIT(0)
139 #define EXTERNAL_BHS_STATUS		BIT(4)
140 #define EXTERNAL_BHS_TIMEOUT_US		50
141 
/* Runtime state for one regulator supply: handle plus the requested levels. */
struct reg_info {
	struct regulator *reg;
	int uV;		/* requested voltage; <= 0 means no voltage vote */
	int uA;		/* requested load; <= 0 means no load vote */
};
147 
/* Static description of one regulator supply required by a SoC variant. */
struct qcom_mss_reg_res {
	const char *supply;	/* supply name; a NULL entry terminates the list */
	int uV;
	int uA;
};
153 
/*
 * Per-SoC match data: MBA firmware name, required supplies, clocks and
 * power domains, plus feature flags that are mirrored into struct q6v5.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *fallback_proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **proxy_pd_names;
	int version;			/* one of the MSS_* enum values */
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	bool has_qaccept_regs;
	bool has_ext_bhs_reg;
	bool has_ext_cntl_regs;
	bool has_vq6;
};
173 
/* Driver instance state for one Hexagon modem subsystem (MSS) remoteproc. */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	/* QDSP6SS register space and the PBL/MBA (RMB) interface registers */
	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	struct regmap *conn_map;

	/* AXI halt register offsets within halt_map */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_vq6;
	/* register offsets within conn_map */
	u32 conn_box;
	u32 ext_bhs;	/* external power block headswitch */

	/* QACCEPT (Q-channel) register offsets */
	u32 qaccept_mdm;
	u32 qaccept_cx;
	u32 qaccept_axi;

	/* external control register offsets (used when has_ext_cntl_regs) */
	u32 axim1_clk_off;
	u32 crypto_clk_off;
	u32 force_clk_on;
	u32 rscc_disable;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[1];
	struct reg_info fallback_proxy_regs[2];
	int active_reg_count;
	int proxy_reg_count;
	int fallback_proxy_reg_count;

	bool dump_mba_loaded;
	size_t current_dump_size;
	size_t total_dump_size;

	/* MBA (modem boot authenticator) memory region */
	phys_addr_t mba_phys;
	size_t mba_size;
	size_t dp_size;		/* size of the appended "msadp" debug policy blob */

	/* optional dedicated carveout for staging MDT metadata */
	phys_addr_t mdata_phys;
	size_t mdata_size;

	/* MPSS (modem firmware) memory region */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_pdm pdm_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	struct platform_device *bam_dmux;
	/* feature flags mirrored from struct rproc_hexagon_res */
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	bool has_qaccept_regs;
	bool has_ext_bhs_reg;
	bool has_ext_cntl_regs;
	bool has_vq6;
	/* current SCM ownership bitmaps for the MPSS and MBA regions */
	u64 mpss_perm;
	u64 mba_perm;
	const char *hexagon_mdt_image;
	int version;		/* one of the MSS_* enum values */
};
255 
/* SoC generations handled by this driver; stored in q6v5::version. */
enum {
	MSS_MSM8226,
	MSS_MSM8909,
	MSS_MSM8916,
	MSS_MSM8926,
	MSS_MSM8953,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SC7280,
	MSS_SDM660,
	MSS_SDM845,
};
270 
271 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
272 			       const struct qcom_mss_reg_res *reg_res)
273 {
274 	int i;
275 
276 	if (!reg_res)
277 		return 0;
278 
279 	for (i = 0; reg_res[i].supply; i++) {
280 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
281 		if (IS_ERR(regs[i].reg))
282 			return dev_err_probe(dev, PTR_ERR(regs[i].reg),
283 					     "Failed to get %s\n regulator",
284 					     reg_res[i].supply);
285 
286 		regs[i].uV = reg_res[i].uV;
287 		regs[i].uA = reg_res[i].uA;
288 	}
289 
290 	return i;
291 }
292 
293 static int q6v5_regulator_enable(struct q6v5 *qproc,
294 				 struct reg_info *regs, int count)
295 {
296 	int ret;
297 	int i;
298 
299 	for (i = 0; i < count; i++) {
300 		if (regs[i].uV > 0) {
301 			ret = regulator_set_voltage(regs[i].reg,
302 					regs[i].uV, INT_MAX);
303 			if (ret) {
304 				dev_err(qproc->dev,
305 					"Failed to request voltage for %d.\n",
306 						i);
307 				goto err;
308 			}
309 		}
310 
311 		if (regs[i].uA > 0) {
312 			ret = regulator_set_load(regs[i].reg,
313 						 regs[i].uA);
314 			if (ret < 0) {
315 				dev_err(qproc->dev,
316 					"Failed to set regulator mode\n");
317 				goto err;
318 			}
319 		}
320 
321 		ret = regulator_enable(regs[i].reg);
322 		if (ret) {
323 			dev_err(qproc->dev, "Regulator enable failed\n");
324 			goto err;
325 		}
326 	}
327 
328 	return 0;
329 err:
330 	for (; i >= 0; i--) {
331 		if (regs[i].uV > 0)
332 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
333 
334 		if (regs[i].uA > 0)
335 			regulator_set_load(regs[i].reg, 0);
336 
337 		regulator_disable(regs[i].reg);
338 	}
339 
340 	return ret;
341 }
342 
343 static void q6v5_regulator_disable(struct q6v5 *qproc,
344 				   struct reg_info *regs, int count)
345 {
346 	int i;
347 
348 	for (i = 0; i < count; i++) {
349 		if (regs[i].uV > 0)
350 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
351 
352 		if (regs[i].uA > 0)
353 			regulator_set_load(regs[i].reg, 0);
354 
355 		regulator_disable(regs[i].reg);
356 	}
357 }
358 
/*
 * Prepare and enable @count clocks from @clks; on failure, disable the
 * clocks that were already enabled and return the error.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int err;
	int n;

	for (n = 0; n < count; n++) {
		err = clk_prepare_enable(clks[n]);
		if (err) {
			dev_err(dev, "Clock enable failed\n");
			/* Unwind the clocks enabled so far */
			while (--n >= 0)
				clk_disable_unprepare(clks[n]);
			return err;
		}
	}

	return 0;
}
380 
/* Disable and unprepare all @count clocks in @clks. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int n = 0;

	while (n < count)
		clk_disable_unprepare(clks[n++]);
}
389 
390 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
391 			   size_t pd_count)
392 {
393 	int ret;
394 	int i;
395 
396 	for (i = 0; i < pd_count; i++) {
397 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
398 		ret = pm_runtime_get_sync(pds[i]);
399 		if (ret < 0) {
400 			pm_runtime_put_noidle(pds[i]);
401 			dev_pm_genpd_set_performance_state(pds[i], 0);
402 			goto unroll_pd_votes;
403 		}
404 	}
405 
406 	return 0;
407 
408 unroll_pd_votes:
409 	for (i--; i >= 0; i--) {
410 		dev_pm_genpd_set_performance_state(pds[i], 0);
411 		pm_runtime_put(pds[i]);
412 	}
413 
414 	return ret;
415 }
416 
417 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
418 			     size_t pd_count)
419 {
420 	int i;
421 
422 	for (i = 0; i < pd_count; i++) {
423 		dev_pm_genpd_set_performance_state(pds[i], 0);
424 		pm_runtime_put(pds[i]);
425 	}
426 }
427 
428 static int q6v5_external_bhs_enable(struct q6v5 *qproc)
429 {
430 	u32 val;
431 	int ret = 0;
432 
433 	/*
434 	 * Enable external power block headswitch and wait for it to
435 	 * stabilize
436 	 */
437 	regmap_set_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON);
438 
439 	ret = regmap_read_poll_timeout(qproc->conn_map, qproc->ext_bhs,
440 				       val, val & EXTERNAL_BHS_STATUS,
441 				       1, EXTERNAL_BHS_TIMEOUT_US);
442 
443 	if (ret) {
444 		dev_err(qproc->dev, "External BHS timed out\n");
445 		ret = -ETIMEDOUT;
446 	}
447 
448 	return ret;
449 }
450 
/* Switch the external power block headswitch back off. */
static void q6v5_external_bhs_disable(struct q6v5 *qproc)
{
	regmap_clear_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON);
}
455 
/*
 * Reassign ownership of [@addr, @addr + @size) between the application
 * processor (HLOS) and the modem (MSS MSA) through an SCM call.
 *
 * @current_perm: in/out bitmap of current VMID owners, updated by the call
 * @local:  if true, grant HLOS read/write/execute access
 * @remote: if true, grant the modem read/write access
 *
 * No-op when the platform does not require memory protection or when the
 * requested ownership already matches @current_perm.
 *
 * Return: 0 on success or a negative errno.
 */
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm,
				   bool local, bool remote, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next[2];
	int perms = 0;

	if (!qproc->need_mem_protection)
		return 0;

	/* Nothing to do if ownership already matches the request */
	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
		return 0;

	if (local) {
		next[perms].vmid = QCOM_SCM_VMID_HLOS;
		next[perms].perm = QCOM_SCM_PERM_RWX;
		perms++;
	}

	if (remote) {
		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[perms].perm = QCOM_SCM_PERM_RW;
		perms++;
	}

	/* The size handed to SCM is rounded up to a 4K multiple */
	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, next, perms);
}
485 
486 static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
487 {
488 	const struct firmware *dp_fw;
489 
490 	if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
491 		return;
492 
493 	if (SZ_1M + dp_fw->size <= qproc->mba_size) {
494 		memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
495 		qproc->dp_size = dp_fw->size;
496 	}
497 
498 	release_firmware(dp_fw);
499 }
500 
501 #define MSM8974_B00_OFFSET 0x1000
502 
/*
 * rproc load handler: copy the MBA (modem boot authenticator) firmware
 * into its dedicated physically-contiguous memory region, then append the
 * optional debug policy blob.
 *
 * Return: 0 on success or a negative errno.
 */
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;
	void *mba_region;

	/* MBA is restricted to a maximum size of 1M */
	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
		dev_err(qproc->dev, "MBA firmware load failed\n");
		return -EINVAL;
	}

	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
	if (!mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&qproc->mba_phys, qproc->mba_size);
		return -EBUSY;
	}

	/*
	 * On MSM8974/8226/8926, an image starting with the ELF magic is
	 * copied from MSM8974_B00_OFFSET onward -- presumably skipping a
	 * leading b00 header segment (NOTE(review): confirm against the
	 * firmware packaging for these SoCs).
	 */
	if ((qproc->version == MSS_MSM8974 ||
	     qproc->version == MSS_MSM8226 ||
	     qproc->version == MSS_MSM8926) &&
	    fw->size > MSM8974_B00_OFFSET &&
	    !memcmp(fw->data, ELFMAG, SELFMAG))
		memcpy(mba_region, fw->data + MSM8974_B00_OFFSET, fw->size - MSM8974_B00_OFFSET);
	else
		memcpy(mba_region, fw->data, fw->size);
	q6v5_debug_policy_load(qproc, mba_region);
	memunmap(mba_region);

	return 0;
}
534 
/*
 * Put the modem subsystem into reset. The exact sequence depends on the
 * SoC generation: some need the MSS restart cycled while PDC reset is
 * held, some need an AXI-glitch workaround, the rest use a plain assert.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Cycle MSS restart while holding the PDC reset */
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
		 * is withdrawn post MSS assert followed by a MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else if (qproc->has_ext_cntl_regs) {
		/* NOTE(review): clears the RSCC disable register before
		 * cycling the resets -- confirm intended semantics.
		 */
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		reset_control_assert(qproc->pdc_reset);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}
573 
/*
 * Bring the modem subsystem out of reset, using the alternate-reset
 * handshake through the RMB register on SoCs that provide it.
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		/* Flag alternate reset to the MBA around the restart cycle */
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}
592 
593 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
594 {
595 	unsigned long timeout;
596 	s32 val;
597 
598 	timeout = jiffies + msecs_to_jiffies(ms);
599 	for (;;) {
600 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
601 		if (val)
602 			break;
603 
604 		if (time_after(jiffies, timeout))
605 			return -ETIMEDOUT;
606 
607 		msleep(1);
608 	}
609 
610 	return val;
611 }
612 
613 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
614 {
615 
616 	unsigned long timeout;
617 	s32 val;
618 
619 	timeout = jiffies + msecs_to_jiffies(ms);
620 	for (;;) {
621 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
622 		if (val < 0)
623 			break;
624 
625 		if (!status && val)
626 			break;
627 		else if (status && val == status)
628 			break;
629 
630 		if (time_after(jiffies, timeout))
631 			return -ETIMEDOUT;
632 
633 		msleep(1);
634 	}
635 
636 	return val;
637 }
638 
/*
 * Capture the first MBA_LOG_SIZE bytes of the MBA region into a
 * devcoredump for post-mortem debugging. Best-effort: any failure simply
 * aborts the dump silently.
 */
static void q6v5_dump_mba_logs(struct q6v5 *qproc)
{
	struct rproc *rproc = qproc->rproc;
	void *data;
	void *mba_region;

	if (!qproc->has_mba_logs)
		return;

	/* Reclaim HLOS-only access to the MBA region before reading it */
	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
				    qproc->mba_size))
		return;

	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
	if (!mba_region)
		return;

	data = vmalloc(MBA_LOG_SIZE);
	if (data) {
		memcpy(data, mba_region, MBA_LOG_SIZE);
		/* dev_coredumpv() takes ownership of @data and frees it */
		dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
	}
	memunmap(mba_region);
}
663 
/*
 * Power up the Hexagon core, release it from reset and start execution,
 * then wait for the PBL to report its boot status over the RMB interface.
 *
 * The power-up sequence depends on the QDSP6 generation:
 *  - SDM845 and SC7180/SC7280 use the hardware boot FSM
 *  - MSM8909/8953/8996/8998/SDM660 need a manual BHS, LDO-bypass and
 *    memory power-up sequence
 *  - all remaining parts use the legacy QDSS headswitch path
 *
 * Return: 0 on success or a negative errno.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		/* CLKOFF going low indicates the sleep clock is running */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8909 ||
		   qproc->version == MSS_MSM8953 ||
		   qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998 ||
		   qproc->version == MSS_SDM660) {

		if (qproc->version != MSS_MSM8909 &&
		    qproc->version != MSS_MSM8953)
			/* Override the ACC value if required */
			writel(QDSP6SS_ACC_OVERRIDE_VAL,
			       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write has posted before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		if (qproc->version == MSS_SDM660) {
			ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS,
							 i, (i & QDSP6v55_BHS_EN_REST_ACK),
							 1, BHS_CHECK_MAX_LOOPS);
			if (ret == -ETIMEDOUT) {
				dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n");
				return -ETIMEDOUT;
			}
		}

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		if (qproc->version != MSS_MSM8909) {
			int mem_pwr_ctl;

			/* Deassert QDSP6 compiler memory clamp */
			val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			val &= ~QDSP6v56_CLAMP_QMC_MEM;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Deassert memory peripheral sleep and L2 memory standby */
			val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Turn on L1, L2, ETB and JU memories 1 at a time */
			if (qproc->version == MSS_MSM8953 ||
			    qproc->version == MSS_MSM8996) {
				mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
				i = 19;
			} else {
				/* MSS_MSM8998, MSS_SDM660 */
				mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
				i = 28;
			}
			val = readl(qproc->reg_base + mem_pwr_ctl);
			for (; i >= 0; i--) {
				val |= BIT(i);
				writel(val, qproc->reg_base + mem_pwr_ctl);
				/*
				 * Read back value to ensure the write is done then
				 * wait for 1us for both memory peripheral and data
				 * array to turn on.
				 */
				val |= readl(qproc->reg_base + mem_pwr_ctl);
				udelay(1);
			}
		} else {
			/* Turn on memories */
			val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			val |= Q6SS_SLP_RET_N | Q6SS_L2DATA_STBY_N |
			       Q6SS_ETB_SLP_NRET_N | QDSP6V55_MEM_BITS;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Turn on L2 banks 1 at a time */
			for (i = 0; i <= 7; i++) {
				val |= BIT(i);
				writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			}
		}

		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write has posted before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
912 
/*
 * Bring up a Q-channel so transactions are accepted on the port at
 * @offset of @map. On parts with external control registers the axim1
 * clock is forced on first.
 *
 * Return: 0 on success or -ETIMEDOUT.
 */
static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	unsigned int val;
	int ret;

	if (!qproc->has_qaccept_regs)
		return 0;

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		regmap_write(qproc->conn_map, qproc->force_clk_on, 1);

		/* Wait for the clock-off indication to clear */
		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "failed to enable axim1 clock\n");
			return -ETIMEDOUT;
		}
	}

	/* Request the Q-channel to come up */
	regmap_write(map, offset + QACCEPT_REQ_REG, 1);

	/* Wait for accept */
	ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
				       QACCEPT_TIMEOUT_US);
	if (ret) {
		dev_err(qproc->dev, "qchannel enable failed\n");
		return -ETIMEDOUT;
	}

	return 0;
}
945 
/*
 * Tear down a Q-channel: wait for in-flight transactions to drain, request
 * takedown, and handle a denied request by re-asserting it and retrying.
 * A failed takedown is only logged -- the subsequent mss_restart is relied
 * on to clear out any pending transactions.
 */
static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	int ret;
	unsigned int val, retry;
	unsigned int nretry = 10;
	bool takedown_complete = false;

	if (!qproc->has_qaccept_regs)
		return;

	while (!takedown_complete && nretry) {
		nretry--;

		/* Wait for active transactions to complete */
		regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
					 QACCEPT_TIMEOUT_US);

		/* Request Q-channel transaction takedown */
		regmap_write(map, offset + QACCEPT_REQ_REG, 0);

		/*
		 * If the request is denied, reset the Q-channel takedown request,
		 * wait for active transactions to complete and retry takedown.
		 */
		retry = 10;
		while (retry) {
			usleep_range(5, 10);
			retry--;
			ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
			if (!ret && val) {
				/* Denied: re-assert the request and retry the takedown */
				regmap_write(map, offset + QACCEPT_REQ_REG, 1);
				break;
			}

			ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
			if (!ret && !val) {
				/* Accept dropped: takedown has completed */
				takedown_complete = true;
				break;
			}
		}

		/* Inner loop exhausted without deny or completion: give up */
		if (!retry)
			break;
	}

	/* Rely on mss_restart to clear out pending transactions on takedown failure */
	if (!takedown_complete)
		dev_err(qproc->dev, "qchannel takedown failed\n");
}
995 
996 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
997 				   struct regmap *halt_map,
998 				   u32 offset)
999 {
1000 	unsigned int val;
1001 	int ret;
1002 
1003 	/* Check if we're already idle */
1004 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
1005 	if (!ret && val)
1006 		return;
1007 
1008 	/* Assert halt request */
1009 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
1010 
1011 	/* Wait for halt */
1012 	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
1013 				 val, 1000, HALT_ACK_TIMEOUT_US);
1014 
1015 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
1016 	if (ret || !val)
1017 		dev_err(qproc->dev, "port failed halt\n");
1018 
1019 	/* Clear halt request (port will remain halted until reset) */
1020 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
1021 }
1022 
/*
 * Hand the MPSS firmware metadata (the .mdt ELF/hash headers) to the MBA
 * for signature authentication.
 *
 * The metadata is staged either in a dedicated carveout (mdata_phys) or a
 * temporary DMA allocation, made accessible to the modem via an SCM
 * ownership transfer, and reclaimed once authentication has finished
 * (successfully or not).
 *
 * Return: 0 on success or a negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
				const char *fw_name)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	u64 mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	if (qproc->mdata_phys) {
		if (size > qproc->mdata_size) {
			ret = -EINVAL;
			dev_err(qproc->dev, "metadata size outside memory range\n");
			goto free_metadata;
		}

		phys = qproc->mdata_phys;
		ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
		if (!ptr) {
			ret = -EBUSY;
			dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
				&qproc->mdata_phys, size);
			goto free_metadata;
		}
	} else {
		ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
		if (!ptr) {
			ret = -ENOMEM;
			dev_err(qproc->dev, "failed to allocate mdt buffer\n");
			goto free_metadata;
		}
	}

	memcpy(ptr, metadata, size);

	/* The carveout mapping is only needed for the copy above */
	if (qproc->mdata_phys)
		memunmap(ptr);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Point the MBA at the metadata and start authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	if (!qproc->mdata_phys)
		dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
free_metadata:
	kfree(metadata);

	return ret < 0 ? ret : 0;
}
1103 
1104 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
1105 {
1106 	if (phdr->p_type != PT_LOAD)
1107 		return false;
1108 
1109 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
1110 		return false;
1111 
1112 	if (!phdr->p_memsz)
1113 		return false;
1114 
1115 	return true;
1116 }
1117 
/*
 * q6v5_mba_load() - power up the Q6 and boot the Modem Boot Authenticator
 * @qproc: modem subsystem context
 *
 * Enables proxy and active power domains, regulators and clocks, deasserts
 * the modem resets, grants the Q6 access to the MBA (and MPSS) carveouts,
 * releases the processor from reset and waits for the MBA to report
 * readiness through the RMB status register.
 *
 * The teardown labels at the bottom undo each step in strict reverse order;
 * do not reorder them.
 *
 * Return: 0 on success, negative errno on failure (all resources enabled
 * here are rolled back on the error path).
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	/* Arm IRQs/state tracking in the common q6v5 helper first */
	ret = qcom_q6v5_prepare(&qproc->q6v5);
	if (ret)
		return ret;

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_irqs;
	}

	/* Old DTs expose proxy supplies as regulators instead of power domains */
	ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
				    qproc->fallback_proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_fallback_proxy_reg;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	/* Some SoCs gate the Q6 rails behind an external big head switch */
	if (qproc->has_ext_bhs_reg) {
		ret = q6v5_external_bhs_enable(qproc);
		if (ret < 0)
			goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_ext_bhs;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	if (ret) {
		dev_err(qproc->dev, "failed to enable axi bridge\n");
		goto disable_active_clks;
	}

	/*
	 * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
	 * the Q6 access to this region.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	if (qproc->has_mba_logs)
		qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);

	/* Tell PBL where the MBA image lives, then (optionally) the debug policy */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	/* Release the Q6 core from reset so it executes PBL -> MBA */
	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/*
	 * Poll the MBA status register; any positive status other than the
	 * XPU-unlocked values means the MBA failed to come up.
	 */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	mba_load_err = true;
reclaim_mba:
	/* Take the MBA carveout back from the Q6 before dumping its logs */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_ext_bhs:
	if (qproc->has_ext_bhs_reg)
		q6v5_external_bhs_disable(qproc);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_fallback_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
1292 
/*
 * q6v5_mba_reclaim() - halt the Q6 and release all resources held for MBA
 * @qproc: modem subsystem context
 *
 * Halts the AXI ports, asserts the modem reset, disables clocks, supplies
 * and the external BHS, and reclaims ownership of the MBA carveout.  The
 * sequence mirrors q6v5_mba_load() in reverse; do not reorder the steps.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 1);

		/* Wait for the bus clocks to report idle (register reads 0) */
		/* NOTE(review): message says "enable" but this polls for the
		 * clock-off status bit to clear - confirm intended wording */
		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable axim1 clock\n");

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable crypto clock\n");
	}

	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	if (qproc->has_ext_bhs_reg)
		q6v5_external_bhs_disable(qproc);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	/* If the proxy votes were never handed over, drop them now */
	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
				       qproc->fallback_proxy_reg_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
1365 
1366 static int q6v5_reload_mba(struct rproc *rproc)
1367 {
1368 	struct q6v5 *qproc = rproc->priv;
1369 	const struct firmware *fw;
1370 	int ret;
1371 
1372 	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1373 	if (ret < 0)
1374 		return ret;
1375 
1376 	q6v5_load(rproc, fw);
1377 	ret = q6v5_mba_load(qproc);
1378 	release_firmware(fw);
1379 
1380 	return ret;
1381 }
1382 
1383 static int q6v5_mpss_load(struct q6v5 *qproc)
1384 {
1385 	const struct elf32_phdr *phdrs;
1386 	const struct elf32_phdr *phdr;
1387 	const struct firmware *seg_fw;
1388 	const struct firmware *fw;
1389 	struct elf32_hdr *ehdr;
1390 	phys_addr_t mpss_reloc;
1391 	phys_addr_t boot_addr;
1392 	phys_addr_t min_addr = PHYS_ADDR_MAX;
1393 	phys_addr_t max_addr = 0;
1394 	u32 code_length;
1395 	bool relocate = false;
1396 	char *fw_name;
1397 	size_t fw_name_len;
1398 	ssize_t offset;
1399 	size_t size = 0;
1400 	void *ptr;
1401 	int ret;
1402 	int i;
1403 
1404 	fw_name_len = strlen(qproc->hexagon_mdt_image);
1405 	if (fw_name_len <= 4)
1406 		return -EINVAL;
1407 
1408 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1409 	if (!fw_name)
1410 		return -ENOMEM;
1411 
1412 	ret = request_firmware(&fw, fw_name, qproc->dev);
1413 	if (ret < 0) {
1414 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
1415 		goto out;
1416 	}
1417 
1418 	/* Initialize the RMB validator */
1419 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1420 
1421 	ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
1422 	if (ret)
1423 		goto release_firmware;
1424 
1425 	ehdr = (struct elf32_hdr *)fw->data;
1426 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1427 
1428 	for (i = 0; i < ehdr->e_phnum; i++) {
1429 		phdr = &phdrs[i];
1430 
1431 		if (!q6v5_phdr_valid(phdr))
1432 			continue;
1433 
1434 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1435 			relocate = true;
1436 
1437 		if (phdr->p_paddr < min_addr)
1438 			min_addr = phdr->p_paddr;
1439 
1440 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
1441 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1442 	}
1443 
1444 	if (qproc->version == MSS_MSM8953) {
1445 		ret = qcom_scm_pas_mem_setup(MPSS_PAS_ID, qproc->mpss_phys, qproc->mpss_size);
1446 		if (ret) {
1447 			dev_err(qproc->dev,
1448 				"setting up mpss memory failed: %d\n", ret);
1449 			goto release_firmware;
1450 		}
1451 	}
1452 
1453 	/*
1454 	 * In case of a modem subsystem restart on secure devices, the modem
1455 	 * memory can be reclaimed only after MBA is loaded.
1456 	 */
1457 	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
1458 				qproc->mpss_phys, qproc->mpss_size);
1459 
1460 	/* Share ownership between Linux and MSS, during segment loading */
1461 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1462 				      qproc->mpss_phys, qproc->mpss_size);
1463 	if (ret) {
1464 		dev_err(qproc->dev,
1465 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1466 		ret = -EAGAIN;
1467 		goto release_firmware;
1468 	}
1469 
1470 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
1471 	qproc->mpss_reloc = mpss_reloc;
1472 	/* Load firmware segments */
1473 	for (i = 0; i < ehdr->e_phnum; i++) {
1474 		phdr = &phdrs[i];
1475 
1476 		if (!q6v5_phdr_valid(phdr))
1477 			continue;
1478 
1479 		offset = phdr->p_paddr - mpss_reloc;
1480 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1481 			dev_err(qproc->dev, "segment outside memory range\n");
1482 			ret = -EINVAL;
1483 			goto release_firmware;
1484 		}
1485 
1486 		if (phdr->p_filesz > phdr->p_memsz) {
1487 			dev_err(qproc->dev,
1488 				"refusing to load segment %d with p_filesz > p_memsz\n",
1489 				i);
1490 			ret = -EINVAL;
1491 			goto release_firmware;
1492 		}
1493 
1494 		ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
1495 		if (!ptr) {
1496 			dev_err(qproc->dev,
1497 				"unable to map memory region: %pa+%zx-%x\n",
1498 				&qproc->mpss_phys, offset, phdr->p_memsz);
1499 			goto release_firmware;
1500 		}
1501 
1502 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
1503 			/* Firmware is large enough to be non-split */
1504 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
1505 				dev_err(qproc->dev,
1506 					"failed to load segment %d from truncated file %s\n",
1507 					i, fw_name);
1508 				ret = -EINVAL;
1509 				memunmap(ptr);
1510 				goto release_firmware;
1511 			}
1512 
1513 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1514 		} else if (phdr->p_filesz) {
1515 			/* Replace "xxx.xxx" with "xxx.bxx" */
1516 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1517 			ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
1518 							ptr, phdr->p_filesz);
1519 			if (ret) {
1520 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1521 				memunmap(ptr);
1522 				goto release_firmware;
1523 			}
1524 
1525 			if (seg_fw->size != phdr->p_filesz) {
1526 				dev_err(qproc->dev,
1527 					"failed to load segment %d from truncated file %s\n",
1528 					i, fw_name);
1529 				ret = -EINVAL;
1530 				release_firmware(seg_fw);
1531 				memunmap(ptr);
1532 				goto release_firmware;
1533 			}
1534 
1535 			release_firmware(seg_fw);
1536 		}
1537 
1538 		if (phdr->p_memsz > phdr->p_filesz) {
1539 			memset(ptr + phdr->p_filesz, 0,
1540 			       phdr->p_memsz - phdr->p_filesz);
1541 		}
1542 		memunmap(ptr);
1543 		size += phdr->p_memsz;
1544 
1545 		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1546 		if (!code_length) {
1547 			boot_addr = relocate ? qproc->mpss_phys : min_addr;
1548 			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1549 			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1550 		}
1551 		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1552 
1553 		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1554 		if (ret < 0) {
1555 			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1556 				ret);
1557 			goto release_firmware;
1558 		}
1559 	}
1560 
1561 	/* Transfer ownership of modem ddr region to q6 */
1562 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1563 				      qproc->mpss_phys, qproc->mpss_size);
1564 	if (ret) {
1565 		dev_err(qproc->dev,
1566 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1567 		ret = -EAGAIN;
1568 		goto release_firmware;
1569 	}
1570 
1571 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1572 	if (ret == -ETIMEDOUT)
1573 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1574 	else if (ret < 0)
1575 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1576 
1577 	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
1578 
1579 release_firmware:
1580 	release_firmware(fw);
1581 out:
1582 	kfree(fw_name);
1583 
1584 	return ret < 0 ? ret : 0;
1585 }
1586 
/*
 * qcom_q6v5_dump_segment() - copy one modem memory segment into a coredump
 * @rproc: remoteproc instance being dumped
 * @segment: descriptor of the segment to copy
 * @dest: destination buffer in the coredump
 * @cp_offset: byte offset within the segment to start copying from
 * @size: number of bytes to copy
 *
 * On the first call after a crash the MBA is rebooted and ownership of the
 * MPSS region is pulled back to Linux so the memory is readable.  When the
 * final segment has been copied, ownership is returned to the Q6 and the
 * MBA resources are reclaimed.  Unreadable memory is filled with 0xff.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest, size_t cp_offset, size_t size)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);

	if (ptr) {
		memcpy(dest, ptr, size);
		memunmap(ptr);
	} else {
		/* Segment unavailable: pad with 0xff so the dump stays aligned */
		memset(dest, 0xff, size);
	}

	qproc->current_dump_size += size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}
1632 
/*
 * q6v5_start() - rproc .start handler: boot MBA, then load and start MPSS
 * @rproc: remoteproc instance to start
 *
 * Return: 0 on success, negative errno on failure.  On failure after the
 * MBA has booted, the MBA is reclaimed and its logs are dumped.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
		 qproc->dp_size ? "" : "out");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	/* Wait for the modem to signal it is up */
	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Modem is running from MPSS now; take the MBA carveout back */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);
	q6v5_dump_mba_logs(qproc);

	return ret;
}
1674 
1675 static int q6v5_stop(struct rproc *rproc)
1676 {
1677 	struct q6v5 *qproc = rproc->priv;
1678 	int ret;
1679 
1680 	ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
1681 	if (ret == -ETIMEDOUT)
1682 		dev_err(qproc->dev, "timed out on wait\n");
1683 
1684 	q6v5_mba_reclaim(qproc);
1685 
1686 	return 0;
1687 }
1688 
/*
 * qcom_q6v5_register_dump_segments() - rproc .parse_fw handler
 * @rproc: remoteproc instance
 * @mba_fw: MBA firmware passed by the core (unused; the dump layout comes
 *          from the modem .mdt image instead)
 *
 * Reads the modem ELF headers and registers one custom coredump segment per
 * loadable program header, accumulating the total dump size used later by
 * qcom_q6v5_dump_segment() to know when the dump is complete.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->total_dump_size = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							NULL);
		if (ret)
			break;

		qproc->total_dump_size += phdr->p_memsz;
	}

	release_firmware(fw);
	return ret;
}
1732 
1733 static unsigned long q6v5_panic(struct rproc *rproc)
1734 {
1735 	struct q6v5 *qproc = rproc->priv;
1736 
1737 	return qcom_q6v5_panic(&qproc->q6v5);
1738 }
1739 
/* remoteproc operations for the self-authenticating modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
	.panic = q6v5_panic,
};
1747 
/*
 * qcom_msa_handover() - release proxy votes once the modem takes over
 * @q6v5: common q6v5 context embedded in struct q6v5
 *
 * Called when the modem signals that it has taken control of its own
 * resources; drops the proxy clocks, regulators (including the fallback
 * set for old DTs) and power domains held on its behalf during boot.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
1760 
/*
 * q6v5_init_mem() - map MMIO regions and parse the syscon references
 * @qproc: modem subsystem context
 * @pdev: platform device carrying the resources and DT node
 *
 * Maps the "qdsp6" and "rmb" register regions and resolves the various
 * "qcom,*-regs" phandle+offset properties into regmaps and offsets,
 * depending on which features this SoC variant advertises.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	int halt_cell_cnt = 3;
	int ret;

	qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* Variants with a vQ6 carry one extra halt register cell */
	if (qproc->has_vq6)
		halt_cell_cnt++;

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", halt_cell_cnt, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_vq6)
		qproc->halt_vq6 = args.args[3];

	/* Optional Q-channel accept registers (mdm/cx/axi) */
	if (qproc->has_qaccept_regs) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,qaccept-regs",
						       3, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
			return -EINVAL;
		}

		qproc->qaccept_mdm = args.args[0];
		qproc->qaccept_cx = args.args[1];
		qproc->qaccept_axi = args.args[2];
	}

	/* Optional external big head switch control register */
	if (qproc->has_ext_bhs_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-bhs-reg",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-bhs-reg index 0\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->ext_bhs = args.args[0];
	}

	/* Optional external control registers: two phandle entries expected */
	if (qproc->has_ext_cntl_regs) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->force_clk_on = args.args[0];
		qproc->rscc_disable = args.args[1];

		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
			return -EINVAL;
		}

		qproc->axim1_clk_off = args.args[0];
		qproc->crypto_clk_off = args.args[1];
	}

	/* Optional spare register used by some SoCs during reset */
	if (qproc->has_spare_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}
1876 
/*
 * q6v5_init_clocks() - look up a NULL-terminated list of named clocks
 * @dev: device to get the clocks for (devm-managed)
 * @clks: output array, filled with one clock per name
 * @clk_names: NULL-terminated array of clock names; may be NULL
 *
 * Return: number of clocks acquired (0 if @clk_names is NULL), or a
 * negative errno if any lookup fails.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
		char **clk_names)
{
	int count = 0;

	if (!clk_names)
		return 0;

	while (clk_names[count]) {
		clks[count] = devm_clk_get(dev, clk_names[count]);
		if (IS_ERR(clks[count]))
			return dev_err_probe(dev, PTR_ERR(clks[count]),
					     "Failed to get %s clock\n",
					     clk_names[count]);
		count++;
	}

	return count;
}
1895 
/*
 * q6v5_pds_attach() - attach the named power domains
 * @dev: consumer device
 * @devs: output array of virtual power-domain devices
 * @pd_names: NULL-terminated array of power-domain names; may be NULL
 *
 * When exactly one power domain is requested and the device already has a
 * PM domain bound, the device itself is used (the genpd core attaches a
 * single domain automatically) and runtime PM is enabled instead.
 *
 * Return: number of power domains attached (0 if @pd_names is NULL), or a
 * negative errno; on failure previously attached domains are detached.
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	/* Handle single power domain */
	if (num_pds == 1 && dev->pm_domain) {
		devs[0] = dev;
		pm_runtime_enable(dev);
		return 1;
	}

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			/* NULL means "no such domain": map it to -ENODATA */
			ret = PTR_ERR(devs[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	/* Detach only the domains attached before the failure */
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}
1932 
1933 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1934 			    size_t pd_count)
1935 {
1936 	struct device *dev = qproc->dev;
1937 	int i;
1938 
1939 	/* Handle single power domain */
1940 	if (pd_count == 1 && dev->pm_domain) {
1941 		pm_runtime_disable(dev);
1942 		return;
1943 	}
1944 
1945 	for (i = 0; i < pd_count; i++)
1946 		dev_pm_domain_detach(pds[i], false);
1947 }
1948 
1949 static int q6v5_init_reset(struct q6v5 *qproc)
1950 {
1951 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1952 							      "mss_restart");
1953 	if (IS_ERR(qproc->mss_restart)) {
1954 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1955 		return PTR_ERR(qproc->mss_restart);
1956 	}
1957 
1958 	if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
1959 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1960 								    "pdc_reset");
1961 		if (IS_ERR(qproc->pdc_reset)) {
1962 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1963 			return PTR_ERR(qproc->pdc_reset);
1964 		}
1965 	}
1966 
1967 	return 0;
1968 }
1969 
1970 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1971 {
1972 	struct device_node *child;
1973 	struct reserved_mem *rmem;
1974 	struct device_node *node;
1975 
1976 	/*
1977 	 * In the absence of mba/mpss sub-child, extract the mba and mpss
1978 	 * reserved memory regions from device's memory-region property.
1979 	 */
1980 	child = of_get_child_by_name(qproc->dev->of_node, "mba");
1981 	if (!child) {
1982 		node = of_parse_phandle(qproc->dev->of_node,
1983 					"memory-region", 0);
1984 	} else {
1985 		node = of_parse_phandle(child, "memory-region", 0);
1986 		of_node_put(child);
1987 	}
1988 
1989 	if (!node) {
1990 		dev_err(qproc->dev, "no mba memory-region specified\n");
1991 		return -EINVAL;
1992 	}
1993 
1994 	rmem = of_reserved_mem_lookup(node);
1995 	of_node_put(node);
1996 	if (!rmem) {
1997 		dev_err(qproc->dev, "unable to resolve mba region\n");
1998 		return -EINVAL;
1999 	}
2000 
2001 	qproc->mba_phys = rmem->base;
2002 	qproc->mba_size = rmem->size;
2003 
2004 	if (!child) {
2005 		node = of_parse_phandle(qproc->dev->of_node,
2006 					"memory-region", 1);
2007 	} else {
2008 		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
2009 		node = of_parse_phandle(child, "memory-region", 0);
2010 		of_node_put(child);
2011 	}
2012 
2013 	if (!node) {
2014 		dev_err(qproc->dev, "no mpss memory-region specified\n");
2015 		return -EINVAL;
2016 	}
2017 
2018 	rmem = of_reserved_mem_lookup(node);
2019 	of_node_put(node);
2020 	if (!rmem) {
2021 		dev_err(qproc->dev, "unable to resolve mpss region\n");
2022 		return -EINVAL;
2023 	}
2024 
2025 	qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
2026 	qproc->mpss_size = rmem->size;
2027 
2028 	if (!child) {
2029 		node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
2030 	} else {
2031 		child = of_get_child_by_name(qproc->dev->of_node, "metadata");
2032 		node = of_parse_phandle(child, "memory-region", 0);
2033 		of_node_put(child);
2034 	}
2035 
2036 	if (!node)
2037 		return 0;
2038 
2039 	rmem = of_reserved_mem_lookup(node);
2040 	if (!rmem) {
2041 		dev_err(qproc->dev, "unable to resolve metadata region\n");
2042 		return -EINVAL;
2043 	}
2044 
2045 	qproc->mdata_phys = rmem->base;
2046 	qproc->mdata_size = rmem->size;
2047 
2048 	return 0;
2049 }
2050 
2051 static int q6v5_probe(struct platform_device *pdev)
2052 {
2053 	const struct rproc_hexagon_res *desc;
2054 	struct device_node *node;
2055 	struct q6v5 *qproc;
2056 	struct rproc *rproc;
2057 	const char *mba_image;
2058 	int ret;
2059 
2060 	desc = of_device_get_match_data(&pdev->dev);
2061 	if (!desc)
2062 		return -EINVAL;
2063 
2064 	if (desc->need_mem_protection && !qcom_scm_is_available())
2065 		return -EPROBE_DEFER;
2066 
2067 	mba_image = desc->hexagon_mba_image;
2068 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
2069 					    0, &mba_image);
2070 	if (ret < 0 && ret != -EINVAL) {
2071 		dev_err(&pdev->dev, "unable to read mba firmware-name\n");
2072 		return ret;
2073 	}
2074 
2075 	rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
2076 				 mba_image, sizeof(*qproc));
2077 	if (!rproc) {
2078 		dev_err(&pdev->dev, "failed to allocate rproc\n");
2079 		return -ENOMEM;
2080 	}
2081 
2082 	rproc->auto_boot = false;
2083 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
2084 
2085 	qproc = rproc->priv;
2086 	qproc->dev = &pdev->dev;
2087 	qproc->rproc = rproc;
2088 	qproc->hexagon_mdt_image = "modem.mdt";
2089 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
2090 					    1, &qproc->hexagon_mdt_image);
2091 	if (ret < 0 && ret != -EINVAL) {
2092 		dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
2093 		return ret;
2094 	}
2095 
2096 	platform_set_drvdata(pdev, qproc);
2097 
2098 	qproc->has_qaccept_regs = desc->has_qaccept_regs;
2099 	qproc->has_ext_bhs_reg = desc->has_ext_bhs_reg;
2100 	qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
2101 	qproc->has_vq6 = desc->has_vq6;
2102 	qproc->has_spare_reg = desc->has_spare_reg;
2103 	ret = q6v5_init_mem(qproc, pdev);
2104 	if (ret)
2105 		return ret;
2106 
2107 	ret = q6v5_alloc_memory_region(qproc);
2108 	if (ret)
2109 		return ret;
2110 
2111 	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
2112 			       desc->proxy_clk_names);
2113 	if (ret < 0)
2114 		return ret;
2115 	qproc->proxy_clk_count = ret;
2116 
2117 	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
2118 			       desc->reset_clk_names);
2119 	if (ret < 0)
2120 		return ret;
2121 	qproc->reset_clk_count = ret;
2122 
2123 	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
2124 			       desc->active_clk_names);
2125 	if (ret < 0)
2126 		return ret;
2127 	qproc->active_clk_count = ret;
2128 
2129 	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
2130 				  desc->proxy_supply);
2131 	if (ret < 0)
2132 		return ret;
2133 	qproc->proxy_reg_count = ret;
2134 
2135 	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
2136 				  desc->active_supply);
2137 	if (ret < 0)
2138 		return ret;
2139 	qproc->active_reg_count = ret;
2140 
2141 	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
2142 			      desc->proxy_pd_names);
2143 	/* Fallback to regulators for old device trees */
2144 	if (ret == -ENODATA && desc->fallback_proxy_supply) {
2145 		ret = q6v5_regulator_init(&pdev->dev,
2146 					  qproc->fallback_proxy_regs,
2147 					  desc->fallback_proxy_supply);
2148 		if (ret < 0)
2149 			return ret;
2150 		qproc->fallback_proxy_reg_count = ret;
2151 	} else if (ret < 0) {
2152 		dev_err(&pdev->dev, "Failed to init power domains\n");
2153 		return ret;
2154 	} else {
2155 		qproc->proxy_pd_count = ret;
2156 	}
2157 
2158 	qproc->has_alt_reset = desc->has_alt_reset;
2159 	ret = q6v5_init_reset(qproc);
2160 	if (ret)
2161 		goto detach_proxy_pds;
2162 
2163 	qproc->version = desc->version;
2164 	qproc->need_mem_protection = desc->need_mem_protection;
2165 	qproc->has_mba_logs = desc->has_mba_logs;
2166 
2167 	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
2168 			     qcom_msa_handover);
2169 	if (ret)
2170 		goto detach_proxy_pds;
2171 
2172 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
2173 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
2174 	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
2175 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
2176 	qcom_add_pdm_subdev(rproc, &qproc->pdm_subdev);
2177 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
2178 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
2179 	if (IS_ERR(qproc->sysmon)) {
2180 		ret = PTR_ERR(qproc->sysmon);
2181 		goto remove_subdevs;
2182 	}
2183 
2184 	ret = rproc_add(rproc);
2185 	if (ret)
2186 		goto remove_sysmon_subdev;
2187 
2188 	node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
2189 	qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
2190 	of_node_put(node);
2191 
2192 	return 0;
2193 
2194 remove_sysmon_subdev:
2195 	qcom_remove_sysmon_subdev(qproc->sysmon);
2196 remove_subdevs:
2197 	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
2198 	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
2199 	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
2200 detach_proxy_pds:
2201 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
2202 
2203 	return ret;
2204 }
2205 
/*
 * Device removal: tear down everything q6v5_probe() set up, in reverse
 * order of registration.
 */
static void q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	/* The bam-dmux child only exists if its DT node was present at probe */
	if (qproc->bam_dmux)
		of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
	rproc_del(rproc);

	qcom_q6v5_deinit(&qproc->q6v5);
	/* Remove subdevices in the opposite order they were added in probe */
	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_pdm_subdev(rproc, &qproc->pdm_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
2224 
2225 static const struct rproc_hexagon_res sc7180_mss = {
2226 	.hexagon_mba_image = "mba.mbn",
2227 	.proxy_clk_names = (char*[]){
2228 		"xo",
2229 		NULL
2230 	},
2231 	.reset_clk_names = (char*[]){
2232 		"iface",
2233 		"bus",
2234 		"snoc_axi",
2235 		NULL
2236 	},
2237 	.active_clk_names = (char*[]){
2238 		"mnoc_axi",
2239 		"nav",
2240 		NULL
2241 	},
2242 	.proxy_pd_names = (char*[]){
2243 		"cx",
2244 		"mx",
2245 		"mss",
2246 		NULL
2247 	},
2248 	.need_mem_protection = true,
2249 	.has_alt_reset = false,
2250 	.has_mba_logs = true,
2251 	.has_spare_reg = true,
2252 	.has_qaccept_regs = false,
2253 	.has_ext_bhs_reg = false,
2254 	.has_ext_cntl_regs = false,
2255 	.has_vq6 = false,
2256 	.version = MSS_SC7180,
2257 };
2258 
2259 static const struct rproc_hexagon_res sc7280_mss = {
2260 	.hexagon_mba_image = "mba.mbn",
2261 	.proxy_clk_names = (char*[]){
2262 		"xo",
2263 		"pka",
2264 		NULL
2265 	},
2266 	.active_clk_names = (char*[]){
2267 		"iface",
2268 		"offline",
2269 		"snoc_axi",
2270 		NULL
2271 	},
2272 	.proxy_pd_names = (char*[]){
2273 		"cx",
2274 		"mss",
2275 		NULL
2276 	},
2277 	.need_mem_protection = true,
2278 	.has_alt_reset = false,
2279 	.has_mba_logs = true,
2280 	.has_spare_reg = false,
2281 	.has_qaccept_regs = true,
2282 	.has_ext_bhs_reg = false,
2283 	.has_ext_cntl_regs = true,
2284 	.has_vq6 = true,
2285 	.version = MSS_SC7280,
2286 };
2287 
2288 static const struct rproc_hexagon_res sdm660_mss = {
2289 	.hexagon_mba_image = "mba.mbn",
2290 	.proxy_clk_names = (char*[]){
2291 			"xo",
2292 			"qdss",
2293 			"mem",
2294 			NULL
2295 	},
2296 	.active_clk_names = (char*[]){
2297 			"iface",
2298 			"bus",
2299 			"gpll0_mss",
2300 			"mnoc_axi",
2301 			"snoc_axi",
2302 			NULL
2303 	},
2304 	.proxy_pd_names = (char*[]){
2305 			"cx",
2306 			"mx",
2307 			NULL
2308 	},
2309 	.need_mem_protection = true,
2310 	.has_alt_reset = false,
2311 	.has_mba_logs = false,
2312 	.has_spare_reg = false,
2313 	.has_qaccept_regs = false,
2314 	.has_ext_bhs_reg = false,
2315 	.has_ext_cntl_regs = false,
2316 	.has_vq6 = false,
2317 	.version = MSS_SDM660,
2318 };
2319 
2320 static const struct rproc_hexagon_res sdm845_mss = {
2321 	.hexagon_mba_image = "mba.mbn",
2322 	.proxy_clk_names = (char*[]){
2323 			"xo",
2324 			"prng",
2325 			NULL
2326 	},
2327 	.reset_clk_names = (char*[]){
2328 			"iface",
2329 			"snoc_axi",
2330 			NULL
2331 	},
2332 	.active_clk_names = (char*[]){
2333 			"bus",
2334 			"mem",
2335 			"gpll0_mss",
2336 			"mnoc_axi",
2337 			NULL
2338 	},
2339 	.proxy_pd_names = (char*[]){
2340 			"cx",
2341 			"mx",
2342 			"mss",
2343 			NULL
2344 	},
2345 	.need_mem_protection = true,
2346 	.has_alt_reset = true,
2347 	.has_mba_logs = false,
2348 	.has_spare_reg = false,
2349 	.has_qaccept_regs = false,
2350 	.has_ext_bhs_reg = false,
2351 	.has_ext_cntl_regs = false,
2352 	.has_vq6 = false,
2353 	.version = MSS_SDM845,
2354 };
2355 
2356 static const struct rproc_hexagon_res msm8998_mss = {
2357 	.hexagon_mba_image = "mba.mbn",
2358 	.proxy_clk_names = (char*[]){
2359 			"xo",
2360 			"qdss",
2361 			"mem",
2362 			NULL
2363 	},
2364 	.active_clk_names = (char*[]){
2365 			"iface",
2366 			"bus",
2367 			"gpll0_mss",
2368 			"mnoc_axi",
2369 			"snoc_axi",
2370 			NULL
2371 	},
2372 	.proxy_pd_names = (char*[]){
2373 			"cx",
2374 			"mx",
2375 			NULL
2376 	},
2377 	.need_mem_protection = true,
2378 	.has_alt_reset = false,
2379 	.has_mba_logs = false,
2380 	.has_spare_reg = false,
2381 	.has_qaccept_regs = false,
2382 	.has_ext_bhs_reg = false,
2383 	.has_ext_cntl_regs = false,
2384 	.has_vq6 = false,
2385 	.version = MSS_MSM8998,
2386 };
2387 
2388 static const struct rproc_hexagon_res msm8996_mss = {
2389 	.hexagon_mba_image = "mba.mbn",
2390 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2391 		{
2392 			.supply = "pll",
2393 			.uA = 100000,
2394 		},
2395 		{}
2396 	},
2397 	.proxy_clk_names = (char*[]){
2398 			"xo",
2399 			"qdss",
2400 			NULL
2401 	},
2402 	.active_clk_names = (char*[]){
2403 			"iface",
2404 			"bus",
2405 			"mem",
2406 			"gpll0_mss",
2407 			"snoc_axi",
2408 			"mnoc_axi",
2409 			NULL
2410 	},
2411 	.proxy_pd_names = (char*[]){
2412 			"mx",
2413 			"cx",
2414 			NULL
2415 	},
2416 	.need_mem_protection = true,
2417 	.has_alt_reset = false,
2418 	.has_mba_logs = false,
2419 	.has_spare_reg = false,
2420 	.has_qaccept_regs = false,
2421 	.has_ext_bhs_reg = false,
2422 	.has_ext_cntl_regs = false,
2423 	.has_vq6 = false,
2424 	.version = MSS_MSM8996,
2425 };
2426 
2427 static const struct rproc_hexagon_res msm8909_mss = {
2428 	.hexagon_mba_image = "mba.mbn",
2429 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2430 		{
2431 			.supply = "pll",
2432 			.uA = 100000,
2433 		},
2434 		{}
2435 	},
2436 	.proxy_clk_names = (char*[]){
2437 		"xo",
2438 		NULL
2439 	},
2440 	.active_clk_names = (char*[]){
2441 		"iface",
2442 		"bus",
2443 		"mem",
2444 		NULL
2445 	},
2446 	.proxy_pd_names = (char*[]){
2447 		"mx",
2448 		"cx",
2449 		NULL
2450 	},
2451 	.need_mem_protection = false,
2452 	.has_alt_reset = false,
2453 	.has_mba_logs = false,
2454 	.has_spare_reg = false,
2455 	.has_qaccept_regs = false,
2456 	.has_ext_bhs_reg = false,
2457 	.has_ext_cntl_regs = false,
2458 	.has_vq6 = false,
2459 	.version = MSS_MSM8909,
2460 };
2461 
2462 static const struct rproc_hexagon_res msm8916_mss = {
2463 	.hexagon_mba_image = "mba.mbn",
2464 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2465 		{
2466 			.supply = "pll",
2467 			.uA = 100000,
2468 		},
2469 		{}
2470 	},
2471 	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2472 		{
2473 			.supply = "mx",
2474 			.uV = 1050000,
2475 		},
2476 		{
2477 			.supply = "cx",
2478 			.uA = 100000,
2479 		},
2480 		{}
2481 	},
2482 	.proxy_clk_names = (char*[]){
2483 		"xo",
2484 		NULL
2485 	},
2486 	.active_clk_names = (char*[]){
2487 		"iface",
2488 		"bus",
2489 		"mem",
2490 		NULL
2491 	},
2492 	.proxy_pd_names = (char*[]){
2493 		"mx",
2494 		"cx",
2495 		NULL
2496 	},
2497 	.need_mem_protection = false,
2498 	.has_alt_reset = false,
2499 	.has_mba_logs = false,
2500 	.has_spare_reg = false,
2501 	.has_qaccept_regs = false,
2502 	.has_ext_bhs_reg = false,
2503 	.has_ext_cntl_regs = false,
2504 	.has_vq6 = false,
2505 	.version = MSS_MSM8916,
2506 };
2507 
2508 static const struct rproc_hexagon_res msm8953_mss = {
2509 	.hexagon_mba_image = "mba.mbn",
2510 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2511 		{
2512 			.supply = "pll",
2513 			.uA = 100000,
2514 		},
2515 		{}
2516 	},
2517 	.proxy_clk_names = (char*[]){
2518 		"xo",
2519 		NULL
2520 	},
2521 	.active_clk_names = (char*[]){
2522 		"iface",
2523 		"bus",
2524 		"mem",
2525 		NULL
2526 	},
2527 	.proxy_pd_names = (char*[]) {
2528 		"cx",
2529 		"mx",
2530 		"mss",
2531 		NULL
2532 	},
2533 	.need_mem_protection = false,
2534 	.has_alt_reset = false,
2535 	.has_mba_logs = false,
2536 	.has_spare_reg = false,
2537 	.has_qaccept_regs = false,
2538 	.has_ext_bhs_reg = false,
2539 	.has_ext_cntl_regs = false,
2540 	.has_vq6 = false,
2541 	.version = MSS_MSM8953,
2542 };
2543 
2544 static const struct rproc_hexagon_res msm8974_mss = {
2545 	.hexagon_mba_image = "mba.b00",
2546 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2547 		{
2548 			.supply = "pll",
2549 			.uA = 100000,
2550 		},
2551 		{
2552 			.supply = "mx",
2553 			.uV = 1050000,
2554 		},
2555 		{}
2556 	},
2557 	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2558 		{
2559 			.supply = "cx",
2560 			.uA = 100000,
2561 		},
2562 		{}
2563 	},
2564 	.active_supply = (struct qcom_mss_reg_res[]) {
2565 		{
2566 			.supply = "mss",
2567 			.uV = 1050000,
2568 			.uA = 100000,
2569 		},
2570 		{}
2571 	},
2572 	.proxy_clk_names = (char*[]){
2573 		"xo",
2574 		NULL
2575 	},
2576 	.active_clk_names = (char*[]){
2577 		"iface",
2578 		"bus",
2579 		"mem",
2580 		NULL
2581 	},
2582 	.proxy_pd_names = (char*[]){
2583 		"cx",
2584 		NULL
2585 	},
2586 	.need_mem_protection = false,
2587 	.has_alt_reset = false,
2588 	.has_mba_logs = false,
2589 	.has_spare_reg = false,
2590 	.has_qaccept_regs = false,
2591 	.has_ext_bhs_reg = false,
2592 	.has_ext_cntl_regs = false,
2593 	.has_vq6 = false,
2594 	.version = MSS_MSM8974,
2595 };
2596 
2597 static const struct rproc_hexagon_res msm8226_mss = {
2598 	.hexagon_mba_image = "mba.b00",
2599 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2600 		{
2601 			.supply = "pll",
2602 			.uA = 100000,
2603 		},
2604 		{
2605 			.supply = "mx",
2606 			.uV = 1050000,
2607 		},
2608 		{}
2609 	},
2610 	.proxy_clk_names = (char*[]){
2611 		"xo",
2612 		NULL
2613 	},
2614 	.active_clk_names = (char*[]){
2615 		"iface",
2616 		"bus",
2617 		"mem",
2618 		NULL
2619 	},
2620 	.proxy_pd_names = (char*[]){
2621 		"cx",
2622 		NULL
2623 	},
2624 	.need_mem_protection = false,
2625 	.has_alt_reset = false,
2626 	.has_mba_logs = false,
2627 	.has_spare_reg = false,
2628 	.has_qaccept_regs = false,
2629 	.has_ext_bhs_reg = true,
2630 	.has_ext_cntl_regs = false,
2631 	.has_vq6 = false,
2632 	.version = MSS_MSM8226,
2633 };
2634 
2635 static const struct rproc_hexagon_res msm8926_mss = {
2636 	.hexagon_mba_image = "mba.b00",
2637 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2638 		{
2639 			.supply = "pll",
2640 			.uA = 100000,
2641 		},
2642 		{
2643 			.supply = "mx",
2644 			.uV = 1050000,
2645 		},
2646 		{}
2647 	},
2648 	.active_supply = (struct qcom_mss_reg_res[]) {
2649 		{
2650 			.supply = "mss",
2651 			.uV = 1050000,
2652 			.uA = 100000,
2653 		},
2654 		{}
2655 	},
2656 	.proxy_clk_names = (char*[]){
2657 		"xo",
2658 		NULL
2659 	},
2660 	.active_clk_names = (char*[]){
2661 		"iface",
2662 		"bus",
2663 		"mem",
2664 		NULL
2665 	},
2666 	.proxy_pd_names = (char*[]){
2667 		"cx",
2668 		NULL
2669 	},
2670 	.need_mem_protection = false,
2671 	.has_alt_reset = false,
2672 	.has_mba_logs = false,
2673 	.has_spare_reg = false,
2674 	.has_qaccept_regs = false,
2675 	.has_ext_bhs_reg = false,
2676 	.has_ext_cntl_regs = false,
2677 	.has_vq6 = false,
2678 	.version = MSS_MSM8926,
2679 };
2680 
2681 static const struct of_device_id q6v5_of_match[] = {
2682 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
2683 	{ .compatible = "qcom,msm8226-mss-pil", .data = &msm8226_mss},
2684 	{ .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss},
2685 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
2686 	{ .compatible = "qcom,msm8926-mss-pil", .data = &msm8926_mss},
2687 	{ .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss},
2688 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
2689 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
2690 	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
2691 	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
2692 	{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
2693 	{ .compatible = "qcom,sdm660-mss-pil", .data = &sdm660_mss},
2694 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
2695 	{ },
2696 };
2697 MODULE_DEVICE_TABLE(of, q6v5_of_match);
2698 
2699 static struct platform_driver q6v5_driver = {
2700 	.probe = q6v5_probe,
2701 	.remove = q6v5_remove,
2702 	.driver = {
2703 		.name = "qcom-q6v5-mss",
2704 		.of_match_table = q6v5_of_match,
2705 	},
2706 };
2707 module_platform_driver(q6v5_driver);
2708 
2709 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
2710 MODULE_LICENSE("GPL v2");
2711