xref: /linux/drivers/remoteproc/qcom_q6v5_mss.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/devcoredump.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/mfd/syscon.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_reserved_mem.h>
20 #include <linux/of_platform.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_domain.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/regmap.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/remoteproc.h>
27 #include <linux/reset.h>
28 #include <linux/soc/qcom/mdt_loader.h>
29 #include <linux/iopoll.h>
30 #include <linux/slab.h>
31 
32 #include "remoteproc_internal.h"
33 #include "qcom_common.h"
34 #include "qcom_pil_info.h"
35 #include "qcom_q6v5.h"
36 
37 #include <linux/firmware/qcom/qcom_scm.h>
38 
#define MPSS_CRASH_REASON_SMEM		421

/* Size of the MBA boot-log area copied out by q6v5_dump_mba_logs() */
#define MBA_LOG_SIZE			SZ_4K

#define MPSS_PAS_ID			5

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

/* Commands written to RMB_MBA_COMMAND_REG */
#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6V6SS_MEM_PWR_CTL		0x034
#define QDSP6SS_STRAP_ACC		0x110
#define QDSP6V62SS_BHS_STATUS		0x0C4

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000

/* QACCEPT Register Offsets */
#define QACCEPT_ACCEPT_REG		0x0
#define QACCEPT_ACTIVE_REG		0x4
#define QACCEPT_DENY_REG		0x8
#define QACCEPT_REQ_REG			0xC

#define QACCEPT_TIMEOUT_US		50

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v55 parameters */
#define QDSP6V55_MEM_BITS		GENMASK(16, 8)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON		BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR		0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL		0x20
#define QDSP6v55_BHS_EN_REST_ACK	BIT(0)

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP                   0x3C
#define QDSP6SS_BOOT_CORE_START         0x400
#define QDSP6SS_BOOT_CMD                0x404
#define BOOT_FSM_TIMEOUT                10000
#define BHS_CHECK_MAX_LOOPS             200
/* Runtime state for one regulator supply: handle plus voltage/load votes. */
struct reg_info {
	struct regulator *reg;	/* handle obtained via devm_regulator_get() */
	int uV;			/* voltage vote; <= 0 means no vote is placed */
	int uA;			/* load vote; <= 0 means no vote is placed */
};
142 
/*
 * Static description of one regulator supply required by a SoC variant.
 * Arrays of these are NULL-terminated via the @supply member (see
 * q6v5_regulator_init()).
 */
struct qcom_mss_reg_res {
	const char *supply;	/* supply name; NULL terminates the array */
	int uV;			/* voltage to request, <= 0 to skip */
	int uA;			/* load to request, <= 0 to skip */
};
148 
/*
 * Per-SoC match data: firmware name, supplies, clocks, power domains and
 * feature flags for a given Hexagon/MSS variant. The bool flags are
 * mirrored into struct q6v5 and select the reset/boot code paths.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *fallback_proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **proxy_pd_names;
	int version;		/* one of the MSS_* enum values */
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	bool has_qaccept_regs;
	bool has_ext_cntl_regs;
	bool has_vq6;
};
167 
/* Driver state for one modem subsystem (MSS) remoteproc instance. */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	/* QDSP6SS register block and the PBL/MBA relay message buffer */
	void __iomem *reg_base;
	void __iomem *rmb_base;

	/* syscon regmaps for AXI halt and connection-box control */
	struct regmap *halt_map;
	struct regmap *conn_map;

	/* AXI halt register offsets within halt_map */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_vq6;
	u32 conn_box;

	/* Q-channel register offsets (only when has_qaccept_regs) */
	u32 qaccept_mdm;
	u32 qaccept_cx;
	u32 qaccept_axi;

	/* external control register offsets (only when has_ext_cntl_regs) */
	u32 axim1_clk_off;
	u32 crypto_clk_off;
	u32 force_clk_on;
	u32 rscc_disable;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	/* clock/regulator/power-domain handles with their element counts */
	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[1];
	struct reg_info fallback_proxy_regs[2];
	int active_reg_count;
	int proxy_reg_count;
	int fallback_proxy_reg_count;

	bool dump_mba_loaded;
	size_t current_dump_size;
	size_t total_dump_size;

	/* MBA carveout; dp_size is the debug-policy blob appended at 1M */
	phys_addr_t mba_phys;
	size_t mba_size;
	size_t dp_size;

	/* optional dedicated region for MDT metadata authentication */
	phys_addr_t mdata_phys;
	size_t mdata_size;

	/* MPSS firmware carveout and its relocation base */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_pdm pdm_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	struct platform_device *bam_dmux;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	bool has_qaccept_regs;
	bool has_ext_cntl_regs;
	bool has_vq6;
	/* current SCM ownership bitmaps for the MPSS and MBA regions */
	u64 mpss_perm;
	u64 mba_perm;
	const char *hexagon_mdt_image;
	int version;		/* one of the MSS_* enum values */
};
247 
/* SoC variant identifiers, stored in qproc->version to pick boot sequences */
enum {
	MSS_MSM8909,
	MSS_MSM8916,
	MSS_MSM8953,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SC7280,
	MSS_SDM660,
	MSS_SDM845,
};
260 
q6v5_regulator_init(struct device * dev,struct reg_info * regs,const struct qcom_mss_reg_res * reg_res)261 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
262 			       const struct qcom_mss_reg_res *reg_res)
263 {
264 	int rc;
265 	int i;
266 
267 	if (!reg_res)
268 		return 0;
269 
270 	for (i = 0; reg_res[i].supply; i++) {
271 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
272 		if (IS_ERR(regs[i].reg)) {
273 			rc = PTR_ERR(regs[i].reg);
274 			if (rc != -EPROBE_DEFER)
275 				dev_err(dev, "Failed to get %s\n regulator",
276 					reg_res[i].supply);
277 			return rc;
278 		}
279 
280 		regs[i].uV = reg_res[i].uV;
281 		regs[i].uA = reg_res[i].uA;
282 	}
283 
284 	return i;
285 }
286 
q6v5_regulator_enable(struct q6v5 * qproc,struct reg_info * regs,int count)287 static int q6v5_regulator_enable(struct q6v5 *qproc,
288 				 struct reg_info *regs, int count)
289 {
290 	int ret;
291 	int i;
292 
293 	for (i = 0; i < count; i++) {
294 		if (regs[i].uV > 0) {
295 			ret = regulator_set_voltage(regs[i].reg,
296 					regs[i].uV, INT_MAX);
297 			if (ret) {
298 				dev_err(qproc->dev,
299 					"Failed to request voltage for %d.\n",
300 						i);
301 				goto err;
302 			}
303 		}
304 
305 		if (regs[i].uA > 0) {
306 			ret = regulator_set_load(regs[i].reg,
307 						 regs[i].uA);
308 			if (ret < 0) {
309 				dev_err(qproc->dev,
310 					"Failed to set regulator mode\n");
311 				goto err;
312 			}
313 		}
314 
315 		ret = regulator_enable(regs[i].reg);
316 		if (ret) {
317 			dev_err(qproc->dev, "Regulator enable failed\n");
318 			goto err;
319 		}
320 	}
321 
322 	return 0;
323 err:
324 	for (; i >= 0; i--) {
325 		if (regs[i].uV > 0)
326 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
327 
328 		if (regs[i].uA > 0)
329 			regulator_set_load(regs[i].reg, 0);
330 
331 		regulator_disable(regs[i].reg);
332 	}
333 
334 	return ret;
335 }
336 
q6v5_regulator_disable(struct q6v5 * qproc,struct reg_info * regs,int count)337 static void q6v5_regulator_disable(struct q6v5 *qproc,
338 				   struct reg_info *regs, int count)
339 {
340 	int i;
341 
342 	for (i = 0; i < count; i++) {
343 		if (regs[i].uV > 0)
344 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
345 
346 		if (regs[i].uA > 0)
347 			regulator_set_load(regs[i].reg, 0);
348 
349 		regulator_disable(regs[i].reg);
350 	}
351 }
352 
/*
 * Prepare and enable @count clocks in order. On failure, clocks enabled
 * so far are disabled again. Returns 0 on success or a negative errno.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int idx;
	int ret;

	for (idx = 0; idx < count; idx++) {
		ret = clk_prepare_enable(clks[idx]);
		if (ret) {
			dev_err(dev, "Clock enable failed\n");
			/* Unwind only the clocks already enabled */
			while (--idx >= 0)
				clk_disable_unprepare(clks[idx]);
			return ret;
		}
	}

	return 0;
}
374 
/* Disable and unprepare each of the @count clocks in @clks. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		clk_disable_unprepare(clks[idx]);
}
383 
/*
 * Power up @pd_count power domains: vote each to its maximum performance
 * state and take a runtime-PM reference. On failure, votes and
 * references taken so far are released. Returns 0 or a negative errno.
 */
static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
			   size_t pd_count)
{
	int idx;
	int ret;

	for (idx = 0; idx < pd_count; idx++) {
		dev_pm_genpd_set_performance_state(pds[idx], INT_MAX);
		ret = pm_runtime_get_sync(pds[idx]);
		if (ret < 0) {
			/* Drop this domain's partial state, then unwind */
			pm_runtime_put_noidle(pds[idx]);
			dev_pm_genpd_set_performance_state(pds[idx], 0);
			while (--idx >= 0) {
				dev_pm_genpd_set_performance_state(pds[idx], 0);
				pm_runtime_put(pds[idx]);
			}
			return ret;
		}
	}

	return 0;
}
410 
/*
 * Undo q6v5_pds_enable(): drop the performance-state vote and the
 * runtime-PM reference of each of the @pd_count power domains.
 */
static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
			     size_t pd_count)
{
	size_t idx;

	for (idx = 0; idx < pd_count; idx++) {
		dev_pm_genpd_set_performance_state(pds[idx], 0);
		pm_runtime_put(pds[idx]);
	}
}
421 
/*
 * Reassign ownership of the physical range [@addr, @addr + @size) between
 * the application CPU (HLOS) and the modem (MSS MSA) via an SCM call.
 * @current_perm is the caller-maintained ownership bitmap and is updated
 * by qcom_scm_assign_mem(). No-op when memory protection is disabled or
 * when ownership already matches the request.
 *
 * Returns 0 on success or a negative errno from the SCM call.
 */
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm,
				   bool local, bool remote, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next[2];
	bool cur_local, cur_remote;
	int nr = 0;

	if (!qproc->need_mem_protection)
		return 0;

	/* Skip the (expensive) SCM call when nothing would change */
	cur_local = !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS));
	cur_remote = !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA));
	if (local == cur_local && remote == cur_remote)
		return 0;

	if (local) {
		next[nr].vmid = QCOM_SCM_VMID_HLOS;
		next[nr].perm = QCOM_SCM_PERM_RWX;
		nr++;
	}

	if (remote) {
		next[nr].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[nr].perm = QCOM_SCM_PERM_RW;
		nr++;
	}

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, next, nr);
}
451 
q6v5_debug_policy_load(struct q6v5 * qproc,void * mba_region)452 static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
453 {
454 	const struct firmware *dp_fw;
455 
456 	if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
457 		return;
458 
459 	if (SZ_1M + dp_fw->size <= qproc->mba_size) {
460 		memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
461 		qproc->dp_size = dp_fw->size;
462 	}
463 
464 	release_firmware(dp_fw);
465 }
466 
q6v5_load(struct rproc * rproc,const struct firmware * fw)467 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
468 {
469 	struct q6v5 *qproc = rproc->priv;
470 	void *mba_region;
471 
472 	/* MBA is restricted to a maximum size of 1M */
473 	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
474 		dev_err(qproc->dev, "MBA firmware load failed\n");
475 		return -EINVAL;
476 	}
477 
478 	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
479 	if (!mba_region) {
480 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
481 			&qproc->mba_phys, qproc->mba_size);
482 		return -EBUSY;
483 	}
484 
485 	memcpy(mba_region, fw->data, fw->size);
486 	q6v5_debug_policy_load(qproc, mba_region);
487 	memunmap(mba_region);
488 
489 	return 0;
490 }
491 
/*
 * Put the MSS into reset using the variant-appropriate sequence.
 *
 * The exact ordering of PDC/MSS reset operations and conn_box writes is
 * hardware-mandated for each variant; do not reorder.
 *
 * Returns 0 or a negative errno from the last reset operation.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Pulse mss_restart while PDC reset is held */
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
		 * is withdrawn post MSS assert followed by a MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else if (qproc->has_ext_cntl_regs) {
		/* Clear the RSCC disable before cycling the resets */
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		reset_control_assert(qproc->pdc_reset);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		/* Plain variants: simply assert the MSS restart line */
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}
530 
/*
 * Release the MSS from reset, again per-variant. For alt-reset parts the
 * RMB_MBA_ALT_RESET word is raised around the mss_restart pulse while
 * PDC reset is held; the ordering is hardware-mandated.
 *
 * Returns 0 or a negative errno from the reset operation.
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
		/* These variants were left asserted; a full cycle releases them */
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}
549 
q6v5_rmb_pbl_wait(struct q6v5 * qproc,int ms)550 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
551 {
552 	unsigned long timeout;
553 	s32 val;
554 
555 	timeout = jiffies + msecs_to_jiffies(ms);
556 	for (;;) {
557 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
558 		if (val)
559 			break;
560 
561 		if (time_after(jiffies, timeout))
562 			return -ETIMEDOUT;
563 
564 		msleep(1);
565 	}
566 
567 	return val;
568 }
569 
/*
 * Poll the MBA status word in the RMB for up to @ms milliseconds.
 *
 * When @status is zero, any non-zero value ends the wait; otherwise the
 * wait ends when the register matches @status exactly. Negative register
 * values (MBA error codes) also end the wait and are returned as-is.
 * Returns the last value read, or -ETIMEDOUT.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);
	s32 val;

	while (1) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);

		/* Negative values are error reports from the MBA */
		if (val < 0)
			return val;

		if (status ? (val == status) : (val != 0))
			return val;

		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;

		msleep(1);
	}
}
595 
q6v5_dump_mba_logs(struct q6v5 * qproc)596 static void q6v5_dump_mba_logs(struct q6v5 *qproc)
597 {
598 	struct rproc *rproc = qproc->rproc;
599 	void *data;
600 	void *mba_region;
601 
602 	if (!qproc->has_mba_logs)
603 		return;
604 
605 	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
606 				    qproc->mba_size))
607 		return;
608 
609 	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
610 	if (!mba_region)
611 		return;
612 
613 	data = vmalloc(MBA_LOG_SIZE);
614 	if (data) {
615 		memcpy(data, mba_region, MBA_LOG_SIZE);
616 		dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
617 	}
618 	memunmap(mba_region);
619 }
620 
/*
 * Power up and release the Q6 core using the variant-specific sequence,
 * then wait for the PBL to report its status via the RMB.
 *
 * Newer parts (SDM845, SC7180/SC7280) use a hardware boot FSM; older
 * parts require a hand-sequenced head-switch / memory / clamp bring-up.
 * The register write ordering within each branch is hardware-mandated.
 *
 * Returns 0 on success, -ETIMEDOUT when a step or the PBL times out, or
 * -EINVAL when the PBL reports anything other than RMB_PBL_SUCCESS.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the sleep clock before starting the boot FSM */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8909 ||
		   qproc->version == MSS_MSM8953 ||
		   qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998 ||
		   qproc->version == MSS_SDM660) {

		if (qproc->version != MSS_MSM8909 &&
		    qproc->version != MSS_MSM8953)
			/* Override the ACC value if required */
			writel(QDSP6SS_ACC_OVERRIDE_VAL,
			       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write landed before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		if (qproc->version == MSS_SDM660) {
			/* NOTE: 'i' doubles as the poll scratch variable here */
			ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS,
							 i, (i & QDSP6v55_BHS_EN_REST_ACK),
							 1, BHS_CHECK_MAX_LOOPS);
			if (ret == -ETIMEDOUT) {
				dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n");
				return -ETIMEDOUT;
			}
		}

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		if (qproc->version != MSS_MSM8909) {
			int mem_pwr_ctl;

			/* Deassert QDSP6 compiler memory clamp */
			val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			val &= ~QDSP6v56_CLAMP_QMC_MEM;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Deassert memory peripheral sleep and L2 memory standby */
			val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Turn on L1, L2, ETB and JU memories 1 at a time */
			if (qproc->version == MSS_MSM8953 ||
			    qproc->version == MSS_MSM8996) {
				mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
				i = 19;
			} else {
				/* MSS_MSM8998, MSS_SDM660 */
				mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
				i = 28;
			}
			val = readl(qproc->reg_base + mem_pwr_ctl);
			for (; i >= 0; i--) {
				val |= BIT(i);
				writel(val, qproc->reg_base + mem_pwr_ctl);
				/*
				 * Read back value to ensure the write is done then
				 * wait for 1us for both memory peripheral and data
				 * array to turn on.
				 */
				val |= readl(qproc->reg_base + mem_pwr_ctl);
				udelay(1);
			}
		} else {
			/* Turn on memories */
			val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			val |= Q6SS_SLP_RET_N | Q6SS_L2DATA_STBY_N |
			       Q6SS_ETB_SLP_NRET_N | QDSP6V55_MEM_BITS;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Turn on L2 banks 1 at a time */
			for (i = 0; i <= 7; i++) {
				val |= BIT(i);
				writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			}
		}

		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Legacy (e.g. pre-v56) bring-up path */
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
869 
/*
 * Bring up a Q-channel at @offset within @map: raise the request bit and
 * wait for the accept acknowledgment. On parts with external control
 * registers the axim1 clock is forced on first, since the handshake
 * needs it running.
 *
 * No-op (returns 0) when the part has no Q-channel registers.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	unsigned int val;
	int ret;

	if (!qproc->has_qaccept_regs)
		return 0;

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		regmap_write(qproc->conn_map, qproc->force_clk_on, 1);

		/* axim1_clk_off reads 0 once the clock is actually running */
		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "failed to enable axim1 clock\n");
			return -ETIMEDOUT;
		}
	}

	regmap_write(map, offset + QACCEPT_REQ_REG, 1);

	/* Wait for accept */
	ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
				       QACCEPT_TIMEOUT_US);
	if (ret) {
		dev_err(qproc->dev, "qchannel enable failed\n");
		return -ETIMEDOUT;
	}

	return 0;
}
902 
/*
 * Take down a Q-channel at @offset within @map. The takedown request may
 * be denied by the hardware, in which case the request is re-raised and
 * the whole sequence retried (up to 10 outer attempts, each with up to
 * 10 inner polls of the deny/accept bits). Failure is only logged; the
 * subsequent mss_restart is relied upon to clear pending transactions.
 */
static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	int ret;
	unsigned int val, retry;
	unsigned int nretry = 10;
	bool takedown_complete = false;

	if (!qproc->has_qaccept_regs)
		return;

	while (!takedown_complete && nretry) {
		nretry--;

		/* Wait for active transactions to complete */
		regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
					 QACCEPT_TIMEOUT_US);

		/* Request Q-channel transaction takedown */
		regmap_write(map, offset + QACCEPT_REQ_REG, 0);

		/*
		 * If the request is denied, reset the Q-channel takedown request,
		 * wait for active transactions to complete and retry takedown.
		 */
		retry = 10;
		while (retry) {
			usleep_range(5, 10);
			retry--;
			/* Deny asserted: re-raise the request and retry the outer loop */
			ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
			if (!ret && val) {
				regmap_write(map, offset + QACCEPT_REQ_REG, 1);
				break;
			}

			/* Accept dropped: takedown acknowledged, we are done */
			ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
			if (!ret && !val) {
				takedown_complete = true;
				break;
			}
		}

		/* Inner polls exhausted without deny or accept-drop: give up */
		if (!retry)
			break;
	}

	/* Rely on mss_restart to clear out pending transactions on takedown failure */
	if (!takedown_complete)
		dev_err(qproc->dev, "qchannel takedown failed\n");
}
952 
/*
 * Halt the AXI port whose halt registers live at @offset within
 * @halt_map: assert the halt request, wait for the acknowledgment, and
 * verify the port reports idle. Failure is only logged — the port stays
 * halted until the next subsystem reset regardless.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
979 
/*
 * Stage the MPSS firmware metadata for authentication by the MBA.
 *
 * The metadata extracted from @fw is copied into a physically contiguous
 * buffer (either the dedicated mdata region or a fresh DMA allocation),
 * ownership of that buffer is handed to the modem via SCM, its address
 * is announced through the RMB, and the MBA's authentication result is
 * awaited. Ownership is reclaimed and the buffer freed before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
				const char *fw_name)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	u64 mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	if (qproc->mdata_phys) {
		/* Use the dedicated metadata carveout when one was provided */
		if (size > qproc->mdata_size) {
			ret = -EINVAL;
			dev_err(qproc->dev, "metadata size outside memory range\n");
			goto free_metadata;
		}

		phys = qproc->mdata_phys;
		ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
		if (!ptr) {
			ret = -EBUSY;
			dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
				&qproc->mdata_phys, size);
			goto free_metadata;
		}
	} else {
		/* Otherwise grab a physically contiguous DMA buffer */
		ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
		if (!ptr) {
			ret = -ENOMEM;
			dev_err(qproc->dev, "failed to allocate mdt buffer\n");
			goto free_metadata;
		}
	}

	memcpy(ptr, metadata, size);

	/* The CPU mapping is no longer needed once the copy is done */
	if (qproc->mdata_phys)
		memunmap(ptr);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Announce the buffer to the MBA and kick off authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	if (!qproc->mdata_phys)
		dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
free_metadata:
	kfree(metadata);

	/* Positive values from q6v5_rmb_mba_wait() mean success */
	return ret < 0 ? ret : 0;
}
1060 
q6v5_phdr_valid(const struct elf32_phdr * phdr)1061 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
1062 {
1063 	if (phdr->p_type != PT_LOAD)
1064 		return false;
1065 
1066 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
1067 		return false;
1068 
1069 	if (!phdr->p_memsz)
1070 		return false;
1071 
1072 	return true;
1073 }
1074 
/*
 * q6v5_mba_load() - power up the Q6 and boot the Modem Boot Authenticator
 * @qproc: modem subsystem context
 *
 * Enables power domains, regulators, clocks and deasserts resets in the
 * order the hardware requires, grants the Q6 access to the MPSS and MBA
 * carveouts, releases the processor from reset and waits for the MBA to
 * report a successful boot.  On failure the sequence is unwound in
 * reverse through the goto ladder below.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	ret = qcom_q6v5_prepare(&qproc->q6v5);
	if (ret)
		return ret;

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
				    qproc->fallback_proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_fallback_proxy_reg;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	if (ret) {
		dev_err(qproc->dev, "failed to enable axi bridge\n");
		goto disable_active_clks;
	}

	/*
	 * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
	 * the Q6 access to this region.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Point the boot ROM at the MBA; a debug policy, if any, follows at +1M */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	if (qproc->has_mba_logs)
		qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);

	/* Positive q6v5_rmb_mba_wait() returns are RMB status register values */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	mba_load_err = true;
reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		/* Buffer is back with Linux, so the MBA log can be captured */
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_fallback_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
1240 
/*
 * q6v5_mba_reclaim() - halt the Q6 and release all MBA-boot resources
 * @qproc: modem subsystem context
 *
 * Counterpart of q6v5_mba_load(): halts the AXI ports, asserts reset,
 * disables clocks and regulators, and returns ownership of the MBA
 * carveout to Linux.  Proxy resources are only dropped here when
 * qcom_q6v5_unprepare() returns non-zero (presumably: the modem never
 * performed the handover - cf. qcom_msa_handover(), which drops the
 * same votes).
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 1);

		/* Poll the clock-off status bits until they read back zero */
		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable axim1 clock\n");

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable crypto clock\n");
	}

	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		/* No handover happened; drop the proxy votes ourselves */
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
				       qproc->fallback_proxy_reg_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
1311 
q6v5_reload_mba(struct rproc * rproc)1312 static int q6v5_reload_mba(struct rproc *rproc)
1313 {
1314 	struct q6v5 *qproc = rproc->priv;
1315 	const struct firmware *fw;
1316 	int ret;
1317 
1318 	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1319 	if (ret < 0)
1320 		return ret;
1321 
1322 	q6v5_load(rproc, fw);
1323 	ret = q6v5_mba_load(qproc);
1324 	release_firmware(fw);
1325 
1326 	return ret;
1327 }
1328 
q6v5_mpss_load(struct q6v5 * qproc)1329 static int q6v5_mpss_load(struct q6v5 *qproc)
1330 {
1331 	const struct elf32_phdr *phdrs;
1332 	const struct elf32_phdr *phdr;
1333 	const struct firmware *seg_fw;
1334 	const struct firmware *fw;
1335 	struct elf32_hdr *ehdr;
1336 	phys_addr_t mpss_reloc;
1337 	phys_addr_t boot_addr;
1338 	phys_addr_t min_addr = PHYS_ADDR_MAX;
1339 	phys_addr_t max_addr = 0;
1340 	u32 code_length;
1341 	bool relocate = false;
1342 	char *fw_name;
1343 	size_t fw_name_len;
1344 	ssize_t offset;
1345 	size_t size = 0;
1346 	void *ptr;
1347 	int ret;
1348 	int i;
1349 
1350 	fw_name_len = strlen(qproc->hexagon_mdt_image);
1351 	if (fw_name_len <= 4)
1352 		return -EINVAL;
1353 
1354 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1355 	if (!fw_name)
1356 		return -ENOMEM;
1357 
1358 	ret = request_firmware(&fw, fw_name, qproc->dev);
1359 	if (ret < 0) {
1360 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
1361 		goto out;
1362 	}
1363 
1364 	/* Initialize the RMB validator */
1365 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1366 
1367 	ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
1368 	if (ret)
1369 		goto release_firmware;
1370 
1371 	ehdr = (struct elf32_hdr *)fw->data;
1372 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1373 
1374 	for (i = 0; i < ehdr->e_phnum; i++) {
1375 		phdr = &phdrs[i];
1376 
1377 		if (!q6v5_phdr_valid(phdr))
1378 			continue;
1379 
1380 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1381 			relocate = true;
1382 
1383 		if (phdr->p_paddr < min_addr)
1384 			min_addr = phdr->p_paddr;
1385 
1386 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
1387 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1388 	}
1389 
1390 	if (qproc->version == MSS_MSM8953) {
1391 		ret = qcom_scm_pas_mem_setup(MPSS_PAS_ID, qproc->mpss_phys, qproc->mpss_size);
1392 		if (ret) {
1393 			dev_err(qproc->dev,
1394 				"setting up mpss memory failed: %d\n", ret);
1395 			goto release_firmware;
1396 		}
1397 	}
1398 
1399 	/*
1400 	 * In case of a modem subsystem restart on secure devices, the modem
1401 	 * memory can be reclaimed only after MBA is loaded.
1402 	 */
1403 	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
1404 				qproc->mpss_phys, qproc->mpss_size);
1405 
1406 	/* Share ownership between Linux and MSS, during segment loading */
1407 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1408 				      qproc->mpss_phys, qproc->mpss_size);
1409 	if (ret) {
1410 		dev_err(qproc->dev,
1411 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1412 		ret = -EAGAIN;
1413 		goto release_firmware;
1414 	}
1415 
1416 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
1417 	qproc->mpss_reloc = mpss_reloc;
1418 	/* Load firmware segments */
1419 	for (i = 0; i < ehdr->e_phnum; i++) {
1420 		phdr = &phdrs[i];
1421 
1422 		if (!q6v5_phdr_valid(phdr))
1423 			continue;
1424 
1425 		offset = phdr->p_paddr - mpss_reloc;
1426 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1427 			dev_err(qproc->dev, "segment outside memory range\n");
1428 			ret = -EINVAL;
1429 			goto release_firmware;
1430 		}
1431 
1432 		if (phdr->p_filesz > phdr->p_memsz) {
1433 			dev_err(qproc->dev,
1434 				"refusing to load segment %d with p_filesz > p_memsz\n",
1435 				i);
1436 			ret = -EINVAL;
1437 			goto release_firmware;
1438 		}
1439 
1440 		ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
1441 		if (!ptr) {
1442 			dev_err(qproc->dev,
1443 				"unable to map memory region: %pa+%zx-%x\n",
1444 				&qproc->mpss_phys, offset, phdr->p_memsz);
1445 			goto release_firmware;
1446 		}
1447 
1448 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
1449 			/* Firmware is large enough to be non-split */
1450 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
1451 				dev_err(qproc->dev,
1452 					"failed to load segment %d from truncated file %s\n",
1453 					i, fw_name);
1454 				ret = -EINVAL;
1455 				memunmap(ptr);
1456 				goto release_firmware;
1457 			}
1458 
1459 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1460 		} else if (phdr->p_filesz) {
1461 			/* Replace "xxx.xxx" with "xxx.bxx" */
1462 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1463 			ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
1464 							ptr, phdr->p_filesz);
1465 			if (ret) {
1466 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1467 				memunmap(ptr);
1468 				goto release_firmware;
1469 			}
1470 
1471 			if (seg_fw->size != phdr->p_filesz) {
1472 				dev_err(qproc->dev,
1473 					"failed to load segment %d from truncated file %s\n",
1474 					i, fw_name);
1475 				ret = -EINVAL;
1476 				release_firmware(seg_fw);
1477 				memunmap(ptr);
1478 				goto release_firmware;
1479 			}
1480 
1481 			release_firmware(seg_fw);
1482 		}
1483 
1484 		if (phdr->p_memsz > phdr->p_filesz) {
1485 			memset(ptr + phdr->p_filesz, 0,
1486 			       phdr->p_memsz - phdr->p_filesz);
1487 		}
1488 		memunmap(ptr);
1489 		size += phdr->p_memsz;
1490 
1491 		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1492 		if (!code_length) {
1493 			boot_addr = relocate ? qproc->mpss_phys : min_addr;
1494 			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1495 			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1496 		}
1497 		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1498 
1499 		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1500 		if (ret < 0) {
1501 			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1502 				ret);
1503 			goto release_firmware;
1504 		}
1505 	}
1506 
1507 	/* Transfer ownership of modem ddr region to q6 */
1508 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1509 				      qproc->mpss_phys, qproc->mpss_size);
1510 	if (ret) {
1511 		dev_err(qproc->dev,
1512 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1513 		ret = -EAGAIN;
1514 		goto release_firmware;
1515 	}
1516 
1517 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1518 	if (ret == -ETIMEDOUT)
1519 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1520 	else if (ret < 0)
1521 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1522 
1523 	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
1524 
1525 release_firmware:
1526 	release_firmware(fw);
1527 out:
1528 	kfree(fw_name);
1529 
1530 	return ret < 0 ? ret : 0;
1531 }
1532 
/*
 * qcom_q6v5_dump_segment() - coredump callback that copies one MPSS segment
 * @rproc:	remoteproc instance being dumped
 * @segment:	descriptor of the segment registered for dumping
 * @dest:	destination buffer within the coredump
 * @cp_offset:	offset into the segment to start copying from
 * @size:	number of bytes to copy
 *
 * If the MBA is not resident it is rebooted first and the modem region's
 * ownership is returned to Linux so it can be read.  Regions that cannot
 * be mapped are filled with 0xff instead of failing the dump.  Once the
 * running byte count reaches the total registered dump size, ownership is
 * handed back to the Q6 and the MBA resources are reclaimed.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest, size_t cp_offset, size_t size)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);

	if (ptr) {
		memcpy(dest, ptr, size);
		memunmap(ptr);
	} else {
		/* Unreadable memory: pad the dump rather than abort it */
		memset(dest, 0xff, size);
	}

	qproc->current_dump_size += size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}
1578 
q6v5_start(struct rproc * rproc)1579 static int q6v5_start(struct rproc *rproc)
1580 {
1581 	struct q6v5 *qproc = rproc->priv;
1582 	int xfermemop_ret;
1583 	int ret;
1584 
1585 	ret = q6v5_mba_load(qproc);
1586 	if (ret)
1587 		return ret;
1588 
1589 	dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
1590 		 qproc->dp_size ? "" : "out");
1591 
1592 	ret = q6v5_mpss_load(qproc);
1593 	if (ret)
1594 		goto reclaim_mpss;
1595 
1596 	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1597 	if (ret == -ETIMEDOUT) {
1598 		dev_err(qproc->dev, "start timed out\n");
1599 		goto reclaim_mpss;
1600 	}
1601 
1602 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
1603 						false, qproc->mba_phys,
1604 						qproc->mba_size);
1605 	if (xfermemop_ret)
1606 		dev_err(qproc->dev,
1607 			"Failed to reclaim mba buffer system may become unstable\n");
1608 
1609 	/* Reset Dump Segment Mask */
1610 	qproc->current_dump_size = 0;
1611 
1612 	return 0;
1613 
1614 reclaim_mpss:
1615 	q6v5_mba_reclaim(qproc);
1616 	q6v5_dump_mba_logs(qproc);
1617 
1618 	return ret;
1619 }
1620 
q6v5_stop(struct rproc * rproc)1621 static int q6v5_stop(struct rproc *rproc)
1622 {
1623 	struct q6v5 *qproc = rproc->priv;
1624 	int ret;
1625 
1626 	ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
1627 	if (ret == -ETIMEDOUT)
1628 		dev_err(qproc->dev, "timed out on wait\n");
1629 
1630 	q6v5_mba_reclaim(qproc);
1631 
1632 	return 0;
1633 }
1634 
qcom_q6v5_register_dump_segments(struct rproc * rproc,const struct firmware * mba_fw)1635 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1636 					    const struct firmware *mba_fw)
1637 {
1638 	const struct firmware *fw;
1639 	const struct elf32_phdr *phdrs;
1640 	const struct elf32_phdr *phdr;
1641 	const struct elf32_hdr *ehdr;
1642 	struct q6v5 *qproc = rproc->priv;
1643 	unsigned long i;
1644 	int ret;
1645 
1646 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1647 	if (ret < 0) {
1648 		dev_err(qproc->dev, "unable to load %s\n",
1649 			qproc->hexagon_mdt_image);
1650 		return ret;
1651 	}
1652 
1653 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1654 
1655 	ehdr = (struct elf32_hdr *)fw->data;
1656 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1657 	qproc->total_dump_size = 0;
1658 
1659 	for (i = 0; i < ehdr->e_phnum; i++) {
1660 		phdr = &phdrs[i];
1661 
1662 		if (!q6v5_phdr_valid(phdr))
1663 			continue;
1664 
1665 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1666 							phdr->p_memsz,
1667 							qcom_q6v5_dump_segment,
1668 							NULL);
1669 		if (ret)
1670 			break;
1671 
1672 		qproc->total_dump_size += phdr->p_memsz;
1673 	}
1674 
1675 	release_firmware(fw);
1676 	return ret;
1677 }
1678 
q6v5_panic(struct rproc * rproc)1679 static unsigned long q6v5_panic(struct rproc *rproc)
1680 {
1681 	struct q6v5 *qproc = rproc->priv;
1682 
1683 	return qcom_q6v5_panic(&qproc->q6v5);
1684 }
1685 
/* remoteproc operations for the self-authenticating modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,	/* registers coredump segments */
	.load = q6v5_load,
	.panic = q6v5_panic,
};
1693 
qcom_msa_handover(struct qcom_q6v5 * q6v5)1694 static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
1695 {
1696 	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
1697 
1698 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1699 			 qproc->proxy_clk_count);
1700 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
1701 			       qproc->proxy_reg_count);
1702 	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
1703 			       qproc->fallback_proxy_reg_count);
1704 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1705 }
1706 
/*
 * q6v5_init_mem() - map register blocks and parse syscon offsets from DT
 * @qproc: modem subsystem context
 * @pdev:  backing platform device
 *
 * Maps the "qdsp6" and "rmb" register regions and extracts the regmap
 * offsets named by the qcom,halt-regs / qcom,qaccept-regs /
 * qcom,ext-regs / qcom,spare-regs properties, according to which
 * features this SoC variant advertises.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	int halt_cell_cnt = 3;
	int ret;

	qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* Variants with a vQ6 carry a fourth halt-regs cell */
	if (qproc->has_vq6)
		halt_cell_cnt++;

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", halt_cell_cnt, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_vq6)
		qproc->halt_vq6 = args.args[3];

	/* Q-channel accept offsets, on variants that expose them */
	if (qproc->has_qaccept_regs) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,qaccept-regs",
						       3, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
			return -EINVAL;
		}

		qproc->qaccept_mdm = args.args[0];
		qproc->qaccept_cx = args.args[1];
		qproc->qaccept_axi = args.args[2];
	}

	/* External control offsets come as two qcom,ext-regs entries */
	if (qproc->has_ext_cntl_regs) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->force_clk_on = args.args[0];
		qproc->rscc_disable = args.args[1];

		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
			return -EINVAL;
		}

		qproc->axim1_clk_off = args.args[0];
		qproc->crypto_clk_off = args.args[1];
	}

	if (qproc->has_spare_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}
1805 
q6v5_init_clocks(struct device * dev,struct clk ** clks,char ** clk_names)1806 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1807 		char **clk_names)
1808 {
1809 	int i;
1810 
1811 	if (!clk_names)
1812 		return 0;
1813 
1814 	for (i = 0; clk_names[i]; i++) {
1815 		clks[i] = devm_clk_get(dev, clk_names[i]);
1816 		if (IS_ERR(clks[i])) {
1817 			int rc = PTR_ERR(clks[i]);
1818 
1819 			if (rc != -EPROBE_DEFER)
1820 				dev_err(dev, "Failed to get %s clock\n",
1821 					clk_names[i]);
1822 			return rc;
1823 		}
1824 	}
1825 
1826 	return i;
1827 }
1828 
q6v5_pds_attach(struct device * dev,struct device ** devs,char ** pd_names)1829 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1830 			   char **pd_names)
1831 {
1832 	size_t num_pds = 0;
1833 	int ret;
1834 	int i;
1835 
1836 	if (!pd_names)
1837 		return 0;
1838 
1839 	while (pd_names[num_pds])
1840 		num_pds++;
1841 
1842 	for (i = 0; i < num_pds; i++) {
1843 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1844 		if (IS_ERR_OR_NULL(devs[i])) {
1845 			ret = PTR_ERR(devs[i]) ? : -ENODATA;
1846 			goto unroll_attach;
1847 		}
1848 	}
1849 
1850 	return num_pds;
1851 
1852 unroll_attach:
1853 	for (i--; i >= 0; i--)
1854 		dev_pm_domain_detach(devs[i], false);
1855 
1856 	return ret;
1857 }
1858 
/* Detach every power domain previously attached by q6v5_pds_attach(). */
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
			    size_t pd_count)
{
	size_t idx;

	for (idx = 0; idx < pd_count; idx++)
		dev_pm_domain_detach(pds[idx], false);
}
1867 
q6v5_init_reset(struct q6v5 * qproc)1868 static int q6v5_init_reset(struct q6v5 *qproc)
1869 {
1870 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1871 							      "mss_restart");
1872 	if (IS_ERR(qproc->mss_restart)) {
1873 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1874 		return PTR_ERR(qproc->mss_restart);
1875 	}
1876 
1877 	if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
1878 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1879 								    "pdc_reset");
1880 		if (IS_ERR(qproc->pdc_reset)) {
1881 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1882 			return PTR_ERR(qproc->pdc_reset);
1883 		}
1884 	}
1885 
1886 	return 0;
1887 }
1888 
q6v5_alloc_memory_region(struct q6v5 * qproc)1889 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1890 {
1891 	struct device_node *child;
1892 	struct reserved_mem *rmem;
1893 	struct device_node *node;
1894 
1895 	/*
1896 	 * In the absence of mba/mpss sub-child, extract the mba and mpss
1897 	 * reserved memory regions from device's memory-region property.
1898 	 */
1899 	child = of_get_child_by_name(qproc->dev->of_node, "mba");
1900 	if (!child) {
1901 		node = of_parse_phandle(qproc->dev->of_node,
1902 					"memory-region", 0);
1903 	} else {
1904 		node = of_parse_phandle(child, "memory-region", 0);
1905 		of_node_put(child);
1906 	}
1907 
1908 	if (!node) {
1909 		dev_err(qproc->dev, "no mba memory-region specified\n");
1910 		return -EINVAL;
1911 	}
1912 
1913 	rmem = of_reserved_mem_lookup(node);
1914 	of_node_put(node);
1915 	if (!rmem) {
1916 		dev_err(qproc->dev, "unable to resolve mba region\n");
1917 		return -EINVAL;
1918 	}
1919 
1920 	qproc->mba_phys = rmem->base;
1921 	qproc->mba_size = rmem->size;
1922 
1923 	if (!child) {
1924 		node = of_parse_phandle(qproc->dev->of_node,
1925 					"memory-region", 1);
1926 	} else {
1927 		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1928 		node = of_parse_phandle(child, "memory-region", 0);
1929 		of_node_put(child);
1930 	}
1931 
1932 	if (!node) {
1933 		dev_err(qproc->dev, "no mpss memory-region specified\n");
1934 		return -EINVAL;
1935 	}
1936 
1937 	rmem = of_reserved_mem_lookup(node);
1938 	of_node_put(node);
1939 	if (!rmem) {
1940 		dev_err(qproc->dev, "unable to resolve mpss region\n");
1941 		return -EINVAL;
1942 	}
1943 
1944 	qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
1945 	qproc->mpss_size = rmem->size;
1946 
1947 	if (!child) {
1948 		node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
1949 	} else {
1950 		child = of_get_child_by_name(qproc->dev->of_node, "metadata");
1951 		node = of_parse_phandle(child, "memory-region", 0);
1952 		of_node_put(child);
1953 	}
1954 
1955 	if (!node)
1956 		return 0;
1957 
1958 	rmem = of_reserved_mem_lookup(node);
1959 	if (!rmem) {
1960 		dev_err(qproc->dev, "unable to resolve metadata region\n");
1961 		return -EINVAL;
1962 	}
1963 
1964 	qproc->mdata_phys = rmem->base;
1965 	qproc->mdata_size = rmem->size;
1966 
1967 	return 0;
1968 }
1969 
q6v5_probe(struct platform_device * pdev)1970 static int q6v5_probe(struct platform_device *pdev)
1971 {
1972 	const struct rproc_hexagon_res *desc;
1973 	struct device_node *node;
1974 	struct q6v5 *qproc;
1975 	struct rproc *rproc;
1976 	const char *mba_image;
1977 	int ret;
1978 
1979 	desc = of_device_get_match_data(&pdev->dev);
1980 	if (!desc)
1981 		return -EINVAL;
1982 
1983 	if (desc->need_mem_protection && !qcom_scm_is_available())
1984 		return -EPROBE_DEFER;
1985 
1986 	mba_image = desc->hexagon_mba_image;
1987 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1988 					    0, &mba_image);
1989 	if (ret < 0 && ret != -EINVAL) {
1990 		dev_err(&pdev->dev, "unable to read mba firmware-name\n");
1991 		return ret;
1992 	}
1993 
1994 	rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1995 				 mba_image, sizeof(*qproc));
1996 	if (!rproc) {
1997 		dev_err(&pdev->dev, "failed to allocate rproc\n");
1998 		return -ENOMEM;
1999 	}
2000 
2001 	rproc->auto_boot = false;
2002 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
2003 
2004 	qproc = rproc->priv;
2005 	qproc->dev = &pdev->dev;
2006 	qproc->rproc = rproc;
2007 	qproc->hexagon_mdt_image = "modem.mdt";
2008 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
2009 					    1, &qproc->hexagon_mdt_image);
2010 	if (ret < 0 && ret != -EINVAL) {
2011 		dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
2012 		return ret;
2013 	}
2014 
2015 	platform_set_drvdata(pdev, qproc);
2016 
2017 	qproc->has_qaccept_regs = desc->has_qaccept_regs;
2018 	qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
2019 	qproc->has_vq6 = desc->has_vq6;
2020 	qproc->has_spare_reg = desc->has_spare_reg;
2021 	ret = q6v5_init_mem(qproc, pdev);
2022 	if (ret)
2023 		return ret;
2024 
2025 	ret = q6v5_alloc_memory_region(qproc);
2026 	if (ret)
2027 		return ret;
2028 
2029 	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
2030 			       desc->proxy_clk_names);
2031 	if (ret < 0) {
2032 		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
2033 		return ret;
2034 	}
2035 	qproc->proxy_clk_count = ret;
2036 
2037 	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
2038 			       desc->reset_clk_names);
2039 	if (ret < 0) {
2040 		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
2041 		return ret;
2042 	}
2043 	qproc->reset_clk_count = ret;
2044 
2045 	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
2046 			       desc->active_clk_names);
2047 	if (ret < 0) {
2048 		dev_err(&pdev->dev, "Failed to get active clocks.\n");
2049 		return ret;
2050 	}
2051 	qproc->active_clk_count = ret;
2052 
2053 	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
2054 				  desc->proxy_supply);
2055 	if (ret < 0) {
2056 		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
2057 		return ret;
2058 	}
2059 	qproc->proxy_reg_count = ret;
2060 
2061 	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
2062 				  desc->active_supply);
2063 	if (ret < 0) {
2064 		dev_err(&pdev->dev, "Failed to get active regulators.\n");
2065 		return ret;
2066 	}
2067 	qproc->active_reg_count = ret;
2068 
2069 	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
2070 			      desc->proxy_pd_names);
2071 	/* Fallback to regulators for old device trees */
2072 	if (ret == -ENODATA && desc->fallback_proxy_supply) {
2073 		ret = q6v5_regulator_init(&pdev->dev,
2074 					  qproc->fallback_proxy_regs,
2075 					  desc->fallback_proxy_supply);
2076 		if (ret < 0) {
2077 			dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
2078 			return ret;
2079 		}
2080 		qproc->fallback_proxy_reg_count = ret;
2081 	} else if (ret < 0) {
2082 		dev_err(&pdev->dev, "Failed to init power domains\n");
2083 		return ret;
2084 	} else {
2085 		qproc->proxy_pd_count = ret;
2086 	}
2087 
2088 	qproc->has_alt_reset = desc->has_alt_reset;
2089 	ret = q6v5_init_reset(qproc);
2090 	if (ret)
2091 		goto detach_proxy_pds;
2092 
2093 	qproc->version = desc->version;
2094 	qproc->need_mem_protection = desc->need_mem_protection;
2095 	qproc->has_mba_logs = desc->has_mba_logs;
2096 
2097 	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
2098 			     qcom_msa_handover);
2099 	if (ret)
2100 		goto detach_proxy_pds;
2101 
2102 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
2103 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
2104 	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
2105 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
2106 	qcom_add_pdm_subdev(rproc, &qproc->pdm_subdev);
2107 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
2108 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
2109 	if (IS_ERR(qproc->sysmon)) {
2110 		ret = PTR_ERR(qproc->sysmon);
2111 		goto remove_subdevs;
2112 	}
2113 
2114 	ret = rproc_add(rproc);
2115 	if (ret)
2116 		goto remove_sysmon_subdev;
2117 
2118 	node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
2119 	qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
2120 	of_node_put(node);
2121 
2122 	return 0;
2123 
2124 remove_sysmon_subdev:
2125 	qcom_remove_sysmon_subdev(qproc->sysmon);
2126 remove_subdevs:
2127 	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
2128 	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
2129 	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
2130 detach_proxy_pds:
2131 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
2132 
2133 	return ret;
2134 }
2135 
/*
 * q6v5_remove() - tear down the modem remoteproc on driver unbind
 * @pdev: platform device this driver is bound to
 *
 * Teardown mirrors the tail of q6v5_probe() in reverse order: the
 * optional BAM-DMUX child platform device is destroyed first, the rproc
 * is deleted, the subdevices registered during probe are removed, and
 * the proxy power domains are detached last.
 */
static void q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	/* Only present when the DT had a "qcom,bam-dmux" child node */
	if (qproc->bam_dmux)
		of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
	rproc_del(rproc);

	qcom_q6v5_deinit(&qproc->q6v5);
	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_pdm_subdev(rproc, &qproc->pdm_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
2154 
/* SC7180: clock, power-domain and feature-flag resources for the modem */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = true,
	.has_qaccept_regs = false,
	.has_ext_cntl_regs = false,
	.has_vq6 = false,
	.version = MSS_SC7180,
};
2187 
/* SC7280: clock, power-domain and feature-flag resources for the modem */
static const struct rproc_hexagon_res sc7280_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"pka",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"offline",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = false,
	.has_qaccept_regs = true,
	.has_ext_cntl_regs = true,
	.has_vq6 = true,
	.version = MSS_SC7280,
};
2215 
2216 static const struct rproc_hexagon_res sdm660_mss = {
2217 	.hexagon_mba_image = "mba.mbn",
2218 	.proxy_clk_names = (char*[]){
2219 			"xo",
2220 			"qdss",
2221 			"mem",
2222 			NULL
2223 	},
2224 	.active_clk_names = (char*[]){
2225 			"iface",
2226 			"bus",
2227 			"gpll0_mss",
2228 			"mnoc_axi",
2229 			"snoc_axi",
2230 			NULL
2231 	},
2232 	.proxy_pd_names = (char*[]){
2233 			"cx",
2234 			"mx",
2235 			NULL
2236 	},
2237 	.need_mem_protection = true,
2238 	.has_alt_reset = false,
2239 	.has_mba_logs = false,
2240 	.has_spare_reg = false,
2241 	.has_qaccept_regs = false,
2242 	.has_ext_cntl_regs = false,
2243 	.has_vq6 = false,
2244 	.version = MSS_SDM660,
2245 };
2246 
2247 static const struct rproc_hexagon_res sdm845_mss = {
2248 	.hexagon_mba_image = "mba.mbn",
2249 	.proxy_clk_names = (char*[]){
2250 			"xo",
2251 			"prng",
2252 			NULL
2253 	},
2254 	.reset_clk_names = (char*[]){
2255 			"iface",
2256 			"snoc_axi",
2257 			NULL
2258 	},
2259 	.active_clk_names = (char*[]){
2260 			"bus",
2261 			"mem",
2262 			"gpll0_mss",
2263 			"mnoc_axi",
2264 			NULL
2265 	},
2266 	.proxy_pd_names = (char*[]){
2267 			"cx",
2268 			"mx",
2269 			"mss",
2270 			NULL
2271 	},
2272 	.need_mem_protection = true,
2273 	.has_alt_reset = true,
2274 	.has_mba_logs = false,
2275 	.has_spare_reg = false,
2276 	.has_qaccept_regs = false,
2277 	.has_ext_cntl_regs = false,
2278 	.has_vq6 = false,
2279 	.version = MSS_SDM845,
2280 };
2281 
2282 static const struct rproc_hexagon_res msm8998_mss = {
2283 	.hexagon_mba_image = "mba.mbn",
2284 	.proxy_clk_names = (char*[]){
2285 			"xo",
2286 			"qdss",
2287 			"mem",
2288 			NULL
2289 	},
2290 	.active_clk_names = (char*[]){
2291 			"iface",
2292 			"bus",
2293 			"gpll0_mss",
2294 			"mnoc_axi",
2295 			"snoc_axi",
2296 			NULL
2297 	},
2298 	.proxy_pd_names = (char*[]){
2299 			"cx",
2300 			"mx",
2301 			NULL
2302 	},
2303 	.need_mem_protection = true,
2304 	.has_alt_reset = false,
2305 	.has_mba_logs = false,
2306 	.has_spare_reg = false,
2307 	.has_qaccept_regs = false,
2308 	.has_ext_cntl_regs = false,
2309 	.has_vq6 = false,
2310 	.version = MSS_MSM8998,
2311 };
2312 
2313 static const struct rproc_hexagon_res msm8996_mss = {
2314 	.hexagon_mba_image = "mba.mbn",
2315 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2316 		{
2317 			.supply = "pll",
2318 			.uA = 100000,
2319 		},
2320 		{}
2321 	},
2322 	.proxy_clk_names = (char*[]){
2323 			"xo",
2324 			"qdss",
2325 			NULL
2326 	},
2327 	.active_clk_names = (char*[]){
2328 			"iface",
2329 			"bus",
2330 			"mem",
2331 			"gpll0_mss",
2332 			"snoc_axi",
2333 			"mnoc_axi",
2334 			NULL
2335 	},
2336 	.proxy_pd_names = (char*[]){
2337 			"mx",
2338 			"cx",
2339 			NULL
2340 	},
2341 	.need_mem_protection = true,
2342 	.has_alt_reset = false,
2343 	.has_mba_logs = false,
2344 	.has_spare_reg = false,
2345 	.has_qaccept_regs = false,
2346 	.has_ext_cntl_regs = false,
2347 	.has_vq6 = false,
2348 	.version = MSS_MSM8996,
2349 };
2350 
/* MSM8909: regulator, clock, power-domain and feature-flag resources */
static const struct rproc_hexagon_res msm8909_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"mx",
		"cx",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.has_qaccept_regs = false,
	.has_ext_cntl_regs = false,
	.has_vq6 = false,
	.version = MSS_MSM8909,
};
2384 
/* MSM8916: regulator, clock, power-domain and feature-flag resources */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	/* Used instead of power domains when the DT provides none (old DTs) */
	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"mx",
		"cx",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.has_qaccept_regs = false,
	.has_ext_cntl_regs = false,
	.has_vq6 = false,
	.version = MSS_MSM8916,
};
2429 
/* MSM8953: regulator, clock, power-domain and feature-flag resources */
static const struct rproc_hexagon_res msm8953_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.proxy_pd_names = (char*[]) {
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.has_qaccept_regs = false,
	.has_ext_cntl_regs = false,
	.has_vq6 = false,
	.version = MSS_MSM8953,
};
2464 
/* MSM8974: regulator, clock, power-domain and feature-flag resources */
static const struct rproc_hexagon_res msm8974_mss = {
	/* NOTE(review): ".b00" looks like split-image firmware naming — confirm */
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	/* Used instead of power domains when the DT provides none (old DTs) */
	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"mx",
		"cx",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.has_qaccept_regs = false,
	.has_ext_cntl_regs = false,
	.has_vq6 = false,
	.version = MSS_MSM8974,
};
2517 
/* DT match table: maps each supported SoC compatible to its resources */
static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is a legacy compatible; it reuses the MSM8916 data */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
	{ .compatible = "qcom,sdm660-mss-pil", .data = &sdm660_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
2533 
/* Platform driver glue; .remove_new is the void-returning remove callback */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove_new = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");
2546