// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/types.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

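/**
 * struct qcom_scm - driver state
 * @dev:		the SCM platform device
 * @core_clk:		optional core clock
 * @iface_clk:		optional interface clock
 * @bus_clk:		optional bus clock
 * @path:		interconnect path voted up around SCM calls
 * @waitq_comp:		completion used by the SMC wait-queue mechanism
 * @reset:		reset controller exposing the PAS MSS reset
 * @scm_bw_lock:	protects @scm_vote_count and the interconnect votes
 * @scm_vote_count:	number of outstanding bandwidth votes
 * @dload_mode_addr:	physical address of the TCSR download-mode cookie
 */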
struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;
};

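/*
 * Destination VM descriptor consumed by the QCOM_SCM_MP_ASSIGN call (see
 * qcom_scm_assign_mem() below). One entry is passed per new owner; @ctx and
 * @ctx_size describe an optional per-VM context buffer, which this driver
 * always leaves empty.
 */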
struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

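/*
 * Physical memory region handed to the QCOM_SCM_MP_ASSIGN call, describing
 * the buffer whose ownership is being reassigned.
 */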
struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP			= 2,
	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
	QSEECOM_TZ_OWNER_QSEE_OS		= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND			= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE		64

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL	BIT(1)

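/*
 * Layout of the download-mode cookie: qcom_scm_set_download_mode() writes one
 * of the QCOM_DLOAD_* values into this field of the TCSR register located via
 * the "qcom,dload-mode" DT property.
 */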
#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit; otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * No device is required here, as there is only one argument - nothing
	 * needs to be dma_map_single()'d to the secure world.
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on these firmwares. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:        Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in preemptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may occur during cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is the terminal point for powering down a CPU. If there was a pending
 * interrupt, control returns from this function; otherwise the CPU jumps to
 * the warm boot entry point set for it upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

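/**
 * qcom_scm_set_remote_state() - Set the state of a remote processor
 * @state: state to set
 * @id:    remote processor identifier
 *
 * Used, for example, by drivers for secure remote cores to request state
 * changes such as suspend or resume. The exact semantics of @state and @id
 * are defined by the firmware.
 *
 * Return: 0 on success, a negative errno or firmware error code otherwise.
 */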
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(bool enable)
{
	u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, val));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) is used to track
 * the metadata allocation; it must be released by the caller by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the SCM call, memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Return: 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Return: 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Return: 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

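/*
 * A typical PAS firmware-load sequence, sketched for illustration only
 * (error handling omitted; "pas_id", "fw_mem" and "fw_size" are hypothetical
 * caller-provided values):
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *
 *	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, &ctx);
 *	ret = qcom_scm_pas_mem_setup(pas_id, fw_mem, fw_size);
 *	... copy firmware segments into the prepared region ...
 *	ret = qcom_scm_pas_auth_and_reset(pas_id);
 *	qcom_scm_pas_metadata_release(&ctx);
 *
 * and, on teardown, qcom_scm_pas_shutdown(pas_id).
 */
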
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Return: true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

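/**
 * qcom_scm_io_readl() - Read a 32-bit register through the secure world
 * @addr: physical address of the register
 * @val:  output; filled with the value read
 *
 * Allows reading registers that are only accessible from the secure world.
 * Safe to call from atomic context.
 *
 * Return: 0 on success, negative errno on failure.
 */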
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

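/**
 * qcom_scm_io_writel() - Write a 32-bit register through the secure world
 * @addr: physical address of the register
 * @val:  value to write
 *
 * Counterpart to qcom_scm_io_readl(); safe to call from atomic context.
 *
 * Return: 0 on success, negative errno on failure.
 */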
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return: true if the restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

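/**
 * qcom_scm_restore_sec_cfg() - Restore the secure configuration of a device
 * @device_id: firmware-defined identifier of the device
 * @spare:     spare argument, typically zero
 *
 * Asks the secure world to re-program the security configuration (e.g. SMMU
 * stream mapping) of the given device, typically after it lost state in a
 * power collapse. The meaning of @device_id is defined by the firmware.
 *
 * Return: 0 on success, a negative errno or firmware error code otherwise.
 */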
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the page table has already been initialized, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmap of the current set of owners; each set bit
 *            indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return: negative errno on failure or 0 on success, with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill in the source vmid details */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill in the details of the mem buffer to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill in the details of the next vmid set */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);

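/*
 * For illustration, a hypothetical caller moving a buffer from HLOS to a
 * remote VM (VMID and permission macros are provided by
 * <linux/firmware/qcom/qcom_scm.h>):
 *
 *	u64 perms = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *
 *	ret = qcom_scm_assign_mem(addr, size, &perms, &newvm, 1);
 *
 * On success, @perms now holds BIT(QCOM_SCM_VMID_MSS_MSA) and can be passed
 * to a later call that assigns the memory back to HLOS.
 */
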
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed.  The sanctioned way to
	 * do this is by using the DMA API.  But as is best practice for crypto
	 * keys, we also must wipe the key after use.  This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);

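/*
 * Illustrative (hypothetical) use by a storage driver programming a 64-byte
 * AES-256-XTS key for 4096-byte data units into keyslot 0:
 *
 *	err = qcom_scm_ice_set_key(0, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *
 * and later, when the key is no longer needed:
 *
 *	err = qcom_scm_ice_invalidate_key(0);
 */
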
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return: true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

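	/*
	 * DCVSH payload layout (the exact field semantics are defined by the
	 * firmware; this matches what downstream LMH code passes): function
	 * to set, a reserved word, the register to write, the number of
	 * values that follow (one here) and the value itself.
	 */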
	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call querying the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. The returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	dma_addr_t name_buf_phys;
	char *name_buf;
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	name_buf = kzalloc(name_buf_size, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE);
	status = dma_mapping_error(__scm->dev, name_buf_phys);
	if (status) {
		kfree(name_buf);
		dev_err(__scm->dev, "qseecom: failed to map dma address\n");
		return status;
	}

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = name_buf_phys;
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);
	dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE);
	kfree(name_buf);

	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      DMA address of the request buffer sent to the app.
 * @req_size: Size of the request buffer.
 * @rsp:      DMA address of the response buffer, written to by the app.
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
			      dma_addr_t rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	int status;

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req;
	desc.args[2] = req_size;
	desc.args[3] = rsp;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);

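/*
 * A sketch of how a client would (hypothetically) talk to a QSEE app; the
 * request and response buffers must already be DMA-mapped against this
 * device before calling qcom_scm_qseecom_app_send():
 *
 *	u32 app_id;
 *
 *	ret = qcom_scm_qseecom_app_get_id("example_app", &app_id);
 *	ret = qcom_scm_qseecom_app_send(app_id, req_dma, req_size,
 *					rsp_dma, rsp_size);
 *
 * In practice this is wrapped by the qcom_qseecom interface device set up in
 * qcom_scm_qseecom_init() below.
 */
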
/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "qcom,sc8180x-primus" },
	{ }
};

static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);

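/*
 * SMC wait-queue support: instead of busy-waiting in the secure world, newer
 * firmware can park a call on a wait queue and return to Linux. The SMC
 * backend then calls qcom_scm_wait_for_wq_completion() to sleep until the
 * firmware raises the wait-queue interrupt, whose handler below queries the
 * context via scm_get_wq_ctx() and completes the waiter.
 */
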
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/* FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
		    flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Let all above stores be available after this */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If "download mode" was requested, enable it now: from this point on
	 * a warm boot will cause the boot stages to enter download mode,
	 * unless it is disabled again by a clean shutdown/reboot (see
	 * qcom_scm_shutdown() below).
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");