xref: /linux/drivers/firmware/qcom/qcom_scm.c (revision ccd207ec848e768da41465352a0f52081eec6bb1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3  * Copyright (C) 2015 Linaro Ltd.
4  */
5 
6 #include <linux/arm-smccc.h>
7 #include <linux/bitfield.h>
8 #include <linux/bits.h>
9 #include <linux/cleanup.h>
10 #include <linux/clk.h>
11 #include <linux/completion.h>
12 #include <linux/cpumask.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/export.h>
16 #include <linux/firmware/qcom/qcom_scm.h>
17 #include <linux/firmware/qcom/qcom_tzmem.h>
18 #include <linux/init.h>
19 #include <linux/interconnect.h>
20 #include <linux/interrupt.h>
21 #include <linux/kstrtox.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_address.h>
25 #include <linux/of_irq.h>
26 #include <linux/of_platform.h>
27 #include <linux/of_reserved_mem.h>
28 #include <linux/platform_device.h>
29 #include <linux/reset-controller.h>
30 #include <linux/sizes.h>
31 #include <linux/types.h>
32 
33 #include <dt-bindings/interrupt-controller/arm-gic.h>
34 
35 #include "qcom_scm.h"
36 #include "qcom_tzmem.h"
37 
38 static u32 download_mode;
39 
40 #define GIC_SPI_BASE        32
41 #define GIC_MAX_SPI       1019 /* SPIs in the GICv3 spec range from 32..1019 */
42 #define GIC_ESPI_BASE     4096
43 #define GIC_MAX_ESPI      5119 /* ESPIs in the GICv3 spec range from 4096..5119 */
44 
45 struct qcom_scm {
46 	struct device *dev;
47 	struct clk *core_clk;
48 	struct clk *iface_clk;
49 	struct clk *bus_clk;
50 	struct icc_path *path;
51 	struct completion *waitq_comps;
52 	struct reset_controller_dev reset;
53 
54 	/* control access to the interconnect path */
55 	struct mutex scm_bw_lock;
56 	int scm_vote_count;
57 
58 	u64 dload_mode_addr;
59 
60 	struct qcom_tzmem_pool *mempool;
61 	unsigned int wq_cnt;
62 };
63 
64 struct qcom_scm_current_perm_info {
65 	__le32 vmid;
66 	__le32 perm;
67 	__le64 ctx;
68 	__le32 ctx_size;
69 	__le32 unused;
70 };
71 
72 struct qcom_scm_mem_map_info {
73 	__le64 mem_addr;
74 	__le64 mem_size;
75 };
76 
77 /**
78  * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
79  * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
80  * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
81  * @data:      Response data. The type of this data is given in @resp_type.
82  */
83 struct qcom_scm_qseecom_resp {
84 	u64 result;
85 	u64 resp_type;
86 	u64 data;
87 };
88 
89 enum qcom_scm_qseecom_result {
90 	QSEECOM_RESULT_SUCCESS			= 0,
91 	QSEECOM_RESULT_INCOMPLETE		= 1,
92 	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
93 	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
94 };
95 
96 enum qcom_scm_qseecom_resp_type {
97 	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
98 	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
99 };
100 
101 enum qcom_scm_qseecom_tz_owner {
102 	QSEECOM_TZ_OWNER_SIP			= 2,
103 	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
104 	QSEECOM_TZ_OWNER_QSEE_OS		= 50
105 };
106 
107 enum qcom_scm_qseecom_tz_svc {
108 	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
109 	QSEECOM_TZ_SVC_APP_MGR			= 1,
110 	QSEECOM_TZ_SVC_INFO			= 6,
111 };
112 
113 enum qcom_scm_qseecom_tz_cmd_app {
114 	QSEECOM_TZ_CMD_APP_SEND			= 1,
115 	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
116 };
117 
118 enum qcom_scm_qseecom_tz_cmd_info {
119 	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
120 };
121 
122 #define QSEECOM_MAX_APP_NAME_SIZE		64
123 #define SHMBRIDGE_RESULT_NOTSUPP		4
124 
125 /* Each bit configures cold/warm boot address for one of the 4 CPUs */
126 static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
127 	0, BIT(0), BIT(3), BIT(5)
128 };
129 static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
130 	BIT(2), BIT(1), BIT(4), BIT(6)
131 };
132 
133 #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
134 
135 #define QCOM_DLOAD_MASK		GENMASK(5, 4)
136 #define QCOM_DLOAD_NODUMP	0
137 #define QCOM_DLOAD_FULLDUMP	1
138 #define QCOM_DLOAD_MINIDUMP	2
139 #define QCOM_DLOAD_BOTHDUMP	3
140 
141 #define QCOM_SCM_DEFAULT_WAITQ_COUNT 1
142 
143 static const char * const qcom_scm_convention_names[] = {
144 	[SMC_CONVENTION_UNKNOWN] = "unknown",
145 	[SMC_CONVENTION_ARM_32] = "smc arm 32",
146 	[SMC_CONVENTION_ARM_64] = "smc arm 64",
147 	[SMC_CONVENTION_LEGACY] = "smc legacy",
148 };
149 
150 static const char * const download_mode_name[] = {
151 	[QCOM_DLOAD_NODUMP]	= "off",
152 	[QCOM_DLOAD_FULLDUMP]	= "full",
153 	[QCOM_DLOAD_MINIDUMP]	= "mini",
154 	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
155 };
156 
157 static struct qcom_scm *__scm;
158 
159 static int qcom_scm_clk_enable(void)
160 {
161 	int ret;
162 
163 	ret = clk_prepare_enable(__scm->core_clk);
164 	if (ret)
165 		goto bail;
166 
167 	ret = clk_prepare_enable(__scm->iface_clk);
168 	if (ret)
169 		goto disable_core;
170 
171 	ret = clk_prepare_enable(__scm->bus_clk);
172 	if (ret)
173 		goto disable_iface;
174 
175 	return 0;
176 
177 disable_iface:
178 	clk_disable_unprepare(__scm->iface_clk);
179 disable_core:
180 	clk_disable_unprepare(__scm->core_clk);
181 bail:
182 	return ret;
183 }
184 
185 static void qcom_scm_clk_disable(void)
186 {
187 	clk_disable_unprepare(__scm->core_clk);
188 	clk_disable_unprepare(__scm->iface_clk);
189 	clk_disable_unprepare(__scm->bus_clk);
190 }
191 
192 static int qcom_scm_bw_enable(void)
193 {
194 	int ret = 0;
195 
196 	if (!__scm->path)
197 		return 0;
198 
199 	mutex_lock(&__scm->scm_bw_lock);
200 	if (!__scm->scm_vote_count) {
201 		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
202 		if (ret < 0) {
203 			dev_err(__scm->dev, "failed to set bandwidth request\n");
204 			goto err_bw;
205 		}
206 	}
207 	__scm->scm_vote_count++;
208 err_bw:
209 	mutex_unlock(&__scm->scm_bw_lock);
210 
211 	return ret;
212 }
213 
214 static void qcom_scm_bw_disable(void)
215 {
216 	if (!__scm->path)
217 		return;
218 
219 	mutex_lock(&__scm->scm_bw_lock);
220 	if (__scm->scm_vote_count-- == 1)
221 		icc_set_bw(__scm->path, 0, 0);
222 	mutex_unlock(&__scm->scm_bw_lock);
223 }
224 
225 enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
226 static DEFINE_SPINLOCK(scm_query_lock);
227 
228 struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
229 {
230 	if (!qcom_scm_is_available())
231 		return NULL;
232 
233 	return __scm->mempool;
234 }
235 
236 static enum qcom_scm_convention __get_convention(void)
237 {
238 	unsigned long flags;
239 	struct qcom_scm_desc desc = {
240 		.svc = QCOM_SCM_SVC_INFO,
241 		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
242 		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
243 					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
244 			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
245 		.arginfo = QCOM_SCM_ARGS(1),
246 		.owner = ARM_SMCCC_OWNER_SIP,
247 	};
248 	struct qcom_scm_res res;
249 	enum qcom_scm_convention probed_convention;
250 	int ret;
251 	bool forced = false;
252 
253 	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
254 		return qcom_scm_convention;
255 
256 	/*
257 	 * Per the "SMC calling convention specification", the 64-bit calling
258 	 * convention can only be used when the client is 64-bit, otherwise
259 	 * the system will encounter undefined behaviour.
260 	 */
261 #if IS_ENABLED(CONFIG_ARM64)
262 	/*
263 	 * A device isn't required as there is only one argument - no device
264 	 * is needed for dma_map_single() to the secure world.
265 	 */
266 	probed_convention = SMC_CONVENTION_ARM_64;
267 	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
268 	if (!ret && res.result[0] == 1)
269 		goto found;
270 
271 	/*
272 	 * Some SC7180 firmwares didn't implement the
273 	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
274 	 * ARM_64 calling convention on these firmwares. Luckily we don't make any
275 	 * early calls into the firmware on these SoCs so the device pointer
276 	 * will be valid here to check if the compatible matches.
277 	 */
278 	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
279 		forced = true;
280 		goto found;
281 	}
282 #endif
283 
284 	probed_convention = SMC_CONVENTION_ARM_32;
285 	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
286 	if (!ret && res.result[0] == 1)
287 		goto found;
288 
289 	probed_convention = SMC_CONVENTION_LEGACY;
290 found:
291 	spin_lock_irqsave(&scm_query_lock, flags);
292 	if (probed_convention != qcom_scm_convention) {
293 		qcom_scm_convention = probed_convention;
294 		pr_info("qcom_scm: convention: %s%s\n",
295 			qcom_scm_convention_names[qcom_scm_convention],
296 			forced ? " (forced)" : "");
297 	}
298 	spin_unlock_irqrestore(&scm_query_lock, flags);
299 
300 	return qcom_scm_convention;
301 }
302 
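/*
 * Note on the probe above: the availability query passes its own function ID
 * as the argument, i.e. it asks the firmware "is QCOM_SCM_INFO_IS_CALL_AVAIL
 * itself callable?" using the same composition as the descriptor:
 *
 *	args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO, QCOM_SCM_INFO_IS_CALL_AVAIL) |
 *		  (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
 *
 * A call that succeeds with res.result[0] == 1 therefore confirms both that
 * the probed convention works and that availability queries are supported.
 */
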
303 /**
304  * qcom_scm_call() - Invoke a syscall in the secure world
305  * @dev:	device
306  * @desc:	Descriptor structure containing arguments and return values
307  * @res:        Structure containing results from SMC/HVC call
308  *
309  * Sends a command to the SCM and waits for the command to finish processing.
310  * This should *only* be called in pre-emptible context.
311  */
312 static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
313 			 struct qcom_scm_res *res)
314 {
315 	might_sleep();
316 	switch (__get_convention()) {
317 	case SMC_CONVENTION_ARM_32:
318 	case SMC_CONVENTION_ARM_64:
319 		return scm_smc_call(dev, desc, res, false);
320 	case SMC_CONVENTION_LEGACY:
321 		return scm_legacy_call(dev, desc, res);
322 	default:
323 		pr_err("Unknown current SCM calling convention.\n");
324 		return -EINVAL;
325 	}
326 }
327 
328 /**
329  * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
330  * @dev:	device
331  * @desc:	Descriptor structure containing arguments and return values
332  * @res:	Structure containing results from SMC/HVC call
333  *
334  * Sends a command to the SCM and waits for the command to finish processing.
335  * This can be called in atomic context.
336  */
337 static int qcom_scm_call_atomic(struct device *dev,
338 				const struct qcom_scm_desc *desc,
339 				struct qcom_scm_res *res)
340 {
341 	switch (__get_convention()) {
342 	case SMC_CONVENTION_ARM_32:
343 	case SMC_CONVENTION_ARM_64:
344 		return scm_smc_call(dev, desc, res, true);
345 	case SMC_CONVENTION_LEGACY:
346 		return scm_legacy_call_atomic(dev, desc, res);
347 	default:
348 		pr_err("Unknown current SCM calling convention.\n");
349 		return -EINVAL;
350 	}
351 }
352 
353 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
354 					 u32 cmd_id)
355 {
356 	int ret;
357 	struct qcom_scm_desc desc = {
358 		.svc = QCOM_SCM_SVC_INFO,
359 		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
360 		.owner = ARM_SMCCC_OWNER_SIP,
361 	};
362 	struct qcom_scm_res res;
363 
364 	desc.arginfo = QCOM_SCM_ARGS(1);
365 	switch (__get_convention()) {
366 	case SMC_CONVENTION_ARM_32:
367 	case SMC_CONVENTION_ARM_64:
368 		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
369 				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
370 		break;
371 	case SMC_CONVENTION_LEGACY:
372 		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
373 		break;
374 	default:
375 		pr_err("Unknown SMC convention being used\n");
376 		return false;
377 	}
378 
379 	ret = qcom_scm_call(dev, &desc, &res);
380 
381 	return ret ? false : !!res.result[0];
382 }
383 
384 static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
385 {
386 	int cpu;
387 	unsigned int flags = 0;
388 	struct qcom_scm_desc desc = {
389 		.svc = QCOM_SCM_SVC_BOOT,
390 		.cmd = QCOM_SCM_BOOT_SET_ADDR,
391 		.arginfo = QCOM_SCM_ARGS(2),
392 		.owner = ARM_SMCCC_OWNER_SIP,
393 	};
394 
395 	for_each_present_cpu(cpu) {
396 		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
397 			return -EINVAL;
398 		flags |= cpu_bits[cpu];
399 	}
400 
401 	desc.args[0] = flags;
402 	desc.args[1] = virt_to_phys(entry);
403 
404 	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
405 }
406 
407 static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
408 {
409 	struct qcom_scm_desc desc = {
410 		.svc = QCOM_SCM_SVC_BOOT,
411 		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
412 		.owner = ARM_SMCCC_OWNER_SIP,
413 		.arginfo = QCOM_SCM_ARGS(6),
414 		.args = {
415 			virt_to_phys(entry),
416 			/* Apply to all CPUs in all affinity levels */
417 			~0ULL, ~0ULL, ~0ULL, ~0ULL,
418 			flags,
419 		},
420 	};
421 
422 	/* Need a device for DMA of the additional arguments */
423 	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
424 		return -EOPNOTSUPP;
425 
426 	return qcom_scm_call(__scm->dev, &desc, NULL);
427 }
428 
429 /**
430  * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all CPUs
431  * @entry: Entry point function for the CPUs
432  *
433  * Set the Linux entry point for the SCM to transfer control to when coming
434  * out of a power down. CPU power down may occur during cpuidle or CPU hotplug.
435  */
436 int qcom_scm_set_warm_boot_addr(void *entry)
437 {
438 	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
439 		/* Fallback to old SCM call */
440 		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
441 	return 0;
442 }
443 EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
444 
445 /**
446  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all CPUs
447  * @entry: Entry point function for the CPUs
448  */
449 int qcom_scm_set_cold_boot_addr(void *entry)
450 {
451 	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
452 		/* Fallback to old SCM call */
453 		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
454 	return 0;
455 }
456 EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
457 
458 /**
459  * qcom_scm_cpu_power_down() - Power down the CPU
460  * @flags:	Flags to flush cache
461  *
462  * This is an end point to power down the CPU. If there is a pending interrupt,
463  * control returns from this function; otherwise, the CPU jumps to the
464  * warm boot entry point set for this CPU upon reset.
465  */
466 void qcom_scm_cpu_power_down(u32 flags)
467 {
468 	struct qcom_scm_desc desc = {
469 		.svc = QCOM_SCM_SVC_BOOT,
470 		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
471 		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
472 		.arginfo = QCOM_SCM_ARGS(1),
473 		.owner = ARM_SMCCC_OWNER_SIP,
474 	};
475 
476 	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
477 }
478 EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
479 
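/*
 * Illustrative usage (a sketch, not lifted from a specific driver): a
 * platform cpuidle/hotplug path would typically register the warm boot entry
 * once and then terminate power collapse via the call above. This assumes
 * the QCOM_SCM_CPU_PWR_DOWN_* flags from the public header and an arch
 * resume entry point such as cpu_resume_arm():
 *
 *	qcom_scm_set_warm_boot_addr(cpu_resume_arm);
 *	...
 *	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 */
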
480 int qcom_scm_set_remote_state(u32 state, u32 id)
481 {
482 	struct qcom_scm_desc desc = {
483 		.svc = QCOM_SCM_SVC_BOOT,
484 		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
485 		.arginfo = QCOM_SCM_ARGS(2),
486 		.args[0] = state,
487 		.args[1] = id,
488 		.owner = ARM_SMCCC_OWNER_SIP,
489 	};
490 	struct qcom_scm_res res;
491 	int ret;
492 
493 	ret = qcom_scm_call(__scm->dev, &desc, &res);
494 
495 	return ret ? : res.result[0];
496 }
497 EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
498 
499 static int qcom_scm_disable_sdi(void)
500 {
501 	int ret;
502 	struct qcom_scm_desc desc = {
503 		.svc = QCOM_SCM_SVC_BOOT,
504 		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
505 		.args[0] = 1, /* Disable watchdog debug */
506 		.args[1] = 0, /* Disable SDI */
507 		.arginfo = QCOM_SCM_ARGS(2),
508 		.owner = ARM_SMCCC_OWNER_SIP,
509 	};
510 	struct qcom_scm_res res;
511 
512 	ret = qcom_scm_clk_enable();
513 	if (ret)
514 		return ret;
515 	ret = qcom_scm_call(__scm->dev, &desc, &res);
516 
517 	qcom_scm_clk_disable();
518 
519 	return ret ? : res.result[0];
520 }
521 
522 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
523 {
524 	struct qcom_scm_desc desc = {
525 		.svc = QCOM_SCM_SVC_BOOT,
526 		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
527 		.arginfo = QCOM_SCM_ARGS(2),
528 		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
529 		.owner = ARM_SMCCC_OWNER_SIP,
530 	};
531 
532 	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
533 
534 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
535 }
536 
537 static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
538 {
539 	unsigned int old;
540 	unsigned int new;
541 	int ret;
542 
543 	ret = qcom_scm_io_readl(addr, &old);
544 	if (ret)
545 		return ret;
546 
547 	new = (old & ~mask) | (val & mask);
548 
549 	return qcom_scm_io_writel(addr, new);
550 }
551 
552 static void qcom_scm_set_download_mode(u32 dload_mode)
553 {
554 	int ret = 0;
555 
556 	if (__scm->dload_mode_addr) {
557 		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
558 				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
559 	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
560 						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
561 		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
562 	} else if (dload_mode) {
563 		dev_err(__scm->dev,
564 			"No available mechanism for setting download mode\n");
565 	}
566 
567 	if (ret)
568 		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
569 }
570 
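/*
 * Worked example (illustration only): with QCOM_DLOAD_MASK = GENMASK(5, 4),
 * requesting a full dump packs as
 *
 *	FIELD_PREP(QCOM_DLOAD_MASK, QCOM_DLOAD_FULLDUMP) == 1 << 4 == 0x10
 *
 * so qcom_scm_io_rmw() above only touches bits 5:4 of the TCSR download-mode
 * cookie and leaves the rest of the register untouched.
 */
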
571 /**
572  * qcom_scm_pas_init_image() - Initialize peripheral authentication service
573  *			       state machine for a given peripheral, using the
574  *			       metadata
575  * @peripheral: peripheral id
576  * @metadata:	pointer to memory containing ELF header, program header table
577  *		and optional blob of data used for authenticating the metadata
578  *		and the rest of the firmware
579  * @size:	size of the metadata
580  * @ctx:	optional metadata context
581  *
582  * Return: 0 on success.
583  *
584  * Upon successful return, the PAS metadata context (@ctx) will be used to
585  * track the metadata allocation; the caller must release it by invoking
586  * qcom_scm_pas_metadata_release().
587  */
588 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
589 			    struct qcom_scm_pas_metadata *ctx)
590 {
591 	dma_addr_t mdata_phys;
592 	void *mdata_buf;
593 	int ret;
594 	struct qcom_scm_desc desc = {
595 		.svc = QCOM_SCM_SVC_PIL,
596 		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
597 		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
598 		.args[0] = peripheral,
599 		.owner = ARM_SMCCC_OWNER_SIP,
600 	};
601 	struct qcom_scm_res res;
602 
603 	/*
604 	 * During the SCM call, memory protection will be enabled for the
605 	 * metadata blob, so make sure it's physically contiguous, 4K aligned
606 	 * and non-cacheable to avoid XPU violations.
607 	 *
608 	 * For PIL calls the hypervisor creates SHM Bridges for the blob
609 	 * buffers on behalf of Linux, so we must not do it ourselves and hence
610 	 * do not use the TZMem allocator here.
611 	 *
612 	 * If we pass a buffer that is already part of an SHM Bridge to this
613 	 * call, it will fail.
614 	 */
615 	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
616 				       GFP_KERNEL);
617 	if (!mdata_buf)
618 		return -ENOMEM;
619 
620 	memcpy(mdata_buf, metadata, size);
621 
622 	ret = qcom_scm_clk_enable();
623 	if (ret)
624 		goto out;
625 
626 	ret = qcom_scm_bw_enable();
627 	if (ret)
628 		goto disable_clk;
629 
630 	desc.args[1] = mdata_phys;
631 
632 	ret = qcom_scm_call(__scm->dev, &desc, &res);
633 	qcom_scm_bw_disable();
634 
635 disable_clk:
636 	qcom_scm_clk_disable();
637 
638 out:
639 	if (ret < 0 || !ctx) {
640 		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
641 	} else if (ctx) {
642 		ctx->ptr = mdata_buf;
643 		ctx->phys = mdata_phys;
644 		ctx->size = size;
645 	}
646 
647 	return ret ? : res.result[0];
648 }
649 EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
650 
651 /**
652  * qcom_scm_pas_metadata_release() - release metadata context
653  * @ctx:	metadata context
654  */
655 void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
656 {
657 	if (!ctx->ptr)
658 		return;
659 
660 	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
661 
662 	ctx->ptr = NULL;
663 	ctx->phys = 0;
664 	ctx->size = 0;
665 }
666 EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
667 
668 /**
669  * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
670  *			      for firmware loading
671  * @peripheral:	peripheral id
672  * @addr:	start address of memory area to prepare
673  * @size:	size of the memory area to prepare
674  *
675  * Return: 0 on success.
676  */
677 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
678 {
679 	int ret;
680 	struct qcom_scm_desc desc = {
681 		.svc = QCOM_SCM_SVC_PIL,
682 		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
683 		.arginfo = QCOM_SCM_ARGS(3),
684 		.args[0] = peripheral,
685 		.args[1] = addr,
686 		.args[2] = size,
687 		.owner = ARM_SMCCC_OWNER_SIP,
688 	};
689 	struct qcom_scm_res res;
690 
691 	ret = qcom_scm_clk_enable();
692 	if (ret)
693 		return ret;
694 
695 	ret = qcom_scm_bw_enable();
696 	if (ret)
697 		goto disable_clk;
698 
699 	ret = qcom_scm_call(__scm->dev, &desc, &res);
700 	qcom_scm_bw_disable();
701 
702 disable_clk:
703 	qcom_scm_clk_disable();
704 
705 	return ret ? : res.result[0];
706 }
707 EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
708 
709 /**
710  * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
711  *				   and reset the remote processor
712  * @peripheral:	peripheral id
713  *
714  * Return: 0 on success.
715  */
716 int qcom_scm_pas_auth_and_reset(u32 peripheral)
717 {
718 	int ret;
719 	struct qcom_scm_desc desc = {
720 		.svc = QCOM_SCM_SVC_PIL,
721 		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
722 		.arginfo = QCOM_SCM_ARGS(1),
723 		.args[0] = peripheral,
724 		.owner = ARM_SMCCC_OWNER_SIP,
725 	};
726 	struct qcom_scm_res res;
727 
728 	ret = qcom_scm_clk_enable();
729 	if (ret)
730 		return ret;
731 
732 	ret = qcom_scm_bw_enable();
733 	if (ret)
734 		goto disable_clk;
735 
736 	ret = qcom_scm_call(__scm->dev, &desc, &res);
737 	qcom_scm_bw_disable();
738 
739 disable_clk:
740 	qcom_scm_clk_disable();
741 
742 	return ret ? : res.result[0];
743 }
744 EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
745 
746 /**
747  * qcom_scm_pas_shutdown() - Shut down the remote processor
748  * @peripheral: peripheral id
749  *
750  * Return: 0 on success.
751  */
752 int qcom_scm_pas_shutdown(u32 peripheral)
753 {
754 	int ret;
755 	struct qcom_scm_desc desc = {
756 		.svc = QCOM_SCM_SVC_PIL,
757 		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
758 		.arginfo = QCOM_SCM_ARGS(1),
759 		.args[0] = peripheral,
760 		.owner = ARM_SMCCC_OWNER_SIP,
761 	};
762 	struct qcom_scm_res res;
763 
764 	ret = qcom_scm_clk_enable();
765 	if (ret)
766 		return ret;
767 
768 	ret = qcom_scm_bw_enable();
769 	if (ret)
770 		goto disable_clk;
771 
772 	ret = qcom_scm_call(__scm->dev, &desc, &res);
773 	qcom_scm_bw_disable();
774 
775 disable_clk:
776 	qcom_scm_clk_disable();
777 
778 	return ret ? : res.result[0];
779 }
780 EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
781 
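/*
 * Typical PAS sequence as driven by a remoteproc-style driver (a sketch,
 * assuming 'fw' holds the metadata blob and 'mem_phys'/'mem_size' describe
 * the firmware carveout; error handling omitted):
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *
 *	ret = qcom_scm_pas_init_image(peripheral, fw->data, fw->size, &ctx);
 *	ret = qcom_scm_pas_mem_setup(peripheral, mem_phys, mem_size);
 *	... load firmware segments into the carveout ...
 *	ret = qcom_scm_pas_auth_and_reset(peripheral);
 *	qcom_scm_pas_metadata_release(&ctx);
 *	...
 *	ret = qcom_scm_pas_shutdown(peripheral);
 */
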
782 /**
783  * qcom_scm_pas_supported() - Check if the peripheral authentication service is
784  *			      available for the given peripheral
785  * @peripheral:	peripheral id
786  *
787  * Return: true if PAS is supported for this peripheral, otherwise false.
788  */
789 bool qcom_scm_pas_supported(u32 peripheral)
790 {
791 	int ret;
792 	struct qcom_scm_desc desc = {
793 		.svc = QCOM_SCM_SVC_PIL,
794 		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
795 		.arginfo = QCOM_SCM_ARGS(1),
796 		.args[0] = peripheral,
797 		.owner = ARM_SMCCC_OWNER_SIP,
798 	};
799 	struct qcom_scm_res res;
800 
801 	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
802 					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
803 		return false;
804 
805 	ret = qcom_scm_call(__scm->dev, &desc, &res);
806 
807 	return ret ? false : !!res.result[0];
808 }
809 EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
810 
811 static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
812 {
813 	struct qcom_scm_desc desc = {
814 		.svc = QCOM_SCM_SVC_PIL,
815 		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
816 		.arginfo = QCOM_SCM_ARGS(2),
817 		.args[0] = reset,
818 		.args[1] = 0,
819 		.owner = ARM_SMCCC_OWNER_SIP,
820 	};
821 	struct qcom_scm_res res;
822 	int ret;
823 
824 	ret = qcom_scm_call(__scm->dev, &desc, &res);
825 
826 	return ret ? : res.result[0];
827 }
828 
829 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
830 				     unsigned long idx)
831 {
832 	if (idx != 0)
833 		return -EINVAL;
834 
835 	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
836 }
837 
838 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
839 				       unsigned long idx)
840 {
841 	if (idx != 0)
842 		return -EINVAL;
843 
844 	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
845 }
846 
847 static const struct reset_control_ops qcom_scm_pas_reset_ops = {
848 	.assert = qcom_scm_pas_reset_assert,
849 	.deassert = qcom_scm_pas_reset_deassert,
850 };
851 
852 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
853 {
854 	struct qcom_scm_desc desc = {
855 		.svc = QCOM_SCM_SVC_IO,
856 		.cmd = QCOM_SCM_IO_READ,
857 		.arginfo = QCOM_SCM_ARGS(1),
858 		.args[0] = addr,
859 		.owner = ARM_SMCCC_OWNER_SIP,
860 	};
861 	struct qcom_scm_res res;
862 	int ret;
863 
865 	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
866 	if (ret >= 0)
867 		*val = res.result[0];
868 
869 	return ret < 0 ? ret : 0;
870 }
871 EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
872 
873 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
874 {
875 	struct qcom_scm_desc desc = {
876 		.svc = QCOM_SCM_SVC_IO,
877 		.cmd = QCOM_SCM_IO_WRITE,
878 		.arginfo = QCOM_SCM_ARGS(2),
879 		.args[0] = addr,
880 		.args[1] = val,
881 		.owner = ARM_SMCCC_OWNER_SIP,
882 	};
883 
884 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
885 }
886 EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
887 
888 /**
889  * qcom_scm_restore_sec_cfg_available() - Check if secure environment
890  * supports the restore security config interface.
891  *
892  * Return: true if the restore-cfg interface is supported, false if not.
893  */
894 bool qcom_scm_restore_sec_cfg_available(void)
895 {
896 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
897 					    QCOM_SCM_MP_RESTORE_SEC_CFG);
898 }
899 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
900 
901 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
902 {
903 	struct qcom_scm_desc desc = {
904 		.svc = QCOM_SCM_SVC_MP,
905 		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
906 		.arginfo = QCOM_SCM_ARGS(2),
907 		.args[0] = device_id,
908 		.args[1] = spare,
909 		.owner = ARM_SMCCC_OWNER_SIP,
910 	};
911 	struct qcom_scm_res res;
912 	int ret;
913 
914 	ret = qcom_scm_call(__scm->dev, &desc, &res);
915 
916 	return ret ? : res.result[0];
917 }
918 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
919 
920 #define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)
921 
922 bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
923 {
924 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
925 					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
926 }
927 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);
928 
929 int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
930 {
931 	struct qcom_scm_desc desc = {
932 		.svc = QCOM_SCM_SVC_MP,
933 		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
934 		.arginfo = QCOM_SCM_ARGS(4),
935 		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
936 		.args[1] = 0xffffffff,
937 		.args[2] = 0xffffffff,
938 		.args[3] = 0xffffffff,
939 		.owner = ARM_SMCCC_OWNER_SIP
940 	};
941 
942 	return qcom_scm_call(__scm->dev, &desc, NULL);
943 }
944 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);
945 
946 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
947 {
948 	struct qcom_scm_desc desc = {
949 		.svc = QCOM_SCM_SVC_MP,
950 		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
951 		.arginfo = QCOM_SCM_ARGS(1),
952 		.args[0] = spare,
953 		.owner = ARM_SMCCC_OWNER_SIP,
954 	};
955 	struct qcom_scm_res res;
956 	int ret;
957 
958 	ret = qcom_scm_call(__scm->dev, &desc, &res);
959 
960 	if (size)
961 		*size = res.result[0];
962 
963 	return ret ? : res.result[1];
964 }
965 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
966 
967 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
968 {
969 	struct qcom_scm_desc desc = {
970 		.svc = QCOM_SCM_SVC_MP,
971 		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
972 		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
973 					 QCOM_SCM_VAL),
974 		.args[0] = addr,
975 		.args[1] = size,
976 		.args[2] = spare,
977 		.owner = ARM_SMCCC_OWNER_SIP,
978 	};
979 	int ret;
980 
981 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
982 
983 	/* the page table has been initialized already, ignore the error */
984 	if (ret == -EPERM)
985 		ret = 0;
986 
987 	return ret;
988 }
989 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
990 
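/*
 * The two calls above are used together: TZ first reports how much memory it
 * needs for the secure pagetables, the caller allocates that much physically
 * contiguous memory, then hands it over. A sketch (allocation details and
 * error handling are left to the caller and are assumptions here):
 *
 *	size_t psize;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	... allocate 'psize' bytes of contiguous memory at 'paddr' ...
 *	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
 */
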
991 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
992 {
993 	struct qcom_scm_desc desc = {
994 		.svc = QCOM_SCM_SVC_MP,
995 		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
996 		.arginfo = QCOM_SCM_ARGS(2),
997 		.args[0] = size,
998 		.args[1] = spare,
999 		.owner = ARM_SMCCC_OWNER_SIP,
1000 	};
1001 
1002 	return qcom_scm_call(__scm->dev, &desc, NULL);
1003 }
1004 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
1005 
1006 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
1007 				   u32 cp_nonpixel_start,
1008 				   u32 cp_nonpixel_size)
1009 {
1010 	int ret;
1011 	struct qcom_scm_desc desc = {
1012 		.svc = QCOM_SCM_SVC_MP,
1013 		.cmd = QCOM_SCM_MP_VIDEO_VAR,
1014 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
1015 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
1016 		.args[0] = cp_start,
1017 		.args[1] = cp_size,
1018 		.args[2] = cp_nonpixel_start,
1019 		.args[3] = cp_nonpixel_size,
1020 		.owner = ARM_SMCCC_OWNER_SIP,
1021 	};
1022 	struct qcom_scm_res res;
1023 
1024 	ret = qcom_scm_call(__scm->dev, &desc, &res);
1025 
1026 	return ret ? : res.result[0];
1027 }
1028 EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
1029 
1030 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
1031 				 size_t mem_sz, phys_addr_t src, size_t src_sz,
1032 				 phys_addr_t dest, size_t dest_sz)
1033 {
1034 	int ret;
1035 	struct qcom_scm_desc desc = {
1036 		.svc = QCOM_SCM_SVC_MP,
1037 		.cmd = QCOM_SCM_MP_ASSIGN,
1038 		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
1039 					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
1040 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
1041 		.args[0] = mem_region,
1042 		.args[1] = mem_sz,
1043 		.args[2] = src,
1044 		.args[3] = src_sz,
1045 		.args[4] = dest,
1046 		.args[5] = dest_sz,
1047 		.args[6] = 0,
1048 		.owner = ARM_SMCCC_OWNER_SIP,
1049 	};
1050 	struct qcom_scm_res res;
1051 
1052 	ret = qcom_scm_call(dev, &desc, &res);
1053 
1054 	return ret ? : res.result[0];
1055 }
1056 
1057 /**
1058  * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
1059  * @mem_addr: mem region whose ownership needs to be reassigned
1060  * @mem_sz:   size of the region.
1061  * @srcvm:    vmid bitmap of the current set of owners, each set bit
1062  *            indicates a unique owner
1063  * @newvm:    array of the new owners and their corresponding permission
1064  *            flags
1065  * @dest_cnt: number of owners in the next set.
1066  *
1067  * Return: negative errno on failure or 0 on success with @srcvm updated.
1068  */
1069 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
1070 			u64 *srcvm,
1071 			const struct qcom_scm_vmperm *newvm,
1072 			unsigned int dest_cnt)
1073 {
1074 	struct qcom_scm_current_perm_info *destvm;
1075 	struct qcom_scm_mem_map_info *mem_to_map;
1076 	phys_addr_t mem_to_map_phys;
1077 	phys_addr_t dest_phys;
1078 	phys_addr_t ptr_phys;
1079 	size_t mem_to_map_sz;
1080 	size_t dest_sz;
1081 	size_t src_sz;
1082 	size_t ptr_sz;
1083 	int next_vm;
1084 	__le32 *src;
1085 	int ret, i, b;
1086 	u64 srcvm_bits = *srcvm;
1087 
1088 	src_sz = hweight64(srcvm_bits) * sizeof(*src);
1089 	mem_to_map_sz = sizeof(*mem_to_map);
1090 	dest_sz = dest_cnt * sizeof(*destvm);
1091 	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
1092 			ALIGN(dest_sz, SZ_64);
1093 
1094 	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1095 							ptr_sz, GFP_KERNEL);
1096 	if (!ptr)
1097 		return -ENOMEM;
1098 
1099 	ptr_phys = qcom_tzmem_to_phys(ptr);
1100 
1101 	/* Fill source vmid detail */
1102 	src = ptr;
1103 	i = 0;
1104 	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
1105 		if (srcvm_bits & BIT(b))
1106 			src[i++] = cpu_to_le32(b);
1107 	}
1108 
1109 	/* Fill details of mem buff to map */
1110 	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
1111 	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
1112 	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
1113 	mem_to_map->mem_size = cpu_to_le64(mem_sz);
1114 
1115 	next_vm = 0;
1116 	/* Fill details of next vmid detail */
1117 	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1118 	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1119 	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
1120 		destvm->vmid = cpu_to_le32(newvm->vmid);
1121 		destvm->perm = cpu_to_le32(newvm->perm);
1122 		destvm->ctx = 0;
1123 		destvm->ctx_size = 0;
1124 		next_vm |= BIT(newvm->vmid);
1125 	}
1126 
1127 	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
1128 				    ptr_phys, src_sz, dest_phys, dest_sz);
1129 	if (ret) {
1130 		dev_err(__scm->dev,
1131 			"Assign memory protection call failed %d\n", ret);
1132 		return ret;
1133 	}
1134 
1135 	*srcvm = next_vm;
1136 	return 0;
1137 }
1138 EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
1139 
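/*
 * Illustrative reassignment (a sketch; the VMID/permission macros are the
 * ones exposed in <linux/firmware/qcom/qcom_scm.h> and the region is assumed
 * to start out owned by HLOS):
 *
 *	u64 src = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &src, newvm,
 *				  ARRAY_SIZE(newvm));
 *
 * On success 'src' is updated to the new owner bitmap, which must be passed
 * back when returning the region to HLOS.
 */
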
1140 /**
1141  * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
1142  */
1143 bool qcom_scm_ocmem_lock_available(void)
1144 {
1145 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
1146 					    QCOM_SCM_OCMEM_LOCK_CMD);
1147 }
1148 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
1149 
1150 /**
1151  * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
1152  * region to the specified initiator
1153  *
1154  * @id:     tz initiator id
1155  * @offset: OCMEM offset
1156  * @size:   OCMEM size
1157  * @mode:   access mode (WIDE/NARROW)
1158  */
1159 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
1160 			u32 mode)
1161 {
1162 	struct qcom_scm_desc desc = {
1163 		.svc = QCOM_SCM_SVC_OCMEM,
1164 		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
1165 		.args[0] = id,
1166 		.args[1] = offset,
1167 		.args[2] = size,
1168 		.args[3] = mode,
1169 		.arginfo = QCOM_SCM_ARGS(4),
1170 	};
1171 
1172 	return qcom_scm_call(__scm->dev, &desc, NULL);
1173 }
1174 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
1175 
1176 /**
1177  * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1178  * region from the specified initiator
1179  *
1180  * @id:     tz initiator id
1181  * @offset: OCMEM offset
1182  * @size:   OCMEM size
1183  */
1184 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1185 {
1186 	struct qcom_scm_desc desc = {
1187 		.svc = QCOM_SCM_SVC_OCMEM,
1188 		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1189 		.args[0] = id,
1190 		.args[1] = offset,
1191 		.args[2] = size,
1192 		.arginfo = QCOM_SCM_ARGS(3),
1193 	};
1194 
1195 	return qcom_scm_call(__scm->dev, &desc, NULL);
1196 }
1197 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
1198 
1199 /**
1200  * qcom_scm_ice_available() - Is the ICE key programming interface available?
1201  *
1202  * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1203  *	   qcom_scm_ice_set_key() are available.
1204  */
1205 bool qcom_scm_ice_available(void)
1206 {
1207 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1208 					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1209 		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1210 					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1211 }
1212 EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
1213 
1214 /**
1215  * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1216  * @index: the keyslot to invalidate
1217  *
1218  * The UFSHCI and eMMC standards define a standard way to do this, but it
1219  * doesn't work on these SoCs; only this SCM call does.
1220  *
1221  * It is assumed that the SoC has only one ICE instance being used, as this SCM
1222  * call doesn't specify which ICE instance the keyslot belongs to.
1223  *
1224  * Return: 0 on success; -errno on failure.
1225  */
1226 int qcom_scm_ice_invalidate_key(u32 index)
1227 {
1228 	struct qcom_scm_desc desc = {
1229 		.svc = QCOM_SCM_SVC_ES,
1230 		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1231 		.arginfo = QCOM_SCM_ARGS(1),
1232 		.args[0] = index,
1233 		.owner = ARM_SMCCC_OWNER_SIP,
1234 	};
1235 
1236 	return qcom_scm_call(__scm->dev, &desc, NULL);
1237 }
1238 EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
1239 
1240 /**
1241  * qcom_scm_ice_set_key() - Set an inline encryption key
1242  * @index: the keyslot into which to set the key
1243  * @key: the key to program
1244  * @key_size: the size of the key in bytes
1245  * @cipher: the encryption algorithm the key is for
1246  * @data_unit_size: the encryption data unit size, i.e. the size of each
1247  *		    individual plaintext and ciphertext.  Given in 512-byte
1248  *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1249  *
1250  * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1251  * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1252  *
1253  * The UFSHCI and eMMC standards define a standard way to do this, but it
1254  * doesn't work on these SoCs; only this SCM call does.
1255  *
1256  * It is assumed that the SoC has only one ICE instance being used, as this SCM
1257  * call doesn't specify which ICE instance the keyslot belongs to.
1258  *
1259  * Return: 0 on success; -errno on failure.
1260  */
1261 int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1262 			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1263 {
1264 	struct qcom_scm_desc desc = {
1265 		.svc = QCOM_SCM_SVC_ES,
1266 		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1267 		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1268 					 QCOM_SCM_VAL, QCOM_SCM_VAL,
1269 					 QCOM_SCM_VAL),
1270 		.args[0] = index,
1271 		.args[2] = key_size,
1272 		.args[3] = cipher,
1273 		.args[4] = data_unit_size,
1274 		.owner = ARM_SMCCC_OWNER_SIP,
1275 	};
1276 
1277 	int ret;
1278 
1279 	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1280 							   key_size,
1281 							   GFP_KERNEL);
1282 	if (!keybuf)
1283 		return -ENOMEM;
1284 	memcpy(keybuf, key, key_size);
1285 	desc.args[1] = qcom_tzmem_to_phys(keybuf);
1286 
1287 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1288 
1289 	memzero_explicit(keybuf, key_size);
1290 
1291 	return ret;
1292 }
1293 EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
1294 
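/*
 * Illustrative key programming (a sketch, assuming a 64-byte AES-256-XTS key
 * and a 4096-byte crypto data unit; the cipher enum value comes from the
 * public header):
 *
 *	err = qcom_scm_ice_set_key(slot, key_bytes, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS,
 *				   4096 / 512);
 *
 * The key is copied into TZ memory internally and wiped after the call, so
 * the caller's copy only needs to live across the call itself.
 */
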
1295 bool qcom_scm_has_wrapped_key_support(void)
1296 {
1297 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1298 					    QCOM_SCM_ES_DERIVE_SW_SECRET) &&
1299 	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1300 					    QCOM_SCM_ES_GENERATE_ICE_KEY) &&
1301 	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1302 					    QCOM_SCM_ES_PREPARE_ICE_KEY) &&
1303 	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1304 					    QCOM_SCM_ES_IMPORT_ICE_KEY);
1305 }
1306 EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);
1307 
1308 /**
1309  * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
1310  * @eph_key: an ephemerally-wrapped key
1311  * @eph_key_size: size of @eph_key in bytes
1312  * @sw_secret: output buffer for the software secret
1313  * @sw_secret_size: size of the software secret to derive in bytes
1314  *
1315  * Derive a software secret from an ephemerally-wrapped key for software crypto
1316  * operations.  This is done by calling into the secure execution environment,
1317  * which then calls into the hardware to unwrap and derive the secret.
1318  *
1319  * For more information on sw_secret, see the "Hardware-wrapped keys" section of
1320  * Documentation/block/inline-encryption.rst.
1321  *
1322  * Return: 0 on success; -errno on failure.
1323  */
1324 int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
1325 			      u8 *sw_secret, size_t sw_secret_size)
1326 {
1327 	struct qcom_scm_desc desc = {
1328 		.svc = QCOM_SCM_SVC_ES,
1329 		.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
1330 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
1331 					 QCOM_SCM_RW, QCOM_SCM_VAL),
1332 		.owner = ARM_SMCCC_OWNER_SIP,
1333 	};
1334 	int ret;
1335 
1336 	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1337 								eph_key_size,
1338 								GFP_KERNEL);
1339 	if (!eph_key_buf)
1340 		return -ENOMEM;
1341 
1342 	void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1343 								  sw_secret_size,
1344 								  GFP_KERNEL);
1345 	if (!sw_secret_buf)
1346 		return -ENOMEM;
1347 
1348 	memcpy(eph_key_buf, eph_key, eph_key_size);
1349 	desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
1350 	desc.args[1] = eph_key_size;
1351 	desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
1352 	desc.args[3] = sw_secret_size;
1353 
1354 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1355 	if (!ret)
1356 		memcpy(sw_secret, sw_secret_buf, sw_secret_size);
1357 
1358 	memzero_explicit(eph_key_buf, eph_key_size);
1359 	memzero_explicit(sw_secret_buf, sw_secret_size);
1360 	return ret;
1361 }
1362 EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);
1363 
1364 /**
1365  * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
1366  * @lt_key: output buffer for the long-term wrapped key
1367  * @lt_key_size: size of @lt_key in bytes.  Must be the exact wrapped key size
1368  *		 used by the SoC.
1369  *
1370  * Generate a key using the built-in HW module in the SoC.  The resulting key is
1371  * returned wrapped with the platform-specific Key Encryption Key.
1372  *
1373  * Return: 0 on success; -errno on failure.
1374  */
1375 int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
1376 {
1377 	struct qcom_scm_desc desc = {
1378 		.svc = QCOM_SCM_SVC_ES,
1379 		.cmd =  QCOM_SCM_ES_GENERATE_ICE_KEY,
1380 		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
1381 		.owner = ARM_SMCCC_OWNER_SIP,
1382 	};
1383 	int ret;
1384 
1385 	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1386 							       lt_key_size,
1387 							       GFP_KERNEL);
1388 	if (!lt_key_buf)
1389 		return -ENOMEM;
1390 
1391 	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
1392 	desc.args[1] = lt_key_size;
1393 
1394 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1395 	if (!ret)
1396 		memcpy(lt_key, lt_key_buf, lt_key_size);
1397 
1398 	memzero_explicit(lt_key_buf, lt_key_size);
1399 	return ret;
1400 }
1401 EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);
1402 
1403 /**
1404  * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
1405  * @lt_key: a long-term wrapped key
1406  * @lt_key_size: size of @lt_key in bytes
1407  * @eph_key: output buffer for the ephemerally-wrapped key
1408  * @eph_key_size: size of @eph_key in bytes.  Must be the exact wrapped key size
1409  *		  used by the SoC.
1410  *
1411  * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
1412  * added protection.  The resulting key will only be valid for the current boot.
1413  *
1414  * Return: 0 on success; -errno on failure.
1415  */
1416 int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
1417 			     u8 *eph_key, size_t eph_key_size)
1418 {
1419 	struct qcom_scm_desc desc = {
1420 		.svc = QCOM_SCM_SVC_ES,
1421 		.cmd =  QCOM_SCM_ES_PREPARE_ICE_KEY,
1422 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
1423 					 QCOM_SCM_RW, QCOM_SCM_VAL),
1424 		.owner = ARM_SMCCC_OWNER_SIP,
1425 	};
1426 	int ret;
1427 
1428 	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1429 							       lt_key_size,
1430 							       GFP_KERNEL);
1431 	if (!lt_key_buf)
1432 		return -ENOMEM;
1433 
1434 	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1435 								eph_key_size,
1436 								GFP_KERNEL);
1437 	if (!eph_key_buf)
1438 		return -ENOMEM;
1439 
1440 	memcpy(lt_key_buf, lt_key, lt_key_size);
1441 	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
1442 	desc.args[1] = lt_key_size;
1443 	desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
1444 	desc.args[3] = eph_key_size;
1445 
1446 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1447 	if (!ret)
1448 		memcpy(eph_key, eph_key_buf, eph_key_size);
1449 
1450 	memzero_explicit(lt_key_buf, lt_key_size);
1451 	memzero_explicit(eph_key_buf, eph_key_size);
1452 	return ret;
1453 }
1454 EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);
1455 
1456 /**
1457  * qcom_scm_import_ice_key() - Import key for storage encryption
1458  * @raw_key: the raw key to import
1459  * @raw_key_size: size of @raw_key in bytes
1460  * @lt_key: output buffer for the long-term wrapped key
1461  * @lt_key_size: size of @lt_key in bytes.  Must be the exact wrapped key size
1462  *		 used by the SoC.
1463  *
1464  * Import a raw key and return a long-term wrapped key.  Uses the SoC's HWKM to
1465  * wrap the raw key using the platform-specific Key Encryption Key.
1466  *
1467  * Return: 0 on success; -errno on failure.
1468  */
1469 int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
1470 			    u8 *lt_key, size_t lt_key_size)
1471 {
1472 	struct qcom_scm_desc desc = {
1473 		.svc = QCOM_SCM_SVC_ES,
1474 		.cmd =  QCOM_SCM_ES_IMPORT_ICE_KEY,
1475 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
1476 					 QCOM_SCM_RW, QCOM_SCM_VAL),
1477 		.owner = ARM_SMCCC_OWNER_SIP,
1478 	};
1479 	int ret;
1480 
1481 	void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1482 								raw_key_size,
1483 								GFP_KERNEL);
1484 	if (!raw_key_buf)
1485 		return -ENOMEM;
1486 
1487 	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1488 							       lt_key_size,
1489 							       GFP_KERNEL);
1490 	if (!lt_key_buf)
1491 		return -ENOMEM;
1492 
1493 	memcpy(raw_key_buf, raw_key, raw_key_size);
1494 	desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
1495 	desc.args[1] = raw_key_size;
1496 	desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
1497 	desc.args[3] = lt_key_size;
1498 
1499 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1500 	if (!ret)
1501 		memcpy(lt_key, lt_key_buf, lt_key_size);
1502 
1503 	memzero_explicit(raw_key_buf, raw_key_size);
1504 	memzero_explicit(lt_key_buf, lt_key_size);
1505 	return ret;
1506 }
1507 EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
1508 
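/*
 * How the wrapped-key helpers above fit together (a sketch of the flow, not a
 * verbatim driver excerpt): a long-term wrapped key is either generated on
 * the SoC or imported from a raw key, it is re-wrapped with the per-boot
 * ephemeral key before use, and the ephemerally-wrapped form is what gets
 * programmed into ICE and used to derive the software secret:
 *
 *	qcom_scm_generate_ice_key(lt_key, lt_key_size);
 *		(or qcom_scm_import_ice_key(raw_key, raw_key_size,
 *					    lt_key, lt_key_size))
 *	qcom_scm_prepare_ice_key(lt_key, lt_key_size, eph_key, eph_key_size);
 *	qcom_scm_derive_sw_secret(eph_key, eph_key_size,
 *				  sw_secret, sw_secret_size);
 */
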
1509 /**
1510  * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1511  *
1512  * Return: true if HDCP is supported, false if not.
1513  */
1514 bool qcom_scm_hdcp_available(void)
1515 {
1516 	bool avail;
1517 	int ret = qcom_scm_clk_enable();
1518 
1519 	if (ret)
1520 		return false;
1521 
1522 	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1523 						QCOM_SCM_HDCP_INVOKE);
1524 
1525 	qcom_scm_clk_disable();
1526 
1527 	return avail;
1528 }
1529 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
1530 
1531 /**
1532  * qcom_scm_hdcp_req() - Send HDCP request.
1533  * @req: HDCP request array
1534  * @req_cnt: HDCP request array count
1535  * @resp: response buffer passed to SCM
1536  *
1537  * Write HDCP register(s) through SCM.
1538  */
1539 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1540 {
1541 	int ret;
1542 	struct qcom_scm_desc desc = {
1543 		.svc = QCOM_SCM_SVC_HDCP,
1544 		.cmd = QCOM_SCM_HDCP_INVOKE,
1545 		.arginfo = QCOM_SCM_ARGS(10),
1546 		.args = {
1547 			req[0].addr,
1548 			req[0].val,
1549 			req[1].addr,
1550 			req[1].val,
1551 			req[2].addr,
1552 			req[2].val,
1553 			req[3].addr,
1554 			req[3].val,
1555 			req[4].addr,
1556 			req[4].val
1557 		},
1558 		.owner = ARM_SMCCC_OWNER_SIP,
1559 	};
1560 	struct qcom_scm_res res;
1561 
1562 	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1563 		return -ERANGE;
1564 
1565 	ret = qcom_scm_clk_enable();
1566 	if (ret)
1567 		return ret;
1568 
1569 	ret = qcom_scm_call(__scm->dev, &desc, &res);
1570 	*resp = res.result[0];
1571 
1572 	qcom_scm_clk_disable();
1573 
1574 	return ret;
1575 }
1576 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
1577 
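/*
 * Illustrative request (a sketch; 'hdcp_reg'/'hdcp_val' are placeholders):
 * note that the descriptor above always reads five address/value pairs, so
 * callers should pass an array of QCOM_SCM_HDCP_MAX_REQ_CNT entries even
 * when fewer of them are meaningful:
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = hdcp_reg, .val = hdcp_val },
 *	};
 *	u32 resp;
 *
 *	ret = qcom_scm_hdcp_req(req, QCOM_SCM_HDCP_MAX_REQ_CNT, &resp);
 */
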
1578 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1579 {
1580 	struct qcom_scm_desc desc = {
1581 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1582 		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
1583 		.arginfo = QCOM_SCM_ARGS(3),
1584 		.args[0] = sec_id,
1585 		.args[1] = ctx_num,
1586 		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1587 		.owner = ARM_SMCCC_OWNER_SIP,
1588 	};
1589 
1590 	return qcom_scm_call(__scm->dev, &desc, NULL);
1591 }
1592 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
1593 
1594 int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1595 {
1596 	struct qcom_scm_desc desc = {
1597 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1598 		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1599 		.arginfo = QCOM_SCM_ARGS(2),
1600 		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1601 		.args[1] = en,
1602 		.owner = ARM_SMCCC_OWNER_SIP,
1603 	};
1604 
1606 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1607 }
1608 EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
1609 
1610 bool qcom_scm_lmh_dcvsh_available(void)
1611 {
1612 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1613 }
1614 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
1615 
1616 /*
1617  * This is only supposed to be called once by the TZMem module. It takes the
1618  * SCM struct device as an argument and uses it to issue the call because, at
1619  * the time the SHM Bridge is enabled, SCM is not yet fully set up and doesn't
1620  * accept global user calls. Don't try to use the __scm pointer here.
1621  */
1622 int qcom_scm_shm_bridge_enable(struct device *scm_dev)
1623 {
1624 	int ret;
1625 
1626 	struct qcom_scm_desc desc = {
1627 		.svc = QCOM_SCM_SVC_MP,
1628 		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
1629 		.owner = ARM_SMCCC_OWNER_SIP
1630 	};
1631 
1632 	struct qcom_scm_res res;
1633 
1634 	if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
1635 					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
1636 		return -EOPNOTSUPP;
1637 
1638 	ret = qcom_scm_call(scm_dev, &desc, &res);
1639 
1640 	if (ret)
1641 		return ret;
1642 
1643 	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
1644 		return -EOPNOTSUPP;
1645 
1646 	return res.result[0];
1647 }
1648 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
1649 
1650 int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
1651 			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
1652 			       u64 ns_vmids, u64 *handle)
1653 {
1654 	struct qcom_scm_desc desc = {
1655 		.svc = QCOM_SCM_SVC_MP,
1656 		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
1657 		.owner = ARM_SMCCC_OWNER_SIP,
1658 		.args[0] = pfn_and_ns_perm_flags,
1659 		.args[1] = ipfn_and_s_perm_flags,
1660 		.args[2] = size_and_flags,
1661 		.args[3] = ns_vmids,
1662 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
1663 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
1664 	};
1665 
1666 	struct qcom_scm_res res;
1667 	int ret;
1668 
1669 	ret = qcom_scm_call(__scm->dev, &desc, &res);
1670 
1671 	if (handle && !ret)
1672 		*handle = res.result[1];
1673 
1674 	return ret ?: res.result[0];
1675 }
1676 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
1677 
1678 int qcom_scm_shm_bridge_delete(u64 handle)
1679 {
1680 	struct qcom_scm_desc desc = {
1681 		.svc = QCOM_SCM_SVC_MP,
1682 		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
1683 		.owner = ARM_SMCCC_OWNER_SIP,
1684 		.args[0] = handle,
1685 		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1686 	};
1687 
1688 	return qcom_scm_call(__scm->dev, &desc, NULL);
1689 }
1690 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
1691 
1692 int qcom_scm_lmh_profile_change(u32 profile_id)
1693 {
1694 	struct qcom_scm_desc desc = {
1695 		.svc = QCOM_SCM_SVC_LMH,
1696 		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1697 		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1698 		.args[0] = profile_id,
1699 		.owner = ARM_SMCCC_OWNER_SIP,
1700 	};
1701 
1702 	return qcom_scm_call(__scm->dev, &desc, NULL);
1703 }
1704 EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
1705 
1706 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1707 		       u64 limit_node, u32 node_id, u64 version)
1708 {
1709 	int ret, payload_size = 5 * sizeof(u32);
1710 
1711 	struct qcom_scm_desc desc = {
1712 		.svc = QCOM_SCM_SVC_LMH,
1713 		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1714 		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1715 					QCOM_SCM_VAL, QCOM_SCM_VAL),
1716 		.args[1] = payload_size,
1717 		.args[2] = limit_node,
1718 		.args[3] = node_id,
1719 		.args[4] = version,
1720 		.owner = ARM_SMCCC_OWNER_SIP,
1721 	};
1722 
1723 	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1724 							       payload_size,
1725 							       GFP_KERNEL);
1726 	if (!payload_buf)
1727 		return -ENOMEM;
1728 
1729 	payload_buf[0] = payload_fn;
1730 	payload_buf[1] = 0;
1731 	payload_buf[2] = payload_reg;
1732 	payload_buf[3] = 1;
1733 	payload_buf[4] = payload_val;
1734 
1735 	desc.args[0] = qcom_tzmem_to_phys(payload_buf);
1736 
1737 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1738 
1739 	return ret;
1740 }
1741 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
1742 
1743 int qcom_scm_gpu_init_regs(u32 gpu_req)
1744 {
1745 	struct qcom_scm_desc desc = {
1746 		.svc = QCOM_SCM_SVC_GPU,
1747 		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
1748 		.arginfo = QCOM_SCM_ARGS(1),
1749 		.args[0] = gpu_req,
1750 		.owner = ARM_SMCCC_OWNER_SIP,
1751 	};
1752 
1753 	return qcom_scm_call(__scm->dev, &desc, NULL);
1754 }
1755 EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
1756 
1757 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1758 {
1759 	struct device_node *tcsr;
1760 	struct device_node *np = dev->of_node;
1761 	struct resource res;
1762 	u32 offset;
1763 	int ret;
1764 
1765 	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1766 	if (!tcsr)
1767 		return 0;
1768 
1769 	ret = of_address_to_resource(tcsr, 0, &res);
1770 	of_node_put(tcsr);
1771 	if (ret)
1772 		return ret;
1773 
1774 	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1775 	if (ret < 0)
1776 		return ret;
1777 
1778 	*addr = res.start + offset;
1779 
1780 	return 0;
1781 }
1782 
1783 #ifdef CONFIG_QCOM_QSEECOM
1784 
1785 /* Lock for QSEECOM SCM call executions */
1786 static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
1787 
1788 static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1789 				   struct qcom_scm_qseecom_resp *res)
1790 {
1791 	struct qcom_scm_res scm_res = {};
1792 	int status;
1793 
1794 	/*
1795 	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
1796 	 * require the respective call lock to be held.
1797 	 */
1798 	lockdep_assert_held(&qcom_scm_qseecom_call_lock);
1799 
1800 	status = qcom_scm_call(__scm->dev, desc, &scm_res);
1801 
1802 	res->result = scm_res.result[0];
1803 	res->resp_type = scm_res.result[1];
1804 	res->data = scm_res.result[2];
1805 
1806 	if (status)
1807 		return status;
1808 
1809 	return 0;
1810 }
1811 
1812 /**
1813  * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
1814  * @desc: SCM call descriptor.
1815  * @res:  SCM call response (output).
1816  *
1817  * Performs the QSEECOM SCM call described by @desc, returning the response in
1818  * @res.
1819  *
1820  * Return: Zero on success, nonzero on failure.
1821  */
1822 static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1823 				 struct qcom_scm_qseecom_resp *res)
1824 {
1825 	int status;
1826 
1827 	/*
1828 	 * Note: QSEECOM SCM calls should not be executed concurrently, so
1829 	 * lock things here. This needs to be extended to callback/listener
1830 	 * handling when support for that is implemented.
1831 	 */
1832 
1833 	mutex_lock(&qcom_scm_qseecom_call_lock);
1834 	status = __qcom_scm_qseecom_call(desc, res);
1835 	mutex_unlock(&qcom_scm_qseecom_call_lock);
1836 
1837 	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
1838 		__func__, desc->owner, desc->svc, desc->cmd, res->result,
1839 		res->resp_type, res->data);
1840 
1841 	if (status) {
1842 		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
1843 		return status;
1844 	}
1845 
1846 	/*
1847 	 * TODO: Handle incomplete and blocked calls:
1848 	 *
1849 	 * Incomplete and blocked calls are not supported yet. Some devices
1850 	 * and/or commands require those, some don't. Let's warn about them
1851 	 * prominently in case someone attempts to try these commands with a
1852 	 * device/command combination that isn't supported yet.
1853 	 */
1854 	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
1855 	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
1856 
1857 	return 0;
1858 }
1859 
1860 /**
1861  * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1862  * @version: Pointer where the QSEECOM version will be stored.
1863  *
1864  * Performs the QSEECOM SCM call that queries the version of the QSEECOM
1865  * interface currently running in the TrustZone.
1866  *
1867  * Return: Zero on success, nonzero on failure.
1868  */
1869 static int qcom_scm_qseecom_get_version(u32 *version)
1870 {
1871 	struct qcom_scm_desc desc = {};
1872 	struct qcom_scm_qseecom_resp res = {};
1873 	u32 feature = 10;
1874 	int ret;
1875 
1876 	desc.owner = QSEECOM_TZ_OWNER_SIP;
1877 	desc.svc = QSEECOM_TZ_SVC_INFO;
1878 	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
1879 	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
1880 	desc.args[0] = feature;
1881 
1882 	ret = qcom_scm_qseecom_call(&desc, &res);
1883 	if (ret)
1884 		return ret;
1885 
1886 	*version = res.result;
1887 	return 0;
1888 }
1889 
1890 /**
1891  * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
1892  * @app_name: The name of the app.
1893  * @app_id:   The returned app ID.
1894  *
1895  * Query and return the application ID of the QSEE app identified by the given
1896  * name. The returned ID is the unique identifier of the app required for
1897  * subsequent communication.
1898  *
1899  * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
1900  * loaded or could not be found.
1901  */
1902 int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
1903 {
1904 	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
1905 	unsigned long app_name_len = strlen(app_name);
1906 	struct qcom_scm_desc desc = {};
1907 	struct qcom_scm_qseecom_resp res = {};
1908 	int status;
1909 
1910 	if (app_name_len >= name_buf_size)
1911 		return -EINVAL;
1912 
1913 	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1914 							     name_buf_size,
1915 							     GFP_KERNEL);
1916 	if (!name_buf)
1917 		return -ENOMEM;
1918 
1919 	memcpy(name_buf, app_name, app_name_len);
1920 
1921 	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
1922 	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
1923 	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
1924 	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
1925 	desc.args[0] = qcom_tzmem_to_phys(name_buf);
1926 	desc.args[1] = app_name_len;
1927 
1928 	status = qcom_scm_qseecom_call(&desc, &res);
1929 
1930 	if (status)
1931 		return status;
1932 
1933 	if (res.result == QSEECOM_RESULT_FAILURE)
1934 		return -ENOENT;
1935 
1936 	if (res.result != QSEECOM_RESULT_SUCCESS)
1937 		return -EINVAL;
1938 
1939 	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
1940 		return -EINVAL;
1941 
1942 	*app_id = res.data;
1943 	return 0;
1944 }
1945 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
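
/*
 * Illustrative sketch, not part of this driver: a hypothetical in-kernel
 * client could look up its QSEE app as follows (the app name "example-ta"
 * is made up for the example):
 *
 *	u32 app_id;
 *	int ret;
 *
 *	ret = qcom_scm_qseecom_app_get_id("example-ta", &app_id);
 *	if (ret)
 *		return ret;
 *
 * A return value of -ENOENT indicates that the app is not (yet) loaded; on
 * success, app_id is passed to qcom_scm_qseecom_app_send() for subsequent
 * communication.
 */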
1946 
1947 /**
1948  * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
1949  * @app_id:   The ID of the target app.
1950  * @req:      Request buffer sent to the app (must be TZ memory).
1951  * @req_size: Size of the request buffer.
1952  * @rsp:      Response buffer, written to by the app (must be TZ memory).
1953  * @rsp_size: Size of the response buffer.
1954  *
1955  * Sends a request to the QSEE app associated with the given ID and reads back
1956  * its response. The caller must provide two TZ memory regions, one for the
1957  * request and one for the response, and fill the @req region with the
1958  * respective (app-specific) request data. The QSEE app reads this and returns
1959  * its response in the @rsp region.
1960  *
1961  * Return: Zero on success, nonzero on failure.
1962  */
1963 int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
1964 			      void *rsp, size_t rsp_size)
1965 {
1966 	struct qcom_scm_qseecom_resp res = {};
1967 	struct qcom_scm_desc desc = {};
1968 	phys_addr_t req_phys;
1969 	phys_addr_t rsp_phys;
1970 	int status;
1971 
1972 	req_phys = qcom_tzmem_to_phys(req);
1973 	rsp_phys = qcom_tzmem_to_phys(rsp);
1974 
1975 	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
1976 	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
1977 	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
1978 	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
1979 				     QCOM_SCM_RW, QCOM_SCM_VAL,
1980 				     QCOM_SCM_RW, QCOM_SCM_VAL);
1981 	desc.args[0] = app_id;
1982 	desc.args[1] = req_phys;
1983 	desc.args[2] = req_size;
1984 	desc.args[3] = rsp_phys;
1985 	desc.args[4] = rsp_size;
1986 
1987 	status = qcom_scm_qseecom_call(&desc, &res);
1988 
1989 	if (status)
1990 		return status;
1991 
1992 	if (res.result != QSEECOM_RESULT_SUCCESS)
1993 		return -EIO;
1994 
1995 	return 0;
1996 }
1997 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
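
/*
 * Illustrative sketch, not part of this driver: both buffers must come from
 * TZ memory. Assuming the caller has access to a struct qcom_tzmem_pool
 * ("pool" below is an assumption made for the example), an exchange could
 * look like:
 *
 *	void *req __free(qcom_tzmem) = qcom_tzmem_alloc(pool, req_size,
 *							GFP_KERNEL);
 *	void *rsp __free(qcom_tzmem) = qcom_tzmem_alloc(pool, rsp_size,
 *							GFP_KERNEL);
 *	int ret;
 *
 *	if (!req || !rsp)
 *		return -ENOMEM;
 *
 *	(fill req with the app-specific request data)
 *
 *	ret = qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
 *	if (ret)
 *		return ret;
 *
 *	(read the app-specific response from rsp)
 */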
1998 
1999 /*
2000  * We do not yet support re-entrant calls via the qseecom interface. To prevent
2001  * any potential issues with this, only allow validated machines for now.
2002  */
2003 static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
2004 	{ .compatible = "asus,vivobook-s15" },
2005 	{ .compatible = "asus,zenbook-a14-ux3407qa" },
2006 	{ .compatible = "asus,zenbook-a14-ux3407ra" },
2007 	{ .compatible = "dell,inspiron-14-plus-7441" },
2008 	{ .compatible = "dell,latitude-7455" },
2009 	{ .compatible = "dell,xps13-9345" },
2010 	{ .compatible = "hp,elitebook-ultra-g1q" },
2011 	{ .compatible = "hp,omnibook-x14" },
2012 	{ .compatible = "huawei,gaokun3" },
2013 	{ .compatible = "lenovo,flex-5g" },
2014 	{ .compatible = "lenovo,thinkbook-16" },
2015 	{ .compatible = "lenovo,thinkpad-t14s" },
2016 	{ .compatible = "lenovo,thinkpad-x13s", },
2017 	{ .compatible = "lenovo,yoga-slim7x" },
2018 	{ .compatible = "microsoft,arcata", },
2019 	{ .compatible = "microsoft,blackrock" },
2020 	{ .compatible = "microsoft,denali", },
2021 	{ .compatible = "microsoft,romulus13", },
2022 	{ .compatible = "microsoft,romulus15", },
2023 	{ .compatible = "qcom,hamoa-iot-evk" },
2024 	{ .compatible = "qcom,sc8180x-primus" },
2025 	{ .compatible = "qcom,x1e001de-devkit" },
2026 	{ .compatible = "qcom,x1e80100-crd" },
2027 	{ .compatible = "qcom,x1e80100-qcp" },
2028 	{ .compatible = "qcom,x1p42100-crd" },
2029 	{ }
2030 };
2031 
2032 static void qcom_scm_qseecom_free(void *data)
2033 {
2034 	struct platform_device *qseecom_dev = data;
2035 
2036 	platform_device_del(qseecom_dev);
2037 	platform_device_put(qseecom_dev);
2038 }
2039 
2040 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
2041 {
2042 	struct platform_device *qseecom_dev;
2043 	u32 version;
2044 	int ret;
2045 
2046 	/*
2047 	 * Note: We do two steps of validation here: First, we try to query the
2048 	 * QSEECOM version as a check to see if the interface exists on this
2049 	 * device. Second, we check against known good devices due to current
2050 	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
2051 	 *
2052 	 * Note that we deliberately do the machine check after the version
2053 	 * check so that we can log potentially supported devices. This should
2054 	 * be safe as downstream sources indicate that the version query is
2055 	 * neither blocking nor reentrant.
2056 	 */
2057 	ret = qcom_scm_qseecom_get_version(&version);
2058 	if (ret)
2059 		return 0;
2060 
2061 	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
2062 
2063 	if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
2064 		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
2065 		return 0;
2066 	}
2067 
2068 	/*
2069 	 * Set up QSEECOM interface device. All application clients will be
2070 	 * set up and managed by the corresponding driver for it.
2071 	 */
2072 	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
2073 	if (!qseecom_dev)
2074 		return -ENOMEM;
2075 
2076 	qseecom_dev->dev.parent = scm->dev;
2077 
2078 	ret = platform_device_add(qseecom_dev);
2079 	if (ret) {
2080 		platform_device_put(qseecom_dev);
2081 		return ret;
2082 	}
2083 
2084 	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
2085 }
2086 
2087 #else /* CONFIG_QCOM_QSEECOM */
2088 
2089 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
2090 {
2091 	return 0;
2092 }
2093 
2094 #endif /* CONFIG_QCOM_QSEECOM */
2095 
2096 /**
2097  * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
2098  * @inbuf: start address of memory area used for inbound buffer.
2099  * @inbuf_size: size of the memory area used for inbound buffer.
2100  * @outbuf: start address of memory area used for outbound buffer.
2101  * @outbuf_size: size of the memory area used for outbound buffer.
2102  * @result: result of QTEE object invocation.
2103  * @response_type: response type returned by QTEE.
2104  *
2105  * @response_type determines how the contents of @inbuf and @outbuf
2106  * should be processed.
2107  *
2108  * Return: Zero on success, or a negative errno on failure.
2109  */
2110 int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
2111 			     phys_addr_t outbuf, size_t outbuf_size,
2112 			     u64 *result, u64 *response_type)
2113 {
2114 	struct qcom_scm_desc desc = {
2115 		.svc = QCOM_SCM_SVC_SMCINVOKE,
2116 		.cmd = QCOM_SCM_SMCINVOKE_INVOKE,
2117 		.owner = ARM_SMCCC_OWNER_TRUSTED_OS,
2118 		.args[0] = inbuf,
2119 		.args[1] = inbuf_size,
2120 		.args[2] = outbuf,
2121 		.args[3] = outbuf_size,
2122 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
2123 					 QCOM_SCM_RW, QCOM_SCM_VAL),
2124 	};
2125 	struct qcom_scm_res res;
2126 	int ret;
2127 
2128 	ret = qcom_scm_call(__scm->dev, &desc, &res);
2129 	if (ret)
2130 		return ret;
2131 
2132 	if (response_type)
2133 		*response_type = res.result[0];
2134 
2135 	if (result)
2136 		*result = res.result[1];
2137 
2138 	return 0;
2139 }
2140 EXPORT_SYMBOL_GPL(qcom_scm_qtee_invoke_smc);
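
/*
 * Illustrative sketch, not part of this driver: a QTEE transport driver
 * passes the physical addresses of its marshalling buffers and dispatches on
 * the returned response type. The buffer variables below are assumptions
 * made for the example:
 *
 *	u64 result, resp_type;
 *	int ret;
 *
 *	ret = qcom_scm_qtee_invoke_smc(inbuf_phys, inbuf_size,
 *				       outbuf_phys, outbuf_size,
 *				       &result, &resp_type);
 *	if (ret)
 *		return ret;
 *
 * Depending on resp_type, the caller either consumes the invocation result
 * from the outbound buffer or services a callback request and replies via
 * qcom_scm_qtee_callback_response().
 */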
2141 
2142 /**
2143  * qcom_scm_qtee_callback_response() - Submit response for callback request.
2144  * @buf: start address of memory area used for outbound buffer.
2145  * @buf_size: size of the memory area used for outbound buffer.
2146  * @result: Result of QTEE object invocation.
2147  * @response_type: Response type returned by QTEE.
2148  *
2149  * @response_type determines how the contents of @buf should be processed.
2150  *
2151  * Return: Zero on success, or a negative errno on failure.
2152  */
2153 int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
2154 				    u64 *result, u64 *response_type)
2155 {
2156 	struct qcom_scm_desc desc = {
2157 		.svc = QCOM_SCM_SVC_SMCINVOKE,
2158 		.cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
2159 		.owner = ARM_SMCCC_OWNER_TRUSTED_OS,
2160 		.args[0] = buf,
2161 		.args[1] = buf_size,
2162 		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
2163 	};
2164 	struct qcom_scm_res res;
2165 	int ret;
2166 
2167 	ret = qcom_scm_call(__scm->dev, &desc, &res);
2168 	if (ret)
2169 		return ret;
2170 
2171 	if (response_type)
2172 		*response_type = res.result[0];
2173 
2174 	if (result)
2175 		*result = res.result[1];
2176 
2177 	return 0;
2178 }
2179 EXPORT_SYMBOL_GPL(qcom_scm_qtee_callback_response);
2180 
2181 static void qcom_scm_qtee_free(void *data)
2182 {
2183 	struct platform_device *qtee_dev = data;
2184 
2185 	platform_device_unregister(qtee_dev);
2186 }
2187 
2188 static void qcom_scm_qtee_init(struct qcom_scm *scm)
2189 {
2190 	struct platform_device *qtee_dev;
2191 	u64 result, response_type;
2192 	int ret;
2193 
2194 	/*
2195 	 * Probe for smcinvoke support. The call is expected to fail because of
2196 	 * the invalid buffers, but QTEE first checks whether the call is
2197 	 * supported by its syscall handler and returns -EIO if it is not.
2198 	 */
2199 	ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
2200 	if (ret == -EIO)
2201 		return;
2202 
2203 	/* Set up the QTEE interface device. */
2204 	qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
2205 						 PLATFORM_DEVID_NONE, NULL, 0);
2206 	if (IS_ERR(qtee_dev))
2207 		return;
2208 
2209 	devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
2210 }
2211 
2212 /**
2213  * qcom_scm_is_available() - Checks if SCM is available
2214  */
2215 bool qcom_scm_is_available(void)
2216 {
2217 	/* Paired with smp_store_release() in qcom_scm_probe */
2218 	return !!smp_load_acquire(&__scm);
2219 }
2220 EXPORT_SYMBOL_GPL(qcom_scm_is_available);
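
/*
 * Illustrative sketch, not part of this driver: SCM consumers typically gate
 * their probe on this check and defer until qcom_scm_probe() has published
 * __scm via smp_store_release(). "example_consumer_probe" is a made-up name:
 *
 *	static int example_consumer_probe(struct platform_device *pdev)
 *	{
 *		if (!qcom_scm_is_available())
 *			return -EPROBE_DEFER;
 *
 *		return 0;
 *	}
 */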
2221 
2222 static int qcom_scm_fill_irq_fwspec_params(struct irq_fwspec *fwspec, u32 hwirq)
2223 {
2224 	if (hwirq >= GIC_SPI_BASE && hwirq <= GIC_MAX_SPI) {
2225 		fwspec->param[0] = GIC_SPI;
2226 		fwspec->param[1] = hwirq - GIC_SPI_BASE;
2227 	} else if (hwirq >= GIC_ESPI_BASE && hwirq <= GIC_MAX_ESPI) {
2228 		fwspec->param[0] = GIC_ESPI;
2229 		fwspec->param[1] = hwirq - GIC_ESPI_BASE;
2230 	} else {
2231 		WARN(1, "Unexpected hwirq: %u\n", hwirq);
2232 		return -ENXIO;
2233 	}
2234 
2235 	fwspec->param[2] = IRQ_TYPE_EDGE_RISING;
2236 	fwspec->param_count = 3;
2237 
2238 	return 0;
2239 }
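
/*
 * Worked example (illustrative values): a firmware-reported hwirq of 178
 * lies in the SPI range and maps to { GIC_SPI, 178 - 32 = 146,
 * IRQ_TYPE_EDGE_RISING }; a hwirq of 4100 lies in the ESPI range and maps
 * to { GIC_ESPI, 4, IRQ_TYPE_EDGE_RISING }.
 */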
2240 
2241 static int qcom_scm_query_waitq_count(struct qcom_scm *scm)
2242 {
2243 	struct qcom_scm_desc desc = {
2244 		.svc = QCOM_SCM_SVC_WAITQ,
2245 		.cmd = QCOM_SCM_WAITQ_GET_INFO,
2246 		.owner = ARM_SMCCC_OWNER_SIP
2247 	};
2248 	struct qcom_scm_res res;
2249 	int ret;
2250 
2251 	ret = qcom_scm_call_atomic(scm->dev, &desc, &res);
2252 	if (ret)
2253 		return ret;
2254 
2255 	return res.result[0] & GENMASK(7, 0);
2256 }
2257 
2258 static int qcom_scm_get_waitq_irq(struct qcom_scm *scm)
2259 {
2260 	struct qcom_scm_desc desc = {
2261 		.svc = QCOM_SCM_SVC_WAITQ,
2262 		.cmd = QCOM_SCM_WAITQ_GET_INFO,
2263 		.owner = ARM_SMCCC_OWNER_SIP
2264 	};
2265 	struct device_node *parent_irq_node;
2266 	struct irq_fwspec fwspec;
2267 	struct qcom_scm_res res;
2268 	u32 hwirq;
2269 	int ret;
2270 
2271 	ret = qcom_scm_call_atomic(scm->dev, &desc, &res);
2272 	if (ret)
2273 		return ret;
2274 
2275 	hwirq = res.result[1] & GENMASK(15, 0);
2276 	ret = qcom_scm_fill_irq_fwspec_params(&fwspec, hwirq);
2277 	if (ret)
2278 		return ret;
2279 
2280 	parent_irq_node = of_irq_find_parent(scm->dev->of_node);
2281 	if (!parent_irq_node)
2282 		return -ENODEV;
2283 
2284 	fwspec.fwnode = of_fwnode_handle(parent_irq_node);
2285 
2286 	return irq_create_fwspec_mapping(&fwspec);
2287 }
2288 
2289 static struct completion *qcom_scm_get_completion(u32 wq_ctx)
2290 {
2291 	struct completion *wq;
2292 
2293 	if (WARN_ON_ONCE(wq_ctx >= __scm->wq_cnt))
2294 		return ERR_PTR(-EINVAL);
2295 
2296 	wq = &__scm->waitq_comps[wq_ctx];
2297 
2298 	return wq;
2299 }
2300 
2301 int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
2302 {
2303 	struct completion *wq;
2304 
2305 	wq = qcom_scm_get_completion(wq_ctx);
2306 	if (IS_ERR(wq))
2307 		return PTR_ERR(wq);
2308 
2309 	wait_for_completion(wq);
2310 
2311 	return 0;
2312 }
2313 
2314 static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
2315 {
2316 	struct completion *wq;
2317 
2318 	wq = qcom_scm_get_completion(wq_ctx);
2319 	if (IS_ERR(wq))
2320 		return PTR_ERR(wq);
2321 
2322 	complete(wq);
2323 
2324 	return 0;
2325 }
2326 
2327 static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
2328 {
2329 	int ret;
2330 	struct qcom_scm *scm = data;
2331 	u32 wq_ctx, flags, more_pending = 0;
2332 
2333 	do {
2334 		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
2335 		if (ret) {
2336 			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
2337 			goto out;
2338 		}
2339 
2340 		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
2341 			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
2342 			goto out;
2343 		}
2344 
2345 		ret = qcom_scm_waitq_wakeup(wq_ctx);
2346 		if (ret)
2347 			goto out;
2348 	} while (more_pending);
2349 
2350 out:
2351 	return IRQ_HANDLED;
2352 }
2353 
2354 static int get_download_mode(char *buffer, const struct kernel_param *kp)
2355 {
2356 	if (download_mode >= ARRAY_SIZE(download_mode_name))
2357 		return sysfs_emit(buffer, "unknown mode\n");
2358 
2359 	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
2360 }
2361 
2362 static int set_download_mode(const char *val, const struct kernel_param *kp)
2363 {
2364 	bool tmp;
2365 	int ret;
2366 
2367 	ret = sysfs_match_string(download_mode_name, val);
2368 	if (ret < 0) {
2369 		ret = kstrtobool(val, &tmp);
2370 		if (ret < 0) {
2371 			pr_err("qcom_scm: err: %d\n", ret);
2372 			return ret;
2373 		}
2374 
2375 		ret = tmp ? 1 : 0;
2376 	}
2377 
2378 	download_mode = ret;
2379 	if (__scm)
2380 		qcom_scm_set_download_mode(download_mode);
2381 
2382 	return 0;
2383 }
2384 
2385 static const struct kernel_param_ops download_mode_param_ops = {
2386 	.get = get_download_mode,
2387 	.set = set_download_mode,
2388 };
2389 
2390 module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
2391 MODULE_PARM_DESC(download_mode, "download mode: acceptable values are off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode, and full,mini for both full and minidump mode together");
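
/*
 * Illustrative usage, not part of the code: the mode can be selected on the
 * kernel command line, e.g. "qcom_scm.download_mode=full", or changed at
 * runtime (by root) via /sys/module/qcom_scm/parameters/download_mode.
 */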
2392 
2393 static int qcom_scm_probe(struct platform_device *pdev)
2394 {
2395 	struct qcom_tzmem_pool_config pool_config;
2396 	struct qcom_scm *scm;
2397 	int irq, ret;
2398 	int i;
2399 
2400 	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
2401 	if (!scm)
2402 		return -ENOMEM;
2403 
2404 	scm->dev = &pdev->dev;
2405 	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
2406 	if (ret < 0)
2407 		return ret;
2408 
2409 	mutex_init(&scm->scm_bw_lock);
2410 
2411 	scm->path = devm_of_icc_get(&pdev->dev, NULL);
2412 	if (IS_ERR(scm->path))
2413 		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
2414 				     "failed to acquire interconnect path\n");
2415 
2416 	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
2417 	if (IS_ERR(scm->core_clk))
2418 		return PTR_ERR(scm->core_clk);
2419 
2420 	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
2421 	if (IS_ERR(scm->iface_clk))
2422 		return PTR_ERR(scm->iface_clk);
2423 
2424 	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
2425 	if (IS_ERR(scm->bus_clk))
2426 		return PTR_ERR(scm->bus_clk);
2427 
2428 	scm->reset.ops = &qcom_scm_pas_reset_ops;
2429 	scm->reset.nr_resets = 1;
2430 	scm->reset.of_node = pdev->dev.of_node;
2431 	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
2432 	if (ret)
2433 		return ret;
2434 
2435 	/* vote for max clk rate for highest performance */
2436 	ret = clk_set_rate(scm->core_clk, INT_MAX);
2437 	if (ret)
2438 		return ret;
2439 
2440 	ret = of_reserved_mem_device_init(scm->dev);
2441 	if (ret && ret != -ENODEV)
2442 		return dev_err_probe(scm->dev, ret,
2443 				     "Failed to setup the reserved memory region for TZ mem\n");
2444 
2445 	ret = qcom_tzmem_enable(scm->dev);
2446 	if (ret)
2447 		return dev_err_probe(scm->dev, ret,
2448 				     "Failed to enable the TrustZone memory allocator\n");
2449 
2450 	memset(&pool_config, 0, sizeof(pool_config));
2451 	pool_config.initial_size = 0;
2452 	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
2453 	pool_config.max_size = SZ_256K;
2454 
2455 	scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
2456 	if (IS_ERR(scm->mempool))
2457 		return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
2458 				     "Failed to create the SCM memory pool\n");
2459 
2460 	ret = qcom_scm_query_waitq_count(scm);
2461 	scm->wq_cnt = ret < 0 ? QCOM_SCM_DEFAULT_WAITQ_COUNT : ret;
2462 	scm->waitq_comps = devm_kcalloc(&pdev->dev, scm->wq_cnt, sizeof(*scm->waitq_comps),
2463 					GFP_KERNEL);
2464 	if (!scm->waitq_comps)
2465 		return -ENOMEM;
2466 
2467 	for (i = 0; i < scm->wq_cnt; i++)
2468 		init_completion(&scm->waitq_comps[i]);
2469 
2470 	irq = qcom_scm_get_waitq_irq(scm);
2471 	if (irq < 0)
2472 		irq = platform_get_irq_optional(pdev, 0);
2473 
2474 	if (irq < 0) {
2475 		if (irq != -ENXIO)
2476 			return irq;
2477 	} else {
2478 		ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
2479 						IRQF_ONESHOT, "qcom-scm", scm);
2480 		if (ret < 0)
2481 			return dev_err_probe(scm->dev, ret,
2482 					     "Failed to request qcom-scm irq\n");
2483 	}
2484 
2485 	/*
2486 	 * Paired with smp_load_acquire() in qcom_scm_is_available().
2487 	 *
2488 	 * This marks the SCM API as ready to accept user calls and can only
2489 	 * be called after the TrustZone memory pool is initialized and the
2490 	 * waitqueue interrupt requested.
2491 	 */
2492 	smp_store_release(&__scm, scm);
2493 
2494 	__get_convention();
2495 
2496 	/*
2497 	 * If "download mode" is requested, from this point on warmboot
2498 	 * will cause the boot stages to enter download mode, unless
2499 	 * disabled below by a clean shutdown/reboot.
2500 	 */
2501 	qcom_scm_set_download_mode(download_mode);
2502 
2503 	/*
2504 	 * Disable SDI if the DT indicates that it is enabled by default.
2505 	 */
2506 	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
2507 		qcom_scm_disable_sdi();
2508 
2509 	/*
2510 	 * Initialize the QSEECOM interface.
2511 	 *
2512 	 * Note: QSEECOM is fairly self-contained and this only adds the
2513 	 * interface device (the driver of which does most of the heavy
2514 	 * lifting). So any errors returned here should be either -ENOMEM or
2515 	 * -EINVAL (with the latter only in case there's a bug in our code).
2516 	 * This means that there is no need to bring down the whole SCM driver.
2517 	 * Just log the error instead and let SCM live.
2518 	 */
2519 	ret = qcom_scm_qseecom_init(scm);
2520 	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
2521 
2522 	/* Initialize the QTEE object interface. */
2523 	qcom_scm_qtee_init(scm);
2524 
2525 	return 0;
2526 }
2527 
2528 static void qcom_scm_shutdown(struct platform_device *pdev)
2529 {
2530 	/* Clean shutdown, disable download mode to allow normal restart */
2531 	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
2532 }
2533 
2534 static const struct of_device_id qcom_scm_dt_match[] = {
2535 	{ .compatible = "qcom,scm" },
2536 
2537 	/* Legacy entries kept for backwards compatibility */
2538 	{ .compatible = "qcom,scm-apq8064" },
2539 	{ .compatible = "qcom,scm-apq8084" },
2540 	{ .compatible = "qcom,scm-ipq4019" },
2541 	{ .compatible = "qcom,scm-msm8953" },
2542 	{ .compatible = "qcom,scm-msm8974" },
2543 	{ .compatible = "qcom,scm-msm8996" },
2544 	{}
2545 };
2546 MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
2547 
2548 static struct platform_driver qcom_scm_driver = {
2549 	.driver = {
2550 		.name	= "qcom_scm",
2551 		.of_match_table = qcom_scm_dt_match,
2552 		.suppress_bind_attrs = true,
2553 	},
2554 	.probe = qcom_scm_probe,
2555 	.shutdown = qcom_scm_shutdown,
2556 };
2557 
2558 static int __init qcom_scm_init(void)
2559 {
2560 	return platform_driver_register(&qcom_scm_driver);
2561 }
2562 subsys_initcall(qcom_scm_init);
2563 
2564 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
2565 MODULE_LICENSE("GPL v2");
2566