// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP			= 2,
	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
	QSEECOM_TZ_OWNER_QSEE_OS		= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND			= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
};

#define RSCTABLE_BUFFER_NOT_SUFFICIENT		20

#define QSEECOM_MAX_APP_NAME_SIZE		64
#define SHMBRIDGE_RESULT_NOTSUPP		4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP]	= "off",
	[QCOM_DLOAD_FULLDUMP]	= "full",
	[QCOM_DLOAD_MINIDUMP]	= "mini",
	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * A device isn't required as there is only one argument - no device
	 * is needed to dma_map_single() anything to the secure world.
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on these firmwares. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:        Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed during cpuidle or
 * hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
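
/*
 * Example (illustrative sketch only, not compiled as part of this driver):
 * a platform's suspend/hotplug path would typically register its resume
 * entry point once at init and then terminate power collapse from the idle
 * path of each cpu. "cpu_resume" and the L2 flush flag below are assumptions
 * used for illustration.
 *
 *	static int example_pm_init(void)
 *	{
 *		return qcom_scm_set_warm_boot_addr(cpu_resume);
 *	}
 *
 *	static void example_cpu_idle_enter(void)
 *	{
 *		qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 *	}
 */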

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is the endpoint for powering down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * devm_qcom_scm_pas_context_alloc() - Allocate a peripheral authentication
 *				       service context for a given peripheral
 *
 * The PAS context is device-resource managed, so the caller does not need to
 * worry about freeing the context memory.
 *
 * @dev:	  PAS firmware device
 * @pas_id:	  peripheral authentication service id
 * @mem_phys:	  Subsystem reserved memory start address
 * @mem_size:	  Subsystem reserved memory size
 *
 * Returns: The new PAS context, or ERR_PTR() on failure.
 */
struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev,
							     u32 pas_id,
							     phys_addr_t mem_phys,
							     size_t mem_size)
{
	struct qcom_scm_pas_context *ctx;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->dev = dev;
	ctx->pas_id = pas_id;
	ctx->mem_phys = mem_phys;
	ctx->mem_size = mem_size;

	return ctx;
}
EXPORT_SYMBOL_GPL(devm_qcom_scm_pas_context_alloc);
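
/*
 * Example (illustrative sketch only): a remoteproc driver would typically
 * allocate its PAS context at probe time, after resolving the subsystem's
 * reserved memory region. "ADSP_PAS_ID", "mem_phys" and "mem_size" are
 * placeholders, not symbols defined by this driver.
 *
 *	struct qcom_scm_pas_context *ctx;
 *
 *	ctx = devm_qcom_scm_pas_context_alloc(dev, ADSP_PAS_ID,
 *					      mem_phys, mem_size);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 */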

static int __qcom_scm_pas_init_image(u32 pas_id, dma_addr_t mdata_phys,
				     struct qcom_scm_res *res)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret;
}

static int qcom_scm_pas_prep_and_init_image(struct qcom_scm_pas_context *ctx,
					    const void *metadata, size_t size)
{
	struct qcom_scm_res res;
	phys_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	mdata_buf = qcom_tzmem_alloc(__scm->mempool, size, GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);
	mdata_phys = qcom_tzmem_to_phys(mdata_buf);

	ret = __qcom_scm_pas_init_image(ctx->pas_id, mdata_phys, &res);
	if (ret < 0)
		qcom_tzmem_free(mdata_buf);
	else
		ctx->ptr = mdata_buf;

	return ret ? : res.result[0];
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @pas_id:	peripheral authentication service id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional pas context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) is used to track
 * the metadata allocation; the caller must release it by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size,
			    struct qcom_scm_pas_context *ctx)
{
	struct qcom_scm_res res;
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	if (ctx && ctx->use_tzmem)
		return qcom_scm_pas_prep_and_init_image(ctx, metadata, size);

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux, so we must not do it ourselves - hence
	 * we don't use the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = __qcom_scm_pas_init_image(pas_id, mdata_phys, &res);
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
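
/*
 * Example (illustrative sketch only): a typical firmware-loading sequence
 * keeps the metadata context alive until authentication has completed and
 * only then releases it. "pas_id", "metadata" and "metadata_size" are
 * placeholders for values the caller already owns.
 *
 *	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_size, ctx);
 *	if (ret)
 *		return ret;
 *
 *	...load the firmware segments into the reserved memory...
 *
 *	ret = qcom_scm_pas_auth_and_reset(pas_id);
 *	qcom_scm_pas_metadata_release(ctx);
 */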

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	pas context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx)
{
	if (!ctx->ptr)
		return;

	if (ctx->use_tzmem)
		qcom_tzmem_free(ctx->ptr);
	else
		dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @pas_id:	peripheral authentication service id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = pas_id,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

static void *__qcom_scm_pas_get_rsc_table(u32 pas_id, void *input_rt_tzm,
					  size_t input_rt_size,
					  size_t *output_rt_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_GET_RSCTABLE,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	void *output_rt_tzm;
	int ret;

	output_rt_tzm = qcom_tzmem_alloc(__scm->mempool, *output_rt_size, GFP_KERNEL);
	if (!output_rt_tzm)
		return ERR_PTR(-ENOMEM);

	desc.args[1] = qcom_tzmem_to_phys(input_rt_tzm);
	desc.args[2] = input_rt_size;
	desc.args[3] = qcom_tzmem_to_phys(output_rt_tzm);
	desc.args[4] = *output_rt_size;

	/*
	 * Whether the SMC call fails or passes, res.result[2] will hold the
	 * actual resource table size.
	 *
	 * If the passed 'output_rt_size' buffer is not sufficient to hold the
	 * resource table TrustZone sends, the response code in res.result[1]
	 * is RSCTABLE_BUFFER_NOT_SUFFICIENT, so that the caller can retry
	 * this SMC call with an output_rt_tzm buffer of res.result[2] size.
	 * That size, however, should not be unreasonably large.
	 */
	ret = qcom_scm_call(__scm->dev, &desc, &res);
	if (!ret && res.result[2] > SZ_1G) {
		ret = -E2BIG;
		goto free_output_rt;
	}

	*output_rt_size = res.result[2];
	if (ret && res.result[1] == RSCTABLE_BUFFER_NOT_SUFFICIENT)
		ret = -EOVERFLOW;

free_output_rt:
	if (ret)
		qcom_tzmem_free(output_rt_tzm);

	return ret ? ERR_PTR(ret) : output_rt_tzm;
}

/**
 * qcom_scm_pas_get_rsc_table() - Retrieve the resource table in the passed
 *				  output buffer for a given peripheral.
 *
 * A Qualcomm remote processor may rely on both static and dynamic resources
 * for its functionality. Static resources typically refer to memory-mapped
 * addresses required by the subsystem and are often embedded within the
 * firmware binary, while dynamic resources, such as shared memory in DDR,
 * are determined at runtime during the boot process.
 *
 * On Qualcomm Technologies devices, it's possible that static resources are
 * not embedded in the firmware binary and instead are provided by TrustZone.
 * However, dynamic resources are always expected to come from TrustZone. This
 * means that for Qualcomm devices, all resources (static and dynamic) will be
 * provided by TrustZone via the SMC call.
 *
 * If the remote processor firmware binary does contain static resources, they
 * should be passed in input_rt. These will be forwarded to TrustZone for
 * authentication. TrustZone will then append the dynamic resources and return
 * the complete resource table in output_rt_tzm.
 *
 * If the remote processor firmware binary does not include a resource table,
 * the caller of this function should set input_rt to NULL and input_rt_size
 * to zero.
 *
 * More documentation on the resource table data structures can be found in
 * include/linux/remoteproc.h.
 *
 * @ctx:	    PAS context
 * @input_rt:       resource table buffer which is present in the firmware
 *		    binary
 * @input_rt_size:  size of the resource table present in the firmware binary
 * @output_rt_size: TrustZone expects the caller to pass the worst-case size
 *		    for output_rt_tzm.
 *
 * Return:
 *  On success, returns a pointer to the allocated buffer containing the final
 *  resource table, and output_rt_size will hold the actual resource table
 *  size from TrustZone. The caller is responsible for freeing the buffer. On
 *  failure, returns ERR_PTR(-errno).
 */
struct resource_table *qcom_scm_pas_get_rsc_table(struct qcom_scm_pas_context *ctx,
						  void *input_rt,
						  size_t input_rt_size,
						  size_t *output_rt_size)
{
	struct resource_table empty_rsc = {};
	size_t size = SZ_16K;
	void *output_rt_tzm;
	void *input_rt_tzm;
	void *tbl_ptr;
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ERR_PTR(ret);

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	/*
	 * TrustZone cannot accept a NULL buffer as an argument, so we need to
	 * pass an input buffer - a zero-filled resource table structure -
	 * indicating that the subsystem firmware does not have a resource
	 * table.
	 */
	if (!input_rt) {
		input_rt = &empty_rsc;
		input_rt_size = sizeof(empty_rsc);
	}

	input_rt_tzm = qcom_tzmem_alloc(__scm->mempool, input_rt_size, GFP_KERNEL);
	if (!input_rt_tzm) {
		ret = -ENOMEM;
		goto disable_scm_bw;
	}

	memcpy(input_rt_tzm, input_rt, input_rt_size);

	output_rt_tzm = __qcom_scm_pas_get_rsc_table(ctx->pas_id, input_rt_tzm,
						     input_rt_size, &size);
	if (PTR_ERR(output_rt_tzm) == -EOVERFLOW)
		/* Try again with the size requested by the TZ */
		output_rt_tzm = __qcom_scm_pas_get_rsc_table(ctx->pas_id,
							     input_rt_tzm,
							     input_rt_size,
							     &size);
	if (IS_ERR(output_rt_tzm)) {
		ret = PTR_ERR(output_rt_tzm);
		goto free_input_rt;
	}

	tbl_ptr = kzalloc(size, GFP_KERNEL);
	if (!tbl_ptr) {
		qcom_tzmem_free(output_rt_tzm);
		ret = -ENOMEM;
		goto free_input_rt;
	}

	memcpy(tbl_ptr, output_rt_tzm, size);
	*output_rt_size = size;
	qcom_tzmem_free(output_rt_tzm);

free_input_rt:
	qcom_tzmem_free(input_rt_tzm);

disable_scm_bw:
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? ERR_PTR(ret) : tbl_ptr;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_get_rsc_table);
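
/*
 * Example (illustrative sketch only): retrieving the combined resource
 * table. The returned table is a plain kernel allocation, so the caller
 * frees it with kfree() when done. "fw_rsc" and "fw_rsc_size" stand for the
 * (possibly absent) table found in the firmware binary.
 *
 *	struct resource_table *table;
 *	size_t table_size;
 *
 *	table = qcom_scm_pas_get_rsc_table(ctx, fw_rsc, fw_rsc_size,
 *					   &table_size);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 *	...hand the table to the remoteproc core, then kfree(table)...
 */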

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @pas_id:	peripheral authentication service id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_prepare_and_auth_reset() - Prepare, authenticate, and reset the
 *					   remote processor
 *
 * @ctx:	Context saved during call to qcom_scm_pas_context_init()
 *
 * This function performs the necessary steps to prepare a PAS subsystem,
 * authenticate it using the provided metadata, and initiate a reset sequence.
 *
 * It should be used when Linux is in control of setting up the IOMMU hardware
 * for the remote subsystem during secure firmware loading. The preparation
 * step sets up an SHM bridge over the firmware memory before TrustZone
 * accesses the firmware memory region for authentication. The authentication
 * step verifies the integrity and authenticity of the firmware or
 * configuration using secure metadata. Finally, the reset step ensures the
 * subsystem starts in a clean and sane state.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_scm_pas_prepare_and_auth_reset(struct qcom_scm_pas_context *ctx)
{
	u64 handle;
	int ret;

	/*
	 * When Linux runs at EL1, the Gunyah hypervisor running at EL2 traps
	 * the auth_and_reset call, creates an SHM bridge on the remote
	 * subsystem memory region and then invokes a call into TrustZone to
	 * authenticate.
	 */
	if (!ctx->use_tzmem)
		return qcom_scm_pas_auth_and_reset(ctx->pas_id);

	/*
	 * When Linux runs at EL2, it must create the SHM bridge itself and
	 * then call into TrustZone to authenticate and reset.
	 */
	ret = qcom_tzmem_shm_bridge_create(ctx->mem_phys, ctx->mem_size, &handle);
	if (ret)
		return ret;

	ret = qcom_scm_pas_auth_and_reset(ctx->pas_id);
	qcom_tzmem_shm_bridge_delete(handle);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_prepare_and_auth_reset);
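
/*
 * Example (illustrative sketch only, assuming the platform code has already
 * set ctx->use_tzmem appropriately): callers invoke this in place of a bare
 * qcom_scm_pas_auth_and_reset() and let it pick the right SHM bridge path.
 *
 *	ret = qcom_scm_pas_prepare_and_auth_reset(ctx);
 *	if (ret)
 *		dev_err(dev, "failed to authenticate firmware: %d\n", ret);
 */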

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @pas_id:	peripheral authentication service id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @pas_id:	peripheral authentication service id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)

bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);

int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
		.arginfo = QCOM_SCM_ARGS(4),
		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
		.args[1] = 0xffffffff,
		.args[2] = 0xffffffff,
		.args[3] = 0xffffffff,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmap for the current set of owners, each set bit
 *            indicating a unique owner
 * @newvm:    array holding the new owners and their corresponding permission
 *            flags
 * @dest_cnt: number of owners in the new set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return ret;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
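
/*
 * Example (illustrative sketch only): handing a carveout from HLOS to the
 * modem VM. QCOM_SCM_VMID_* and QCOM_SCM_PERM_* come from the public header;
 * "region_phys" and "region_size" are placeholders.
 *
 *	struct qcom_scm_vmperm newvm[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *	u64 owners = BIT(QCOM_SCM_VMID_HLOS);
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(region_phys, region_size, &owners,
 *				  newvm, ARRAY_SIZE(newvm));
 *	if (ret)
 *		return ret;
 *
 * On success, owners now holds BIT(QCOM_SCM_VMID_MSS_MSA).
 */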

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
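
/*
 * Example (illustrative sketch only): programming a 256-bit AES-XTS key for
 * 4096-byte crypto data units (8 * 512) into a keyslot, roughly as a
 * UFS/eMMC host driver would. "slot" and "key" are placeholders.
 *
 *	ret = qcom_scm_ice_set_key(slot, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *
 * and, when the keyslot is later retired:
 *
 *	qcom_scm_ice_invalidate_key(slot);
 */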

bool qcom_scm_has_wrapped_key_support(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_DERIVE_SW_SECRET) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_GENERATE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_PREPARE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_IMPORT_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);

/**
 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
 * @eph_key: an ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes
 * @sw_secret: output buffer for the software secret
 * @sw_secret_size: size of the software secret to derive in bytes
 *
 * Derive a software secret from an ephemerally-wrapped key for software crypto
 * operations.  This is done by calling into the secure execution environment,
 * which then calls into the hardware to unwrap and derive the secret.
 *
 * For more information on sw_secret, see the "Hardware-wrapped keys" section of
 * Documentation/block/inline-encryption.rst.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
			      u8 *sw_secret, size_t sw_secret_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								  sw_secret_size,
								  GFP_KERNEL);
	if (!sw_secret_buf)
		return -ENOMEM;

	memcpy(eph_key_buf, eph_key, eph_key_size);
	desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[1] = eph_key_size;
	desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
	desc.args[3] = sw_secret_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(sw_secret, sw_secret_buf, sw_secret_size);

	memzero_explicit(eph_key_buf, eph_key_size);
	memzero_explicit(sw_secret_buf, sw_secret_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);

/**
 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes.  Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Generate a key using the built-in HW module in the SoC.  The resulting key is
 * returned wrapped with the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);

/**
 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
 * @lt_key: a long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes
 * @eph_key: output buffer for the ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes.  Must be the exact wrapped key size
 *		  used by the SoC.
 *
 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
 * added protection.  The resulting key will only be valid for the current boot.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
			     u8 *eph_key, size_t eph_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	memcpy(lt_key_buf, lt_key, lt_key_size);
	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;
	desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[3] = eph_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(eph_key, eph_key_buf, eph_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	memzero_explicit(eph_key_buf, eph_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);

/**
 * qcom_scm_import_ice_key() - Import key for storage encryption
 * @raw_key: the raw key to import
 * @raw_key_size: size of @raw_key in bytes
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes.  Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Import a raw key and return a long-term wrapped key.  Uses the SoC's HWKM to
 * wrap the raw key using the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
			    u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								raw_key_size,
								GFP_KERNEL);
	if (!raw_key_buf)
		return -ENOMEM;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	memcpy(raw_key_buf, raw_key, raw_key_size);
	desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
	desc.args[1] = raw_key_size;
	desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[3] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(raw_key_buf, raw_key_size);
	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
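
/*
 * Example (illustrative sketch only) of the wrapped-key lifecycle built from
 * the four calls above: generate (or import) a long-term key once, then on
 * each boot re-wrap it with the ephemeral key and derive the software
 * secret. WRAPPED_KEY_SIZE is a placeholder for the SoC-specific wrapped
 * key size.
 *
 *	u8 lt_key[WRAPPED_KEY_SIZE];
 *	u8 eph_key[WRAPPED_KEY_SIZE];
 *	u8 secret[32];
 *	int ret;
 *
 *	ret = qcom_scm_generate_ice_key(lt_key, sizeof(lt_key));
 *	if (!ret)
 *		ret = qcom_scm_prepare_ice_key(lt_key, sizeof(lt_key),
 *					       eph_key, sizeof(eph_key));
 *	if (!ret)
 *		ret = qcom_scm_derive_sw_secret(eph_key, sizeof(eph_key),
 *						secret, sizeof(secret));
 */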

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
1798 		return false;
1799 
1800 	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1801 						QCOM_SCM_HDCP_INVOKE);
1802 
1803 	qcom_scm_clk_disable();
1804 
1805 	return avail;
1806 }
1807 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
1808 
1809 /**
1810  * qcom_scm_hdcp_req() - Send HDCP request.
1811  * @req: HDCP request array
1812  * @req_cnt: HDCP request array count
1813  * @resp: response buffer passed to SCM
1814  *
1815  * Write HDCP register(s) through SCM.
 *
 * Return: 0 on success; -errno on failure.
1816  */
1817 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1818 {
1819 	int ret;
1820 	struct qcom_scm_desc desc = {
1821 		.svc = QCOM_SCM_SVC_HDCP,
1822 		.cmd = QCOM_SCM_HDCP_INVOKE,
1823 		.arginfo = QCOM_SCM_ARGS(10),
1824 		.args = {
1825 			req[0].addr,
1826 			req[0].val,
1827 			req[1].addr,
1828 			req[1].val,
1829 			req[2].addr,
1830 			req[2].val,
1831 			req[3].addr,
1832 			req[3].val,
1833 			req[4].addr,
1834 			req[4].val
1835 		},
1836 		.owner = ARM_SMCCC_OWNER_SIP,
1837 	};
1838 	struct qcom_scm_res res;
1839 
1840 	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1841 		return -ERANGE;
1842 
1843 	ret = qcom_scm_clk_enable();
1844 	if (ret)
1845 		return ret;
1846 
1847 	ret = qcom_scm_call(__scm->dev, &desc, &res);
1848 	*resp = res.result[0];
1849 
1850 	qcom_scm_clk_disable();
1851 
1852 	return ret;
1853 }
1854 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
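
/*
 * Illustrative usage sketch (placeholder address and value): the desc
 * initializer above consumes all five address/value pairs, so callers
 * supply a full QCOM_SCM_HDCP_MAX_REQ_CNT-sized array.
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = 0x1000, .val = 0x1 },
 *	};
 *	u32 resp;
 *	int err = qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);
 */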
1855 
1856 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1857 {
1858 	struct qcom_scm_desc desc = {
1859 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1860 		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
1861 		.arginfo = QCOM_SCM_ARGS(3),
1862 		.args[0] = sec_id,
1863 		.args[1] = ctx_num,
1864 		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1865 		.owner = ARM_SMCCC_OWNER_SIP,
1866 	};
1867 
1868 	return qcom_scm_call(__scm->dev, &desc, NULL);
1869 }
1870 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
1871 
1872 int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1873 {
1874 	struct qcom_scm_desc desc = {
1875 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1876 		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1877 		.arginfo = QCOM_SCM_ARGS(2),
1878 		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1879 		.args[1] = en,
1880 		.owner = ARM_SMCCC_OWNER_SIP,
1881 	};
1882
1884 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1885 }
1886 EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
1887 
1888 bool qcom_scm_lmh_dcvsh_available(void)
1889 {
1890 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1891 }
1892 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
1893 
1894 /*
1895  * This is only supposed to be called once, by the TZMem module. It takes the
1896  * SCM struct device as an argument and uses it to issue the call because, at
1897  * the time the SHM Bridge is enabled, SCM is not yet fully set up and does
1898  * not accept global user calls. Don't try to use the __scm pointer here.
1899  */
1900 int qcom_scm_shm_bridge_enable(struct device *scm_dev)
1901 {
1902 	int ret;
1903 
1904 	struct qcom_scm_desc desc = {
1905 		.svc = QCOM_SCM_SVC_MP,
1906 		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
1907 		.owner = ARM_SMCCC_OWNER_SIP
1908 	};
1909 
1910 	struct qcom_scm_res res;
1911 
1912 	if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
1913 					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
1914 		return -EOPNOTSUPP;
1915 
1916 	ret = qcom_scm_call(scm_dev, &desc, &res);
1917 
1918 	if (ret)
1919 		return ret;
1920 
1921 	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
1922 		return -EOPNOTSUPP;
1923 
1924 	return res.result[0];
1925 }
1926 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
1927 
1928 int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
1929 			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
1930 			       u64 ns_vmids, u64 *handle)
1931 {
1932 	struct qcom_scm_desc desc = {
1933 		.svc = QCOM_SCM_SVC_MP,
1934 		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
1935 		.owner = ARM_SMCCC_OWNER_SIP,
1936 		.args[0] = pfn_and_ns_perm_flags,
1937 		.args[1] = ipfn_and_s_perm_flags,
1938 		.args[2] = size_and_flags,
1939 		.args[3] = ns_vmids,
1940 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
1941 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
1942 	};
1943 
1944 	struct qcom_scm_res res;
1945 	int ret;
1946 
1947 	ret = qcom_scm_call(__scm->dev, &desc, &res);
1948 
1949 	if (handle && !ret)
1950 		*handle = res.result[1];
1951 
1952 	return ret ?: res.result[0];
1953 }
1954 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
1955 
1956 int qcom_scm_shm_bridge_delete(u64 handle)
1957 {
1958 	struct qcom_scm_desc desc = {
1959 		.svc = QCOM_SCM_SVC_MP,
1960 		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
1961 		.owner = ARM_SMCCC_OWNER_SIP,
1962 		.args[0] = handle,
1963 		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1964 	};
1965 
1966 	return qcom_scm_call(__scm->dev, &desc, NULL);
1967 }
1968 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
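
/*
 * Illustrative lifecycle sketch: the bit packing of the PFN/permission
 * arguments is firmware-defined and not spelled out here (the TZMem
 * allocator is expected to be the main in-tree caller constructing them).
 *
 *	u64 handle;
 *	int err;
 *
 *	err = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
 *					 size_and_flags, ns_vmids, &handle);
 *	if (err)
 *		return err;
 *	(... use the bridged memory ...)
 *	qcom_scm_shm_bridge_delete(handle);
 */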
1969 
1970 int qcom_scm_lmh_profile_change(u32 profile_id)
1971 {
1972 	struct qcom_scm_desc desc = {
1973 		.svc = QCOM_SCM_SVC_LMH,
1974 		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1975 		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1976 		.args[0] = profile_id,
1977 		.owner = ARM_SMCCC_OWNER_SIP,
1978 	};
1979 
1980 	return qcom_scm_call(__scm->dev, &desc, NULL);
1981 }
1982 EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
1983 
1984 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1985 		       u64 limit_node, u32 node_id, u64 version)
1986 {
1987 	int payload_size = 5 * sizeof(u32);
1988 
1989 	struct qcom_scm_desc desc = {
1990 		.svc = QCOM_SCM_SVC_LMH,
1991 		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1992 		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1993 					QCOM_SCM_VAL, QCOM_SCM_VAL),
1994 		.args[1] = payload_size,
1995 		.args[2] = limit_node,
1996 		.args[3] = node_id,
1997 		.args[4] = version,
1998 		.owner = ARM_SMCCC_OWNER_SIP,
1999 	};
2000 
2001 	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
2002 							       payload_size,
2003 							       GFP_KERNEL);
2004 	if (!payload_buf)
2005 		return -ENOMEM;
2006 
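	/*
	 * The firmware consumes the payload as five u32 words; the fixed
	 * constants below suggest a { function, 0, register, 1, value }
	 * layout. This is an inference from the code, not a documented
	 * format.
	 */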
2007 	payload_buf[0] = payload_fn;
2008 	payload_buf[1] = 0;
2009 	payload_buf[2] = payload_reg;
2010 	payload_buf[3] = 1;
2011 	payload_buf[4] = payload_val;
2012 
2013 	desc.args[0] = qcom_tzmem_to_phys(payload_buf);
2014 
2015 	return qcom_scm_call(__scm->dev, &desc, NULL);
2018 }
2019 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
2020 
2021 int qcom_scm_gpu_init_regs(u32 gpu_req)
2022 {
2023 	struct qcom_scm_desc desc = {
2024 		.svc = QCOM_SCM_SVC_GPU,
2025 		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
2026 		.arginfo = QCOM_SCM_ARGS(1),
2027 		.args[0] = gpu_req,
2028 		.owner = ARM_SMCCC_OWNER_SIP,
2029 	};
2030 
2031 	return qcom_scm_call(__scm->dev, &desc, NULL);
2032 }
2033 EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
2034 
2035 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
2036 {
2037 	struct device_node *tcsr;
2038 	struct device_node *np = dev->of_node;
2039 	struct resource res;
2040 	u32 offset;
2041 	int ret;
2042 
2043 	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
2044 	if (!tcsr)
2045 		return 0;
2046 
2047 	ret = of_address_to_resource(tcsr, 0, &res);
2048 	of_node_put(tcsr);
2049 	if (ret)
2050 		return ret;
2051 
2052 	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
2053 	if (ret < 0)
2054 		return ret;
2055 
2056 	*addr = res.start + offset;
2057 
2058 	return 0;
2059 }
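
/*
 * Illustrative DT snippet (offset hypothetical) for the property parsed
 * above: cell 0 is a phandle to the TCSR node, cell 1 the offset of the
 * download-mode cookie within it.
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 */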
2060 
2061 #ifdef CONFIG_QCOM_QSEECOM
2062 
2063 /* Lock for QSEECOM SCM call executions */
2064 static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
2065 
2066 static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
2067 				   struct qcom_scm_qseecom_resp *res)
2068 {
2069 	struct qcom_scm_res scm_res = {};
2070 	int status;
2071 
2072 	/*
2073 	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
2074 	 * require the respective call lock to be held.
2075 	 */
2076 	lockdep_assert_held(&qcom_scm_qseecom_call_lock);
2077 
2078 	status = qcom_scm_call(__scm->dev, desc, &scm_res);
2079 
2080 	res->result = scm_res.result[0];
2081 	res->resp_type = scm_res.result[1];
2082 	res->data = scm_res.result[2];
2083 
2084 	return status;
2088 }
2089 
2090 /**
2091  * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
2092  * @desc: SCM call descriptor.
2093  * @res:  SCM call response (output).
2094  *
2095  * Performs the QSEECOM SCM call described by @desc, returning the response in
2096  * @res.
2097  *
2098  * Return: Zero on success, nonzero on failure.
2099  */
2100 static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
2101 				 struct qcom_scm_qseecom_resp *res)
2102 {
2103 	int status;
2104 
2105 	/*
2106 	 * Note: Multiple QSEECOM SCM calls must not be executed at the same time,
2107 	 * so lock things here. This needs to be extended to callback/listener
2108 	 * handling when support for that is implemented.
2109 	 */
2110 
2111 	mutex_lock(&qcom_scm_qseecom_call_lock);
2112 	status = __qcom_scm_qseecom_call(desc, res);
2113 	mutex_unlock(&qcom_scm_qseecom_call_lock);
2114 
2115 	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
2116 		__func__, desc->owner, desc->svc, desc->cmd, res->result,
2117 		res->resp_type, res->data);
2118 
2119 	if (status) {
2120 		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
2121 		return status;
2122 	}
2123 
2124 	/*
2125 	 * TODO: Handle incomplete and blocked calls:
2126 	 *
2127 	 * Incomplete and blocked calls are not supported yet. Some devices
2128 	 * and/or commands require those, some don't. Let's warn about them
2129 	 * prominently in case someone attempts to try these commands with a
2130 	 * device/command combination that isn't supported yet.
2131 	 */
2132 	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
2133 	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
2134 
2135 	return 0;
2136 }
2137 
2138 /**
2139  * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
2140  * @version: Pointer where the QSEECOM version will be stored.
2141  *
2142  * Performs the QSEECOM SCM call that queries the QSEECOM version currently
2143  * running in the TrustZone.
2144  *
2145  * Return: Zero on success, nonzero on failure.
2146  */
2147 static int qcom_scm_qseecom_get_version(u32 *version)
2148 {
2149 	struct qcom_scm_desc desc = {};
2150 	struct qcom_scm_qseecom_resp res = {};
2151 	u32 feature = 10;
2152 	int ret;
2153 
2154 	desc.owner = QSEECOM_TZ_OWNER_SIP;
2155 	desc.svc = QSEECOM_TZ_SVC_INFO;
2156 	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
2157 	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
2158 	desc.args[0] = feature;
2159 
2160 	ret = qcom_scm_qseecom_call(&desc, &res);
2161 	if (ret)
2162 		return ret;
2163 
2164 	*version = res.result;
2165 	return 0;
2166 }
2167 
2168 /**
2169  * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
2170  * @app_name: The name of the app.
2171  * @app_id:   The returned app ID.
2172  *
2173  * Query and return the application ID of the QSEE app identified by the given
2174  * name. The returned ID is the unique identifier of the app required for
2175  * subsequent communication.
2176  *
2177  * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
2178  * loaded or could not be found.
2179  */
2180 int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
2181 {
2182 	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
2183 	unsigned long app_name_len = strlen(app_name);
2184 	struct qcom_scm_desc desc = {};
2185 	struct qcom_scm_qseecom_resp res = {};
2186 	int status;
2187 
2188 	if (app_name_len >= name_buf_size)
2189 		return -EINVAL;
2190 
2191 	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
2192 							     name_buf_size,
2193 							     GFP_KERNEL);
2194 	if (!name_buf)
2195 		return -ENOMEM;
2196 
2197 	memcpy(name_buf, app_name, app_name_len);
2198 
2199 	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
2200 	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
2201 	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
2202 	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
2203 	desc.args[0] = qcom_tzmem_to_phys(name_buf);
2204 	desc.args[1] = app_name_len;
2205 
2206 	status = qcom_scm_qseecom_call(&desc, &res);
2207 
2208 	if (status)
2209 		return status;
2210 
2211 	if (res.result == QSEECOM_RESULT_FAILURE)
2212 		return -ENOENT;
2213 
2214 	if (res.result != QSEECOM_RESULT_SUCCESS)
2215 		return -EINVAL;
2216 
2217 	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
2218 		return -EINVAL;
2219 
2220 	*app_id = res.data;
2221 	return 0;
2222 }
2223 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
2224 
2225 /**
2226  * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
2227  * @app_id:   The ID of the target app.
2228  * @req:      Request buffer sent to the app (must be TZ memory)
2229  * @req_size: Size of the request buffer.
2230  * @rsp:      Response buffer, written to by the app (must be TZ memory)
2231  * @rsp_size: Size of the response buffer.
2232  *
2233  * Sends a request to the QSEE app associated with the given ID and reads
2234  * back its response. The caller must provide two TZ memory regions, one for the
2235  * request and one for the response, and fill out the @req region with the
2236  * respective (app-specific) request data. The QSEE app reads this and returns
2237  * its response in the @rsp region.
2238  *
2239  * Return: Zero on success, nonzero on failure.
2240  */
2241 int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
2242 			      void *rsp, size_t rsp_size)
2243 {
2244 	struct qcom_scm_qseecom_resp res = {};
2245 	struct qcom_scm_desc desc = {};
2246 	phys_addr_t req_phys;
2247 	phys_addr_t rsp_phys;
2248 	int status;
2249 
2250 	req_phys = qcom_tzmem_to_phys(req);
2251 	rsp_phys = qcom_tzmem_to_phys(rsp);
2252 
2253 	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
2254 	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
2255 	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
2256 	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
2257 				     QCOM_SCM_RW, QCOM_SCM_VAL,
2258 				     QCOM_SCM_RW, QCOM_SCM_VAL);
2259 	desc.args[0] = app_id;
2260 	desc.args[1] = req_phys;
2261 	desc.args[2] = req_size;
2262 	desc.args[3] = rsp_phys;
2263 	desc.args[4] = rsp_size;
2264 
2265 	status = qcom_scm_qseecom_call(&desc, &res);
2266 
2267 	if (status)
2268 		return status;
2269 
2270 	if (res.result != QSEECOM_RESULT_SUCCESS)
2271 		return -EIO;
2272 
2273 	return 0;
2274 }
2275 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
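
/*
 * Illustrative usage sketch ("example_app" and the buffer handling are
 * placeholders): look up the app once, then exchange request/response
 * buffers allocated from TZ-capable memory, e.g. with qcom_tzmem_alloc()
 * on the pool returned by qcom_scm_get_tzmem_pool().
 *
 *	u32 app_id;
 *	int err;
 *
 *	err = qcom_scm_qseecom_app_get_id("example_app", &app_id);
 *	if (err)
 *		return err;
 *
 *	err = qcom_scm_qseecom_app_send(app_id, req, req_size,
 *					rsp, rsp_size);
 */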
2276 
2277 /*
2278  * We do not yet support re-entrant calls via the qseecom interface. To prevent
2279  * any potential issues with this, only allow validated machines for now.
2280  */
2281 static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
2282 	{ .compatible = "asus,vivobook-s15" },
2283 	{ .compatible = "asus,zenbook-a14-ux3407qa" },
2284 	{ .compatible = "asus,zenbook-a14-ux3407ra" },
2285 	{ .compatible = "dell,inspiron-14-plus-7441" },
2286 	{ .compatible = "dell,latitude-7455" },
2287 	{ .compatible = "dell,xps13-9345" },
2288 	{ .compatible = "hp,elitebook-ultra-g1q" },
2289 	{ .compatible = "hp,omnibook-x14" },
2290 	{ .compatible = "huawei,gaokun3" },
2291 	{ .compatible = "lenovo,flex-5g" },
2292 	{ .compatible = "lenovo,thinkbook-16" },
2293 	{ .compatible = "lenovo,thinkpad-t14s" },
2294 	{ .compatible = "lenovo,thinkpad-x13s", },
2295 	{ .compatible = "lenovo,yoga-slim7x" },
2296 	{ .compatible = "microsoft,arcata", },
2297 	{ .compatible = "microsoft,blackrock" },
2298 	{ .compatible = "microsoft,romulus13", },
2299 	{ .compatible = "microsoft,romulus15", },
2300 	{ .compatible = "qcom,hamoa-iot-evk" },
2301 	{ .compatible = "qcom,sc8180x-primus" },
2302 	{ .compatible = "qcom,x1e001de-devkit" },
2303 	{ .compatible = "qcom,x1e80100-crd" },
2304 	{ .compatible = "qcom,x1e80100-qcp" },
2305 	{ .compatible = "qcom,x1p42100-crd" },
2306 	{ }
2307 };
2308 
2309 static void qcom_scm_qseecom_free(void *data)
2310 {
2311 	struct platform_device *qseecom_dev = data;
2312 
2313 	platform_device_del(qseecom_dev);
2314 	platform_device_put(qseecom_dev);
2315 }
2316 
2317 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
2318 {
2319 	struct platform_device *qseecom_dev;
2320 	u32 version;
2321 	int ret;
2322 
2323 	/*
2324 	 * Note: We do two steps of validation here: First, we try to query the
2325 	 * QSEECOM version as a check to see if the interface exists on this
2326 	 * device. Second, we check against known good devices due to current
2327 	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
2328 	 *
2329 	 * Note that we deliberately do the machine check after the version
2330 	 * check so that we can log potentially supported devices. This should
2331 	 * be safe as downstream sources indicate that the version query is
2332 	 * neither blocking nor reentrant.
2333 	 */
2334 	ret = qcom_scm_qseecom_get_version(&version);
2335 	if (ret)
2336 		return 0;
2337 
2338 	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
2339 
2340 	if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
2341 		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
2342 		return 0;
2343 	}
2344 
2345 	/*
2346 	 * Set up the QSEECOM interface device. All application clients will be
2347 	 * set up and managed by its driver.
2348 	 */
2349 	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
2350 	if (!qseecom_dev)
2351 		return -ENOMEM;
2352 
2353 	qseecom_dev->dev.parent = scm->dev;
2354 
2355 	ret = platform_device_add(qseecom_dev);
2356 	if (ret) {
2357 		platform_device_put(qseecom_dev);
2358 		return ret;
2359 	}
2360 
2361 	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
2362 }
2363 
2364 #else /* CONFIG_QCOM_QSEECOM */
2365 
2366 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
2367 {
2368 	return 0;
2369 }
2370 
2371 #endif /* CONFIG_QCOM_QSEECOM */
2372 
2373 /**
2374  * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
2375  * @inbuf: start address of memory area used for inbound buffer.
2376  * @inbuf_size: size of the memory area used for inbound buffer.
2377  * @outbuf: start address of memory area used for outbound buffer.
2378  * @outbuf_size: size of the memory area used for outbound buffer.
2379  * @result: result of QTEE object invocation.
2380  * @response_type: response type returned by QTEE.
2381  *
2382  * @response_type determines how the contents of @inbuf and @outbuf
2383  * should be processed.
2384  *
2385  * Return: Zero on success, or a negative errno on failure.
2386  */
2387 int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
2388 			     phys_addr_t outbuf, size_t outbuf_size,
2389 			     u64 *result, u64 *response_type)
2390 {
2391 	struct qcom_scm_desc desc = {
2392 		.svc = QCOM_SCM_SVC_SMCINVOKE,
2393 		.cmd = QCOM_SCM_SMCINVOKE_INVOKE,
2394 		.owner = ARM_SMCCC_OWNER_TRUSTED_OS,
2395 		.args[0] = inbuf,
2396 		.args[1] = inbuf_size,
2397 		.args[2] = outbuf,
2398 		.args[3] = outbuf_size,
2399 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
2400 					 QCOM_SCM_RW, QCOM_SCM_VAL),
2401 	};
2402 	struct qcom_scm_res res;
2403 	int ret;
2404 
2405 	ret = qcom_scm_call(__scm->dev, &desc, &res);
2406 	if (ret)
2407 		return ret;
2408 
2409 	if (response_type)
2410 		*response_type = res.result[0];
2411 
2412 	if (result)
2413 		*result = res.result[1];
2414 
2415 	return 0;
2416 }
2417 EXPORT_SYMBOL_GPL(qcom_scm_qtee_invoke_smc);
2418 
2419 /**
2420  * qcom_scm_qtee_callback_response() - Submit response for callback request.
2421  * @buf: start address of memory area used for outbound buffer.
2422  * @buf_size: size of the memory area used for outbound buffer.
2423  * @result: Result of QTEE object invocation.
2424  * @response_type: Response type returned by QTEE.
2425  *
2426  * @response_type determines how the contents of @buf should be processed.
2427  *
2428  * Return: Zero on success, or a negative errno on failure.
2429  */
2430 int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
2431 				    u64 *result, u64 *response_type)
2432 {
2433 	struct qcom_scm_desc desc = {
2434 		.svc = QCOM_SCM_SVC_SMCINVOKE,
2435 		.cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
2436 		.owner = ARM_SMCCC_OWNER_TRUSTED_OS,
2437 		.args[0] = buf,
2438 		.args[1] = buf_size,
2439 		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
2440 	};
2441 	struct qcom_scm_res res;
2442 	int ret;
2443 
2444 	ret = qcom_scm_call(__scm->dev, &desc, &res);
2445 	if (ret)
2446 		return ret;
2447 
2448 	if (response_type)
2449 		*response_type = res.result[0];
2450 
2451 	if (result)
2452 		*result = res.result[1];
2453 
2454 	return 0;
2455 }
2456 EXPORT_SYMBOL_GPL(qcom_scm_qtee_callback_response);
2457 
2458 static void qcom_scm_qtee_free(void *data)
2459 {
2460 	struct platform_device *qtee_dev = data;
2461 
2462 	platform_device_unregister(qtee_dev);
2463 }
2464 
2465 static void qcom_scm_qtee_init(struct qcom_scm *scm)
2466 {
2467 	struct platform_device *qtee_dev;
2468 	u64 result, response_type;
2469 	int ret;
2470 
2471 	/*
2472 	 * Probe for smcinvoke support. This will fail due to invalid buffers,
2473 	 * but first it checks whether the call is supported by the QTEE syscall
2474 	 * handler. If it is not supported, -EIO is returned.
2475 	 */
2476 	ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
2477 	if (ret == -EIO)
2478 		return;
2479 
2480 	/* Set up the QTEE interface device. */
2481 	qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
2482 						 PLATFORM_DEVID_NONE, NULL, 0);
2483 	if (IS_ERR(qtee_dev))
2484 		return;
2485 
2486 	devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
2487 }
2488 
2489 /**
2490  * qcom_scm_is_available() - Checks if SCM is available
 *
 * Return: true if the SCM driver has probed and is ready, false otherwise.
2491  */
2492 bool qcom_scm_is_available(void)
2493 {
2494 	/* Paired with smp_store_release() in qcom_scm_probe */
2495 	return !!smp_load_acquire(&__scm);
2496 }
2497 EXPORT_SYMBOL_GPL(qcom_scm_is_available);
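
/*
 * Illustrative consumer pattern: drivers that depend on SCM services
 * typically defer probing until the SCM device has come up.
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 */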
2498 
2499 static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
2500 {
2501 	/* FW currently only supports a single wq_ctx (zero).
2502 	 * TODO: Update this logic to include dynamic allocation and lookup of
2503 	 * completion structs when FW supports more wq_ctx values.
2504 	 */
2505 	if (wq_ctx != 0) {
2506 		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
2507 		return -EINVAL;
2508 	}
2509 
2510 	return 0;
2511 }
2512 
2513 int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
2514 {
2515 	int ret;
2516 
2517 	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
2518 	if (ret)
2519 		return ret;
2520 
2521 	wait_for_completion(&__scm->waitq_comp);
2522 
2523 	return 0;
2524 }
2525 
2526 static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
2527 {
2528 	int ret;
2529 
2530 	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
2531 	if (ret)
2532 		return ret;
2533 
2534 	complete(&__scm->waitq_comp);
2535 
2536 	return 0;
2537 }
2538 
2539 static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
2540 {
2541 	int ret;
2542 	struct qcom_scm *scm = data;
2543 	u32 wq_ctx, flags, more_pending = 0;
2544 
2545 	do {
2546 		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
2547 		if (ret) {
2548 			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
2549 			goto out;
2550 		}
2551 
2552 		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
2553 			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
2554 			goto out;
2555 		}
2556 
2557 		ret = qcom_scm_waitq_wakeup(wq_ctx);
2558 		if (ret)
2559 			goto out;
2560 	} while (more_pending);
2561 
2562 out:
2563 	return IRQ_HANDLED;
2564 }
2565 
2566 static int get_download_mode(char *buffer, const struct kernel_param *kp)
2567 {
2568 	if (download_mode >= ARRAY_SIZE(download_mode_name))
2569 		return sysfs_emit(buffer, "unknown mode\n");
2570 
2571 	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
2572 }
2573 
2574 static int set_download_mode(const char *val, const struct kernel_param *kp)
2575 {
2576 	bool tmp;
2577 	int ret;
2578 
2579 	ret = sysfs_match_string(download_mode_name, val);
2580 	if (ret < 0) {
2581 		ret = kstrtobool(val, &tmp);
2582 		if (ret < 0) {
2583 			pr_err("qcom_scm: err: %d\n", ret);
2584 			return ret;
2585 		}
2586 
2587 		ret = tmp ? 1 : 0;
2588 	}
2589 
2590 	download_mode = ret;
2591 	if (__scm)
2592 		qcom_scm_set_download_mode(download_mode);
2593 
2594 	return 0;
2595 }
2596 
2597 static const struct kernel_param_ops download_mode_param_ops = {
2598 	.get = get_download_mode,
2599 	.set = set_download_mode,
2600 };
2601 
2602 module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
2603 MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode; full,mini selects both full and minidump modes together");
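
/*
 * Illustrative examples: for a built-in driver the mode can be chosen on
 * the kernel command line, or changed at runtime through sysfs:
 *
 *	qcom_scm.download_mode=full
 *	echo mini > /sys/module/qcom_scm/parameters/download_mode
 */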
2604 
2605 static int qcom_scm_probe(struct platform_device *pdev)
2606 {
2607 	struct qcom_tzmem_pool_config pool_config;
2608 	struct qcom_scm *scm;
2609 	int irq, ret;
2610 
2611 	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
2612 	if (!scm)
2613 		return -ENOMEM;
2614 
2615 	scm->dev = &pdev->dev;
2616 	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
2617 	if (ret < 0)
2618 		return ret;
2619 
2620 	init_completion(&scm->waitq_comp);
2621 	mutex_init(&scm->scm_bw_lock);
2622 
2623 	scm->path = devm_of_icc_get(&pdev->dev, NULL);
2624 	if (IS_ERR(scm->path))
2625 		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
2626 				     "failed to acquire interconnect path\n");
2627 
2628 	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
2629 	if (IS_ERR(scm->core_clk))
2630 		return PTR_ERR(scm->core_clk);
2631 
2632 	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
2633 	if (IS_ERR(scm->iface_clk))
2634 		return PTR_ERR(scm->iface_clk);
2635 
2636 	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
2637 	if (IS_ERR(scm->bus_clk))
2638 		return PTR_ERR(scm->bus_clk);
2639 
2640 	scm->reset.ops = &qcom_scm_pas_reset_ops;
2641 	scm->reset.nr_resets = 1;
2642 	scm->reset.of_node = pdev->dev.of_node;
2643 	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
2644 	if (ret)
2645 		return ret;
2646 
2647 	/* vote for max clk rate for highest performance */
2648 	ret = clk_set_rate(scm->core_clk, INT_MAX);
2649 	if (ret)
2650 		return ret;
2651 
2652 	ret = of_reserved_mem_device_init(scm->dev);
2653 	if (ret && ret != -ENODEV)
2654 		return dev_err_probe(scm->dev, ret,
2655 				     "Failed to setup the reserved memory region for TZ mem\n");
2656 
2657 	ret = qcom_tzmem_enable(scm->dev);
2658 	if (ret)
2659 		return dev_err_probe(scm->dev, ret,
2660 				     "Failed to enable the TrustZone memory allocator\n");
2661 
2662 	memset(&pool_config, 0, sizeof(pool_config));
2663 	pool_config.initial_size = 0;
2664 	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
2665 	pool_config.max_size = SZ_256K;
2666 
2667 	scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
2668 	if (IS_ERR(scm->mempool))
2669 		return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
2670 				     "Failed to create the SCM memory pool\n");
2671 
2672 	irq = platform_get_irq_optional(pdev, 0);
2673 	if (irq < 0) {
2674 		if (irq != -ENXIO)
2675 			return irq;
2676 	} else {
2677 		ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
2678 						IRQF_ONESHOT, "qcom-scm", scm);
2679 		if (ret < 0)
2680 			return dev_err_probe(scm->dev, ret,
2681 					     "Failed to request qcom-scm irq\n");
2682 	}
2683 
2684 	/*
2685 	 * Paired with smp_load_acquire() in qcom_scm_is_available().
2686 	 *
2687 	 * This marks the SCM API as ready to accept user calls and can only
2688 	 * be called after the TrustZone memory pool is initialized and the
2689 	 * waitqueue interrupt requested.
2690 	 */
2691 	smp_store_release(&__scm, scm);
2692 
2693 	__get_convention();
2694 
2695 	/*
2696 	 * If "download mode" is requested, from this point on warmboot
2697 	 * will cause the boot stages to enter download mode, unless
2698 	 * disabled below by a clean shutdown/reboot.
2699 	 */
2700 	qcom_scm_set_download_mode(download_mode);
2701 
2702 	/*
2703 	 * Disable SDI if DT indicates that it is enabled by default.
2704 	 */
2705 	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
2706 		qcom_scm_disable_sdi();
2707 
2708 	/*
2709 	 * Initialize the QSEECOM interface.
2710 	 *
2711 	 * Note: QSEECOM is fairly self-contained and this only adds the
2712 	 * interface device (the driver of which does most of the heavy
2713 	 * lifting). So any errors returned here should be either -ENOMEM or
2714 	 * -EINVAL (with the latter only in case there's a bug in our code).
2715 	 * This means that there is no need to bring down the whole SCM driver.
2716 	 * Just log the error instead and let SCM live.
2717 	 */
2718 	ret = qcom_scm_qseecom_init(scm);
2719 	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
2720 
2721 	/* Initialize the QTEE object interface. */
2722 	qcom_scm_qtee_init(scm);
2723 
2724 	return 0;
2725 }
2726 
2727 static void qcom_scm_shutdown(struct platform_device *pdev)
2728 {
2729 	/* Clean shutdown, disable download mode to allow normal restart */
2730 	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
2731 }
2732 
2733 static const struct of_device_id qcom_scm_dt_match[] = {
2734 	{ .compatible = "qcom,scm" },
2735 
2736 	/* Legacy entries kept for backwards compatibility */
2737 	{ .compatible = "qcom,scm-apq8064" },
2738 	{ .compatible = "qcom,scm-apq8084" },
2739 	{ .compatible = "qcom,scm-ipq4019" },
2740 	{ .compatible = "qcom,scm-msm8953" },
2741 	{ .compatible = "qcom,scm-msm8974" },
2742 	{ .compatible = "qcom,scm-msm8996" },
2743 	{}
2744 };
2745 MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
2746 
2747 static struct platform_driver qcom_scm_driver = {
2748 	.driver = {
2749 		.name	= "qcom_scm",
2750 		.of_match_table = qcom_scm_dt_match,
2751 		.suppress_bind_attrs = true,
2752 	},
2753 	.probe = qcom_scm_probe,
2754 	.shutdown = qcom_scm_shutdown,
2755 };
2756 
2757 static int __init qcom_scm_init(void)
2758 {
2759 	return platform_driver_register(&qcom_scm_driver);
2760 }
2761 subsys_initcall(qcom_scm_init);
2762 
2763 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
2764 MODULE_LICENSE("GPL v2");
2765