// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP			= 2,
	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
	QSEECOM_TZ_OWNER_QSEE_OS		= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND			= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE		64
#define SHMBRIDGE_RESULT_NOTSUPP		4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP]	= "off",
	[QCOM_DLOAD_FULLDUMP]	= "full",
	[QCOM_DLOAD_MINIDUMP]	= "mini",
	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit; otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmware versions didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on those versions. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:        Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be triggered by cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
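
/*
 * Illustrative usage sketch (not part of this file): a cpuidle or hotplug
 * path would typically register its resume entry point once at probe time.
 * "my_secondary_resume" is a hypothetical assembly entry point.
 *
 *	ret = qcom_scm_set_warm_boot_addr(my_secondary_resume);
 *	if (ret)
 *		pr_err("failed to set warm boot address: %d\n", ret);
 */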

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is the terminal call for powering down a CPU. If an interrupt is
 * pending, control returns from this function; otherwise the CPU jumps to
 * the warm boot entry point set for it upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation; it must be released by the caller via
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the SCM call, memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux, so we must not do it ourselves; hence
	 * the TZMem allocator is not used here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Return: 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
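
/*
 * Illustrative flow sketch (not part of this file): a remoteproc driver
 * typically drives the PAS helpers above in this order when booting a
 * remote processor. "MY_PAS_ID", "fw_metadata" and the address/size values
 * are hypothetical.
 *
 *	struct qcom_scm_pas_metadata pas_ctx = {};
 *	int ret;
 *
 *	ret = qcom_scm_pas_init_image(MY_PAS_ID, fw_metadata,
 *				      fw_metadata_len, &pas_ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = qcom_scm_pas_mem_setup(MY_PAS_ID, mem_phys, mem_size);
 *	if (!ret)
 *		ret = qcom_scm_pas_auth_and_reset(MY_PAS_ID);
 *
 *	qcom_scm_pas_metadata_release(&pas_ctx);
 *
 * A matching qcom_scm_pas_shutdown(MY_PAS_ID) later stops the remote
 * processor.
 */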

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
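
/*
 * Illustrative usage sketch (not part of this file): these two helpers give
 * secure-world access to registers Linux cannot touch directly, and are
 * combined into a read-modify-write by qcom_scm_io_rmw() above. The register
 * address below is hypothetical.
 *
 *	unsigned int val;
 *
 *	if (!qcom_scm_io_readl(0x01fd3000, &val))
 *		qcom_scm_io_writel(0x01fd3000, val | BIT(0));
 */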

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)

bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);

int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
		.arginfo = QCOM_SCM_ARGS(4),
		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
		.args[1] = 0xffffffff,
		.args[2] = 0xffffffff,
		.args[3] = 0xffffffff,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmap for the current set of owners, each set bit
 *            indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill in the source vmid details */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill in the details of the memory buffer to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill in the details of the next set of vmids */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
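
/*
 * Illustrative usage sketch (not part of this file): handing a physically
 * contiguous buffer to the modem VM and recording the new owners. The
 * buffer is hypothetical; the VMID/permission macros come from
 * <linux/firmware/qcom/qcom_scm.h>.
 *
 *	struct qcom_scm_vmperm perms[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *	u64 srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(buf_phys, buf_size, &srcvm, perms,
 *				  ARRAY_SIZE(perms));
 *
 * On success, srcvm holds the new owner set and must be passed back in to
 * return ownership to Linux later.
 */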

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
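
/*
 * Illustrative usage sketch (not part of this file): programming a raw
 * AES-256-XTS key (64 bytes) into keyslot 3 for 4096-byte data units
 * (data_unit_size is in 512-byte units, so 8). The key buffer is
 * hypothetical.
 *
 *	err = qcom_scm_ice_set_key(3, raw_key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	qcom_scm_ice_invalidate_key(3);
 */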

bool qcom_scm_has_wrapped_key_support(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_DERIVE_SW_SECRET) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_GENERATE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_PREPARE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_IMPORT_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);

/**
 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
 * @eph_key: an ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes
 * @sw_secret: output buffer for the software secret
 * @sw_secret_size: size of the software secret to derive in bytes
 *
 * Derive a software secret from an ephemerally-wrapped key for software crypto
 * operations.  This is done by calling into the secure execution environment,
 * which then calls into the hardware to unwrap and derive the secret.
 *
 * For more information on sw_secret, see the "Hardware-wrapped keys" section of
 * Documentation/block/inline-encryption.rst.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
			      u8 *sw_secret, size_t sw_secret_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								  sw_secret_size,
								  GFP_KERNEL);
	if (!sw_secret_buf)
		return -ENOMEM;

	memcpy(eph_key_buf, eph_key, eph_key_size);
	desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[1] = eph_key_size;
	desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
	desc.args[3] = sw_secret_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(sw_secret, sw_secret_buf, sw_secret_size);

	memzero_explicit(eph_key_buf, eph_key_size);
	memzero_explicit(sw_secret_buf, sw_secret_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);

/**
 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes.  Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Generate a key using the built-in HW module in the SoC.  The resulting key is
 * returned wrapped with the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);

/**
 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
 * @lt_key: a long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes
 * @eph_key: output buffer for the ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes.  Must be the exact wrapped key size
 *		  used by the SoC.
 *
 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
 * added protection.  The resulting key will only be valid for the current boot.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
			     u8 *eph_key, size_t eph_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	memcpy(lt_key_buf, lt_key, lt_key_size);
	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;
	desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[3] = eph_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(eph_key, eph_key_buf, eph_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	memzero_explicit(eph_key_buf, eph_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);

/**
 * qcom_scm_import_ice_key() - Import key for storage encryption
 * @raw_key: the raw key to import
 * @raw_key_size: size of @raw_key in bytes
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes.  Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Import a raw key and return a long-term wrapped key.  Uses the SoC's HWKM to
 * wrap the raw key using the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
			    u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								raw_key_size,
								GFP_KERNEL);
	if (!raw_key_buf)
		return -ENOMEM;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	memcpy(raw_key_buf, raw_key, raw_key_size);
	desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
	desc.args[1] = raw_key_size;
	desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[3] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(raw_key_buf, raw_key_size);
	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
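
/*
 * Illustrative lifecycle sketch (not part of this file) tying the
 * wrapped-key helpers above together. Buffer names and WRAPPED_KEY_SIZE are
 * hypothetical and must match the exact wrapped key size used by the SoC.
 *
 *	u8 lt_key[WRAPPED_KEY_SIZE], eph_key[WRAPPED_KEY_SIZE];
 *
 *	// once, at provisioning time: create (or import) a long-term key
 *	qcom_scm_generate_ice_key(lt_key, sizeof(lt_key));
 *
 *	// every boot: re-wrap it with the per-boot ephemeral key
 *	qcom_scm_prepare_ice_key(lt_key, sizeof(lt_key),
 *				 eph_key, sizeof(eph_key));
 *
 * The ephemerally-wrapped key can then be programmed into ICE, and
 * qcom_scm_derive_sw_secret() yields a secret for software crypto use.
 */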

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
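
/*
 * Illustrative usage sketch (not part of this file): writing a single HDCP
 * register through TrustZone. The register offset is hypothetical; unused
 * request slots stay zeroed.
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = 0x01234000, .val = 0x1 },
 *	};
 *	u32 resp;
 *
 *	err = qcom_scm_hdcp_req(req, 1, &resp);
 */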

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_shm_bridge_enable(void)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (ret)
		return ret;

	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
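
/*
 * Illustrative usage sketch (not part of this file): creating and tearing
 * down an SHM bridge. The packed argument values encode pfn/permission and
 * size/flag fields per the firmware interface; the values shown are
 * hypothetical placeholders.
 *
 *	u64 handle;
 *	int err;
 *
 *	err = qcom_scm_shm_bridge_enable();
 *	if (!err)
 *		err = qcom_scm_shm_bridge_create(dev, pfn_and_ns_perm,
 *						 ipfn_and_s_perm,
 *						 size_and_flags, ns_vmids,
 *						 &handle);
 *	...
 *	qcom_scm_shm_bridge_delete(dev, handle);
 */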

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
1766 
1767 #ifdef CONFIG_QCOM_QSEECOM
1768 
1769 /* Lock for QSEECOM SCM call executions */
1770 static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
1771 
1772 static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1773 				   struct qcom_scm_qseecom_resp *res)
1774 {
1775 	struct qcom_scm_res scm_res = {};
1776 	int status;
1777 
1778 	/*
1779 	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
1780 	 * require the respective call lock to be held.
1781 	 */
1782 	lockdep_assert_held(&qcom_scm_qseecom_call_lock);
1783 
1784 	status = qcom_scm_call(__scm->dev, desc, &scm_res);
1785 
1786 	res->result = scm_res.result[0];
1787 	res->resp_type = scm_res.result[1];
1788 	res->data = scm_res.result[2];
1789 
1790 	if (status)
1791 		return status;
1792 
1793 	return 0;
1794 }
1795 
1796 /**
1797  * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
1798  * @desc: SCM call descriptor.
1799  * @res:  SCM call response (output).
1800  *
1801  * Performs the QSEECOM SCM call described by @desc, returning the response in
1802  * @res.
1803  *
1804  * Return: Zero on success, nonzero on failure.
1805  */
1806 static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1807 				 struct qcom_scm_qseecom_resp *res)
1808 {
1809 	int status;
1810 
1811 	/*
1812 	 * Note: Multiple QSEECOM SCM calls should not be executed at the same time,
1813 	 * so lock things here. This needs to be extended to callback/listener
1814 	 * handling when support for that is implemented.
1815 	 */
1816 
1817 	mutex_lock(&qcom_scm_qseecom_call_lock);
1818 	status = __qcom_scm_qseecom_call(desc, res);
1819 	mutex_unlock(&qcom_scm_qseecom_call_lock);
1820 
1821 	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
1822 		__func__, desc->owner, desc->svc, desc->cmd, res->result,
1823 		res->resp_type, res->data);
1824 
1825 	if (status) {
1826 		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
1827 		return status;
1828 	}
1829 
1830 	/*
1831 	 * TODO: Handle incomplete and blocked calls:
1832 	 *
1833 	 * Incomplete and blocked calls are not supported yet. Some devices
1834 	 * and/or commands require those, some don't. Let's warn about them
1835 	 * prominently in case someone attempts to use these commands with a
1836 	 * device/command combination that isn't supported yet.
1837 	 */
1838 	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
1839 	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
1840 
1841 	return 0;
1842 }
1843 
1844 /**
1845  * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1846  * @version: Pointer where the QSEECOM version will be stored.
1847  *
1848  * Performs the QSEECOM SCM call that queries the QSEECOM version currently
1849  * running in TrustZone.
1850  *
1851  * Return: Zero on success, nonzero on failure.
1852  */
1853 static int qcom_scm_qseecom_get_version(u32 *version)
1854 {
1855 	struct qcom_scm_desc desc = {};
1856 	struct qcom_scm_qseecom_resp res = {};
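	/* 10 is the feature id used downstream to request the QSEECOM version */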
1857 	u32 feature = 10;
1858 	int ret;
1859 
1860 	desc.owner = QSEECOM_TZ_OWNER_SIP;
1861 	desc.svc = QSEECOM_TZ_SVC_INFO;
1862 	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
1863 	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
1864 	desc.args[0] = feature;
1865 
1866 	ret = qcom_scm_qseecom_call(&desc, &res);
1867 	if (ret)
1868 		return ret;
1869 
1870 	*version = res.result;
1871 	return 0;
1872 }
1873 
1874 /**
1875  * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
1876  * @app_name: The name of the app.
1877  * @app_id:   The returned app ID.
1878  *
1879  * Query and return the application ID of the QSEE app identified by the given
1880  * name. This returned ID is the unique identifier of the app required for
1881  * subsequent communication.
1882  *
1883  * Return: Zero on success, -ENOENT if the app has not been loaded or could
1884  * not be found, or another nonzero error code on failure.
1885  */
1886 int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
1887 {
1888 	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
1889 	unsigned long app_name_len = strlen(app_name);
1890 	struct qcom_scm_desc desc = {};
1891 	struct qcom_scm_qseecom_resp res = {};
1892 	int status;
1893 
1894 	if (app_name_len >= name_buf_size)
1895 		return -EINVAL;
1896 
1897 	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1898 							     name_buf_size,
1899 							     GFP_KERNEL);
1900 	if (!name_buf)
1901 		return -ENOMEM;
1902 
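	/* The length is passed explicitly below; no NUL terminator is copied. */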
1903 	memcpy(name_buf, app_name, app_name_len);
1904 
1905 	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
1906 	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
1907 	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
1908 	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
1909 	desc.args[0] = qcom_tzmem_to_phys(name_buf);
1910 	desc.args[1] = app_name_len;
1911 
1912 	status = qcom_scm_qseecom_call(&desc, &res);
1913 
1914 	if (status)
1915 		return status;
1916 
1917 	if (res.result == QSEECOM_RESULT_FAILURE)
1918 		return -ENOENT;
1919 
1920 	if (res.result != QSEECOM_RESULT_SUCCESS)
1921 		return -EINVAL;
1922 
1923 	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
1924 		return -EINVAL;
1925 
1926 	*app_id = res.data;
1927 	return 0;
1928 }
1929 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
1930 
1931 /**
1932  * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
1933  * @app_id:   The ID of the target app.
1934  * @req:      Request buffer sent to the app (must be TZ memory)
1935  * @req_size: Size of the request buffer.
1936  * @rsp:      Response buffer, written to by the app (must be TZ memory)
1937  * @rsp_size: Size of the response buffer.
1938  *
1939  * Sends a request to the QSEE app associated with the given ID and reads back
1940  * its response. The caller must provide two TZ memory regions, one for the
1941  * request and one for the response, and fill out the @req region with the
1942  * respective (app-specific) request data. The QSEE app reads this and returns
1943  * its response in the @rsp region.
1944  *
1945  * Return: Zero on success, nonzero on failure.
1946  */
1947 int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
1948 			      void *rsp, size_t rsp_size)
1949 {
1950 	struct qcom_scm_qseecom_resp res = {};
1951 	struct qcom_scm_desc desc = {};
1952 	phys_addr_t req_phys;
1953 	phys_addr_t rsp_phys;
1954 	int status;
1955 
1956 	req_phys = qcom_tzmem_to_phys(req);
1957 	rsp_phys = qcom_tzmem_to_phys(rsp);
1958 
1959 	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
1960 	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
1961 	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
1962 	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
1963 				     QCOM_SCM_RW, QCOM_SCM_VAL,
1964 				     QCOM_SCM_RW, QCOM_SCM_VAL);
1965 	desc.args[0] = app_id;
1966 	desc.args[1] = req_phys;
1967 	desc.args[2] = req_size;
1968 	desc.args[3] = rsp_phys;
1969 	desc.args[4] = rsp_size;
1970 
1971 	status = qcom_scm_qseecom_call(&desc, &res);
1972 
1973 	if (status)
1974 		return status;
1975 
1976 	if (res.result != QSEECOM_RESULT_SUCCESS)
1977 		return -EIO;
1978 
1979 	return 0;
1980 }
1981 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
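
/*
 * Example (sketch): both buffers must come from a TZ memory pool so that
 * qcom_tzmem_to_phys() can resolve them. The request/response structs,
 * fill_request() and the pool below are hypothetical, app-specific details:
 *
 *	struct my_req *req __free(qcom_tzmem) =
 *		qcom_tzmem_alloc(pool, sizeof(*req), GFP_KERNEL);
 *	struct my_rsp *rsp __free(qcom_tzmem) =
 *		qcom_tzmem_alloc(pool, sizeof(*rsp), GFP_KERNEL);
 *
 *	if (!req || !rsp)
 *		return -ENOMEM;
 *
 *	fill_request(req);
 *	ret = qcom_scm_qseecom_app_send(app_id, req, sizeof(*req),
 *					rsp, sizeof(*rsp));
 */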
1982 
1983 /*
1984  * We do not yet support re-entrant calls via the qseecom interface. To prevent
1985  * any potential issues with this, only allow validated machines for now.
1986  */
1987 static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
1988 	{ .compatible = "asus,vivobook-s15" },
1989 	{ .compatible = "dell,xps13-9345" },
1990 	{ .compatible = "hp,omnibook-x14" },
1991 	{ .compatible = "huawei,gaokun3" },
1992 	{ .compatible = "lenovo,flex-5g" },
1993 	{ .compatible = "lenovo,thinkpad-t14s" },
1994 	{ .compatible = "lenovo,thinkpad-x13s" },
1995 	{ .compatible = "lenovo,yoga-slim7x" },
1996 	{ .compatible = "microsoft,arcata" },
1997 	{ .compatible = "microsoft,blackrock" },
1998 	{ .compatible = "microsoft,romulus13" },
1999 	{ .compatible = "microsoft,romulus15" },
2000 	{ .compatible = "qcom,sc8180x-primus" },
2001 	{ .compatible = "qcom,x1e001de-devkit" },
2002 	{ .compatible = "qcom,x1e80100-crd" },
2003 	{ .compatible = "qcom,x1e80100-qcp" },
2004 	{ .compatible = "qcom,x1p42100-crd" },
2005 	{ }
2006 };
2007 
2008 static bool qcom_scm_qseecom_machine_is_allowed(void)
2009 {
2010 	struct device_node *np;
2011 	bool match;
2012 
2013 	np = of_find_node_by_path("/");
2014 	if (!np)
2015 		return false;
2016 
2017 	match = of_match_node(qcom_scm_qseecom_allowlist, np);
2018 	of_node_put(np);
2019 
2020 	return match;
2021 }
2022 
2023 static void qcom_scm_qseecom_free(void *data)
2024 {
2025 	struct platform_device *qseecom_dev = data;
2026 
2027 	platform_device_del(qseecom_dev);
2028 	platform_device_put(qseecom_dev);
2029 }
2030 
2031 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
2032 {
2033 	struct platform_device *qseecom_dev;
2034 	u32 version;
2035 	int ret;
2036 
2037 	/*
2038 	 * Note: We do two steps of validation here: First, we try to query the
2039 	 * QSEECOM version as a check to see if the interface exists on this
2040 	 * device. Second, we check against known good devices due to current
2041 	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
2042 	 *
2043 	 * Note that we deliberately do the machine check after the version
2044 	 * check so that we can log potentially supported devices. This should
2045 	 * be safe as downstream sources indicate that the version query is
2046 	 * neither blocking nor reentrant.
2047 	 */
2048 	ret = qcom_scm_qseecom_get_version(&version);
2049 	if (ret)
2050 		return 0;
2051 
2052 	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
2053 
2054 	if (!qcom_scm_qseecom_machine_is_allowed()) {
2055 		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
2056 		return 0;
2057 	}
2058 
2059 	/*
2060 	 * Set up the QSEECOM interface device. All application clients are set
2061 	 * up and managed by that device's driver.
2062 	 */
2063 	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
2064 	if (!qseecom_dev)
2065 		return -ENOMEM;
2066 
2067 	qseecom_dev->dev.parent = scm->dev;
2068 
2069 	ret = platform_device_add(qseecom_dev);
2070 	if (ret) {
2071 		platform_device_put(qseecom_dev);
2072 		return ret;
2073 	}
2074 
2075 	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
2076 }
2077 
2078 #else /* CONFIG_QCOM_QSEECOM */
2079 
2080 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
2081 {
2082 	return 0;
2083 }
2084 
2085 #endif /* CONFIG_QCOM_QSEECOM */
2086 
2087 /**
2088  * qcom_scm_is_available() - Checks if SCM is available
 *
 * Return: true once the SCM driver has probed and SCM calls can be made.
2089  */
2090 bool qcom_scm_is_available(void)
2091 {
2092 	/* Paired with smp_store_release() in qcom_scm_probe */
2093 	return !!smp_load_acquire(&__scm);
2094 }
2095 EXPORT_SYMBOL_GPL(qcom_scm_is_available);
2096 
2097 static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
2098 {
2099 	/* FW currently only supports a single wq_ctx (zero).
2100 	 * TODO: Update this logic to include dynamic allocation and lookup of
2101 	 * completion structs when FW supports more wq_ctx values.
2102 	 */
2103 	if (wq_ctx != 0) {
2104 		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
2105 		return -EINVAL;
2106 	}
2107 
2108 	return 0;
2109 }
2110 
2111 int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
2112 {
2113 	int ret;
2114 
2115 	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
2116 	if (ret)
2117 		return ret;
2118 
2119 	wait_for_completion(&__scm->waitq_comp);
2120 
2121 	return 0;
2122 }
2123 
2124 static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
2125 {
2126 	int ret;
2127 
2128 	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
2129 	if (ret)
2130 		return ret;
2131 
2132 	complete(&__scm->waitq_comp);
2133 
2134 	return 0;
2135 }
2136 
2137 static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
2138 {
2139 	int ret;
2140 	struct qcom_scm *scm = data;
2141 	u32 wq_ctx, flags, more_pending = 0;
2142 
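	/* Drain every wakeup event the firmware reports as pending. */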
2143 	do {
2144 		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
2145 		if (ret) {
2146 			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
2147 			goto out;
2148 		}
2149 
2150 		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
2151 			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
2152 			goto out;
2153 		}
2154 
2155 		ret = qcom_scm_waitq_wakeup(wq_ctx);
2156 		if (ret)
2157 			goto out;
2158 	} while (more_pending);
2159 
2160 out:
2161 	return IRQ_HANDLED;
2162 }
2163 
2164 static int get_download_mode(char *buffer, const struct kernel_param *kp)
2165 {
2166 	if (download_mode >= ARRAY_SIZE(download_mode_name))
2167 		return sysfs_emit(buffer, "unknown mode\n");
2168 
2169 	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
2170 }
2171 
2172 static int set_download_mode(const char *val, const struct kernel_param *kp)
2173 {
2174 	bool tmp;
2175 	int ret;
2176 
2177 	ret = sysfs_match_string(download_mode_name, val);
2178 	if (ret < 0) {
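		/*
		 * Not one of the named modes: fall back to boolean syntax,
		 * where false selects "off" and true selects "full".
		 */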
2179 		ret = kstrtobool(val, &tmp);
2180 		if (ret < 0) {
2181 			pr_err("qcom_scm: err: %d\n", ret);
2182 			return ret;
2183 		}
2184 
2185 		ret = tmp ? 1 : 0;
2186 	}
2187 
2188 	download_mode = ret;
2189 	if (__scm)
2190 		qcom_scm_set_download_mode(download_mode);
2191 
2192 	return 0;
2193 }
2194 
2195 static const struct kernel_param_ops download_mode_param_ops = {
2196 	.get = get_download_mode,
2197 	.set = set_download_mode,
2198 };
2199 
2200 module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
2201 MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode, and full,mini for both full and minidump modes together");
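
/*
 * Example: boot with qcom_scm.download_mode=full on the kernel command line,
 * or switch at runtime with:
 *   echo mini > /sys/module/qcom_scm/parameters/download_mode
 */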
2202 
2203 static int qcom_scm_probe(struct platform_device *pdev)
2204 {
2205 	struct qcom_tzmem_pool_config pool_config;
2206 	struct qcom_scm *scm;
2207 	int irq, ret;
2208 
2209 	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
2210 	if (!scm)
2211 		return -ENOMEM;
2212 
2213 	scm->dev = &pdev->dev;
2214 	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
2215 	if (ret < 0)
2216 		return ret;
2217 
2218 	init_completion(&scm->waitq_comp);
2219 	mutex_init(&scm->scm_bw_lock);
2220 
2221 	scm->path = devm_of_icc_get(&pdev->dev, NULL);
2222 	if (IS_ERR(scm->path))
2223 		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
2224 				     "failed to acquire interconnect path\n");
2225 
2226 	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
2227 	if (IS_ERR(scm->core_clk))
2228 		return PTR_ERR(scm->core_clk);
2229 
2230 	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
2231 	if (IS_ERR(scm->iface_clk))
2232 		return PTR_ERR(scm->iface_clk);
2233 
2234 	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
2235 	if (IS_ERR(scm->bus_clk))
2236 		return PTR_ERR(scm->bus_clk);
2237 
2238 	scm->reset.ops = &qcom_scm_pas_reset_ops;
2239 	scm->reset.nr_resets = 1;
2240 	scm->reset.of_node = pdev->dev.of_node;
2241 	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
2242 	if (ret)
2243 		return ret;
2244 
2245 	/* vote for max clk rate for highest performance */
2246 	ret = clk_set_rate(scm->core_clk, INT_MAX);
2247 	if (ret)
2248 		return ret;
2249 
2250 	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
2251 	smp_store_release(&__scm, scm);
2252 
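	/* The wait-queue interrupt is optional; -ENXIO means none is described. */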
2253 	irq = platform_get_irq_optional(pdev, 0);
2254 	if (irq < 0) {
2255 		if (irq != -ENXIO) {
2256 			ret = irq;
2257 			goto err;
2258 		}
2259 	} else {
2260 		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
2261 						IRQF_ONESHOT, "qcom-scm", __scm);
2262 		if (ret < 0) {
2263 			dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
2264 			goto err;
2265 		}
2266 	}
2267 
2268 	__get_convention();
2269 
2270 	/*
2271 	 * If "download mode" is requested, from this point on warmboot
2272 	 * will cause the boot stages to enter download mode, unless
2273 	 * disabled below by a clean shutdown/reboot.
2274 	 */
2275 	qcom_scm_set_download_mode(download_mode);
2276 
2277 	/*
2278 	 * Disable SDI if the DT indicates it is enabled by default, or
	 * whenever download mode is off.
2279 	 */
2280 	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
2281 		qcom_scm_disable_sdi();
2282 
2283 	ret = of_reserved_mem_device_init(__scm->dev);
2284 	if (ret && ret != -ENODEV) {
2285 		dev_err_probe(__scm->dev, ret,
2286 			      "Failed to setup the reserved memory region for TZ mem\n");
2287 		goto err;
2288 	}
2289 
2290 	ret = qcom_tzmem_enable(__scm->dev);
2291 	if (ret) {
2292 		dev_err_probe(__scm->dev, ret,
2293 			      "Failed to enable the TrustZone memory allocator\n");
2294 		goto err;
2295 	}
2296 
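	/* On-demand pool: starts empty and grows as needed, capped at SZ_256K. */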
2297 	memset(&pool_config, 0, sizeof(pool_config));
2298 	pool_config.initial_size = 0;
2299 	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
2300 	pool_config.max_size = SZ_256K;
2301 
2302 	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
2303 	if (IS_ERR(__scm->mempool)) {
2304 		ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
2305 				    "Failed to create the SCM memory pool\n");
2306 		goto err;
2307 	}
2308 
2309 	/*
2310 	 * Initialize the QSEECOM interface.
2311 	 *
2312 	 * Note: QSEECOM is fairly self-contained and this only adds the
2313 	 * interface device (the driver of which does most of the heavy
2314 	 * lifting). So any errors returned here should be either -ENOMEM or
2315 	 * -EINVAL (with the latter only in case there's a bug in our code).
2316 	 * This means that there is no need to bring down the whole SCM driver.
2317 	 * Just log the error instead and let SCM live.
2318 	 */
2319 	ret = qcom_scm_qseecom_init(scm);
2320 	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
2321 
2322 	return 0;
2323 
2324 err:
2325 	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
2326 	smp_store_release(&__scm, NULL);
2327 
2328 	return ret;
2329 }
2330 
2331 static void qcom_scm_shutdown(struct platform_device *pdev)
2332 {
2333 	/* Clean shutdown, disable download mode to allow normal restart */
2334 	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
2335 }
2336 
2337 static const struct of_device_id qcom_scm_dt_match[] = {
2338 	{ .compatible = "qcom,scm" },
2339 
2340 	/* Legacy entries kept for backwards compatibility */
2341 	{ .compatible = "qcom,scm-apq8064" },
2342 	{ .compatible = "qcom,scm-apq8084" },
2343 	{ .compatible = "qcom,scm-ipq4019" },
2344 	{ .compatible = "qcom,scm-msm8953" },
2345 	{ .compatible = "qcom,scm-msm8974" },
2346 	{ .compatible = "qcom,scm-msm8996" },
2347 	{}
2348 };
2349 MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
2350 
2351 static struct platform_driver qcom_scm_driver = {
2352 	.driver = {
2353 		.name	= "qcom_scm",
2354 		.of_match_table = qcom_scm_dt_match,
2355 		.suppress_bind_attrs = true,
2356 	},
2357 	.probe = qcom_scm_probe,
2358 	.shutdown = qcom_scm_shutdown,
2359 };
2360 
2361 static int __init qcom_scm_init(void)
2362 {
2363 	return platform_driver_register(&qcom_scm_driver);
2364 }
2365 subsys_initcall(qcom_scm_init);
2366 
2367 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
2368 MODULE_LICENSE("GPL v2");
2369