1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. 3 * Copyright (C) 2015 Linaro Ltd. 4 */ 5 6 #include <linux/arm-smccc.h> 7 #include <linux/bitfield.h> 8 #include <linux/bits.h> 9 #include <linux/cleanup.h> 10 #include <linux/clk.h> 11 #include <linux/completion.h> 12 #include <linux/cpumask.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/err.h> 15 #include <linux/export.h> 16 #include <linux/firmware/qcom/qcom_scm.h> 17 #include <linux/firmware/qcom/qcom_tzmem.h> 18 #include <linux/init.h> 19 #include <linux/interconnect.h> 20 #include <linux/interrupt.h> 21 #include <linux/kstrtox.h> 22 #include <linux/module.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 #include <linux/of_irq.h> 26 #include <linux/of_platform.h> 27 #include <linux/of_reserved_mem.h> 28 #include <linux/platform_device.h> 29 #include <linux/reset-controller.h> 30 #include <linux/sizes.h> 31 #include <linux/types.h> 32 33 #include "qcom_scm.h" 34 #include "qcom_tzmem.h" 35 36 static u32 download_mode; 37 38 struct qcom_scm { 39 struct device *dev; 40 struct clk *core_clk; 41 struct clk *iface_clk; 42 struct clk *bus_clk; 43 struct icc_path *path; 44 struct completion waitq_comp; 45 struct reset_controller_dev reset; 46 47 /* control access to the interconnect path */ 48 struct mutex scm_bw_lock; 49 int scm_vote_count; 50 51 u64 dload_mode_addr; 52 53 struct qcom_tzmem_pool *mempool; 54 }; 55 56 struct qcom_scm_current_perm_info { 57 __le32 vmid; 58 __le32 perm; 59 __le64 ctx; 60 __le32 ctx_size; 61 __le32 unused; 62 }; 63 64 struct qcom_scm_mem_map_info { 65 __le64 mem_addr; 66 __le64 mem_size; 67 }; 68 69 /** 70 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response. 71 * @result: Result or status of the SCM call. See &enum qcom_scm_qseecom_result. 72 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type. 73 * @data: Response data. The type of this data is given in @resp_type. 
74 */ 75 struct qcom_scm_qseecom_resp { 76 u64 result; 77 u64 resp_type; 78 u64 data; 79 }; 80 81 enum qcom_scm_qseecom_result { 82 QSEECOM_RESULT_SUCCESS = 0, 83 QSEECOM_RESULT_INCOMPLETE = 1, 84 QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2, 85 QSEECOM_RESULT_FAILURE = 0xFFFFFFFF, 86 }; 87 88 enum qcom_scm_qseecom_resp_type { 89 QSEECOM_SCM_RES_APP_ID = 0xEE01, 90 QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02, 91 }; 92 93 enum qcom_scm_qseecom_tz_owner { 94 QSEECOM_TZ_OWNER_SIP = 2, 95 QSEECOM_TZ_OWNER_TZ_APPS = 48, 96 QSEECOM_TZ_OWNER_QSEE_OS = 50 97 }; 98 99 enum qcom_scm_qseecom_tz_svc { 100 QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0, 101 QSEECOM_TZ_SVC_APP_MGR = 1, 102 QSEECOM_TZ_SVC_INFO = 6, 103 }; 104 105 enum qcom_scm_qseecom_tz_cmd_app { 106 QSEECOM_TZ_CMD_APP_SEND = 1, 107 QSEECOM_TZ_CMD_APP_LOOKUP = 3, 108 }; 109 110 enum qcom_scm_qseecom_tz_cmd_info { 111 QSEECOM_TZ_CMD_INFO_VERSION = 3, 112 }; 113 114 #define QSEECOM_MAX_APP_NAME_SIZE 64 115 #define SHMBRIDGE_RESULT_NOTSUPP 4 116 117 /* Each bit configures cold/warm boot address for one of the 4 CPUs */ 118 static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { 119 0, BIT(0), BIT(3), BIT(5) 120 }; 121 static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { 122 BIT(2), BIT(1), BIT(4), BIT(6) 123 }; 124 125 #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) 126 127 #define QCOM_DLOAD_MASK GENMASK(5, 4) 128 #define QCOM_DLOAD_NODUMP 0 129 #define QCOM_DLOAD_FULLDUMP 1 130 #define QCOM_DLOAD_MINIDUMP 2 131 #define QCOM_DLOAD_BOTHDUMP 3 132 133 static const char * const qcom_scm_convention_names[] = { 134 [SMC_CONVENTION_UNKNOWN] = "unknown", 135 [SMC_CONVENTION_ARM_32] = "smc arm 32", 136 [SMC_CONVENTION_ARM_64] = "smc arm 64", 137 [SMC_CONVENTION_LEGACY] = "smc legacy", 138 }; 139 140 static const char * const download_mode_name[] = { 141 [QCOM_DLOAD_NODUMP] = "off", 142 [QCOM_DLOAD_FULLDUMP] = "full", 143 [QCOM_DLOAD_MINIDUMP] = "mini", 144 [QCOM_DLOAD_BOTHDUMP] = "full,mini", 145 }; 146 147 static 
struct qcom_scm *__scm;

/* Enable the (optional) core/iface/bus clocks needed around an SCM call. */
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

/* Counterpart of qcom_scm_clk_enable(): drop all three clock references. */
static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

/*
 * Vote for interconnect bandwidth before an SCM call.  Votes are refcounted
 * under scm_bw_lock; only the first voter actually issues icc_set_bw().
 * Returns 0 when no interconnect path was configured.
 */
static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

/* Drop one bandwidth vote; the last voter removes the bandwidth request. */
static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

/* Returns the TZ memory pool, or NULL if the SCM device has not probed yet. */
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}

/*
 * Probe which SMC calling convention the firmware implements by issuing
 * QCOM_SCM_INFO_IS_CALL_AVAIL; the result is cached in qcom_scm_convention
 * so the probe runs only once.
 */
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP <<
ARM_SMCCC_OWNER_SHIFT), 235 .arginfo = QCOM_SCM_ARGS(1), 236 .owner = ARM_SMCCC_OWNER_SIP, 237 }; 238 struct qcom_scm_res res; 239 enum qcom_scm_convention probed_convention; 240 int ret; 241 bool forced = false; 242 243 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN)) 244 return qcom_scm_convention; 245 246 /* 247 * Per the "SMC calling convention specification", the 64-bit calling 248 * convention can only be used when the client is 64-bit, otherwise 249 * system will encounter the undefined behaviour. 250 */ 251 #if IS_ENABLED(CONFIG_ARM64) 252 /* 253 * Device isn't required as there is only one argument - no device 254 * needed to dma_map_single to secure world 255 */ 256 probed_convention = SMC_CONVENTION_ARM_64; 257 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); 258 if (!ret && res.result[0] == 1) 259 goto found; 260 261 /* 262 * Some SC7180 firmwares didn't implement the 263 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64 264 * calling conventions on these firmwares. Luckily we don't make any 265 * early calls into the firmware on these SoCs so the device pointer 266 * will be valid here to check if the compatible matches. 267 */ 268 if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) { 269 forced = true; 270 goto found; 271 } 272 #endif 273 274 probed_convention = SMC_CONVENTION_ARM_32; 275 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); 276 if (!ret && res.result[0] == 1) 277 goto found; 278 279 probed_convention = SMC_CONVENTION_LEGACY; 280 found: 281 spin_lock_irqsave(&scm_query_lock, flags); 282 if (probed_convention != qcom_scm_convention) { 283 qcom_scm_convention = probed_convention; 284 pr_info("qcom_scm: convention: %s%s\n", 285 qcom_scm_convention_names[qcom_scm_convention], 286 forced ? 
" (forced)" : ""); 287 } 288 spin_unlock_irqrestore(&scm_query_lock, flags); 289 290 return qcom_scm_convention; 291 } 292 293 /** 294 * qcom_scm_call() - Invoke a syscall in the secure world 295 * @dev: device 296 * @desc: Descriptor structure containing arguments and return values 297 * @res: Structure containing results from SMC/HVC call 298 * 299 * Sends a command to the SCM and waits for the command to finish processing. 300 * This should *only* be called in pre-emptible context. 301 */ 302 static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc, 303 struct qcom_scm_res *res) 304 { 305 might_sleep(); 306 switch (__get_convention()) { 307 case SMC_CONVENTION_ARM_32: 308 case SMC_CONVENTION_ARM_64: 309 return scm_smc_call(dev, desc, res, false); 310 case SMC_CONVENTION_LEGACY: 311 return scm_legacy_call(dev, desc, res); 312 default: 313 pr_err("Unknown current SCM calling convention.\n"); 314 return -EINVAL; 315 } 316 } 317 318 /** 319 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() 320 * @dev: device 321 * @desc: Descriptor structure containing arguments and return values 322 * @res: Structure containing results from SMC/HVC call 323 * 324 * Sends a command to the SCM and waits for the command to finish processing. 325 * This can be called in atomic context. 
326 */ 327 static int qcom_scm_call_atomic(struct device *dev, 328 const struct qcom_scm_desc *desc, 329 struct qcom_scm_res *res) 330 { 331 switch (__get_convention()) { 332 case SMC_CONVENTION_ARM_32: 333 case SMC_CONVENTION_ARM_64: 334 return scm_smc_call(dev, desc, res, true); 335 case SMC_CONVENTION_LEGACY: 336 return scm_legacy_call_atomic(dev, desc, res); 337 default: 338 pr_err("Unknown current SCM calling convention.\n"); 339 return -EINVAL; 340 } 341 } 342 343 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, 344 u32 cmd_id) 345 { 346 int ret; 347 struct qcom_scm_desc desc = { 348 .svc = QCOM_SCM_SVC_INFO, 349 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, 350 .owner = ARM_SMCCC_OWNER_SIP, 351 }; 352 struct qcom_scm_res res; 353 354 desc.arginfo = QCOM_SCM_ARGS(1); 355 switch (__get_convention()) { 356 case SMC_CONVENTION_ARM_32: 357 case SMC_CONVENTION_ARM_64: 358 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) | 359 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); 360 break; 361 case SMC_CONVENTION_LEGACY: 362 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id); 363 break; 364 default: 365 pr_err("Unknown SMC convention being used\n"); 366 return false; 367 } 368 369 ret = qcom_scm_call(dev, &desc, &res); 370 371 return ret ? false : !!res.result[0]; 372 } 373 374 static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) 375 { 376 int cpu; 377 unsigned int flags = 0; 378 struct qcom_scm_desc desc = { 379 .svc = QCOM_SCM_SVC_BOOT, 380 .cmd = QCOM_SCM_BOOT_SET_ADDR, 381 .arginfo = QCOM_SCM_ARGS(2), 382 .owner = ARM_SMCCC_OWNER_SIP, 383 }; 384 385 for_each_present_cpu(cpu) { 386 if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) 387 return -EINVAL; 388 flags |= cpu_bits[cpu]; 389 } 390 391 desc.args[0] = flags; 392 desc.args[1] = virt_to_phys(entry); 393 394 return qcom_scm_call_atomic(__scm ? 
__scm->dev : NULL, &desc, NULL); 395 } 396 397 static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags) 398 { 399 struct qcom_scm_desc desc = { 400 .svc = QCOM_SCM_SVC_BOOT, 401 .cmd = QCOM_SCM_BOOT_SET_ADDR_MC, 402 .owner = ARM_SMCCC_OWNER_SIP, 403 .arginfo = QCOM_SCM_ARGS(6), 404 .args = { 405 virt_to_phys(entry), 406 /* Apply to all CPUs in all affinity levels */ 407 ~0ULL, ~0ULL, ~0ULL, ~0ULL, 408 flags, 409 }, 410 }; 411 412 /* Need a device for DMA of the additional arguments */ 413 if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY) 414 return -EOPNOTSUPP; 415 416 return qcom_scm_call(__scm->dev, &desc, NULL); 417 } 418 419 /** 420 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus 421 * @entry: Entry point function for the cpus 422 * 423 * Set the Linux entry point for the SCM to transfer control to when coming 424 * out of a power down. CPU power down may be executed on cpuidle or hotplug. 425 */ 426 int qcom_scm_set_warm_boot_addr(void *entry) 427 { 428 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT)) 429 /* Fallback to old SCM call */ 430 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); 431 return 0; 432 } 433 EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr); 434 435 /** 436 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus 437 * @entry: Entry point function for the cpus 438 */ 439 int qcom_scm_set_cold_boot_addr(void *entry) 440 { 441 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT)) 442 /* Fallback to old SCM call */ 443 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); 444 return 0; 445 } 446 EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr); 447 448 /** 449 * qcom_scm_cpu_power_down() - Power down the cpu 450 * @flags: Flags to flush cache 451 * 452 * This is an end point to power down cpu. 
If there was a pending interrupt, 453 * the control would return from this function, otherwise, the cpu jumps to the 454 * warm boot entry point set for this cpu upon reset. 455 */ 456 void qcom_scm_cpu_power_down(u32 flags) 457 { 458 struct qcom_scm_desc desc = { 459 .svc = QCOM_SCM_SVC_BOOT, 460 .cmd = QCOM_SCM_BOOT_TERMINATE_PC, 461 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK, 462 .arginfo = QCOM_SCM_ARGS(1), 463 .owner = ARM_SMCCC_OWNER_SIP, 464 }; 465 466 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); 467 } 468 EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down); 469 470 int qcom_scm_set_remote_state(u32 state, u32 id) 471 { 472 struct qcom_scm_desc desc = { 473 .svc = QCOM_SCM_SVC_BOOT, 474 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE, 475 .arginfo = QCOM_SCM_ARGS(2), 476 .args[0] = state, 477 .args[1] = id, 478 .owner = ARM_SMCCC_OWNER_SIP, 479 }; 480 struct qcom_scm_res res; 481 int ret; 482 483 ret = qcom_scm_call(__scm->dev, &desc, &res); 484 485 return ret ? : res.result[0]; 486 } 487 EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state); 488 489 static int qcom_scm_disable_sdi(void) 490 { 491 int ret; 492 struct qcom_scm_desc desc = { 493 .svc = QCOM_SCM_SVC_BOOT, 494 .cmd = QCOM_SCM_BOOT_SDI_CONFIG, 495 .args[0] = 1, /* Disable watchdog debug */ 496 .args[1] = 0, /* Disable SDI */ 497 .arginfo = QCOM_SCM_ARGS(2), 498 .owner = ARM_SMCCC_OWNER_SIP, 499 }; 500 struct qcom_scm_res res; 501 502 ret = qcom_scm_clk_enable(); 503 if (ret) 504 return ret; 505 ret = qcom_scm_call(__scm->dev, &desc, &res); 506 507 qcom_scm_clk_disable(); 508 509 return ret ? : res.result[0]; 510 } 511 512 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) 513 { 514 struct qcom_scm_desc desc = { 515 .svc = QCOM_SCM_SVC_BOOT, 516 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE, 517 .arginfo = QCOM_SCM_ARGS(2), 518 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE, 519 .owner = ARM_SMCCC_OWNER_SIP, 520 }; 521 522 desc.args[1] = enable ? 
				 QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

/*
 * Read-modify-write a secure IO register: only the bits selected by @mask
 * are replaced with the corresponding bits of @val.
 */
static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

/*
 * Program the requested download (crash dump) mode.  Prefer a direct write
 * to the DT-provided dload-mode address, fall back to the dedicated SCM
 * call, and complain only when a non-zero mode was requested but no
 * mechanism is available.
 */
static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * devm_qcom_scm_pas_context_alloc() - Allocate peripheral authentication service
 * context for a given peripheral
 *
 * PAS context is device-resource managed, so the caller does not need
 * to worry about freeing the context memory.
 *
 * @dev: PAS firmware device
 * @pas_id: peripheral authentication service id
 * @mem_phys: Subsystem reserve memory start address
 * @mem_size: Subsystem reserve memory size
 *
 * Returns: The new PAS context, or ERR_PTR() on failure.
574 */ 575 struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev, 576 u32 pas_id, 577 phys_addr_t mem_phys, 578 size_t mem_size) 579 { 580 struct qcom_scm_pas_context *ctx; 581 582 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 583 if (!ctx) 584 return ERR_PTR(-ENOMEM); 585 586 ctx->dev = dev; 587 ctx->pas_id = pas_id; 588 ctx->mem_phys = mem_phys; 589 ctx->mem_size = mem_size; 590 591 return ctx; 592 } 593 EXPORT_SYMBOL_GPL(devm_qcom_scm_pas_context_alloc); 594 595 /** 596 * qcom_scm_pas_init_image() - Initialize peripheral authentication service 597 * state machine for a given peripheral, using the 598 * metadata 599 * @pas_id: peripheral authentication service id 600 * @metadata: pointer to memory containing ELF header, program header table 601 * and optional blob of data used for authenticating the metadata 602 * and the rest of the firmware 603 * @size: size of the metadata 604 * @ctx: optional pas context 605 * 606 * Return: 0 on success. 607 * 608 * Upon successful return, the PAS metadata context (@ctx) will be used to 609 * track the metadata allocation, this needs to be released by invoking 610 * qcom_scm_pas_metadata_release() by the caller. 611 */ 612 int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size, 613 struct qcom_scm_pas_context *ctx) 614 { 615 dma_addr_t mdata_phys; 616 void *mdata_buf; 617 int ret; 618 struct qcom_scm_desc desc = { 619 .svc = QCOM_SCM_SVC_PIL, 620 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, 621 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), 622 .args[0] = pas_id, 623 .owner = ARM_SMCCC_OWNER_SIP, 624 }; 625 struct qcom_scm_res res; 626 627 /* 628 * During the scm call memory protection will be enabled for the meta 629 * data blob, so make sure it's physically contiguous, 4K aligned and 630 * non-cachable to avoid XPU violations. 
631 * 632 * For PIL calls the hypervisor creates SHM Bridges for the blob 633 * buffers on behalf of Linux so we must not do it ourselves hence 634 * not using the TZMem allocator here. 635 * 636 * If we pass a buffer that is already part of an SHM Bridge to this 637 * call, it will fail. 638 */ 639 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, 640 GFP_KERNEL); 641 if (!mdata_buf) 642 return -ENOMEM; 643 644 memcpy(mdata_buf, metadata, size); 645 646 ret = qcom_scm_clk_enable(); 647 if (ret) 648 goto out; 649 650 ret = qcom_scm_bw_enable(); 651 if (ret) 652 goto disable_clk; 653 654 desc.args[1] = mdata_phys; 655 656 ret = qcom_scm_call(__scm->dev, &desc, &res); 657 qcom_scm_bw_disable(); 658 659 disable_clk: 660 qcom_scm_clk_disable(); 661 662 out: 663 if (ret < 0 || !ctx) { 664 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); 665 } else if (ctx) { 666 ctx->ptr = mdata_buf; 667 ctx->phys = mdata_phys; 668 ctx->size = size; 669 } 670 671 return ret ? : res.result[0]; 672 } 673 EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image); 674 675 /** 676 * qcom_scm_pas_metadata_release() - release metadata context 677 * @ctx: pas context 678 */ 679 void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx) 680 { 681 if (!ctx->ptr) 682 return; 683 684 dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); 685 686 ctx->ptr = NULL; 687 } 688 EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release); 689 690 /** 691 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral 692 * for firmware loading 693 * @pas_id: peripheral authentication service id 694 * @addr: start address of memory area to prepare 695 * @size: size of the memory area to prepare 696 * 697 * Returns 0 on success. 
 */
int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = pas_id,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	/* Transport error takes precedence, else the firmware status word. */
	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @pas_id: peripheral authentication service id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	/* Transport error takes precedence, else the firmware status word. */
	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @pas_id: peripheral authentication service id
 *
 * Returns 0 on success.
773 */ 774 int qcom_scm_pas_shutdown(u32 pas_id) 775 { 776 int ret; 777 struct qcom_scm_desc desc = { 778 .svc = QCOM_SCM_SVC_PIL, 779 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN, 780 .arginfo = QCOM_SCM_ARGS(1), 781 .args[0] = pas_id, 782 .owner = ARM_SMCCC_OWNER_SIP, 783 }; 784 struct qcom_scm_res res; 785 786 ret = qcom_scm_clk_enable(); 787 if (ret) 788 return ret; 789 790 ret = qcom_scm_bw_enable(); 791 if (ret) 792 goto disable_clk; 793 794 ret = qcom_scm_call(__scm->dev, &desc, &res); 795 qcom_scm_bw_disable(); 796 797 disable_clk: 798 qcom_scm_clk_disable(); 799 800 return ret ? : res.result[0]; 801 } 802 EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown); 803 804 /** 805 * qcom_scm_pas_supported() - Check if the peripheral authentication service is 806 * available for the given peripherial 807 * @pas_id: peripheral authentication service id 808 * 809 * Returns true if PAS is supported for this peripheral, otherwise false. 810 */ 811 bool qcom_scm_pas_supported(u32 pas_id) 812 { 813 int ret; 814 struct qcom_scm_desc desc = { 815 .svc = QCOM_SCM_SVC_PIL, 816 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED, 817 .arginfo = QCOM_SCM_ARGS(1), 818 .args[0] = pas_id, 819 .owner = ARM_SMCCC_OWNER_SIP, 820 }; 821 struct qcom_scm_res res; 822 823 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, 824 QCOM_SCM_PIL_PAS_IS_SUPPORTED)) 825 return false; 826 827 ret = qcom_scm_call(__scm->dev, &desc, &res); 828 829 return ret ? false : !!res.result[0]; 830 } 831 EXPORT_SYMBOL_GPL(qcom_scm_pas_supported); 832 833 static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) 834 { 835 struct qcom_scm_desc desc = { 836 .svc = QCOM_SCM_SVC_PIL, 837 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET, 838 .arginfo = QCOM_SCM_ARGS(2), 839 .args[0] = reset, 840 .args[1] = 0, 841 .owner = ARM_SMCCC_OWNER_SIP, 842 }; 843 struct qcom_scm_res res; 844 int ret; 845 846 ret = qcom_scm_call(__scm->dev, &desc, &res); 847 848 return ret ? 
		   : res.result[0];
}

/* reset_controller assert hook; only id 0 (the MSS reset line) exists. */
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

/* reset_controller deassert hook; releases the MSS reset set above. */
static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

/*
 * Read a secure-world-owned register via an atomic SCM call.
 * *val is written only when the call itself did not fail.
 */
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;


	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

/* Write a secure-world-owned register via an atomic SCM call. */
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
915 */ 916 bool qcom_scm_restore_sec_cfg_available(void) 917 { 918 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 919 QCOM_SCM_MP_RESTORE_SEC_CFG); 920 } 921 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available); 922 923 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) 924 { 925 struct qcom_scm_desc desc = { 926 .svc = QCOM_SCM_SVC_MP, 927 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, 928 .arginfo = QCOM_SCM_ARGS(2), 929 .args[0] = device_id, 930 .args[1] = spare, 931 .owner = ARM_SMCCC_OWNER_SIP, 932 }; 933 struct qcom_scm_res res; 934 int ret; 935 936 ret = qcom_scm_call(__scm->dev, &desc, &res); 937 938 return ret ? : res.result[0]; 939 } 940 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); 941 942 #define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0) 943 944 bool qcom_scm_set_gpu_smmu_aperture_is_available(void) 945 { 946 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 947 QCOM_SCM_MP_CP_SMMU_APERTURE_ID); 948 } 949 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available); 950 951 int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank) 952 { 953 struct qcom_scm_desc desc = { 954 .svc = QCOM_SCM_SVC_MP, 955 .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID, 956 .arginfo = QCOM_SCM_ARGS(4), 957 .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank), 958 .args[1] = 0xffffffff, 959 .args[2] = 0xffffffff, 960 .args[3] = 0xffffffff, 961 .owner = ARM_SMCCC_OWNER_SIP 962 }; 963 964 return qcom_scm_call(__scm->dev, &desc, NULL); 965 } 966 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture); 967 968 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) 969 { 970 struct qcom_scm_desc desc = { 971 .svc = QCOM_SCM_SVC_MP, 972 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE, 973 .arginfo = QCOM_SCM_ARGS(1), 974 .args[0] = spare, 975 .owner = ARM_SMCCC_OWNER_SIP, 976 }; 977 struct qcom_scm_res res; 978 int ret; 979 980 ret = qcom_scm_call(__scm->dev, &desc, &res); 981 982 if (size) 983 *size = res.result[0]; 984 985 
return ret ? : res.result[1]; 986 } 987 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size); 988 989 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) 990 { 991 struct qcom_scm_desc desc = { 992 .svc = QCOM_SCM_SVC_MP, 993 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT, 994 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, 995 QCOM_SCM_VAL), 996 .args[0] = addr, 997 .args[1] = size, 998 .args[2] = spare, 999 .owner = ARM_SMCCC_OWNER_SIP, 1000 }; 1001 int ret; 1002 1003 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1004 1005 /* the pg table has been initialized already, ignore the error */ 1006 if (ret == -EPERM) 1007 ret = 0; 1008 1009 return ret; 1010 } 1011 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init); 1012 1013 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) 1014 { 1015 struct qcom_scm_desc desc = { 1016 .svc = QCOM_SCM_SVC_MP, 1017 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, 1018 .arginfo = QCOM_SCM_ARGS(2), 1019 .args[0] = size, 1020 .args[1] = spare, 1021 .owner = ARM_SMCCC_OWNER_SIP, 1022 }; 1023 1024 return qcom_scm_call(__scm->dev, &desc, NULL); 1025 } 1026 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size); 1027 1028 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, 1029 u32 cp_nonpixel_start, 1030 u32 cp_nonpixel_size) 1031 { 1032 int ret; 1033 struct qcom_scm_desc desc = { 1034 .svc = QCOM_SCM_SVC_MP, 1035 .cmd = QCOM_SCM_MP_VIDEO_VAR, 1036 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, 1037 QCOM_SCM_VAL, QCOM_SCM_VAL), 1038 .args[0] = cp_start, 1039 .args[1] = cp_size, 1040 .args[2] = cp_nonpixel_start, 1041 .args[3] = cp_nonpixel_size, 1042 .owner = ARM_SMCCC_OWNER_SIP, 1043 }; 1044 struct qcom_scm_res res; 1045 1046 ret = qcom_scm_call(__scm->dev, &desc, &res); 1047 1048 return ret ? 
: res.result[0]; 1049 } 1050 EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var); 1051 1052 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, 1053 size_t mem_sz, phys_addr_t src, size_t src_sz, 1054 phys_addr_t dest, size_t dest_sz) 1055 { 1056 int ret; 1057 struct qcom_scm_desc desc = { 1058 .svc = QCOM_SCM_SVC_MP, 1059 .cmd = QCOM_SCM_MP_ASSIGN, 1060 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, 1061 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, 1062 QCOM_SCM_VAL, QCOM_SCM_VAL), 1063 .args[0] = mem_region, 1064 .args[1] = mem_sz, 1065 .args[2] = src, 1066 .args[3] = src_sz, 1067 .args[4] = dest, 1068 .args[5] = dest_sz, 1069 .args[6] = 0, 1070 .owner = ARM_SMCCC_OWNER_SIP, 1071 }; 1072 struct qcom_scm_res res; 1073 1074 ret = qcom_scm_call(dev, &desc, &res); 1075 1076 return ret ? : res.result[0]; 1077 } 1078 1079 /** 1080 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership 1081 * @mem_addr: mem region whose ownership need to be reassigned 1082 * @mem_sz: size of the region. 1083 * @srcvm: vmid for current set of owners, each set bit in 1084 * flag indicate a unique owner 1085 * @newvm: array having new owners and corresponding permission 1086 * flags 1087 * @dest_cnt: number of owners in next set. 1088 * 1089 * Return negative errno on failure or 0 on success with @srcvm updated. 
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	/*
	 * The firmware expects three tables packed into one contiguous
	 * buffer: the source VMIDs, the memory region descriptor and the
	 * destination permission entries, each 64-byte aligned.
	 */
	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
		 ALIGN(dest_sz, SZ_64);

	/* Auto-freed on every exit path via the qcom_tzmem cleanup class. */
	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail: one __le32 entry per bit set in @srcvm. */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
dev_err(__scm->dev, 1153 "Assign memory protection call failed %d\n", ret); 1154 return ret; 1155 } 1156 1157 *srcvm = next_vm; 1158 return 0; 1159 } 1160 EXPORT_SYMBOL_GPL(qcom_scm_assign_mem); 1161 1162 /** 1163 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available 1164 */ 1165 bool qcom_scm_ocmem_lock_available(void) 1166 { 1167 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM, 1168 QCOM_SCM_OCMEM_LOCK_CMD); 1169 } 1170 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available); 1171 1172 /** 1173 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM 1174 * region to the specified initiator 1175 * 1176 * @id: tz initiator id 1177 * @offset: OCMEM offset 1178 * @size: OCMEM size 1179 * @mode: access mode (WIDE/NARROW) 1180 */ 1181 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, 1182 u32 mode) 1183 { 1184 struct qcom_scm_desc desc = { 1185 .svc = QCOM_SCM_SVC_OCMEM, 1186 .cmd = QCOM_SCM_OCMEM_LOCK_CMD, 1187 .args[0] = id, 1188 .args[1] = offset, 1189 .args[2] = size, 1190 .args[3] = mode, 1191 .arginfo = QCOM_SCM_ARGS(4), 1192 }; 1193 1194 return qcom_scm_call(__scm->dev, &desc, NULL); 1195 } 1196 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock); 1197 1198 /** 1199 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM 1200 * region from the specified initiator 1201 * 1202 * @id: tz initiator id 1203 * @offset: OCMEM offset 1204 * @size: OCMEM size 1205 */ 1206 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) 1207 { 1208 struct qcom_scm_desc desc = { 1209 .svc = QCOM_SCM_SVC_OCMEM, 1210 .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD, 1211 .args[0] = id, 1212 .args[1] = offset, 1213 .args[2] = size, 1214 .arginfo = QCOM_SCM_ARGS(3), 1215 }; 1216 1217 return qcom_scm_call(__scm->dev, &desc, NULL); 1218 } 1219 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock); 1220 1221 /** 1222 * qcom_scm_ice_available() - Is the ICE key programming interface available? 
1223 * 1224 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and 1225 * qcom_scm_ice_set_key() are available. 1226 */ 1227 bool qcom_scm_ice_available(void) 1228 { 1229 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1230 QCOM_SCM_ES_INVALIDATE_ICE_KEY) && 1231 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1232 QCOM_SCM_ES_CONFIG_SET_ICE_KEY); 1233 } 1234 EXPORT_SYMBOL_GPL(qcom_scm_ice_available); 1235 1236 /** 1237 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key 1238 * @index: the keyslot to invalidate 1239 * 1240 * The UFSHCI and eMMC standards define a standard way to do this, but it 1241 * doesn't work on these SoCs; only this SCM call does. 1242 * 1243 * It is assumed that the SoC has only one ICE instance being used, as this SCM 1244 * call doesn't specify which ICE instance the keyslot belongs to. 1245 * 1246 * Return: 0 on success; -errno on failure. 1247 */ 1248 int qcom_scm_ice_invalidate_key(u32 index) 1249 { 1250 struct qcom_scm_desc desc = { 1251 .svc = QCOM_SCM_SVC_ES, 1252 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY, 1253 .arginfo = QCOM_SCM_ARGS(1), 1254 .args[0] = index, 1255 .owner = ARM_SMCCC_OWNER_SIP, 1256 }; 1257 1258 return qcom_scm_call(__scm->dev, &desc, NULL); 1259 } 1260 EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key); 1261 1262 /** 1263 * qcom_scm_ice_set_key() - Set an inline encryption key 1264 * @index: the keyslot into which to set the key 1265 * @key: the key to program 1266 * @key_size: the size of the key in bytes 1267 * @cipher: the encryption algorithm the key is for 1268 * @data_unit_size: the encryption data unit size, i.e. the size of each 1269 * individual plaintext and ciphertext. Given in 512-byte 1270 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc. 1271 * 1272 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it 1273 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline. 
1274 * 1275 * The UFSHCI and eMMC standards define a standard way to do this, but it 1276 * doesn't work on these SoCs; only this SCM call does. 1277 * 1278 * It is assumed that the SoC has only one ICE instance being used, as this SCM 1279 * call doesn't specify which ICE instance the keyslot belongs to. 1280 * 1281 * Return: 0 on success; -errno on failure. 1282 */ 1283 int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, 1284 enum qcom_scm_ice_cipher cipher, u32 data_unit_size) 1285 { 1286 struct qcom_scm_desc desc = { 1287 .svc = QCOM_SCM_SVC_ES, 1288 .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY, 1289 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW, 1290 QCOM_SCM_VAL, QCOM_SCM_VAL, 1291 QCOM_SCM_VAL), 1292 .args[0] = index, 1293 .args[2] = key_size, 1294 .args[3] = cipher, 1295 .args[4] = data_unit_size, 1296 .owner = ARM_SMCCC_OWNER_SIP, 1297 }; 1298 1299 int ret; 1300 1301 void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1302 key_size, 1303 GFP_KERNEL); 1304 if (!keybuf) 1305 return -ENOMEM; 1306 memcpy(keybuf, key, key_size); 1307 desc.args[1] = qcom_tzmem_to_phys(keybuf); 1308 1309 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1310 1311 memzero_explicit(keybuf, key_size); 1312 1313 return ret; 1314 } 1315 EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); 1316 1317 bool qcom_scm_has_wrapped_key_support(void) 1318 { 1319 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1320 QCOM_SCM_ES_DERIVE_SW_SECRET) && 1321 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1322 QCOM_SCM_ES_GENERATE_ICE_KEY) && 1323 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1324 QCOM_SCM_ES_PREPARE_ICE_KEY) && 1325 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1326 QCOM_SCM_ES_IMPORT_ICE_KEY); 1327 } 1328 EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support); 1329 1330 /** 1331 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key 1332 * @eph_key: an ephemerally-wrapped key 1333 * @eph_key_size: size of 
@eph_key in bytes 1334 * @sw_secret: output buffer for the software secret 1335 * @sw_secret_size: size of the software secret to derive in bytes 1336 * 1337 * Derive a software secret from an ephemerally-wrapped key for software crypto 1338 * operations. This is done by calling into the secure execution environment, 1339 * which then calls into the hardware to unwrap and derive the secret. 1340 * 1341 * For more information on sw_secret, see the "Hardware-wrapped keys" section of 1342 * Documentation/block/inline-encryption.rst. 1343 * 1344 * Return: 0 on success; -errno on failure. 1345 */ 1346 int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size, 1347 u8 *sw_secret, size_t sw_secret_size) 1348 { 1349 struct qcom_scm_desc desc = { 1350 .svc = QCOM_SCM_SVC_ES, 1351 .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET, 1352 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL, 1353 QCOM_SCM_RW, QCOM_SCM_VAL), 1354 .owner = ARM_SMCCC_OWNER_SIP, 1355 }; 1356 int ret; 1357 1358 void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1359 eph_key_size, 1360 GFP_KERNEL); 1361 if (!eph_key_buf) 1362 return -ENOMEM; 1363 1364 void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1365 sw_secret_size, 1366 GFP_KERNEL); 1367 if (!sw_secret_buf) 1368 return -ENOMEM; 1369 1370 memcpy(eph_key_buf, eph_key, eph_key_size); 1371 desc.args[0] = qcom_tzmem_to_phys(eph_key_buf); 1372 desc.args[1] = eph_key_size; 1373 desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf); 1374 desc.args[3] = sw_secret_size; 1375 1376 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1377 if (!ret) 1378 memcpy(sw_secret, sw_secret_buf, sw_secret_size); 1379 1380 memzero_explicit(eph_key_buf, eph_key_size); 1381 memzero_explicit(sw_secret_buf, sw_secret_size); 1382 return ret; 1383 } 1384 EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret); 1385 1386 /** 1387 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption 1388 * @lt_key: output buffer for the long-term 
wrapped key 1389 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size 1390 * used by the SoC. 1391 * 1392 * Generate a key using the built-in HW module in the SoC. The resulting key is 1393 * returned wrapped with the platform-specific Key Encryption Key. 1394 * 1395 * Return: 0 on success; -errno on failure. 1396 */ 1397 int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size) 1398 { 1399 struct qcom_scm_desc desc = { 1400 .svc = QCOM_SCM_SVC_ES, 1401 .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY, 1402 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), 1403 .owner = ARM_SMCCC_OWNER_SIP, 1404 }; 1405 int ret; 1406 1407 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1408 lt_key_size, 1409 GFP_KERNEL); 1410 if (!lt_key_buf) 1411 return -ENOMEM; 1412 1413 desc.args[0] = qcom_tzmem_to_phys(lt_key_buf); 1414 desc.args[1] = lt_key_size; 1415 1416 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1417 if (!ret) 1418 memcpy(lt_key, lt_key_buf, lt_key_size); 1419 1420 memzero_explicit(lt_key_buf, lt_key_size); 1421 return ret; 1422 } 1423 EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key); 1424 1425 /** 1426 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key 1427 * @lt_key: a long-term wrapped key 1428 * @lt_key_size: size of @lt_key in bytes 1429 * @eph_key: output buffer for the ephemerally-wrapped key 1430 * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size 1431 * used by the SoC. 1432 * 1433 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for 1434 * added protection. The resulting key will only be valid for the current boot. 1435 * 1436 * Return: 0 on success; -errno on failure. 
1437 */ 1438 int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size, 1439 u8 *eph_key, size_t eph_key_size) 1440 { 1441 struct qcom_scm_desc desc = { 1442 .svc = QCOM_SCM_SVC_ES, 1443 .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY, 1444 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, 1445 QCOM_SCM_RW, QCOM_SCM_VAL), 1446 .owner = ARM_SMCCC_OWNER_SIP, 1447 }; 1448 int ret; 1449 1450 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1451 lt_key_size, 1452 GFP_KERNEL); 1453 if (!lt_key_buf) 1454 return -ENOMEM; 1455 1456 void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1457 eph_key_size, 1458 GFP_KERNEL); 1459 if (!eph_key_buf) 1460 return -ENOMEM; 1461 1462 memcpy(lt_key_buf, lt_key, lt_key_size); 1463 desc.args[0] = qcom_tzmem_to_phys(lt_key_buf); 1464 desc.args[1] = lt_key_size; 1465 desc.args[2] = qcom_tzmem_to_phys(eph_key_buf); 1466 desc.args[3] = eph_key_size; 1467 1468 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1469 if (!ret) 1470 memcpy(eph_key, eph_key_buf, eph_key_size); 1471 1472 memzero_explicit(lt_key_buf, lt_key_size); 1473 memzero_explicit(eph_key_buf, eph_key_size); 1474 return ret; 1475 } 1476 EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key); 1477 1478 /** 1479 * qcom_scm_import_ice_key() - Import key for storage encryption 1480 * @raw_key: the raw key to import 1481 * @raw_key_size: size of @raw_key in bytes 1482 * @lt_key: output buffer for the long-term wrapped key 1483 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size 1484 * used by the SoC. 1485 * 1486 * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to 1487 * wrap the raw key using the platform-specific Key Encryption Key. 1488 * 1489 * Return: 0 on success; -errno on failure. 
1490 */ 1491 int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size, 1492 u8 *lt_key, size_t lt_key_size) 1493 { 1494 struct qcom_scm_desc desc = { 1495 .svc = QCOM_SCM_SVC_ES, 1496 .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY, 1497 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, 1498 QCOM_SCM_RW, QCOM_SCM_VAL), 1499 .owner = ARM_SMCCC_OWNER_SIP, 1500 }; 1501 int ret; 1502 1503 void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1504 raw_key_size, 1505 GFP_KERNEL); 1506 if (!raw_key_buf) 1507 return -ENOMEM; 1508 1509 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1510 lt_key_size, 1511 GFP_KERNEL); 1512 if (!lt_key_buf) 1513 return -ENOMEM; 1514 1515 memcpy(raw_key_buf, raw_key, raw_key_size); 1516 desc.args[0] = qcom_tzmem_to_phys(raw_key_buf); 1517 desc.args[1] = raw_key_size; 1518 desc.args[2] = qcom_tzmem_to_phys(lt_key_buf); 1519 desc.args[3] = lt_key_size; 1520 1521 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1522 if (!ret) 1523 memcpy(lt_key, lt_key_buf, lt_key_size); 1524 1525 memzero_explicit(raw_key_buf, raw_key_size); 1526 memzero_explicit(lt_key_buf, lt_key_size); 1527 return ret; 1528 } 1529 EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key); 1530 1531 /** 1532 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 1533 * 1534 * Return true if HDCP is supported, false if not. 1535 */ 1536 bool qcom_scm_hdcp_available(void) 1537 { 1538 bool avail; 1539 int ret = qcom_scm_clk_enable(); 1540 1541 if (ret) 1542 return ret; 1543 1544 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, 1545 QCOM_SCM_HDCP_INVOKE); 1546 1547 qcom_scm_clk_disable(); 1548 1549 return avail; 1550 } 1551 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available); 1552 1553 /** 1554 * qcom_scm_hdcp_req() - Send HDCP request. 1555 * @req: HDCP request array 1556 * @req_cnt: HDCP request array count 1557 * @resp: response buffer passed to SCM 1558 * 1559 * Write HDCP register(s) through SCM. 
1560 */ 1561 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) 1562 { 1563 int ret; 1564 struct qcom_scm_desc desc = { 1565 .svc = QCOM_SCM_SVC_HDCP, 1566 .cmd = QCOM_SCM_HDCP_INVOKE, 1567 .arginfo = QCOM_SCM_ARGS(10), 1568 .args = { 1569 req[0].addr, 1570 req[0].val, 1571 req[1].addr, 1572 req[1].val, 1573 req[2].addr, 1574 req[2].val, 1575 req[3].addr, 1576 req[3].val, 1577 req[4].addr, 1578 req[4].val 1579 }, 1580 .owner = ARM_SMCCC_OWNER_SIP, 1581 }; 1582 struct qcom_scm_res res; 1583 1584 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) 1585 return -ERANGE; 1586 1587 ret = qcom_scm_clk_enable(); 1588 if (ret) 1589 return ret; 1590 1591 ret = qcom_scm_call(__scm->dev, &desc, &res); 1592 *resp = res.result[0]; 1593 1594 qcom_scm_clk_disable(); 1595 1596 return ret; 1597 } 1598 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req); 1599 1600 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) 1601 { 1602 struct qcom_scm_desc desc = { 1603 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1604 .cmd = QCOM_SCM_SMMU_PT_FORMAT, 1605 .arginfo = QCOM_SCM_ARGS(3), 1606 .args[0] = sec_id, 1607 .args[1] = ctx_num, 1608 .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ 1609 .owner = ARM_SMCCC_OWNER_SIP, 1610 }; 1611 1612 return qcom_scm_call(__scm->dev, &desc, NULL); 1613 } 1614 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format); 1615 1616 int qcom_scm_qsmmu500_wait_safe_toggle(bool en) 1617 { 1618 struct qcom_scm_desc desc = { 1619 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1620 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1, 1621 .arginfo = QCOM_SCM_ARGS(2), 1622 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL, 1623 .args[1] = en, 1624 .owner = ARM_SMCCC_OWNER_SIP, 1625 }; 1626 1627 1628 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 1629 } 1630 EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle); 1631 1632 bool qcom_scm_lmh_dcvsh_available(void) 1633 { 1634 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); 1635 } 1636 
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

/*
 * This is only supposed to be called once by the TZMem module. It takes the
 * SCM struct device as argument and uses it to pass the call as at the time
 * the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't
 * accept global user calls. Don't try to use the __scm pointer here.
 */
int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(scm_dev, &desc, &res);

	if (ret)
		return ret;

	/* Firmware may accept the call yet still report SHM Bridge as unsupported. */
	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	/* result[1] carries the new bridge handle on success. */
	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

int qcom_scm_shm_bridge_delete(u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	/* Payload layout expected by firmware: fn, 0, reg, 1, val. */
	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

/* Resolve the TCSR download-mode register address from the DT, if present. */
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;	/* no download-mode register described: not an error */

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

/* Raw QSEECOM SCM call; caller must hold qcom_scm_qseecom_call_lock. */
static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res: SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
1843 */ 1844 static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc, 1845 struct qcom_scm_qseecom_resp *res) 1846 { 1847 int status; 1848 1849 /* 1850 * Note: Multiple QSEECOM SCM calls should not be executed same time, 1851 * so lock things here. This needs to be extended to callback/listener 1852 * handling when support for that is implemented. 1853 */ 1854 1855 mutex_lock(&qcom_scm_qseecom_call_lock); 1856 status = __qcom_scm_qseecom_call(desc, res); 1857 mutex_unlock(&qcom_scm_qseecom_call_lock); 1858 1859 dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n", 1860 __func__, desc->owner, desc->svc, desc->cmd, res->result, 1861 res->resp_type, res->data); 1862 1863 if (status) { 1864 dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status); 1865 return status; 1866 } 1867 1868 /* 1869 * TODO: Handle incomplete and blocked calls: 1870 * 1871 * Incomplete and blocked calls are not supported yet. Some devices 1872 * and/or commands require those, some don't. Let's warn about them 1873 * prominently in case someone attempts to try these commands with a 1874 * device/command combination that isn't supported yet. 1875 */ 1876 WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE); 1877 WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER); 1878 1879 return 0; 1880 } 1881 1882 /** 1883 * qcom_scm_qseecom_get_version() - Query the QSEECOM version. 1884 * @version: Pointer where the QSEECOM version will be stored. 1885 * 1886 * Performs the QSEECOM SCM querying the QSEECOM version currently running in 1887 * the TrustZone. 1888 * 1889 * Return: Zero on success, nonzero on failure. 
1890 */ 1891 static int qcom_scm_qseecom_get_version(u32 *version) 1892 { 1893 struct qcom_scm_desc desc = {}; 1894 struct qcom_scm_qseecom_resp res = {}; 1895 u32 feature = 10; 1896 int ret; 1897 1898 desc.owner = QSEECOM_TZ_OWNER_SIP; 1899 desc.svc = QSEECOM_TZ_SVC_INFO; 1900 desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION; 1901 desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL); 1902 desc.args[0] = feature; 1903 1904 ret = qcom_scm_qseecom_call(&desc, &res); 1905 if (ret) 1906 return ret; 1907 1908 *version = res.result; 1909 return 0; 1910 } 1911 1912 /** 1913 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name. 1914 * @app_name: The name of the app. 1915 * @app_id: The returned app ID. 1916 * 1917 * Query and return the application ID of the SEE app identified by the given 1918 * name. This returned ID is the unique identifier of the app required for 1919 * subsequent communication. 1920 * 1921 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been 1922 * loaded or could not be found. 
1923 */ 1924 int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) 1925 { 1926 unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE; 1927 unsigned long app_name_len = strlen(app_name); 1928 struct qcom_scm_desc desc = {}; 1929 struct qcom_scm_qseecom_resp res = {}; 1930 int status; 1931 1932 if (app_name_len >= name_buf_size) 1933 return -EINVAL; 1934 1935 char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1936 name_buf_size, 1937 GFP_KERNEL); 1938 if (!name_buf) 1939 return -ENOMEM; 1940 1941 memcpy(name_buf, app_name, app_name_len); 1942 1943 desc.owner = QSEECOM_TZ_OWNER_QSEE_OS; 1944 desc.svc = QSEECOM_TZ_SVC_APP_MGR; 1945 desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP; 1946 desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL); 1947 desc.args[0] = qcom_tzmem_to_phys(name_buf); 1948 desc.args[1] = app_name_len; 1949 1950 status = qcom_scm_qseecom_call(&desc, &res); 1951 1952 if (status) 1953 return status; 1954 1955 if (res.result == QSEECOM_RESULT_FAILURE) 1956 return -ENOENT; 1957 1958 if (res.result != QSEECOM_RESULT_SUCCESS) 1959 return -EINVAL; 1960 1961 if (res.resp_type != QSEECOM_SCM_RES_APP_ID) 1962 return -EINVAL; 1963 1964 *app_id = res.data; 1965 return 0; 1966 } 1967 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); 1968 1969 /** 1970 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. 1971 * @app_id: The ID of the target app. 1972 * @req: Request buffer sent to the app (must be TZ memory) 1973 * @req_size: Size of the request buffer. 1974 * @rsp: Response buffer, written to by the app (must be TZ memory) 1975 * @rsp_size: Size of the response buffer. 1976 * 1977 * Sends a request to the QSEE app associated with the given ID and read back 1978 * its response. The caller must provide two DMA memory regions, one for the 1979 * request and one for the response, and fill out the @req region with the 1980 * respective (app-specific) request data. 
The QSEE app reads this and returns 1981 * its response in the @rsp region. 1982 * 1983 * Return: Zero on success, nonzero on failure. 1984 */ 1985 int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, 1986 void *rsp, size_t rsp_size) 1987 { 1988 struct qcom_scm_qseecom_resp res = {}; 1989 struct qcom_scm_desc desc = {}; 1990 phys_addr_t req_phys; 1991 phys_addr_t rsp_phys; 1992 int status; 1993 1994 req_phys = qcom_tzmem_to_phys(req); 1995 rsp_phys = qcom_tzmem_to_phys(rsp); 1996 1997 desc.owner = QSEECOM_TZ_OWNER_TZ_APPS; 1998 desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER; 1999 desc.cmd = QSEECOM_TZ_CMD_APP_SEND; 2000 desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, 2001 QCOM_SCM_RW, QCOM_SCM_VAL, 2002 QCOM_SCM_RW, QCOM_SCM_VAL); 2003 desc.args[0] = app_id; 2004 desc.args[1] = req_phys; 2005 desc.args[2] = req_size; 2006 desc.args[3] = rsp_phys; 2007 desc.args[4] = rsp_size; 2008 2009 status = qcom_scm_qseecom_call(&desc, &res); 2010 2011 if (status) 2012 return status; 2013 2014 if (res.result != QSEECOM_RESULT_SUCCESS) 2015 return -EIO; 2016 2017 return 0; 2018 } 2019 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); 2020 2021 /* 2022 * We do not yet support re-entrant calls via the qseecom interface. To prevent 2023 + any potential issues with this, only allow validated machines for now. 
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "asus,vivobook-s15" },
	{ .compatible = "asus,zenbook-a14-ux3407qa" },
	{ .compatible = "asus,zenbook-a14-ux3407ra" },
	{ .compatible = "dell,inspiron-14-plus-7441" },
	{ .compatible = "dell,latitude-7455" },
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "hp,elitebook-ultra-g1q" },
	{ .compatible = "hp,omnibook-x14" },
	{ .compatible = "huawei,gaokun3" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkbook-16" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "microsoft,arcata", },
	{ .compatible = "microsoft,blackrock" },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,hamoa-iot-evk" },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e001de-devkit" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ .compatible = "qcom,x1p42100-crd" },
	{ }
};

/* devm action: tear down the qseecom child device added in qcom_scm_qseecom_init(). */
static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;	/* interface absent: not a probe failure */

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

/* QSEECOM disabled in Kconfig: succeed without doing anything. */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
 * @inbuf: start address of memory area used for inbound buffer.
 * @inbuf_size: size of the memory area used for inbound buffer.
 * @outbuf: start address of memory area used for outbound buffer.
 * @outbuf_size: size of the memory area used for outbound buffer.
 * @result: result of QTEE object invocation.
 * @response_type: response type returned by QTEE.
 *
 * @response_type determines how the contents of @inbuf and @outbuf
 * should be processed.
 *
 * Return: On success, return 0 or <0 on failure.
2130 */ 2131 int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size, 2132 phys_addr_t outbuf, size_t outbuf_size, 2133 u64 *result, u64 *response_type) 2134 { 2135 struct qcom_scm_desc desc = { 2136 .svc = QCOM_SCM_SVC_SMCINVOKE, 2137 .cmd = QCOM_SCM_SMCINVOKE_INVOKE, 2138 .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2139 .args[0] = inbuf, 2140 .args[1] = inbuf_size, 2141 .args[2] = outbuf, 2142 .args[3] = outbuf_size, 2143 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL, 2144 QCOM_SCM_RW, QCOM_SCM_VAL), 2145 }; 2146 struct qcom_scm_res res; 2147 int ret; 2148 2149 ret = qcom_scm_call(__scm->dev, &desc, &res); 2150 if (ret) 2151 return ret; 2152 2153 if (response_type) 2154 *response_type = res.result[0]; 2155 2156 if (result) 2157 *result = res.result[1]; 2158 2159 return 0; 2160 } 2161 EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc); 2162 2163 /** 2164 * qcom_scm_qtee_callback_response() - Submit response for callback request. 2165 * @buf: start address of memory area used for outbound buffer. 2166 * @buf_size: size of the memory area used for outbound buffer. 2167 * @result: Result of QTEE object invocation. 2168 * @response_type: Response type returned by QTEE. 2169 * 2170 * @response_type determines how the contents of @buf should be processed. 2171 * 2172 * Return: On success, return 0 or <0 on failure. 
2173 */ 2174 int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size, 2175 u64 *result, u64 *response_type) 2176 { 2177 struct qcom_scm_desc desc = { 2178 .svc = QCOM_SCM_SVC_SMCINVOKE, 2179 .cmd = QCOM_SCM_SMCINVOKE_CB_RSP, 2180 .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2181 .args[0] = buf, 2182 .args[1] = buf_size, 2183 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), 2184 }; 2185 struct qcom_scm_res res; 2186 int ret; 2187 2188 ret = qcom_scm_call(__scm->dev, &desc, &res); 2189 if (ret) 2190 return ret; 2191 2192 if (response_type) 2193 *response_type = res.result[0]; 2194 2195 if (result) 2196 *result = res.result[1]; 2197 2198 return 0; 2199 } 2200 EXPORT_SYMBOL(qcom_scm_qtee_callback_response); 2201 2202 static void qcom_scm_qtee_free(void *data) 2203 { 2204 struct platform_device *qtee_dev = data; 2205 2206 platform_device_unregister(qtee_dev); 2207 } 2208 2209 static void qcom_scm_qtee_init(struct qcom_scm *scm) 2210 { 2211 struct platform_device *qtee_dev; 2212 u64 result, response_type; 2213 int ret; 2214 2215 /* 2216 * Probe for smcinvoke support. This will fail due to invalid buffers, 2217 * but first, it checks whether the call is supported in QTEE syscall 2218 * handler. If it is not supported, -EIO is returned. 2219 */ 2220 ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type); 2221 if (ret == -EIO) 2222 return; 2223 2224 /* Setup QTEE interface device. 
 */
	qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
						 PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(qtee_dev))
		return;

	/* Best-effort: this init is optional, so the return value is ignored. */
	devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 *
 * Return: true once qcom_scm_probe() has published the SCM handle,
 * false before that.
 */
bool qcom_scm_is_available(void)
{
	/* Paired with smp_store_release() in qcom_scm_probe */
	return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);

/* Validate a waitqueue context handed to us by firmware. */
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/* FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Block until the waitqueue completion for @wq_ctx is signalled by
 * qcom_scm_waitq_wakeup() (driven by the SCM waitqueue interrupt).
 */
int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

/* Wake one waiter blocked in qcom_scm_wait_for_wq_completion(). */
static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

/*
 * Threaded IRQ handler for the SCM waitqueue interrupt: drain all
 * pending wake requests reported by firmware via scm_get_wq_ctx().
 */
static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	}
	while (more_pending);

out:
	return IRQ_HANDLED;
}

/* Module-parameter getter: print the current download mode as a string. */
static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}

/*
 * Module-parameter setter: accept either one of the download_mode_name
 * strings or a boolean (0/1, on/off, ...) for backwards compatibility.
 */
static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		/* Not a named mode; fall back to boolean parsing. */
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		/* true maps to FULLDUMP (1), false to NODUMP (0). */
		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	/* Only poke the firmware once the SCM device has probed. */
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values");

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk =
devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	/* All three clocks are optional; absent clocks yield NULL, not errors. */
	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* -ENODEV just means no reserved-memory region was described in DT. */
	ret = of_reserved_mem_device_init(scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(scm->dev);
	if (ret)
		return dev_err_probe(scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	/* On-demand pool: starts empty, grows as needed up to 256K. */
	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
	if (IS_ERR(scm->mempool))
		return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/* The waitqueue interrupt is optional; only -ENXIO (absent) is tolerated. */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret,
					     "Failed to request qcom-scm irq\n");
	}

	/*
	 * Paired with smp_load_acquire() in qcom_scm_is_available().
 *
	 * This marks the SCM API as ready to accept user calls and can only
	 * be called after the TrustZone memory pool is initialized and the
	 * waitqueue interrupt requested.
	 */
	smp_store_release(&__scm, scm);

	/* Probe and cache the SMC calling convention (see qcom_scm_convention_names). */
	__get_convention();

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	/* Initialize the QTEE object interface.
 */
	qcom_scm_qtee_init(scm);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name = "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		/* SCM must not be unbound at runtime; consumers rely on it. */
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

/* Registered at subsys_initcall time so dependent drivers can probe against SCM early. */
static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");