// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

/* Requested download (crash dump) mode; consumed by qcom_scm_set_download_mode() */
static u32 download_mode;

#define GIC_SPI_BASE 32
#define GIC_MAX_SPI 1019 // SPIs in GICv3 spec range from 32..1019
#define GIC_ESPI_BASE 4096
#define GIC_MAX_ESPI 5119 // ESPIs in GICv3 spec range from 4096..5119

/* Driver state for the single SCM device; published globally via __scm */
struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

/* Per-VM destination entry passed to the memory-assign firmware call */
struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

/* Physical region descriptor passed to the memory-assign firmware call */
struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

/* Status codes reported in qcom_scm_qseecom_resp::result */
enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS = 0,
	QSEECOM_RESULT_INCOMPLETE = 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
	QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
};

/* Values reported in qcom_scm_qseecom_resp::resp_type */
enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID = 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
};

/* SMCCC owner IDs used for QSEECOM calls */
enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP = 2,
	QSEECOM_TZ_OWNER_TZ_APPS = 48,
	QSEECOM_TZ_OWNER_QSEE_OS = 50
};

/* QSEECOM service IDs */
enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
	QSEECOM_TZ_SVC_APP_MGR = 1,
	QSEECOM_TZ_SVC_INFO = 6,
};

/* Commands within QSEECOM_TZ_SVC_APP_MGR */
enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND = 1,
	QSEECOM_TZ_CMD_APP_LOOKUP = 3,
};

/* Commands within QSEECOM_TZ_SVC_INFO */
enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION = 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE 64
#define SHMBRIDGE_RESULT_NOTSUPP 4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)

/* Download-mode bitfield in the dload register and its encodings */
#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

/* Human-readable names for the probed calling convention, see __get_convention() */
static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

/* Names for the QCOM_DLOAD_* modes, used when parsing/printing download_mode */
static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP] = "off",
	[QCOM_DLOAD_FULLDUMP] = "full",
	[QCOM_DLOAD_MINIDUMP] = "mini",
	[QCOM_DLOAD_BOTHDUMP] = "full,mini",
};

/* The single SCM device instance; set at probe time, read by all exported helpers */
static struct qcom_scm *__scm;

/*
 * Enable the core, iface and bus clocks (in that order) around an SCM call.
 * On failure, clocks enabled so far are rolled back. Paired with
 * qcom_scm_clk_disable().
 */
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

/* Disable the clocks enabled by qcom_scm_clk_enable() */
static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

/*
 * Take a reference on the interconnect bandwidth vote. The first caller
 * raises the bandwidth request; later callers only bump the refcount.
 * No-op when no interconnect path was provided.
 */
static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

/*
 * Drop a reference on the interconnect bandwidth vote; the last reference
 * drops the bandwidth request to zero.
 *
 * NOTE(review): the count is decremented unconditionally, so an unbalanced
 * call would drive it negative — callers are presumably always paired with
 * a successful qcom_scm_bw_enable(); confirm before relying on this.
 */
static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

/* Cached calling convention; probed once by __get_convention() */
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

/* Return the SCM TZ memory pool, or NULL until the SCM device has probed */
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}

static enum qcom_scm_convention
__get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	/* Fast path: convention already probed and cached */
	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * system will encounter the undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/* Neither SMC convention answered the probe; assume legacy firmware */
	probed_convention = SMC_CONVENTION_LEGACY;
found:
	/* Publish the result under the lock; log only on first discovery */
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		/* true => atomic variant: no sleeping in the SMC wrapper */
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/*
 * Ask the firmware whether the (svc_id, cmd_id) call is implemented.
 * The function-ID encoding of the query argument depends on the probed
 * calling convention. Returns false on query failure or unknown convention.
 */
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

/*
 * Legacy per-CPU boot address setter: build the cold/warm flag mask from
 * the per-CPU bit tables and pass the entry point's physical address.
 * Fails if any present CPU index exceeds QCOM_SCM_BOOT_MAX_CPUS.
 */
static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	/* May run before probe: tolerate a NULL __scm here */
	return qcom_scm_call_atomic(__scm ?
				    __scm->dev : NULL, &desc, NULL);
}

/*
 * Multi-cluster boot address setter (newer firmware interface); applies
 * @entry to all CPUs at every affinity level. Requires a probed device
 * and a non-legacy convention because the extra arguments are passed
 * through DMA.
 */
static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu.
 * If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	/* May run before probe: tolerate a NULL __scm here */
	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

/*
 * Set a remote processor's state. Returns a negative errno on transport
 * failure, otherwise the firmware's own result code.
 */
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

/* Disable the watchdog-debug/SDI feature in firmware */
static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

/* Toggle download (crash dump) mode via the dedicated SCM call */
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ?
		       QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

/*
 * Read-modify-write a register through the secure IO interface:
 * new = (old & ~mask) | (val & mask).
 */
static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

/*
 * Configure download (crash dump) mode, preferring a direct register
 * write when the dload address is known, falling back to the SCM call
 * when available. Logs an error if neither mechanism exists and a
 * non-zero mode was requested.
 */
static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
584 */ 585 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, 586 struct qcom_scm_pas_metadata *ctx) 587 { 588 dma_addr_t mdata_phys; 589 void *mdata_buf; 590 int ret; 591 struct qcom_scm_desc desc = { 592 .svc = QCOM_SCM_SVC_PIL, 593 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, 594 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), 595 .args[0] = peripheral, 596 .owner = ARM_SMCCC_OWNER_SIP, 597 }; 598 struct qcom_scm_res res; 599 600 /* 601 * During the scm call memory protection will be enabled for the meta 602 * data blob, so make sure it's physically contiguous, 4K aligned and 603 * non-cachable to avoid XPU violations. 604 * 605 * For PIL calls the hypervisor creates SHM Bridges for the blob 606 * buffers on behalf of Linux so we must not do it ourselves hence 607 * not using the TZMem allocator here. 608 * 609 * If we pass a buffer that is already part of an SHM Bridge to this 610 * call, it will fail. 611 */ 612 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, 613 GFP_KERNEL); 614 if (!mdata_buf) 615 return -ENOMEM; 616 617 memcpy(mdata_buf, metadata, size); 618 619 ret = qcom_scm_clk_enable(); 620 if (ret) 621 goto out; 622 623 ret = qcom_scm_bw_enable(); 624 if (ret) 625 goto disable_clk; 626 627 desc.args[1] = mdata_phys; 628 629 ret = qcom_scm_call(__scm->dev, &desc, &res); 630 qcom_scm_bw_disable(); 631 632 disable_clk: 633 qcom_scm_clk_disable(); 634 635 out: 636 if (ret < 0 || !ctx) { 637 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); 638 } else if (ctx) { 639 ctx->ptr = mdata_buf; 640 ctx->phys = mdata_phys; 641 ctx->size = size; 642 } 643 644 return ret ? 
: res.result[0]; 645 } 646 EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image); 647 648 /** 649 * qcom_scm_pas_metadata_release() - release metadata context 650 * @ctx: metadata context 651 */ 652 void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx) 653 { 654 if (!ctx->ptr) 655 return; 656 657 dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); 658 659 ctx->ptr = NULL; 660 ctx->phys = 0; 661 ctx->size = 0; 662 } 663 EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release); 664 665 /** 666 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral 667 * for firmware loading 668 * @peripheral: peripheral id 669 * @addr: start address of memory area to prepare 670 * @size: size of the memory area to prepare 671 * 672 * Returns 0 on success. 673 */ 674 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) 675 { 676 int ret; 677 struct qcom_scm_desc desc = { 678 .svc = QCOM_SCM_SVC_PIL, 679 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP, 680 .arginfo = QCOM_SCM_ARGS(3), 681 .args[0] = peripheral, 682 .args[1] = addr, 683 .args[2] = size, 684 .owner = ARM_SMCCC_OWNER_SIP, 685 }; 686 struct qcom_scm_res res; 687 688 ret = qcom_scm_clk_enable(); 689 if (ret) 690 return ret; 691 692 ret = qcom_scm_bw_enable(); 693 if (ret) 694 goto disable_clk; 695 696 ret = qcom_scm_call(__scm->dev, &desc, &res); 697 qcom_scm_bw_disable(); 698 699 disable_clk: 700 qcom_scm_clk_disable(); 701 702 return ret ? : res.result[0]; 703 } 704 EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup); 705 706 /** 707 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware 708 * and reset the remote processor 709 * @peripheral: peripheral id 710 * 711 * Return 0 on success. 
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripherial
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/* Some firmwares lack the IS_SUPPORTED query entirely */
	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

/*
 * Assert/deassert the MSS reset through firmware.
 * NOTE(review): @dev is accepted but the call uses __scm->dev — presumably
 * intentional since both callers pass __scm->dev; confirm before changing.
 */
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ?
	       : res.result[0];
}

/* reset_controller_dev .assert hook; only index 0 (MSS) is valid */
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

/* reset_controller_dev .deassert hook; only index 0 (MSS) is valid */
static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

/*
 * Read a register through the secure IO service. On success, *val receives
 * the value read; returns 0 on success, negative errno on failure.
 */
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

/* Write a register through the secure IO service */
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
890 */ 891 bool qcom_scm_restore_sec_cfg_available(void) 892 { 893 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 894 QCOM_SCM_MP_RESTORE_SEC_CFG); 895 } 896 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available); 897 898 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) 899 { 900 struct qcom_scm_desc desc = { 901 .svc = QCOM_SCM_SVC_MP, 902 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, 903 .arginfo = QCOM_SCM_ARGS(2), 904 .args[0] = device_id, 905 .args[1] = spare, 906 .owner = ARM_SMCCC_OWNER_SIP, 907 }; 908 struct qcom_scm_res res; 909 int ret; 910 911 ret = qcom_scm_call(__scm->dev, &desc, &res); 912 913 return ret ? : res.result[0]; 914 } 915 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); 916 917 #define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0) 918 919 bool qcom_scm_set_gpu_smmu_aperture_is_available(void) 920 { 921 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 922 QCOM_SCM_MP_CP_SMMU_APERTURE_ID); 923 } 924 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available); 925 926 int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank) 927 { 928 struct qcom_scm_desc desc = { 929 .svc = QCOM_SCM_SVC_MP, 930 .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID, 931 .arginfo = QCOM_SCM_ARGS(4), 932 .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank), 933 .args[1] = 0xffffffff, 934 .args[2] = 0xffffffff, 935 .args[3] = 0xffffffff, 936 .owner = ARM_SMCCC_OWNER_SIP 937 }; 938 939 return qcom_scm_call(__scm->dev, &desc, NULL); 940 } 941 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture); 942 943 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) 944 { 945 struct qcom_scm_desc desc = { 946 .svc = QCOM_SCM_SVC_MP, 947 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE, 948 .arginfo = QCOM_SCM_ARGS(1), 949 .args[0] = spare, 950 .owner = ARM_SMCCC_OWNER_SIP, 951 }; 952 struct qcom_scm_res res; 953 int ret; 954 955 ret = qcom_scm_call(__scm->dev, &desc, &res); 956 957 if (size) 958 *size = res.result[0]; 959 960 
return ret ? : res.result[1]; 961 } 962 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size); 963 964 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) 965 { 966 struct qcom_scm_desc desc = { 967 .svc = QCOM_SCM_SVC_MP, 968 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT, 969 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, 970 QCOM_SCM_VAL), 971 .args[0] = addr, 972 .args[1] = size, 973 .args[2] = spare, 974 .owner = ARM_SMCCC_OWNER_SIP, 975 }; 976 int ret; 977 978 ret = qcom_scm_call(__scm->dev, &desc, NULL); 979 980 /* the pg table has been initialized already, ignore the error */ 981 if (ret == -EPERM) 982 ret = 0; 983 984 return ret; 985 } 986 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init); 987 988 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) 989 { 990 struct qcom_scm_desc desc = { 991 .svc = QCOM_SCM_SVC_MP, 992 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, 993 .arginfo = QCOM_SCM_ARGS(2), 994 .args[0] = size, 995 .args[1] = spare, 996 .owner = ARM_SMCCC_OWNER_SIP, 997 }; 998 999 return qcom_scm_call(__scm->dev, &desc, NULL); 1000 } 1001 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size); 1002 1003 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, 1004 u32 cp_nonpixel_start, 1005 u32 cp_nonpixel_size) 1006 { 1007 int ret; 1008 struct qcom_scm_desc desc = { 1009 .svc = QCOM_SCM_SVC_MP, 1010 .cmd = QCOM_SCM_MP_VIDEO_VAR, 1011 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, 1012 QCOM_SCM_VAL, QCOM_SCM_VAL), 1013 .args[0] = cp_start, 1014 .args[1] = cp_size, 1015 .args[2] = cp_nonpixel_start, 1016 .args[3] = cp_nonpixel_size, 1017 .owner = ARM_SMCCC_OWNER_SIP, 1018 }; 1019 struct qcom_scm_res res; 1020 1021 ret = qcom_scm_call(__scm->dev, &desc, &res); 1022 1023 return ret ? 
	       : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

/*
 * Low-level memory-assign call: all three buffers (memory map, source VM
 * list, destination permission list) are passed by physical address.
 * Returns a negative errno on transport failure, otherwise the firmware's
 * own result code.
 */
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership need to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for current set of owners, each set bit in
 *            flag indicate a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	/* One TZ-shared buffer holds all three tables, each 64-byte aligned */
	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
		 ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return ret;
	}

	/* Report the new owner set back to the caller */
	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 * qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	/* The key must be staged in TZ-shared memory for the firmware. */
	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* Wipe the key material before the buffer is released. */
	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);

/*
 * qcom_scm_has_wrapped_key_support() - true iff all four wrapped-key SCM
 * calls (derive/generate/prepare/import) are implemented by the firmware.
 */
bool qcom_scm_has_wrapped_key_support(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_DERIVE_SW_SECRET) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_GENERATE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_PREPARE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_IMPORT_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);

/**
 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
 * @eph_key: an ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes
 * @sw_secret: output buffer for the software secret
 * @sw_secret_size: size of the software secret to derive in bytes
 *
 * Derive a software secret from an ephemerally-wrapped key for software crypto
 * operations. This is done by calling into the secure execution environment,
 * which then calls into the hardware to unwrap and derive the secret.
 *
 * For more information on sw_secret, see the "Hardware-wrapped keys" section of
 * Documentation/block/inline-encryption.rst.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
			      u8 *sw_secret, size_t sw_secret_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	/* Both in- and out-buffers must live in TZ-shared memory. */
	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								  sw_secret_size,
								  GFP_KERNEL);
	if (!sw_secret_buf)
		return -ENOMEM;

	memcpy(eph_key_buf, eph_key, eph_key_size);
	desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[1] = eph_key_size;
	desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
	desc.args[3] = sw_secret_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(sw_secret, sw_secret_buf, sw_secret_size);

	/* Wipe secrets from the shared buffers before releasing them. */
	memzero_explicit(eph_key_buf, eph_key_size);
	memzero_explicit(sw_secret_buf, sw_secret_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);

/**
 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Generate a key using the built-in HW module in the SoC. The resulting key is
 * returned wrapped with the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);

/**
 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
 * @lt_key: a long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes
 * @eph_key: output buffer for the ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
 *		  used by the SoC.
 *
 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
 * added protection. The resulting key will only be valid for the current boot.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
			     u8 *eph_key, size_t eph_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	/* Stage input and output key buffers in TZ-shared memory. */
	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	memcpy(lt_key_buf, lt_key, lt_key_size);
	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;
	desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[3] = eph_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(eph_key, eph_key_buf, eph_key_size);

	/* Wipe key material from the shared buffers before releasing them. */
	memzero_explicit(lt_key_buf, lt_key_size);
	memzero_explicit(eph_key_buf, eph_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);

/**
 * qcom_scm_import_ice_key() - Import key for storage encryption
 * @raw_key: the raw key to import
 * @raw_key_size: size of @raw_key in bytes
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
 * wrap the raw key using the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
1465 */ 1466 int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size, 1467 u8 *lt_key, size_t lt_key_size) 1468 { 1469 struct qcom_scm_desc desc = { 1470 .svc = QCOM_SCM_SVC_ES, 1471 .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY, 1472 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, 1473 QCOM_SCM_RW, QCOM_SCM_VAL), 1474 .owner = ARM_SMCCC_OWNER_SIP, 1475 }; 1476 int ret; 1477 1478 void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1479 raw_key_size, 1480 GFP_KERNEL); 1481 if (!raw_key_buf) 1482 return -ENOMEM; 1483 1484 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1485 lt_key_size, 1486 GFP_KERNEL); 1487 if (!lt_key_buf) 1488 return -ENOMEM; 1489 1490 memcpy(raw_key_buf, raw_key, raw_key_size); 1491 desc.args[0] = qcom_tzmem_to_phys(raw_key_buf); 1492 desc.args[1] = raw_key_size; 1493 desc.args[2] = qcom_tzmem_to_phys(lt_key_buf); 1494 desc.args[3] = lt_key_size; 1495 1496 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1497 if (!ret) 1498 memcpy(lt_key, lt_key_buf, lt_key_size); 1499 1500 memzero_explicit(raw_key_buf, raw_key_size); 1501 memzero_explicit(lt_key_buf, lt_key_size); 1502 return ret; 1503 } 1504 EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key); 1505 1506 /** 1507 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 1508 * 1509 * Return true if HDCP is supported, false if not. 1510 */ 1511 bool qcom_scm_hdcp_available(void) 1512 { 1513 bool avail; 1514 int ret = qcom_scm_clk_enable(); 1515 1516 if (ret) 1517 return ret; 1518 1519 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, 1520 QCOM_SCM_HDCP_INVOKE); 1521 1522 qcom_scm_clk_disable(); 1523 1524 return avail; 1525 } 1526 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available); 1527 1528 /** 1529 * qcom_scm_hdcp_req() - Send HDCP request. 1530 * @req: HDCP request array 1531 * @req_cnt: HDCP request array count 1532 * @resp: response buffer passed to SCM 1533 * 1534 * Write HDCP register(s) through SCM. 
1535 */ 1536 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) 1537 { 1538 int ret; 1539 struct qcom_scm_desc desc = { 1540 .svc = QCOM_SCM_SVC_HDCP, 1541 .cmd = QCOM_SCM_HDCP_INVOKE, 1542 .arginfo = QCOM_SCM_ARGS(10), 1543 .args = { 1544 req[0].addr, 1545 req[0].val, 1546 req[1].addr, 1547 req[1].val, 1548 req[2].addr, 1549 req[2].val, 1550 req[3].addr, 1551 req[3].val, 1552 req[4].addr, 1553 req[4].val 1554 }, 1555 .owner = ARM_SMCCC_OWNER_SIP, 1556 }; 1557 struct qcom_scm_res res; 1558 1559 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) 1560 return -ERANGE; 1561 1562 ret = qcom_scm_clk_enable(); 1563 if (ret) 1564 return ret; 1565 1566 ret = qcom_scm_call(__scm->dev, &desc, &res); 1567 *resp = res.result[0]; 1568 1569 qcom_scm_clk_disable(); 1570 1571 return ret; 1572 } 1573 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req); 1574 1575 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) 1576 { 1577 struct qcom_scm_desc desc = { 1578 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1579 .cmd = QCOM_SCM_SMMU_PT_FORMAT, 1580 .arginfo = QCOM_SCM_ARGS(3), 1581 .args[0] = sec_id, 1582 .args[1] = ctx_num, 1583 .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ 1584 .owner = ARM_SMCCC_OWNER_SIP, 1585 }; 1586 1587 return qcom_scm_call(__scm->dev, &desc, NULL); 1588 } 1589 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format); 1590 1591 int qcom_scm_qsmmu500_wait_safe_toggle(bool en) 1592 { 1593 struct qcom_scm_desc desc = { 1594 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1595 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1, 1596 .arginfo = QCOM_SCM_ARGS(2), 1597 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL, 1598 .args[1] = en, 1599 .owner = ARM_SMCCC_OWNER_SIP, 1600 }; 1601 1602 1603 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 1604 } 1605 EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle); 1606 1607 bool qcom_scm_lmh_dcvsh_available(void) 1608 { 1609 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); 1610 } 1611 
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

/*
 * This is only supposed to be called once by the TZMem module. It takes the
 * SCM struct device as argument and uses it to pass the call as at the time
 * the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't
 * accept global user calls. Don't try to use the __scm pointer here.
 */
int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(scm_dev, &desc, &res);

	if (ret)
		return ret;

	/* The call itself can succeed while the firmware reports no support. */
	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

/*
 * Create an SHM bridge over a memory region. On success the firmware-assigned
 * bridge handle is returned through @handle (when non-NULL).
 */
int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	/* Transport error takes precedence over the firmware status word. */
	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

/* Tear down an SHM bridge previously created by qcom_scm_shm_bridge_create(). */
int qcom_scm_shm_bridge_delete(u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);

/* Switch the Limits Management Hardware to the given thermal profile. */
int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

/*
 * Issue an LMH DCVSH limit call. The 5-word payload (function id, 0,
 * register, 1, value) is staged in TZ-shared memory and passed by address.
 */
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

/* Ask the firmware to (re)program the protected GPU registers. */
int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

/*
 * Resolve the download-mode register address from the "qcom,dload-mode"
 * phandle+offset property. Returns 0 (leaving *addr untouched) when the
 * property is absent, which is a valid configuration.
 */
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

/* Issue a single QSEECOM SCM call; caller must hold the QSEECOM call lock. */
static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res: SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @rsp.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed same time,
	 * so lock things here. This needs to be extended to callback/listener
	 * handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM querying the QSEECOM version currently running in
 * the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;	/* feature id for the version query */
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id: The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. This returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	/* The app name must be passed to the firmware via TZ-shared memory. */
	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id: The ID of the target app.
 * @req: Request buffer sent to the app (must be TZ memory)
 * @req_size: Size of the request buffer.
 * @rsp: Response buffer, written to by the app (must be TZ memory)
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and read back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);

/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "asus,vivobook-s15" },
	{ .compatible = "asus,zenbook-a14-ux3407qa" },
	{ .compatible = "asus,zenbook-a14-ux3407ra" },
	{ .compatible = "dell,inspiron-14-plus-7441" },
	{ .compatible = "dell,latitude-7455" },
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "hp,elitebook-ultra-g1q" },
	{ .compatible = "hp,omnibook-x14" },
	{ .compatible = "huawei,gaokun3" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkbook-16" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "microsoft,arcata", },
	{ .compatible = "microsoft,blackrock" },
	{ .compatible = "microsoft,denali", },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,hamoa-iot-evk" },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e001de-devkit" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ .compatible = "qcom,x1p42100-crd" },
	{ }
};

/* devm action: unregister the qseecom child device on SCM teardown. */
static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;	/* interface absent: not an error */

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

/* QSEECOM disabled in Kconfig: init is a successful no-op. */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
 * @inbuf: start address of memory area used for inbound buffer.
 * @inbuf_size: size of the memory area used for inbound buffer.
 * @outbuf: start address of memory area used for outbound buffer.
 * @outbuf_size: size of the memory area used for outbound buffer.
 * @result: result of QTEE object invocation.
 * @response_type: response type returned by QTEE.
 *
 * @response_type determines how the contents of @inbuf and @outbuf
 * should be processed.
 *
 * Return: On success, return 0 or <0 on failure.
 */
int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
			     phys_addr_t outbuf, size_t outbuf_size,
			     u64 *result, u64 *response_type)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMCINVOKE,
		.cmd = QCOM_SCM_SMCINVOKE_INVOKE,
		.owner = ARM_SMCCC_OWNER_TRUSTED_OS,
		.args[0] = inbuf,
		.args[1] = inbuf_size,
		.args[2] = outbuf,
		.args[3] = outbuf_size,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	if (ret)
		return ret;

	if (response_type)
		*response_type = res.result[0];

	if (result)
		*result = res.result[1];

	return 0;
}
/*
 * NOTE(review): plain EXPORT_SYMBOL while the rest of this file uses
 * EXPORT_SYMBOL_GPL — confirm whether this is intentional.
 */
EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc);

/**
 * qcom_scm_qtee_callback_response() - Submit response for callback request.
 * @buf: start address of memory area used for outbound buffer.
 * @buf_size: size of the memory area used for outbound buffer.
 * @result: Result of QTEE object invocation.
 * @response_type: Response type returned by QTEE.
 *
 * @response_type determines how the contents of @buf should be processed.
 *
 * Return: On success, return 0 or <0 on failure.
2149 */ 2150 int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size, 2151 u64 *result, u64 *response_type) 2152 { 2153 struct qcom_scm_desc desc = { 2154 .svc = QCOM_SCM_SVC_SMCINVOKE, 2155 .cmd = QCOM_SCM_SMCINVOKE_CB_RSP, 2156 .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2157 .args[0] = buf, 2158 .args[1] = buf_size, 2159 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), 2160 }; 2161 struct qcom_scm_res res; 2162 int ret; 2163 2164 ret = qcom_scm_call(__scm->dev, &desc, &res); 2165 if (ret) 2166 return ret; 2167 2168 if (response_type) 2169 *response_type = res.result[0]; 2170 2171 if (result) 2172 *result = res.result[1]; 2173 2174 return 0; 2175 } 2176 EXPORT_SYMBOL(qcom_scm_qtee_callback_response); 2177 2178 static void qcom_scm_qtee_free(void *data) 2179 { 2180 struct platform_device *qtee_dev = data; 2181 2182 platform_device_unregister(qtee_dev); 2183 } 2184 2185 static void qcom_scm_qtee_init(struct qcom_scm *scm) 2186 { 2187 struct platform_device *qtee_dev; 2188 u64 result, response_type; 2189 int ret; 2190 2191 /* 2192 * Probe for smcinvoke support. This will fail due to invalid buffers, 2193 * but first, it checks whether the call is supported in QTEE syscall 2194 * handler. If it is not supported, -EIO is returned. 2195 */ 2196 ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type); 2197 if (ret == -EIO) 2198 return; 2199 2200 /* Setup QTEE interface device. 
*/ 2201 qtee_dev = platform_device_register_data(scm->dev, "qcomtee", 2202 PLATFORM_DEVID_NONE, NULL, 0); 2203 if (IS_ERR(qtee_dev)) 2204 return; 2205 2206 devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev); 2207 } 2208 2209 /** 2210 * qcom_scm_is_available() - Checks if SCM is available 2211 */ 2212 bool qcom_scm_is_available(void) 2213 { 2214 /* Paired with smp_store_release() in qcom_scm_probe */ 2215 return !!smp_load_acquire(&__scm); 2216 } 2217 EXPORT_SYMBOL_GPL(qcom_scm_is_available); 2218 2219 static int qcom_scm_fill_irq_fwspec_params(struct irq_fwspec *fwspec, u32 hwirq) 2220 { 2221 if (hwirq >= GIC_SPI_BASE && hwirq <= GIC_MAX_SPI) { 2222 fwspec->param[0] = GIC_SPI; 2223 fwspec->param[1] = hwirq - GIC_SPI_BASE; 2224 } else if (hwirq >= GIC_ESPI_BASE && hwirq <= GIC_MAX_ESPI) { 2225 fwspec->param[0] = GIC_ESPI; 2226 fwspec->param[1] = hwirq - GIC_ESPI_BASE; 2227 } else { 2228 WARN(1, "Unexpected hwirq: %d\n", hwirq); 2229 return -ENXIO; 2230 } 2231 2232 fwspec->param[2] = IRQ_TYPE_EDGE_RISING; 2233 fwspec->param_count = 3; 2234 2235 return 0; 2236 } 2237 2238 static int qcom_scm_get_waitq_irq(struct qcom_scm *scm) 2239 { 2240 struct qcom_scm_desc desc = { 2241 .svc = QCOM_SCM_SVC_WAITQ, 2242 .cmd = QCOM_SCM_WAITQ_GET_INFO, 2243 .owner = ARM_SMCCC_OWNER_SIP 2244 }; 2245 struct device_node *parent_irq_node; 2246 struct irq_fwspec fwspec; 2247 struct qcom_scm_res res; 2248 u32 hwirq; 2249 int ret; 2250 2251 ret = qcom_scm_call_atomic(scm->dev, &desc, &res); 2252 if (ret) 2253 return ret; 2254 2255 hwirq = res.result[1] & GENMASK(15, 0); 2256 ret = qcom_scm_fill_irq_fwspec_params(&fwspec, hwirq); 2257 if (ret) 2258 return ret; 2259 2260 parent_irq_node = of_irq_find_parent(scm->dev->of_node); 2261 if (!parent_irq_node) 2262 return -ENODEV; 2263 2264 fwspec.fwnode = of_fwnode_handle(parent_irq_node); 2265 2266 return irq_create_fwspec_mapping(&fwspec); 2267 } 2268 2269 static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx) 2270 { 2271 /* FW 
currently only supports a single wq_ctx (zero). 2272 * TODO: Update this logic to include dynamic allocation and lookup of 2273 * completion structs when FW supports more wq_ctx values. 2274 */ 2275 if (wq_ctx != 0) { 2276 dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n"); 2277 return -EINVAL; 2278 } 2279 2280 return 0; 2281 } 2282 2283 int qcom_scm_wait_for_wq_completion(u32 wq_ctx) 2284 { 2285 int ret; 2286 2287 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); 2288 if (ret) 2289 return ret; 2290 2291 wait_for_completion(&__scm->waitq_comp); 2292 2293 return 0; 2294 } 2295 2296 static int qcom_scm_waitq_wakeup(unsigned int wq_ctx) 2297 { 2298 int ret; 2299 2300 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); 2301 if (ret) 2302 return ret; 2303 2304 complete(&__scm->waitq_comp); 2305 2306 return 0; 2307 } 2308 2309 static irqreturn_t qcom_scm_irq_handler(int irq, void *data) 2310 { 2311 int ret; 2312 struct qcom_scm *scm = data; 2313 u32 wq_ctx, flags, more_pending = 0; 2314 2315 do { 2316 ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending); 2317 if (ret) { 2318 dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret); 2319 goto out; 2320 } 2321 2322 if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) { 2323 dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags); 2324 goto out; 2325 } 2326 2327 ret = qcom_scm_waitq_wakeup(wq_ctx); 2328 if (ret) 2329 goto out; 2330 } while (more_pending); 2331 2332 out: 2333 return IRQ_HANDLED; 2334 } 2335 2336 static int get_download_mode(char *buffer, const struct kernel_param *kp) 2337 { 2338 if (download_mode >= ARRAY_SIZE(download_mode_name)) 2339 return sysfs_emit(buffer, "unknown mode\n"); 2340 2341 return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]); 2342 } 2343 2344 static int set_download_mode(const char *val, const struct kernel_param *kp) 2345 { 2346 bool tmp; 2347 int ret; 2348 2349 ret = sysfs_match_string(download_mode_name, val); 2350 if (ret < 0) { 2351 ret = kstrtobool(val, &tmp); 
2352 if (ret < 0) { 2353 pr_err("qcom_scm: err: %d\n", ret); 2354 return ret; 2355 } 2356 2357 ret = tmp ? 1 : 0; 2358 } 2359 2360 download_mode = ret; 2361 if (__scm) 2362 qcom_scm_set_download_mode(download_mode); 2363 2364 return 0; 2365 } 2366 2367 static const struct kernel_param_ops download_mode_param_ops = { 2368 .get = get_download_mode, 2369 .set = set_download_mode, 2370 }; 2371 2372 module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644); 2373 MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values"); 2374 2375 static int qcom_scm_probe(struct platform_device *pdev) 2376 { 2377 struct qcom_tzmem_pool_config pool_config; 2378 struct qcom_scm *scm; 2379 int irq, ret; 2380 2381 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); 2382 if (!scm) 2383 return -ENOMEM; 2384 2385 scm->dev = &pdev->dev; 2386 ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr); 2387 if (ret < 0) 2388 return ret; 2389 2390 init_completion(&scm->waitq_comp); 2391 mutex_init(&scm->scm_bw_lock); 2392 2393 scm->path = devm_of_icc_get(&pdev->dev, NULL); 2394 if (IS_ERR(scm->path)) 2395 return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), 2396 "failed to acquire interconnect path\n"); 2397 2398 scm->core_clk = devm_clk_get_optional(&pdev->dev, "core"); 2399 if (IS_ERR(scm->core_clk)) 2400 return PTR_ERR(scm->core_clk); 2401 2402 scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface"); 2403 if (IS_ERR(scm->iface_clk)) 2404 return PTR_ERR(scm->iface_clk); 2405 2406 scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus"); 2407 if (IS_ERR(scm->bus_clk)) 2408 return PTR_ERR(scm->bus_clk); 2409 2410 scm->reset.ops = &qcom_scm_pas_reset_ops; 2411 scm->reset.nr_resets = 1; 2412 scm->reset.of_node = pdev->dev.of_node; 2413 ret = devm_reset_controller_register(&pdev->dev, &scm->reset); 2414 if (ret) 2415 return 
ret; 2416 2417 /* vote for max clk rate for highest performance */ 2418 ret = clk_set_rate(scm->core_clk, INT_MAX); 2419 if (ret) 2420 return ret; 2421 2422 ret = of_reserved_mem_device_init(scm->dev); 2423 if (ret && ret != -ENODEV) 2424 return dev_err_probe(scm->dev, ret, 2425 "Failed to setup the reserved memory region for TZ mem\n"); 2426 2427 ret = qcom_tzmem_enable(scm->dev); 2428 if (ret) 2429 return dev_err_probe(scm->dev, ret, 2430 "Failed to enable the TrustZone memory allocator\n"); 2431 2432 memset(&pool_config, 0, sizeof(pool_config)); 2433 pool_config.initial_size = 0; 2434 pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND; 2435 pool_config.max_size = SZ_256K; 2436 2437 scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config); 2438 if (IS_ERR(scm->mempool)) 2439 return dev_err_probe(scm->dev, PTR_ERR(scm->mempool), 2440 "Failed to create the SCM memory pool\n"); 2441 2442 irq = qcom_scm_get_waitq_irq(scm); 2443 if (irq < 0) 2444 irq = platform_get_irq_optional(pdev, 0); 2445 2446 if (irq < 0) { 2447 if (irq != -ENXIO) 2448 return irq; 2449 } else { 2450 ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler, 2451 IRQF_ONESHOT, "qcom-scm", scm); 2452 if (ret < 0) 2453 return dev_err_probe(scm->dev, ret, 2454 "Failed to request qcom-scm irq\n"); 2455 } 2456 2457 /* 2458 * Paired with smp_load_acquire() in qcom_scm_is_available(). 2459 * 2460 * This marks the SCM API as ready to accept user calls and can only 2461 * be called after the TrustZone memory pool is initialized and the 2462 * waitqueue interrupt requested. 2463 */ 2464 smp_store_release(&__scm, scm); 2465 2466 __get_convention(); 2467 2468 /* 2469 * If "download mode" is requested, from this point on warmboot 2470 * will cause the boot stages to enter download mode, unless 2471 * disabled below by a clean shutdown/reboot. 2472 */ 2473 qcom_scm_set_download_mode(download_mode); 2474 2475 /* 2476 * Disable SDI if indicated by DT that it is enabled by default. 
2477 */ 2478 if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode) 2479 qcom_scm_disable_sdi(); 2480 2481 /* 2482 * Initialize the QSEECOM interface. 2483 * 2484 * Note: QSEECOM is fairly self-contained and this only adds the 2485 * interface device (the driver of which does most of the heavy 2486 * lifting). So any errors returned here should be either -ENOMEM or 2487 * -EINVAL (with the latter only in case there's a bug in our code). 2488 * This means that there is no need to bring down the whole SCM driver. 2489 * Just log the error instead and let SCM live. 2490 */ 2491 ret = qcom_scm_qseecom_init(scm); 2492 WARN(ret < 0, "failed to initialize qseecom: %d\n", ret); 2493 2494 /* Initialize the QTEE object interface. */ 2495 qcom_scm_qtee_init(scm); 2496 2497 return 0; 2498 } 2499 2500 static void qcom_scm_shutdown(struct platform_device *pdev) 2501 { 2502 /* Clean shutdown, disable download mode to allow normal restart */ 2503 qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP); 2504 } 2505 2506 static const struct of_device_id qcom_scm_dt_match[] = { 2507 { .compatible = "qcom,scm" }, 2508 2509 /* Legacy entries kept for backwards compatibility */ 2510 { .compatible = "qcom,scm-apq8064" }, 2511 { .compatible = "qcom,scm-apq8084" }, 2512 { .compatible = "qcom,scm-ipq4019" }, 2513 { .compatible = "qcom,scm-msm8953" }, 2514 { .compatible = "qcom,scm-msm8974" }, 2515 { .compatible = "qcom,scm-msm8996" }, 2516 {} 2517 }; 2518 MODULE_DEVICE_TABLE(of, qcom_scm_dt_match); 2519 2520 static struct platform_driver qcom_scm_driver = { 2521 .driver = { 2522 .name = "qcom_scm", 2523 .of_match_table = qcom_scm_dt_match, 2524 .suppress_bind_attrs = true, 2525 }, 2526 .probe = qcom_scm_probe, 2527 .shutdown = qcom_scm_shutdown, 2528 }; 2529 2530 static int __init qcom_scm_init(void) 2531 { 2532 return platform_driver_register(&qcom_scm_driver); 2533 } 2534 subsys_initcall(qcom_scm_init); 2535 2536 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. 
SCM driver"); 2537 MODULE_LICENSE("GPL v2"); 2538