// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};
/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP		= 2,
	QSEECOM_TZ_OWNER_TZ_APPS	= 48,
	QSEECOM_TZ_OWNER_QSEE_OS	= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND		= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP	= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION	= 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE	64

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	return __scm->mempool;
}
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
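/*
 * Illustrative sketch (not an actual entry point of this driver): callers in
 * this file build a descriptor, pack the argument count and types into
 * ->arginfo via QCOM_SCM_ARGS() and dispatch it with qcom_scm_call(). For a
 * hypothetical packed function id "fnid":
 *
 *	struct qcom_scm_desc desc = {
 *		.svc = QCOM_SCM_SVC_INFO,
 *		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 *		.arginfo = QCOM_SCM_ARGS(1),
 *		.args[0] = fnid,
 *		.owner = ARM_SMCCC_OWNER_SIP,
 *	};
 *	struct qcom_scm_res res;
 *	int ret = qcom_scm_call(__scm->dev, &desc, &res);
 *
 * On success, up to three return values from the secure world are available
 * in res.result[0..2].
 */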
/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}
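/*
 * Illustrative sketch (hypothetical cpuidle/hotplug caller, not part of this
 * driver): the warm boot helpers above are typically called once with the
 * kernel's resume entry point, after which CPUs may be powered down through
 * TrustZone. QCOM_SCM_CPU_PWR_DOWN_L2_ON is assumed to be the flush flag from
 * <linux/firmware/qcom/qcom_scm.h>; cpu_resume_entry is a placeholder:
 *
 *	ret = qcom_scm_set_warm_boot_addr(cpu_resume_entry);
 *	if (ret)
 *		return ret;
 *	...
 *	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 */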
static void qcom_scm_set_download_mode(bool enable)
{
	u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, val));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 * @ctx: optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cachable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux so we must not do it ourselves hence
	 * not using the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
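/*
 * Illustrative sketch (hypothetical remoteproc-style caller): a typical PAS
 * firmware load built from this call and the ones that follow below. fw,
 * mem_phys and mem_size are assumed to come from the caller:
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *
 *	ret = qcom_scm_pas_init_image(peripheral, fw->data, fw->size, &ctx);
 *	if (!ret)
 *		ret = qcom_scm_pas_mem_setup(peripheral, mem_phys, mem_size);
 *	if (!ret)
 *		ret = qcom_scm_pas_auth_and_reset(peripheral);
 *	qcom_scm_pas_metadata_release(&ctx);
 *
 * and on teardown:
 *
 *	qcom_scm_pas_shutdown(peripheral);
 */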
/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx: metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Return: 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return: 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Return: 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Return: true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					  supports restore security config interface.
 *
 * Return: true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
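/*
 * Illustrative sketch (hypothetical IOMMU driver caller): the secure page
 * table is first sized by the firmware, then a physically contiguous buffer
 * of that size is handed back for initialization. paddr is assumed to be the
 * physical address of such a buffer:
 *
 *	size_t psize;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	if (!ret)
 *		ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
 */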
int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   flag indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in next set.
 *
 * Return: negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
		 ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
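/*
 * Illustrative sketch (hypothetical caller): handing a buffer from HLOS to a
 * remote VM. QCOM_SCM_VMID_HLOS, QCOM_SCM_VMID_MSS_MSA and QCOM_SCM_PERM_RW
 * are assumed from <linux/firmware/qcom/qcom_scm.h>; paddr and size come from
 * the caller:
 *
 *	u64 owners = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm next = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *
 *	ret = qcom_scm_assign_mem(paddr, size, &owners, &next, 1);
 *
 * On success @owners holds the new owner set, ready for the inverse call on
 * teardown.
 */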
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 * @mode: access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 * qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where
 * it can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
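/*
 * Illustrative sketch (hypothetical UFS/eMMC host caller): program a key and
 * later invalidate the slot. QCOM_SCM_ICE_CIPHER_AES_256_XTS is assumed from
 * <linux/firmware/qcom/qcom_scm.h>; the key-size constant is hypothetical. A
 * data unit size of 8 selects 4096-byte crypto data units:
 *
 *	ret = qcom_scm_ice_set_key(slot, key, AES_256_XTS_KEY_SIZE,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	ret = qcom_scm_ice_invalidate_key(slot);
 */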
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return: true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH,
					    QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_shm_bridge_enable(void)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
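/*
 * Illustrative sketch (hypothetical caller): once qcom_scm_shm_bridge_enable()
 * has succeeded, a bridge over a physically contiguous region is created and
 * later torn down with the pair of calls above. The exact bit-packing of the
 * pfn/perm/size arguments is defined by the TrustZone interface and only
 * hinted at here; all variables are assumed:
 *
 *	u64 handle;
 *
 *	ret = qcom_scm_shm_bridge_create(dev, pfn_and_ns_perm, ipfn_and_s_perm,
 *					 size_and_flags, ns_vmids, &handle);
 *	...
 *	ret = qcom_scm_shm_bridge_delete(dev, handle);
 */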
int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res: SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */
	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}
/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call querying the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id: The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. The returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);
	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id: The ID of the target app.
 * @req: Request buffer sent to the app (must be TZ memory).
 * @req_size: Size of the request buffer.
 * @rsp: Response buffer, written to by the app (must be TZ memory).
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);
	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
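/*
 * Illustrative sketch (hypothetical QSEECOM client): the request and response
 * buffers must live in TrustZone-shareable memory, assumed here to come from
 * a qcom_tzmem pool set up by the client; the app name and buffer sizes are
 * assumptions:
 *
 *	u32 app_id;
 *	void *req, *rsp;
 *
 *	ret = qcom_scm_qseecom_app_get_id("example_app", &app_id);
 *	if (ret)
 *		return ret;
 *
 *	req = qcom_tzmem_alloc(pool, req_size, GFP_KERNEL);
 *	rsp = qcom_tzmem_alloc(pool, rsp_size, GFP_KERNEL);
 *	... fill *req, then:
 *	ret = qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
 */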
/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ }
};

static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);

static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/* FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Let all above stores be available after this */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
		qcom_scm_disable_sdi();

	ret = of_reserved_mem_device_init(__scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(__scm->dev);
	if (ret)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
	if (IS_ERR(__scm->mempool))
		return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");