// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};
enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS = 0,
	QSEECOM_RESULT_INCOMPLETE = 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
	QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID = 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP = 2,
	QSEECOM_TZ_OWNER_TZ_APPS = 48,
	QSEECOM_TZ_OWNER_QSEE_OS = 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
	QSEECOM_TZ_SVC_APP_MGR = 1,
	QSEECOM_TZ_SVC_INFO = 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND = 1,
	QSEECOM_TZ_CMD_APP_LOOKUP = 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION = 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE	64
#define SHMBRIDGE_RESULT_NOTSUPP	4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP]	= "off",
	[QCOM_DLOAD_FULLDUMP]	= "full",
	[QCOM_DLOAD_MINIDUMP]	= "mini",
	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	return __scm ? __scm->mempool : NULL;
}

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an endpoint to power down the cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
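/*
 * Editorial illustration (not compiled): the download-mode cookie written by
 * qcom_scm_set_download_mode() below occupies bits 5:4 of the TCSR register,
 * so with QCOM_DLOAD_MASK = GENMASK(5, 4) the FIELD_PREP() values are:
 *
 *	FIELD_PREP(QCOM_DLOAD_MASK, QCOM_DLOAD_FULLDUMP) == 0x10
 *	FIELD_PREP(QCOM_DLOAD_MASK, QCOM_DLOAD_MINIDUMP) == 0x20
 *	FIELD_PREP(QCOM_DLOAD_MASK, QCOM_DLOAD_BOTHDUMP) == 0x30
 */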
static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux so we must not do it ourselves hence
	 * not using the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx: metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
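/*
 * Editorial sketch of the typical PAS call sequence as used by a remoteproc
 * driver (illustrative only; error handling omitted and "peripheral",
 * "metadata", "size", "mem_phys" and "mem_size" are placeholders):
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *
 *	ret = qcom_scm_pas_init_image(peripheral, metadata, size, &ctx);
 *	ret = qcom_scm_pas_mem_setup(peripheral, mem_phys, mem_size);
 *	(load the firmware segments into the prepared region)
 *	ret = qcom_scm_pas_auth_and_reset(peripheral);
 *	qcom_scm_pas_metadata_release(&ctx);
 *
 * and on teardown:
 *
 *	ret = qcom_scm_pas_shutdown(peripheral);
 */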
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					  supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)

bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);

int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
		.arginfo = QCOM_SCM_ARGS(4),
		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
		.args[1] = 0xffffffff,
		.args[2] = 0xffffffff,
		.args[3] = 0xffffffff,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmask for the current set of owners, each set bit
 *	      indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *	      flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
		 ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
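/*
 * Example use of qcom_scm_assign_mem() (editorial illustration; the VMID and
 * permission macros come from <linux/firmware/qcom/qcom_scm.h>, "mem_phys"
 * and "mem_size" are placeholders): hand a region from HLOS to the modem and
 * note how @srcvm is updated for the eventual reverse assignment:
 *
 *	struct qcom_scm_vmperm perm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	u64 srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &srcvm, &perm, 1);
 *
 * On success @srcvm holds BIT(QCOM_SCM_VMID_MSS_MSA), ready to be passed as
 * the source bitmask when giving the region back.
 */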
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 * qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
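/*
 * Editorial example (illustrative only; the cipher enum comes from
 * <linux/firmware/qcom/qcom_scm.h> and "key" is a placeholder): program a
 * 64-byte AES-256-XTS key for 4096-byte crypto data units into keyslot 0:
 *
 *	err = qcom_scm_ice_set_key(0, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *
 * where 8 units of 512 bytes yield the 4096-byte data unit size.
 */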
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_shm_bridge_enable(void)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (ret)
		return ret;

	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
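/*
 * Editorial note: the create/delete calls are expected to be used in pairs,
 * roughly as below (illustrative; packing of the u64 arguments is left to
 * the caller, with qcom_tzmem.c being the in-tree user):
 *
 *	ret = qcom_scm_shm_bridge_create(dev, pfn_and_ns_perm, ipfn_and_s_perm,
 *					 size_and_flags, ns_vmids, &handle);
 *	(use the bridged memory)
 *	ret = qcom_scm_shm_bridge_delete(dev, handle);
 */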
int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
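/*
 * Editorial illustration of the device tree description parsed above (the
 * offset value is a placeholder; the second cell is the offset of the
 * download-mode cookie within the TCSR referenced by the phandle):
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 */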
#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call querying the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}
/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. This returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      Request buffer sent to the app (must be TZ memory)
 * @req_size: Size of the request buffer.
 * @rsp:      Response buffer, written to by the app (must be TZ memory)
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
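/*
 * Editorial sketch of a complete QSEECOM exchange (illustrative only; error
 * handling omitted, "someapp", "req_size" and "rsp_size" are placeholders):
 * both buffers must come from the SCM TZ memory pool:
 *
 *	struct qcom_tzmem_pool *pool = qcom_scm_get_tzmem_pool();
 *	void *req __free(qcom_tzmem) = qcom_tzmem_alloc(pool, req_size, GFP_KERNEL);
 *	void *rsp __free(qcom_tzmem) = qcom_tzmem_alloc(pool, rsp_size, GFP_KERNEL);
 *	u32 app_id;
 *
 *	ret = qcom_scm_qseecom_app_get_id("someapp", &app_id);
 *	(fill req with the app-specific request)
 *	ret = qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
 */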
/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "microsoft,arcata", },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e001de-devkit" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ }
};

static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/* FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}
static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}

static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode, and full,mini for both full and minidump modes together");
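/*
 * Editorial example (illustrative): the modes above can be selected at
 * runtime via sysfs, or at boot on the kernel command line:
 *
 *	echo full > /sys/module/qcom_scm/parameters/download_mode
 *	qcom_scm.download_mode=mini
 */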
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Let all above stores be available after this */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	ret = of_reserved_mem_device_init(__scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(__scm->dev);
	if (ret)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
	if (IS_ERR(__scm->mempool))
		return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");