// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS = 0,
	QSEECOM_RESULT_INCOMPLETE = 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
	QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID = 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP = 2,
	QSEECOM_TZ_OWNER_TZ_APPS = 48,
	QSEECOM_TZ_OWNER_QSEE_OS = 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
	QSEECOM_TZ_SVC_APP_MGR = 1,
	QSEECOM_TZ_SVC_INFO = 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND = 1,
	QSEECOM_TZ_CMD_APP_LOOKUP = 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION = 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE	64
#define SHMBRIDGE_RESULT_NOTSUPP	4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK			GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP		0
#define QCOM_DLOAD_FULLDUMP		1
#define QCOM_DLOAD_MINIDUMP		2
#define QCOM_DLOAD_BOTHDUMP		3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP] = "off",
	[QCOM_DLOAD_FULLDUMP] = "full",
	[QCOM_DLOAD_MINIDUMP] = "mini",
	[QCOM_DLOAD_BOTHDUMP] = "full,mini",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

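/*
 * The clock and interconnect votes above are reference counted. SCM calls
 * that need them (for instance the PAS helpers below) bracket the call with
 * qcom_scm_clk_enable()/qcom_scm_bw_enable() and drop both votes again on
 * every return path.
 */
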
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

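/*
 * A minimal usage sketch from a cpuidle or hotplug path; cpu_resume_arm and
 * the QCOM_SCM_CPU_PWR_DOWN_L2_ON flag are assumed to come from the
 * architecture code and <linux/firmware/qcom/qcom_scm.h> respectively:
 *
 *	qcom_scm_set_warm_boot_addr(cpu_resume_arm);
 *	...
 *	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 */
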
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * devm_qcom_scm_pas_context_alloc() - Allocate peripheral authentication service
 *				       context for a given peripheral
 *
 * PAS context is device-resource managed, so the caller does not need
 * to worry about freeing the context memory.
 *
 * @dev: PAS firmware device
 * @pas_id: peripheral authentication service id
 * @mem_phys: Subsystem reserve memory start address
 * @mem_size: Subsystem reserve memory size
 *
 * Returns: The new PAS context, or ERR_PTR() on failure.
 */
struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev,
							     u32 pas_id,
							     phys_addr_t mem_phys,
							     size_t mem_size)
{
	struct qcom_scm_pas_context *ctx;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->dev = dev;
	ctx->pas_id = pas_id;
	ctx->mem_phys = mem_phys;
	ctx->mem_size = mem_size;

	return ctx;
}
EXPORT_SYMBOL_GPL(devm_qcom_scm_pas_context_alloc);

static int __qcom_scm_pas_init_image(u32 pas_id, dma_addr_t mdata_phys,
				     struct qcom_scm_res *res)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret;
}

static int qcom_scm_pas_prep_and_init_image(struct qcom_scm_pas_context *ctx,
					    const void *metadata, size_t size)
{
	struct qcom_scm_res res;
	phys_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	mdata_buf = qcom_tzmem_alloc(__scm->mempool, size, GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);
	mdata_phys = qcom_tzmem_to_phys(mdata_buf);

	ret = __qcom_scm_pas_init_image(ctx->pas_id, mdata_phys, &res);
	if (ret < 0)
		qcom_tzmem_free(mdata_buf);
	else
		ctx->ptr = mdata_buf;

	return ret ? : res.result[0];
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @pas_id: peripheral authentication service id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 * @ctx: optional pas context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation; the caller must release it by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size,
			    struct qcom_scm_pas_context *ctx)
{
	struct qcom_scm_res res;
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	if (ctx && ctx->use_tzmem)
		return qcom_scm_pas_prep_and_init_image(ctx, metadata, size);

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux so we must not do it ourselves hence
	 * not using the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = __qcom_scm_pas_init_image(pas_id, mdata_phys, &res);
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx: pas context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx)
{
	if (!ctx->ptr)
		return;

	if (ctx->use_tzmem)
		qcom_tzmem_free(ctx->ptr);
	else
		dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @pas_id: peripheral authentication service id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = pas_id,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @pas_id: peripheral authentication service id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_prepare_and_auth_reset() - Prepare, authenticate, and reset the
 *					    remote processor
 *
 * @ctx: Context saved during call to qcom_scm_pas_context_init()
 *
 * This function performs the necessary steps to prepare a PAS subsystem,
 * authenticate it using the provided metadata, and initiate a reset sequence.
 *
 * It should be used when Linux is in control of setting up the IOMMU hardware
 * for the remote subsystem during secure firmware loading. The preparation
 * step sets up a shmbridge over the firmware memory before TrustZone accesses
 * the firmware memory region for authentication. The authentication step
 * verifies the integrity and authenticity of the firmware or configuration
 * using secure metadata. Finally, the reset step ensures the subsystem starts
 * in a clean and sane state.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_scm_pas_prepare_and_auth_reset(struct qcom_scm_pas_context *ctx)
{
	u64 handle;
	int ret;

	/*
	 * When Linux runs at EL1, the Gunyah hypervisor running at EL2 traps
	 * the auth_and_reset call, creates a shmbridge on the remote subsystem
	 * memory region and then invokes a call to TrustZone to authenticate.
	 */
	if (!ctx->use_tzmem)
		return qcom_scm_pas_auth_and_reset(ctx->pas_id);

	/*
	 * When Linux runs at EL2, Linux must create the shmbridge itself and
	 * then subsequently call TrustZone to authenticate and reset.
	 */
	ret = qcom_tzmem_shm_bridge_create(ctx->mem_phys, ctx->mem_size, &handle);
	if (ret)
		return ret;

	ret = qcom_scm_pas_auth_and_reset(ctx->pas_id);
	qcom_tzmem_shm_bridge_delete(handle);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_prepare_and_auth_reset);

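/*
 * A minimal sketch of how a remoteproc/PIL driver is expected to chain the
 * PAS helpers above (variable names and the metadata source are illustrative):
 *
 *	ctx = devm_qcom_scm_pas_context_alloc(dev, pas_id, mem_phys, mem_size);
 *	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx);
 *	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	... copy the firmware segments into the reserved region ...
 *	ret = qcom_scm_pas_auth_and_reset(pas_id);
 *	qcom_scm_pas_metadata_release(ctx);
 */
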
/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @pas_id: peripheral authentication service id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @pas_id: peripheral authentication service id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

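/*
 * qcom_scm_io_readl() and qcom_scm_io_writel() are issued through
 * qcom_scm_call_atomic(), so they are usable from atomic context;
 * qcom_scm_io_rmw() above builds a read-modify-write on top of them, e.g. for
 * the download-mode cookie handled by qcom_scm_set_download_mode().
 */
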
/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					   supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)

bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);

int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
		.arginfo = QCOM_SCM_ARGS(4),
		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
		.args[1] = 0xffffffff,
		.args[2] = 0xffffffff,
		.args[3] = 0xffffffff,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   flag indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
		 ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of the next set of vmids */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return ret;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);

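/*
 * A minimal usage sketch, assuming the VMID and permission constants from
 * <linux/firmware/qcom/qcom_scm.h> (the region and its size are illustrative):
 *
 *	u64 src = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm perm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &src, &perm, 1);
 *
 * On success @src is updated to the new owner set and is typically passed
 * back unchanged when ownership is later returned to HLOS.
 */
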
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);

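/*
 * A minimal programming sketch, assuming QCOM_SCM_ICE_CIPHER_AES_256_XTS from
 * <linux/firmware/qcom/qcom_scm.h>, a 64-byte AES-256-XTS key and a 4096-byte
 * data unit (8 * 512 bytes); the keyslot number is illustrative:
 *
 *	err = qcom_scm_ice_set_key(slot, raw_key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	err = qcom_scm_ice_invalidate_key(slot);
 */
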
bool qcom_scm_has_wrapped_key_support(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_DERIVE_SW_SECRET) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_GENERATE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_PREPARE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_IMPORT_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);

/**
 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
 * @eph_key: an ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes
 * @sw_secret: output buffer for the software secret
 * @sw_secret_size: size of the software secret to derive in bytes
 *
 * Derive a software secret from an ephemerally-wrapped key for software crypto
 * operations. This is done by calling into the secure execution environment,
 * which then calls into the hardware to unwrap and derive the secret.
 *
 * For more information on sw_secret, see the "Hardware-wrapped keys" section of
 * Documentation/block/inline-encryption.rst.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
			      u8 *sw_secret, size_t sw_secret_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								  sw_secret_size,
								  GFP_KERNEL);
	if (!sw_secret_buf)
		return -ENOMEM;

	memcpy(eph_key_buf, eph_key, eph_key_size);
	desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[1] = eph_key_size;
	desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
	desc.args[3] = sw_secret_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(sw_secret, sw_secret_buf, sw_secret_size);

	memzero_explicit(eph_key_buf, eph_key_size);
	memzero_explicit(sw_secret_buf, sw_secret_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);

/**
 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Generate a key using the built-in HW module in the SoC. The resulting key is
 * returned wrapped with the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);

/**
 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
 * @lt_key: a long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes
 * @eph_key: output buffer for the ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
 *		  used by the SoC.
 *
 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
 * added protection. The resulting key will only be valid for the current boot.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
			     u8 *eph_key, size_t eph_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	memcpy(lt_key_buf, lt_key, lt_key_size);
	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;
	desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[3] = eph_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(eph_key, eph_key_buf, eph_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	memzero_explicit(eph_key_buf, eph_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);

/**
 * qcom_scm_import_ice_key() - Import key for storage encryption
 * @raw_key: the raw key to import
 * @raw_key_size: size of @raw_key in bytes
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
 * wrap the raw key using the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
			    u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								raw_key_size,
								GFP_KERNEL);
	if (!raw_key_buf)
		return -ENOMEM;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	memcpy(raw_key_buf, raw_key, raw_key_size);
	desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
	desc.args[1] = raw_key_size;
	desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[3] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(raw_key_buf, raw_key_size);
	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);

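/*
 * The wrapped-key helpers above are meant to be chained: a long-term wrapped
 * key is first created with qcom_scm_generate_ice_key() (or produced from raw
 * material with qcom_scm_import_ice_key()), re-wrapped with the per-boot
 * ephemeral key via qcom_scm_prepare_ice_key() before being programmed with
 * qcom_scm_ice_set_key(), and qcom_scm_derive_sw_secret() yields the software
 * secret associated with the ephemerally-wrapped key.
 */
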
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

/*
 * This is only supposed to be called once by the TZMem module. It takes the
 * SCM struct device as argument and uses it to pass the call as at the time
 * the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't
 * accept global user calls. Don't try to use the __scm pointer here.
 */
int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(scm_dev, &desc, &res);

	if (ret)
		return ret;

	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

int qcom_scm_shm_bridge_delete(u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								payload_size,
								GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call that queries the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
                                   struct qcom_scm_qseecom_resp *res)
{
        struct qcom_scm_res scm_res = {};
        int status;

        /*
         * QSEECOM SCM calls should not be executed concurrently. Therefore, we
         * require the respective call lock to be held.
         */
        lockdep_assert_held(&qcom_scm_qseecom_call_lock);

        status = qcom_scm_call(__scm->dev, desc, &scm_res);

        res->result = scm_res.result[0];
        res->resp_type = scm_res.result[1];
        res->data = scm_res.result[2];

        if (status)
                return status;

        return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res: SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
                                 struct qcom_scm_qseecom_resp *res)
{
        int status;

        /*
         * Note: Multiple QSEECOM SCM calls should not be executed at the same
         * time, so lock things here. This needs to be extended to
         * callback/listener handling when support for that is implemented.
         */

        mutex_lock(&qcom_scm_qseecom_call_lock);
        status = __qcom_scm_qseecom_call(desc, res);
        mutex_unlock(&qcom_scm_qseecom_call_lock);

        dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
                __func__, desc->owner, desc->svc, desc->cmd, res->result,
                res->resp_type, res->data);

        if (status) {
                dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
                return status;
        }

        /*
         * TODO: Handle incomplete and blocked calls:
         *
         * Incomplete and blocked calls are not supported yet. Some devices
         * and/or commands require those, some don't. Let's warn about them
         * prominently in case someone attempts to try these commands with a
         * device/command combination that isn't supported yet.
         */
        WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
        WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

        return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call querying the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
        struct qcom_scm_desc desc = {};
        struct qcom_scm_qseecom_resp res = {};
        u32 feature = 10;
        int ret;

        desc.owner = QSEECOM_TZ_OWNER_SIP;
        desc.svc = QSEECOM_TZ_SVC_INFO;
        desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
        desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
        desc.args[0] = feature;

        ret = qcom_scm_qseecom_call(&desc, &res);
        if (ret)
                return ret;

        *version = res.result;
        return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id: The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. The returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
        unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
        unsigned long app_name_len = strlen(app_name);
        struct qcom_scm_desc desc = {};
        struct qcom_scm_qseecom_resp res = {};
        int status;

        if (app_name_len >= name_buf_size)
                return -EINVAL;

        char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
                                                             name_buf_size,
                                                             GFP_KERNEL);
        if (!name_buf)
                return -ENOMEM;

        memcpy(name_buf, app_name, app_name_len);

        desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
        desc.svc = QSEECOM_TZ_SVC_APP_MGR;
        desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
        desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
        desc.args[0] = qcom_tzmem_to_phys(name_buf);
        desc.args[1] = app_name_len;

        status = qcom_scm_qseecom_call(&desc, &res);

        if (status)
                return status;

        if (res.result == QSEECOM_RESULT_FAILURE)
                return -ENOENT;

        if (res.result != QSEECOM_RESULT_SUCCESS)
                return -EINVAL;

        if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
                return -EINVAL;

        *app_id = res.data;
        return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id: The ID of the target app.
 * @req: Request buffer sent to the app (must be TZ memory).
 * @req_size: Size of the request buffer.
 * @rsp: Response buffer, written to by the app (must be TZ memory).
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two TZ memory buffers, one for the
 * request and one for the response, and fill out the @req buffer with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp buffer.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
                              void *rsp, size_t rsp_size)
{
        struct qcom_scm_qseecom_resp res = {};
        struct qcom_scm_desc desc = {};
        phys_addr_t req_phys;
        phys_addr_t rsp_phys;
        int status;

        req_phys = qcom_tzmem_to_phys(req);
        rsp_phys = qcom_tzmem_to_phys(rsp);

        desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
        desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
        desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
        desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
                                     QCOM_SCM_RW, QCOM_SCM_VAL,
                                     QCOM_SCM_RW, QCOM_SCM_VAL);
        desc.args[0] = app_id;
        desc.args[1] = req_phys;
        desc.args[2] = req_size;
        desc.args[3] = rsp_phys;
        desc.args[4] = rsp_size;

        status = qcom_scm_qseecom_call(&desc, &res);

        if (status)
                return status;

        if (res.result != QSEECOM_RESULT_SUCCESS)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
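
/*
 * Illustrative sketch (not part of the driver): how a hypothetical client
 * could look up a QSEE app and exchange one message with it. The app name,
 * the request/response layouts and the TZ memory pool are all assumptions of
 * the example; real clients use their own app-specific structures and a pool
 * they have created via the qcom_tzmem API.
 */
struct example_app_req {
        u32 cmd;
};

struct example_app_rsp {
        u32 status;
};

static int __maybe_unused qseecom_app_send_example(struct qcom_tzmem_pool *pool)
{
        u32 app_id;
        int ret;

        ret = qcom_scm_qseecom_app_get_id("exampleapp", &app_id);
        if (ret) /* -ENOENT if the app is not loaded */
                return ret;

        struct example_app_req *req __free(qcom_tzmem) =
                        qcom_tzmem_alloc(pool, sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        struct example_app_rsp *rsp __free(qcom_tzmem) =
                        qcom_tzmem_alloc(pool, sizeof(*rsp), GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        req->cmd = 0; /* app-specific command, placeholder value */

        ret = qcom_scm_qseecom_app_send(app_id, req, sizeof(*req),
                                        rsp, sizeof(*rsp));
        if (ret)
                return ret;

        return rsp->status ? -EIO : 0; /* status semantics are app-specific */
}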

/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
        { .compatible = "asus,vivobook-s15" },
        { .compatible = "asus,zenbook-a14-ux3407qa" },
        { .compatible = "asus,zenbook-a14-ux3407ra" },
        { .compatible = "dell,inspiron-14-plus-7441" },
        { .compatible = "dell,latitude-7455" },
        { .compatible = "dell,xps13-9345" },
        { .compatible = "hp,elitebook-ultra-g1q" },
        { .compatible = "hp,omnibook-x14" },
        { .compatible = "huawei,gaokun3" },
        { .compatible = "lenovo,flex-5g" },
        { .compatible = "lenovo,thinkbook-16" },
        { .compatible = "lenovo,thinkpad-t14s" },
        { .compatible = "lenovo,thinkpad-x13s" },
        { .compatible = "lenovo,yoga-slim7x" },
        { .compatible = "microsoft,arcata" },
        { .compatible = "microsoft,blackrock" },
        { .compatible = "microsoft,romulus13" },
        { .compatible = "microsoft,romulus15" },
        { .compatible = "qcom,hamoa-iot-evk" },
        { .compatible = "qcom,sc8180x-primus" },
        { .compatible = "qcom,x1e001de-devkit" },
        { .compatible = "qcom,x1e80100-crd" },
        { .compatible = "qcom,x1e80100-qcp" },
        { .compatible = "qcom,x1p42100-crd" },
        { }
};
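
/*
 * Illustrative note: once a machine has been validated, it is enabled for
 * QSEECOM simply by adding its top-level board compatible to the table above,
 * e.g. (hypothetical board):
 *
 *      { .compatible = "vendor,some-new-board" },
 */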

static void qcom_scm_qseecom_free(void *data)
{
        struct platform_device *qseecom_dev = data;

        platform_device_del(qseecom_dev);
        platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
        struct platform_device *qseecom_dev;
        u32 version;
        int ret;

        /*
         * Note: We do two steps of validation here: First, we try to query the
         * QSEECOM version as a check to see if the interface exists on this
         * device. Second, we check against known good devices due to current
         * driver limitations (see comment in qcom_scm_qseecom_allowlist).
         *
         * Note that we deliberately do the machine check after the version
         * check so that we can log potentially supported devices. This should
         * be safe as downstream sources indicate that the version query is
         * neither blocking nor reentrant.
         */
        ret = qcom_scm_qseecom_get_version(&version);
        if (ret)
                return 0;

        dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

        if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
                dev_info(scm->dev, "qseecom: untested machine, skipping\n");
                return 0;
        }

        /*
         * Set up the QSEECOM interface device. All application clients will be
         * set up and managed by its corresponding driver.
         */
        qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
        if (!qseecom_dev)
                return -ENOMEM;

        qseecom_dev->dev.parent = scm->dev;

        ret = platform_device_add(qseecom_dev);
        if (ret) {
                platform_device_put(qseecom_dev);
                return ret;
        }

        return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
        return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
 * @inbuf: Start address of the memory area used for the inbound buffer.
 * @inbuf_size: Size of the memory area used for the inbound buffer.
 * @outbuf: Start address of the memory area used for the outbound buffer.
 * @outbuf_size: Size of the memory area used for the outbound buffer.
 * @result: Result of the QTEE object invocation.
 * @response_type: Response type returned by QTEE.
 *
 * @response_type determines how the contents of @inbuf and @outbuf
 * should be processed.
 *
 * Return: Zero on success, or a negative errno on failure.
 */
int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
                             phys_addr_t outbuf, size_t outbuf_size,
                             u64 *result, u64 *response_type)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_SMCINVOKE,
                .cmd = QCOM_SCM_SMCINVOKE_INVOKE,
                .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
                .args[0] = inbuf,
                .args[1] = inbuf_size,
                .args[2] = outbuf,
                .args[3] = outbuf_size,
                .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
                                         QCOM_SCM_RW, QCOM_SCM_VAL),
        };
        struct qcom_scm_res res;
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);
        if (ret)
                return ret;

        if (response_type)
                *response_type = res.result[0];

        if (result)
                *result = res.result[1];

        return 0;
}
EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc);
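
/*
 * Illustrative sketch (not part of the driver): a minimal QTEE invocation as
 * a transport driver such as qcomtee might issue it. The in/out buffers are
 * assumed to already be shared with QTEE; their layout and the meaning of the
 * returned response type are defined by the QTEE object protocol and are not
 * interpreted here.
 */
static int __maybe_unused qtee_invoke_example(phys_addr_t inbuf, size_t inbuf_size,
                                              phys_addr_t outbuf, size_t outbuf_size)
{
        u64 result, response_type;
        int ret;

        ret = qcom_scm_qtee_invoke_smc(inbuf, inbuf_size, outbuf, outbuf_size,
                                       &result, &response_type);
        if (ret)
                return ret;

        /*
         * A real caller would dispatch on @response_type here, e.g. answer a
         * callback request via qcom_scm_qtee_callback_response() before
         * looking at @result.
         */
        return 0;
}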

/**
 * qcom_scm_qtee_callback_response() - Submit a response for a callback request.
 * @buf: Start address of the memory area used for the outbound buffer.
 * @buf_size: Size of the memory area used for the outbound buffer.
 * @result: Result of the QTEE object invocation.
 * @response_type: Response type returned by QTEE.
 *
 * @response_type determines how the contents of @buf should be processed.
 *
 * Return: Zero on success, or a negative errno on failure.
 */
int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
                                    u64 *result, u64 *response_type)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_SMCINVOKE,
                .cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
                .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
                .args[0] = buf,
                .args[1] = buf_size,
                .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
        };
        struct qcom_scm_res res;
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);
        if (ret)
                return ret;

        if (response_type)
                *response_type = res.result[0];

        if (result)
                *result = res.result[1];

        return 0;
}
EXPORT_SYMBOL(qcom_scm_qtee_callback_response);

static void qcom_scm_qtee_free(void *data)
{
        struct platform_device *qtee_dev = data;

        platform_device_unregister(qtee_dev);
}

static void qcom_scm_qtee_init(struct qcom_scm *scm)
{
        struct platform_device *qtee_dev;
        u64 result, response_type;
        int ret;

        /*
         * Probe for smcinvoke support. The call will fail because the buffers
         * are invalid, but the QTEE syscall handler first checks whether the
         * call is supported at all and returns -EIO if it is not.
         */
        ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
        if (ret == -EIO)
                return;

        /* Set up the QTEE interface device. */
        qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
                                                 PLATFORM_DEVID_NONE, NULL, 0);
        if (IS_ERR(qtee_dev))
                return;

        devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
}

/**
 * qcom_scm_is_available() - Checks if SCM is available.
 *
 * Return: True if the SCM device is ready to accept calls, false otherwise.
 */
bool qcom_scm_is_available(void)
{
        /* Paired with smp_store_release() in qcom_scm_probe(). */
        return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
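
/*
 * Illustrative sketch (not part of the driver): consumers are expected to gate
 * their SCM usage on qcom_scm_is_available(), typically by deferring probe
 * until the SCM device has registered. The probe function below is a
 * placeholder, not a real driver.
 */
static int __maybe_unused example_scm_consumer_probe(struct platform_device *pdev)
{
        if (!qcom_scm_is_available())
                return -EPROBE_DEFER;

        /* ... it is now safe to issue qcom_scm_*() calls ... */

        return 0;
}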

static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
        /*
         * FW currently only supports a single wq_ctx (zero).
         * TODO: Update this logic to include dynamic allocation and lookup of
         * completion structs when FW supports more wq_ctx values.
         */
        if (wq_ctx != 0) {
                dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
                return -EINVAL;
        }

        return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
        int ret;

        ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
        if (ret)
                return ret;

        wait_for_completion(&__scm->waitq_comp);

        return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
        int ret;

        ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
        if (ret)
                return ret;

        complete(&__scm->waitq_comp);

        return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
        int ret;
        struct qcom_scm *scm = data;
        u32 wq_ctx, flags, more_pending = 0;

        do {
                ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
                if (ret) {
                        dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
                        goto out;
                }

                if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
                        dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
                        goto out;
                }

                ret = qcom_scm_waitq_wakeup(wq_ctx);
                if (ret)
                        goto out;
        } while (more_pending);

out:
        return IRQ_HANDLED;
}

static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
        if (download_mode >= ARRAY_SIZE(download_mode_name))
                return sysfs_emit(buffer, "unknown mode\n");

        return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}

static int set_download_mode(const char *val, const struct kernel_param *kp)
{
        bool tmp;
        int ret;

        ret = sysfs_match_string(download_mode_name, val);
        if (ret < 0) {
                ret = kstrtobool(val, &tmp);
                if (ret < 0) {
                        pr_err("qcom_scm: err: %d\n", ret);
                        return ret;
                }

                ret = tmp ? 1 : 0;
        }

        download_mode = ret;
        if (__scm)
                qcom_scm_set_download_mode(download_mode);

        return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
        .get = get_download_mode,
        .set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode, full,mini for both full and minidump mode");
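
/*
 * For reference: with the 0644 permissions above, the mode can be selected on
 * the kernel command line (e.g. qcom_scm.download_mode=full) or changed at
 * runtime via /sys/module/qcom_scm/parameters/download_mode; accepted values
 * are the strings in download_mode_name[] plus boolean aliases.
 */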

static int qcom_scm_probe(struct platform_device *pdev)
{
        struct qcom_tzmem_pool_config pool_config;
        struct qcom_scm *scm;
        int irq, ret;

        scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
        if (!scm)
                return -ENOMEM;

        scm->dev = &pdev->dev;
        ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
        if (ret < 0)
                return ret;

        init_completion(&scm->waitq_comp);
        mutex_init(&scm->scm_bw_lock);

        scm->path = devm_of_icc_get(&pdev->dev, NULL);
        if (IS_ERR(scm->path))
                return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
                                     "failed to acquire interconnect path\n");

        scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
        if (IS_ERR(scm->core_clk))
                return PTR_ERR(scm->core_clk);

        scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
        if (IS_ERR(scm->iface_clk))
                return PTR_ERR(scm->iface_clk);

        scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
        if (IS_ERR(scm->bus_clk))
                return PTR_ERR(scm->bus_clk);

        scm->reset.ops = &qcom_scm_pas_reset_ops;
        scm->reset.nr_resets = 1;
        scm->reset.of_node = pdev->dev.of_node;
        ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
        if (ret)
                return ret;

        /* vote for max clk rate for highest performance */
        ret = clk_set_rate(scm->core_clk, INT_MAX);
        if (ret)
                return ret;

        ret = of_reserved_mem_device_init(scm->dev);
        if (ret && ret != -ENODEV)
                return dev_err_probe(scm->dev, ret,
                                     "Failed to setup the reserved memory region for TZ mem\n");

        ret = qcom_tzmem_enable(scm->dev);
        if (ret)
                return dev_err_probe(scm->dev, ret,
                                     "Failed to enable the TrustZone memory allocator\n");

        memset(&pool_config, 0, sizeof(pool_config));
        pool_config.initial_size = 0;
        pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
        pool_config.max_size = SZ_256K;

        scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
        if (IS_ERR(scm->mempool))
                return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
                                     "Failed to create the SCM memory pool\n");

        irq = platform_get_irq_optional(pdev, 0);
        if (irq < 0) {
                if (irq != -ENXIO)
                        return irq;
        } else {
                ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
                                                IRQF_ONESHOT, "qcom-scm", scm);
                if (ret < 0)
                        return dev_err_probe(scm->dev, ret,
                                             "Failed to request qcom-scm irq\n");
        }

        /*
         * Paired with smp_load_acquire() in qcom_scm_is_available().
         *
         * This marks the SCM API as ready to accept client calls and may only
         * be done after the TrustZone memory pool has been initialized and the
         * waitqueue interrupt requested.
         */
        smp_store_release(&__scm, scm);

        __get_convention();

        /*
         * If "download mode" is requested, from this point on warmboot
         * will cause the boot stages to enter download mode, unless
         * disabled below by a clean shutdown/reboot.
         */
        qcom_scm_set_download_mode(download_mode);

        /*
         * Disable SDI if indicated by DT that it is enabled by default.
         */
        if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
                qcom_scm_disable_sdi();

        /*
         * Initialize the QSEECOM interface.
         *
         * Note: QSEECOM is fairly self-contained and this only adds the
         * interface device (the driver of which does most of the heavy
         * lifting). So any errors returned here should be either -ENOMEM or
         * -EINVAL (with the latter only in case there's a bug in our code).
         * This means that there is no need to bring down the whole SCM driver.
         * Just log the error instead and let SCM live.
         */
        ret = qcom_scm_qseecom_init(scm);
        WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

        /* Initialize the QTEE object interface. */
        qcom_scm_qtee_init(scm);

        return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
        /* Clean shutdown, disable download mode to allow normal restart */
        qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
        { .compatible = "qcom,scm" },

        /* Legacy entries kept for backwards compatibility */
        { .compatible = "qcom,scm-apq8064" },
        { .compatible = "qcom,scm-apq8084" },
        { .compatible = "qcom,scm-ipq4019" },
        { .compatible = "qcom,scm-msm8953" },
        { .compatible = "qcom,scm-msm8974" },
        { .compatible = "qcom,scm-msm8996" },
        {}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
        .driver = {
                .name = "qcom_scm",
                .of_match_table = qcom_scm_dt_match,
                .suppress_bind_attrs = true,
        },
        .probe = qcom_scm_probe,
        .shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
        return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");