// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/remoteproc.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

/*
 * Requested download (crash dump) mode, holding one of the QCOM_DLOAD_*
 * values defined below; presumably populated from a module parameter or
 * command line — TODO confirm against the unseen rest of this file.
 */
static u32 download_mode;

#define GIC_SPI_BASE 32
#define GIC_MAX_SPI 1019 // SPIs in GICv3 spec range from 32..1019
#define GIC_ESPI_BASE 4096
#define GIC_MAX_ESPI 5119 // ESPIs in GICv3 spec range from 4096..5119

/* Driver-private state; a single instance is published via the __scm global. */
struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;	/* gated around SCM calls, see qcom_scm_clk_enable() */
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;	/* interconnect vote held across SCM calls */
	struct completion *waitq_comps;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;	/* nested bandwidth-vote refcount, see qcom_scm_bw_enable() */

	u64 dload_mode_addr;	/* TCSR download-mode register address, 0 if absent */

	struct qcom_tzmem_pool *mempool;	/* shared-memory pool for TZ-visible buffers */
	unsigned int wq_cnt;
};

/* Wire format shared with TrustZone: one VM's access permission record. */
struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

/* Wire format shared with TrustZone: one contiguous memory region. */
struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS = 0,
	QSEECOM_RESULT_INCOMPLETE = 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
	QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID = 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP = 2,
	QSEECOM_TZ_OWNER_TZ_APPS = 48,
	QSEECOM_TZ_OWNER_QSEE_OS = 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
	QSEECOM_TZ_SVC_APP_MGR = 1,
	QSEECOM_TZ_SVC_INFO = 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND = 1,
	QSEECOM_TZ_CMD_APP_LOOKUP = 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION = 3,
};

/* TZ response code: caller's output buffer too small for the resource table. */
#define RSCTABLE_BUFFER_NOT_SUFFICIENT 20

#define QSEECOM_MAX_APP_NAME_SIZE 64
#define SHMBRIDGE_RESULT_NOTSUPP 4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)

/* Download-mode field layout and values; names map via download_mode_name[]. */
#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

#define QCOM_SCM_DEFAULT_WAITQ_COUNT 1

static const char * const
qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

/* Human-readable names for the QCOM_DLOAD_* values, indexed by mode. */
static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP] = "off",
	[QCOM_DLOAD_FULLDUMP] = "full",
	[QCOM_DLOAD_MINIDUMP] = "mini",
	[QCOM_DLOAD_BOTHDUMP] = "full,mini",
};

/* Singleton driver state; NULL until the device has probed. */
static struct qcom_scm *__scm;

/*
 * Enable the (optional) core/iface/bus clocks around an SCM call.
 * On failure, every clock enabled so far is rolled back.
 */
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	/*
	 * NOTE(review): this is not the strict reverse of the enable order
	 * (bus, iface, core); harmless if the clocks are independent, but
	 * worth confirming.
	 */
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

/*
 * Take a refcounted interconnect bandwidth vote for the duration of an SCM
 * call; the actual icc request is only made on the 0 -> 1 transition.
 */
static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

/* Drop one bandwidth vote; release the icc request on the 1 -> 0 transition. */
static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

/* Cached result of __get_convention(); guarded by scm_query_lock on update. */
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

/* Expose the SCM shared-memory pool; NULL until the driver is available. */
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}

/*
 * Probe (once) which SMC calling convention the firmware implements, cache
 * the result in qcom_scm_convention and return it.
 */
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * system will encounter the undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL,
				    "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/*
 * Ask the firmware whether the (svc_id, cmd_id) call is implemented; the
 * function-id encoding depends on the detected calling convention.
 */
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

/*
 * Legacy per-CPU boot-address call: @cpu_bits maps each present CPU to the
 * flag bit the firmware expects (see qcom_scm_cpu_{cold,warm}_bits).
 */
static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	/* May run before probe; __scm can legitimately still be NULL here. */
	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

/* Multi-cluster variant of the boot-address call (all affinity levels). */
static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	/* Negative errno from the transport, else the firmware's own result. */
	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

/* Disable the watchdog-debug / System Debug Image path in the firmware. */
static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

/* Tell the firmware (via SCM call) whether download mode is enabled. */
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ?
QCOM_SCM_BOOT_SET_DLOAD_MODE : 0; 536 537 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 538 } 539 540 static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val) 541 { 542 unsigned int old; 543 unsigned int new; 544 int ret; 545 546 ret = qcom_scm_io_readl(addr, &old); 547 if (ret) 548 return ret; 549 550 new = (old & ~mask) | (val & mask); 551 552 return qcom_scm_io_writel(addr, new); 553 } 554 555 static void qcom_scm_set_download_mode(u32 dload_mode) 556 { 557 int ret = 0; 558 559 if (__scm->dload_mode_addr) { 560 ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK, 561 FIELD_PREP(QCOM_DLOAD_MASK, dload_mode)); 562 } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, 563 QCOM_SCM_BOOT_SET_DLOAD_MODE)) { 564 ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode); 565 } else if (dload_mode) { 566 dev_err(__scm->dev, 567 "No available mechanism for setting download mode\n"); 568 } 569 570 if (ret) 571 dev_err(__scm->dev, "failed to set download mode: %d\n", ret); 572 } 573 574 /** 575 * devm_qcom_scm_pas_context_alloc() - Allocate peripheral authentication service 576 * context for a given peripheral 577 * 578 * PAS context is device-resource managed, so the caller does not need 579 * to worry about freeing the context memory. 580 * 581 * @dev: PAS firmware device 582 * @pas_id: peripheral authentication service id 583 * @mem_phys: Subsystem reserve memory start address 584 * @mem_size: Subsystem reserve memory size 585 * 586 * Returns: The new PAS context, or ERR_PTR() on failure. 
587 */ 588 struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev, 589 u32 pas_id, 590 phys_addr_t mem_phys, 591 size_t mem_size) 592 { 593 struct qcom_scm_pas_context *ctx; 594 595 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 596 if (!ctx) 597 return ERR_PTR(-ENOMEM); 598 599 ctx->dev = dev; 600 ctx->pas_id = pas_id; 601 ctx->mem_phys = mem_phys; 602 ctx->mem_size = mem_size; 603 604 return ctx; 605 } 606 EXPORT_SYMBOL_GPL(devm_qcom_scm_pas_context_alloc); 607 608 static int __qcom_scm_pas_init_image(u32 pas_id, dma_addr_t mdata_phys, 609 struct qcom_scm_res *res) 610 { 611 struct qcom_scm_desc desc = { 612 .svc = QCOM_SCM_SVC_PIL, 613 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, 614 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), 615 .args[0] = pas_id, 616 .owner = ARM_SMCCC_OWNER_SIP, 617 }; 618 int ret; 619 620 ret = qcom_scm_clk_enable(); 621 if (ret) 622 return ret; 623 624 ret = qcom_scm_bw_enable(); 625 if (ret) 626 goto disable_clk; 627 628 desc.args[1] = mdata_phys; 629 630 ret = qcom_scm_call(__scm->dev, &desc, res); 631 qcom_scm_bw_disable(); 632 633 disable_clk: 634 qcom_scm_clk_disable(); 635 636 return ret; 637 } 638 639 static int qcom_scm_pas_prep_and_init_image(struct qcom_scm_pas_context *ctx, 640 const void *metadata, size_t size) 641 { 642 struct qcom_scm_res res; 643 phys_addr_t mdata_phys; 644 void *mdata_buf; 645 int ret; 646 647 mdata_buf = qcom_tzmem_alloc(__scm->mempool, size, GFP_KERNEL); 648 if (!mdata_buf) 649 return -ENOMEM; 650 651 memcpy(mdata_buf, metadata, size); 652 mdata_phys = qcom_tzmem_to_phys(mdata_buf); 653 654 ret = __qcom_scm_pas_init_image(ctx->pas_id, mdata_phys, &res); 655 if (ret < 0) 656 qcom_tzmem_free(mdata_buf); 657 else 658 ctx->ptr = mdata_buf; 659 660 return ret ? 
: res.result[0]; 661 } 662 663 /** 664 * qcom_scm_pas_init_image() - Initialize peripheral authentication service 665 * state machine for a given peripheral, using the 666 * metadata 667 * @pas_id: peripheral authentication service id 668 * @metadata: pointer to memory containing ELF header, program header table 669 * and optional blob of data used for authenticating the metadata 670 * and the rest of the firmware 671 * @size: size of the metadata 672 * @ctx: optional pas context 673 * 674 * Return: 0 on success. 675 * 676 * Upon successful return, the PAS metadata context (@ctx) will be used to 677 * track the metadata allocation, this needs to be released by invoking 678 * qcom_scm_pas_metadata_release() by the caller. 679 */ 680 int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size, 681 struct qcom_scm_pas_context *ctx) 682 { 683 struct qcom_scm_res res; 684 dma_addr_t mdata_phys; 685 void *mdata_buf; 686 int ret; 687 688 if (ctx && ctx->use_tzmem) 689 return qcom_scm_pas_prep_and_init_image(ctx, metadata, size); 690 691 /* 692 * During the scm call memory protection will be enabled for the meta 693 * data blob, so make sure it's physically contiguous, 4K aligned and 694 * non-cachable to avoid XPU violations. 695 * 696 * For PIL calls the hypervisor creates SHM Bridges for the blob 697 * buffers on behalf of Linux so we must not do it ourselves hence 698 * not using the TZMem allocator here. 699 * 700 * If we pass a buffer that is already part of an SHM Bridge to this 701 * call, it will fail. 702 */ 703 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, 704 GFP_KERNEL); 705 if (!mdata_buf) 706 return -ENOMEM; 707 708 memcpy(mdata_buf, metadata, size); 709 710 ret = __qcom_scm_pas_init_image(pas_id, mdata_phys, &res); 711 if (ret < 0 || !ctx) { 712 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); 713 } else if (ctx) { 714 ctx->ptr = mdata_buf; 715 ctx->phys = mdata_phys; 716 ctx->size = size; 717 } 718 719 return ret ? 
: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx: pas context
 *
 * Frees the metadata buffer recorded in @ctx by a successful
 * qcom_scm_pas_init_image(), using whichever allocator produced it, and
 * marks the context released. Safe to call on an already-released context.
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx)
{
	if (!ctx->ptr)
		return;

	if (ctx->use_tzmem)
		qcom_tzmem_free(ctx->ptr);
	else
		dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @pas_id: peripheral authentication service id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = pas_id,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	/* Negative errno from the transport, else the firmware's own result. */
	return ret ?
: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/*
 * Single PAS_GET_RSCTABLE SCM call: allocates a TZMem output buffer of
 * *output_rt_size bytes, asks TrustZone for the resource table and, on
 * success, returns the buffer (caller frees with qcom_tzmem_free()).
 * Returns -EOVERFLOW when the buffer was too small; *output_rt_size is then
 * updated to the size TrustZone requires so the caller can retry.
 */
static void *__qcom_scm_pas_get_rsc_table(u32 pas_id, void *input_rt_tzm,
					  size_t input_rt_size,
					  size_t *output_rt_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_GET_RSCTABLE,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	void *output_rt_tzm;
	int ret;

	output_rt_tzm = qcom_tzmem_alloc(__scm->mempool, *output_rt_size, GFP_KERNEL);
	if (!output_rt_tzm)
		return ERR_PTR(-ENOMEM);

	desc.args[1] = qcom_tzmem_to_phys(input_rt_tzm);
	desc.args[2] = input_rt_size;
	desc.args[3] = qcom_tzmem_to_phys(output_rt_tzm);
	desc.args[4] = *output_rt_size;

	/*
	 * Whether the SMC fails or passes, res.result[2] will hold the actual
	 * resource table size.
	 *
	 * If the passed 'output_rt_size' buffer size is not sufficient to hold
	 * the resource table TrustZone sends, the response code in
	 * res.result[1] is RSCTABLE_BUFFER_NOT_SUFFICIENT so that the caller
	 * can retry this SMC call with an output_rt_tzm buffer of
	 * res.result[2] size; however, it should not be of unreasonable size.
	 */
	ret = qcom_scm_call(__scm->dev, &desc, &res);
	if (!ret && res.result[2] > SZ_1G) {
		/* Sanity-cap the size TrustZone reports. */
		ret = -E2BIG;
		goto free_output_rt;
	}

	*output_rt_size = res.result[2];
	if (ret && res.result[1] == RSCTABLE_BUFFER_NOT_SUFFICIENT)
		ret = -EOVERFLOW;

free_output_rt:
	if (ret)
		qcom_tzmem_free(output_rt_tzm);

	return ret ? ERR_PTR(ret) : output_rt_tzm;
}

/**
 * qcom_scm_pas_get_rsc_table() - Retrieve the resource table in passed output buffer
 *				  for a given peripheral.
 *
 * Qualcomm remote processor may rely on both static and dynamic resources for
 * its functionality. Static resources typically refer to memory-mapped addresses
 * required by the subsystem and are often embedded within the firmware binary
 * and dynamic resources, such as shared memory in DDR etc., are determined at
 * runtime during the boot process.
 *
 * On Qualcomm Technologies devices, it's possible that static resources are not
 * embedded in the firmware binary and instead are provided by TrustZone. However,
 * dynamic resources are always expected to come from TrustZone. This indicates
 * that for Qualcomm devices, all resources (static and dynamic) will be provided
 * by TrustZone via the SMC call.
 *
 * If the remote processor firmware binary does contain static resources, they
 * should be passed in input_rt. These will be forwarded to TrustZone for
 * authentication. TrustZone will then append the dynamic resources and return
 * the complete resource table in output_rt_tzm.
 *
 * If the remote processor firmware binary does not include a resource table,
 * the caller of this function should set input_rt as NULL and input_rt_size
 * as zero respectively.
 *
 * More about documentation on resource table data structures can be found in
 * include/linux/remoteproc.h
 *
 * @ctx: PAS context
 * @input_rt: resource table buffer which is present in firmware binary
 * @input_rt_size: size of the resource table present in firmware binary
 * @output_rt_size: TrustZone expects caller should pass worst case size for
 *		    the output_rt_tzm.
 *
 * Return:
 * On success, returns a pointer to the allocated buffer containing the final
 * resource table and output_rt_size will have actual resource table size from
 * TrustZone. The caller is responsible for freeing the buffer. On failure,
 * returns ERR_PTR(-errno).
 */
struct resource_table *qcom_scm_pas_get_rsc_table(struct qcom_scm_pas_context *ctx,
						  void *input_rt,
						  size_t input_rt_size,
						  size_t *output_rt_size)
{
	struct resource_table empty_rsc = {};
	size_t size = SZ_16K;	/* initial worst-case guess for the output */
	void *output_rt_tzm;
	void *input_rt_tzm;
	void *tbl_ptr;
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ERR_PTR(ret);

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	/*
	 * TrustZone can not accept buffer as NULL value as argument hence,
	 * we need to pass a input buffer indicating that subsystem firmware
	 * does not have resource table by filling resource table structure.
	 */
	if (!input_rt) {
		input_rt = &empty_rsc;
		input_rt_size = sizeof(empty_rsc);
	}

	input_rt_tzm = qcom_tzmem_alloc(__scm->mempool, input_rt_size, GFP_KERNEL);
	if (!input_rt_tzm) {
		ret = -ENOMEM;
		goto disable_scm_bw;
	}

	memcpy(input_rt_tzm, input_rt, input_rt_size);

	output_rt_tzm = __qcom_scm_pas_get_rsc_table(ctx->pas_id, input_rt_tzm,
						     input_rt_size, &size);
	if (PTR_ERR(output_rt_tzm) == -EOVERFLOW)
		/* Try again with the size requested by the TZ */
		output_rt_tzm = __qcom_scm_pas_get_rsc_table(ctx->pas_id,
							     input_rt_tzm,
							     input_rt_size,
							     &size);
	if (IS_ERR(output_rt_tzm)) {
		ret = PTR_ERR(output_rt_tzm);
		goto free_input_rt;
	}

	/* Copy out of TZMem into a plain kernel buffer owned by the caller. */
	tbl_ptr = kzalloc(size, GFP_KERNEL);
	if (!tbl_ptr) {
		qcom_tzmem_free(output_rt_tzm);
		ret = -ENOMEM;
		goto free_input_rt;
	}

	memcpy(tbl_ptr, output_rt_tzm, size);
	*output_rt_size = size;
	qcom_tzmem_free(output_rt_tzm);

free_input_rt:
	qcom_tzmem_free(input_rt_tzm);

disable_scm_bw:
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	/* tbl_ptr is only read when ret == 0, i.e. after it was assigned. */
	return ret ? ERR_PTR(ret) : tbl_ptr;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_get_rsc_table);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @pas_id: peripheral authentication service id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_prepare_and_auth_reset() - Prepare, authenticate, and reset the
 *					   remote processor
 *
 * @ctx: Context saved during call to qcom_scm_pas_context_init()
 *
 * This function performs the necessary steps to prepare a PAS subsystem,
 * authenticate it using the provided metadata, and initiate a reset sequence.
 *
 * It should be used when Linux is in control setting up the IOMMU hardware
 * for remote subsystem during secure firmware loading processes. The preparation
 * step sets up a shmbridge over the firmware memory before TrustZone accesses the
 * firmware memory region for authentication. The authentication step verifies
 * the integrity and authenticity of the firmware or configuration using secure
 * metadata. Finally, the reset step ensures the subsystem starts in a clean and
 * sane state.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_scm_pas_prepare_and_auth_reset(struct qcom_scm_pas_context *ctx)
{
	u64 handle;
	int ret;

	/*
	 * When Linux running @ EL1, Gunyah hypervisor running @ EL2 traps the
	 * auth_and_reset call and create an shmbridge on the remote subsystem
	 * memory region and then invokes a call to TrustZone to authenticate.
	 */
	if (!ctx->use_tzmem)
		return qcom_scm_pas_auth_and_reset(ctx->pas_id);

	/*
	 * When Linux runs @ EL2 Linux must create the shmbridge itself and then
	 * subsequently call TrustZone for authenticate and reset.
	 */
	ret = qcom_tzmem_shm_bridge_create(ctx->mem_phys, ctx->mem_size, &handle);
	if (ret)
		return ret;

	ret = qcom_scm_pas_auth_and_reset(ctx->pas_id);
	qcom_tzmem_shm_bridge_delete(handle);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_prepare_and_auth_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @pas_id: peripheral authentication service id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	/* Negative errno from the transport, else the firmware's own result. */
	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @pas_id: peripheral authentication service id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/* First make sure the firmware implements the query at all. */
	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

/* Assert/deassert the MSS (modem subsystem) reset via the PIL service. */
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	/* NOTE(review): @dev is unused; the call always uses __scm->dev. */
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	/* Only a single reset line (index 0) is exposed. */
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

/* Read a secure IO register through the firmware; result stored in *val. */
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

/* Write a secure IO register through the firmware. */
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
1181 */ 1182 bool qcom_scm_restore_sec_cfg_available(void) 1183 { 1184 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 1185 QCOM_SCM_MP_RESTORE_SEC_CFG); 1186 } 1187 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available); 1188 1189 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) 1190 { 1191 struct qcom_scm_desc desc = { 1192 .svc = QCOM_SCM_SVC_MP, 1193 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, 1194 .arginfo = QCOM_SCM_ARGS(2), 1195 .args[0] = device_id, 1196 .args[1] = spare, 1197 .owner = ARM_SMCCC_OWNER_SIP, 1198 }; 1199 struct qcom_scm_res res; 1200 int ret; 1201 1202 ret = qcom_scm_call(__scm->dev, &desc, &res); 1203 1204 return ret ? : res.result[0]; 1205 } 1206 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); 1207 1208 #define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0) 1209 1210 bool qcom_scm_set_gpu_smmu_aperture_is_available(void) 1211 { 1212 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 1213 QCOM_SCM_MP_CP_SMMU_APERTURE_ID); 1214 } 1215 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available); 1216 1217 int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank) 1218 { 1219 struct qcom_scm_desc desc = { 1220 .svc = QCOM_SCM_SVC_MP, 1221 .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID, 1222 .arginfo = QCOM_SCM_ARGS(4), 1223 .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank), 1224 .args[1] = 0xffffffff, 1225 .args[2] = 0xffffffff, 1226 .args[3] = 0xffffffff, 1227 .owner = ARM_SMCCC_OWNER_SIP 1228 }; 1229 1230 return qcom_scm_call(__scm->dev, &desc, NULL); 1231 } 1232 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture); 1233 1234 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) 1235 { 1236 struct qcom_scm_desc desc = { 1237 .svc = QCOM_SCM_SVC_MP, 1238 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE, 1239 .arginfo = QCOM_SCM_ARGS(1), 1240 .args[0] = spare, 1241 .owner = ARM_SMCCC_OWNER_SIP, 1242 }; 1243 struct qcom_scm_res res; 1244 int ret; 1245 1246 ret = qcom_scm_call(__scm->dev, 
&desc, &res); 1247 1248 if (size) 1249 *size = res.result[0]; 1250 1251 return ret ? : res.result[1]; 1252 } 1253 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size); 1254 1255 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) 1256 { 1257 struct qcom_scm_desc desc = { 1258 .svc = QCOM_SCM_SVC_MP, 1259 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT, 1260 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, 1261 QCOM_SCM_VAL), 1262 .args[0] = addr, 1263 .args[1] = size, 1264 .args[2] = spare, 1265 .owner = ARM_SMCCC_OWNER_SIP, 1266 }; 1267 int ret; 1268 1269 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1270 1271 /* the pg table has been initialized already, ignore the error */ 1272 if (ret == -EPERM) 1273 ret = 0; 1274 1275 return ret; 1276 } 1277 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init); 1278 1279 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) 1280 { 1281 struct qcom_scm_desc desc = { 1282 .svc = QCOM_SCM_SVC_MP, 1283 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, 1284 .arginfo = QCOM_SCM_ARGS(2), 1285 .args[0] = size, 1286 .args[1] = spare, 1287 .owner = ARM_SMCCC_OWNER_SIP, 1288 }; 1289 1290 return qcom_scm_call(__scm->dev, &desc, NULL); 1291 } 1292 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size); 1293 1294 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, 1295 u32 cp_nonpixel_start, 1296 u32 cp_nonpixel_size) 1297 { 1298 int ret; 1299 struct qcom_scm_desc desc = { 1300 .svc = QCOM_SCM_SVC_MP, 1301 .cmd = QCOM_SCM_MP_VIDEO_VAR, 1302 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, 1303 QCOM_SCM_VAL, QCOM_SCM_VAL), 1304 .args[0] = cp_start, 1305 .args[1] = cp_size, 1306 .args[2] = cp_nonpixel_start, 1307 .args[3] = cp_nonpixel_size, 1308 .owner = ARM_SMCCC_OWNER_SIP, 1309 }; 1310 struct qcom_scm_res res; 1311 1312 ret = qcom_scm_call(__scm->dev, &desc, &res); 1313 1314 return ret ? 
: res.result[0]; 1315 } 1316 EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var); 1317 1318 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, 1319 size_t mem_sz, phys_addr_t src, size_t src_sz, 1320 phys_addr_t dest, size_t dest_sz) 1321 { 1322 int ret; 1323 struct qcom_scm_desc desc = { 1324 .svc = QCOM_SCM_SVC_MP, 1325 .cmd = QCOM_SCM_MP_ASSIGN, 1326 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, 1327 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, 1328 QCOM_SCM_VAL, QCOM_SCM_VAL), 1329 .args[0] = mem_region, 1330 .args[1] = mem_sz, 1331 .args[2] = src, 1332 .args[3] = src_sz, 1333 .args[4] = dest, 1334 .args[5] = dest_sz, 1335 .args[6] = 0, 1336 .owner = ARM_SMCCC_OWNER_SIP, 1337 }; 1338 struct qcom_scm_res res; 1339 1340 ret = qcom_scm_call(dev, &desc, &res); 1341 1342 return ret ? : res.result[0]; 1343 } 1344 1345 /** 1346 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership 1347 * @mem_addr: mem region whose ownership need to be reassigned 1348 * @mem_sz: size of the region. 1349 * @srcvm: vmid for current set of owners, each set bit in 1350 * flag indicate a unique owner 1351 * @newvm: array having new owners and corresponding permission 1352 * flags 1353 * @dest_cnt: number of owners in next set. 1354 * 1355 * Return negative errno on failure or 0 on success with @srcvm updated. 
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	/* One __le32 entry per set bit in the source-VM bitmap. */
	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	/* The three sub-buffers are packed into one 64-byte-aligned block. */
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	/* Auto-freed TZ shared memory; released when 'ptr' goes out of scope. */
	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return ret;
	}

	/* Report the new owner set back to the caller. */
	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 * qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	/* Both commands must exist for the interface to be usable. */
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	/* The key must be staged in TZ-shared memory for the firmware. */
	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* Wipe the key material before the buffer is returned to the pool. */
	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);

/* All four wrapped-key commands must be present for HWKM support. */
bool qcom_scm_has_wrapped_key_support(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_DERIVE_SW_SECRET) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_GENERATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_PREPARE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_IMPORT_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);

/**
 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
 * @eph_key: an ephemerally-wrapped key
 * @eph_key_size: size of
@eph_key in bytes
 * @sw_secret: output buffer for the software secret
 * @sw_secret_size: size of the software secret to derive in bytes
 *
 * Derive a software secret from an ephemerally-wrapped key for software crypto
 * operations. This is done by calling into the secure execution environment,
 * which then calls into the hardware to unwrap and derive the secret.
 *
 * For more information on sw_secret, see the "Hardware-wrapped keys" section of
 * Documentation/block/inline-encryption.rst.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
			      u8 *sw_secret, size_t sw_secret_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	/* Both input and output buffers must live in TZ-shared memory. */
	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								  sw_secret_size,
								  GFP_KERNEL);
	if (!sw_secret_buf)
		return -ENOMEM;

	memcpy(eph_key_buf, eph_key, eph_key_size);
	desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[1] = eph_key_size;
	desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
	desc.args[3] = sw_secret_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(sw_secret, sw_secret_buf, sw_secret_size);

	/* Wipe key material and secret before the buffers are recycled. */
	memzero_explicit(eph_key_buf, eph_key_size);
	memzero_explicit(sw_secret_buf, sw_secret_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);

/**
 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
 * @lt_key: output buffer for the long-term
wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Generate a key using the built-in HW module in the SoC. The resulting key is
 * returned wrapped with the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	/* TZ writes the generated wrapped key into this shared buffer. */
	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	/* Wipe the key copy before the buffer is returned to the pool. */
	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);

/**
 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
 * @lt_key: a long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes
 * @eph_key: output buffer for the ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
 *		  used by the SoC.
 *
 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
 * added protection. The resulting key will only be valid for the current boot.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
			     u8 *eph_key, size_t eph_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	/* Input (long-term) and output (ephemeral) keys in TZ-shared memory. */
	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	memcpy(lt_key_buf, lt_key, lt_key_size);
	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;
	desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[3] = eph_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(eph_key, eph_key_buf, eph_key_size);

	/* Wipe both key copies before the buffers are recycled. */
	memzero_explicit(lt_key_buf, lt_key_size);
	memzero_explicit(eph_key_buf, eph_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);

/**
 * qcom_scm_import_ice_key() - Import key for storage encryption
 * @raw_key: the raw key to import
 * @raw_key_size: size of @raw_key in bytes
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
 * wrap the raw key using the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
1756 */ 1757 int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size, 1758 u8 *lt_key, size_t lt_key_size) 1759 { 1760 struct qcom_scm_desc desc = { 1761 .svc = QCOM_SCM_SVC_ES, 1762 .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY, 1763 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, 1764 QCOM_SCM_RW, QCOM_SCM_VAL), 1765 .owner = ARM_SMCCC_OWNER_SIP, 1766 }; 1767 int ret; 1768 1769 void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1770 raw_key_size, 1771 GFP_KERNEL); 1772 if (!raw_key_buf) 1773 return -ENOMEM; 1774 1775 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1776 lt_key_size, 1777 GFP_KERNEL); 1778 if (!lt_key_buf) 1779 return -ENOMEM; 1780 1781 memcpy(raw_key_buf, raw_key, raw_key_size); 1782 desc.args[0] = qcom_tzmem_to_phys(raw_key_buf); 1783 desc.args[1] = raw_key_size; 1784 desc.args[2] = qcom_tzmem_to_phys(lt_key_buf); 1785 desc.args[3] = lt_key_size; 1786 1787 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1788 if (!ret) 1789 memcpy(lt_key, lt_key_buf, lt_key_size); 1790 1791 memzero_explicit(raw_key_buf, raw_key_size); 1792 memzero_explicit(lt_key_buf, lt_key_size); 1793 return ret; 1794 } 1795 EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key); 1796 1797 /** 1798 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 1799 * 1800 * Return true if HDCP is supported, false if not. 1801 */ 1802 bool qcom_scm_hdcp_available(void) 1803 { 1804 bool avail; 1805 int ret = qcom_scm_clk_enable(); 1806 1807 if (ret) 1808 return ret; 1809 1810 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, 1811 QCOM_SCM_HDCP_INVOKE); 1812 1813 qcom_scm_clk_disable(); 1814 1815 return avail; 1816 } 1817 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available); 1818 1819 /** 1820 * qcom_scm_hdcp_req() - Send HDCP request. 1821 * @req: HDCP request array 1822 * @req_cnt: HDCP request array count 1823 * @resp: response buffer passed to SCM 1824 * 1825 * Write HDCP register(s) through SCM. 
1826 */ 1827 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) 1828 { 1829 int ret; 1830 struct qcom_scm_desc desc = { 1831 .svc = QCOM_SCM_SVC_HDCP, 1832 .cmd = QCOM_SCM_HDCP_INVOKE, 1833 .arginfo = QCOM_SCM_ARGS(10), 1834 .args = { 1835 req[0].addr, 1836 req[0].val, 1837 req[1].addr, 1838 req[1].val, 1839 req[2].addr, 1840 req[2].val, 1841 req[3].addr, 1842 req[3].val, 1843 req[4].addr, 1844 req[4].val 1845 }, 1846 .owner = ARM_SMCCC_OWNER_SIP, 1847 }; 1848 struct qcom_scm_res res; 1849 1850 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) 1851 return -ERANGE; 1852 1853 ret = qcom_scm_clk_enable(); 1854 if (ret) 1855 return ret; 1856 1857 ret = qcom_scm_call(__scm->dev, &desc, &res); 1858 *resp = res.result[0]; 1859 1860 qcom_scm_clk_disable(); 1861 1862 return ret; 1863 } 1864 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req); 1865 1866 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) 1867 { 1868 struct qcom_scm_desc desc = { 1869 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1870 .cmd = QCOM_SCM_SMMU_PT_FORMAT, 1871 .arginfo = QCOM_SCM_ARGS(3), 1872 .args[0] = sec_id, 1873 .args[1] = ctx_num, 1874 .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ 1875 .owner = ARM_SMCCC_OWNER_SIP, 1876 }; 1877 1878 return qcom_scm_call(__scm->dev, &desc, NULL); 1879 } 1880 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format); 1881 1882 int qcom_scm_qsmmu500_wait_safe_toggle(bool en) 1883 { 1884 struct qcom_scm_desc desc = { 1885 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1886 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1, 1887 .arginfo = QCOM_SCM_ARGS(2), 1888 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL, 1889 .args[1] = en, 1890 .owner = ARM_SMCCC_OWNER_SIP, 1891 }; 1892 1893 1894 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 1895 } 1896 EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle); 1897 1898 bool qcom_scm_lmh_dcvsh_available(void) 1899 { 1900 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); 1901 } 1902 
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

/*
 * This is only supposed to be called once by the TZMem module. It takes the
 * SCM struct device as argument and uses it to pass the call as at the time
 * the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't
 * accept global user calls. Don't try to use the __scm pointer here.
 */
int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(scm_dev, &desc, &res);

	if (ret)
		return ret;

	/* The firmware signals "not supported" in-band via the result code. */
	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

/*
 * Create an SHM bridge over the described memory. On success the opaque
 * bridge handle is returned through @handle (from result[1]); result[0]
 * is the firmware status code.
 */
int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

/* Tear down a bridge previously returned by qcom_scm_shm_bridge_create(). */
int qcom_scm_shm_bridge_delete(u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	/* The payload is a fixed five-word blob handed to TZ by address. */
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

/*
 * Resolve the download-mode cookie address from the optional
 * "qcom,dload-mode" phandle+offset property. Returns 0 (with *addr
 * untouched) when the property is absent.
 */
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	/* Second cell of the property is the offset into the TCSR block. */
	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	/* scm_res is zero-initialized, so these are safe even on failure. */
	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res: SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed same time,
	 * so lock things here. This needs to be extended to callback/listener
	 * handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM querying the QSEECOM version currently running in
 * the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;	/* QSEECOM feature ID for the version query. */
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	/* The version is carried in the generic result word. */
	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id: The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. This returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
2189 */ 2190 int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) 2191 { 2192 unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE; 2193 unsigned long app_name_len = strlen(app_name); 2194 struct qcom_scm_desc desc = {}; 2195 struct qcom_scm_qseecom_resp res = {}; 2196 int status; 2197 2198 if (app_name_len >= name_buf_size) 2199 return -EINVAL; 2200 2201 char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 2202 name_buf_size, 2203 GFP_KERNEL); 2204 if (!name_buf) 2205 return -ENOMEM; 2206 2207 memcpy(name_buf, app_name, app_name_len); 2208 2209 desc.owner = QSEECOM_TZ_OWNER_QSEE_OS; 2210 desc.svc = QSEECOM_TZ_SVC_APP_MGR; 2211 desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP; 2212 desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL); 2213 desc.args[0] = qcom_tzmem_to_phys(name_buf); 2214 desc.args[1] = app_name_len; 2215 2216 status = qcom_scm_qseecom_call(&desc, &res); 2217 2218 if (status) 2219 return status; 2220 2221 if (res.result == QSEECOM_RESULT_FAILURE) 2222 return -ENOENT; 2223 2224 if (res.result != QSEECOM_RESULT_SUCCESS) 2225 return -EINVAL; 2226 2227 if (res.resp_type != QSEECOM_SCM_RES_APP_ID) 2228 return -EINVAL; 2229 2230 *app_id = res.data; 2231 return 0; 2232 } 2233 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); 2234 2235 /** 2236 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. 2237 * @app_id: The ID of the target app. 2238 * @req: Request buffer sent to the app (must be TZ memory) 2239 * @req_size: Size of the request buffer. 2240 * @rsp: Response buffer, written to by the app (must be TZ memory) 2241 * @rsp_size: Size of the response buffer. 2242 * 2243 * Sends a request to the QSEE app associated with the given ID and read back 2244 * its response. The caller must provide two DMA memory regions, one for the 2245 * request and one for the response, and fill out the @req region with the 2246 * respective (app-specific) request data. 
The QSEE app reads this and returns 2247 * its response in the @rsp region. 2248 * 2249 * Return: Zero on success, nonzero on failure. 2250 */ 2251 int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, 2252 void *rsp, size_t rsp_size) 2253 { 2254 struct qcom_scm_qseecom_resp res = {}; 2255 struct qcom_scm_desc desc = {}; 2256 phys_addr_t req_phys; 2257 phys_addr_t rsp_phys; 2258 int status; 2259 2260 req_phys = qcom_tzmem_to_phys(req); 2261 rsp_phys = qcom_tzmem_to_phys(rsp); 2262 2263 desc.owner = QSEECOM_TZ_OWNER_TZ_APPS; 2264 desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER; 2265 desc.cmd = QSEECOM_TZ_CMD_APP_SEND; 2266 desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, 2267 QCOM_SCM_RW, QCOM_SCM_VAL, 2268 QCOM_SCM_RW, QCOM_SCM_VAL); 2269 desc.args[0] = app_id; 2270 desc.args[1] = req_phys; 2271 desc.args[2] = req_size; 2272 desc.args[3] = rsp_phys; 2273 desc.args[4] = rsp_size; 2274 2275 status = qcom_scm_qseecom_call(&desc, &res); 2276 2277 if (status) 2278 return status; 2279 2280 if (res.result != QSEECOM_RESULT_SUCCESS) 2281 return -EIO; 2282 2283 return 0; 2284 } 2285 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); 2286 2287 /* 2288 * We do not yet support re-entrant calls via the qseecom interface. To prevent 2289 + any potential issues with this, only allow validated machines for now. 
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "asus,vivobook-s15" },
	{ .compatible = "asus,zenbook-a14-ux3407qa" },
	{ .compatible = "asus,zenbook-a14-ux3407ra" },
	{ .compatible = "dell,inspiron-14-plus-7441" },
	{ .compatible = "dell,latitude-7455" },
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "hp,elitebook-ultra-g1q" },
	{ .compatible = "hp,omnibook-x14" },
	{ .compatible = "huawei,gaokun3" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkbook-16" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "medion,sprchrgd14s1" },
	{ .compatible = "microsoft,arcata", },
	{ .compatible = "microsoft,blackrock" },
	{ .compatible = "microsoft,denali", },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,hamoa-iot-evk" },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e001de-devkit" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ .compatible = "qcom,x1p42100-crd" },
	{ }
};

/*
 * devm action for qcom_scm_qseecom_init(): undoes the
 * platform_device_alloc() + platform_device_add() pair.
 */
static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

/*
 * Probe for QSEECOM support and, if present and the machine is allowlisted,
 * register the "qcom_qseecom" child platform device. Returns 0 when QSEECOM
 * is absent or the machine is not allowlisted (this is not an error).
 */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	/* Tear the child device down automatically when the SCM device goes. */
	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

/* Stub when QSEECOM support is compiled out: report success, do nothing. */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
 * @inbuf: start address of memory area used for inbound buffer.
 * @inbuf_size: size of the memory area used for inbound buffer.
 * @outbuf: start address of memory area used for outbound buffer.
 * @outbuf_size: size of the memory area used for outbound buffer.
 * @result: result of QTEE object invocation.
 * @response_type: response type returned by QTEE.
 *
 * @response_type determines how the contents of @inbuf and @outbuf
 * should be processed.
 *
 * Return: On success, return 0 or <0 on failure.
2398 */ 2399 int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size, 2400 phys_addr_t outbuf, size_t outbuf_size, 2401 u64 *result, u64 *response_type) 2402 { 2403 struct qcom_scm_desc desc = { 2404 .svc = QCOM_SCM_SVC_SMCINVOKE, 2405 .cmd = QCOM_SCM_SMCINVOKE_INVOKE, 2406 .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2407 .args[0] = inbuf, 2408 .args[1] = inbuf_size, 2409 .args[2] = outbuf, 2410 .args[3] = outbuf_size, 2411 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL, 2412 QCOM_SCM_RW, QCOM_SCM_VAL), 2413 }; 2414 struct qcom_scm_res res; 2415 int ret; 2416 2417 ret = qcom_scm_call(__scm->dev, &desc, &res); 2418 if (ret) 2419 return ret; 2420 2421 if (response_type) 2422 *response_type = res.result[0]; 2423 2424 if (result) 2425 *result = res.result[1]; 2426 2427 return 0; 2428 } 2429 EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc); 2430 2431 /** 2432 * qcom_scm_qtee_callback_response() - Submit response for callback request. 2433 * @buf: start address of memory area used for outbound buffer. 2434 * @buf_size: size of the memory area used for outbound buffer. 2435 * @result: Result of QTEE object invocation. 2436 * @response_type: Response type returned by QTEE. 2437 * 2438 * @response_type determines how the contents of @buf should be processed. 2439 * 2440 * Return: On success, return 0 or <0 on failure. 
 */
int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
				    u64 *result, u64 *response_type)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMCINVOKE,
		.cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
		.owner = ARM_SMCCC_OWNER_TRUSTED_OS,
		.args[0] = buf,
		.args[1] = buf_size,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	if (ret)
		return ret;

	/* Both output pointers are optional. */
	if (response_type)
		*response_type = res.result[0];

	if (result)
		*result = res.result[1];

	return 0;
}
EXPORT_SYMBOL(qcom_scm_qtee_callback_response);

/* devm action for qcom_scm_qtee_init(): unregisters the QTEE child device. */
static void qcom_scm_qtee_free(void *data)
{
	struct platform_device *qtee_dev = data;

	platform_device_unregister(qtee_dev);
}

/*
 * Probe for QTEE smcinvoke support and register the "qcomtee" child platform
 * device when available. Failures here are silently ignored; QTEE support is
 * optional.
 */
static void qcom_scm_qtee_init(struct qcom_scm *scm)
{
	struct platform_device *qtee_dev;
	u64 result, response_type;
	int ret;

	/*
	 * Probe for smcinvoke support. This will fail due to invalid buffers,
	 * but first, it checks whether the call is supported in QTEE syscall
	 * handler. If it is not supported, -EIO is returned.
	 */
	ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
	if (ret == -EIO)
		return;

	/* Setup QTEE interface device. */
	qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
						 PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(qtee_dev))
		return;

	/* Best effort: on failure the action unregisters the device for us. */
	devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 *
 * Return: true once qcom_scm_probe() has published the global SCM instance.
 */
bool qcom_scm_is_available(void)
{
	/* Paired with smp_store_release() in qcom_scm_probe */
	return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);

/*
 * Translate a raw GIC hwirq number (as reported by firmware) into a 3-cell
 * GIC fwspec (type, offset, trigger). Only SPI and ESPI ranges are valid.
 */
static int qcom_scm_fill_irq_fwspec_params(struct irq_fwspec *fwspec, u32 hwirq)
{
	if (hwirq >= GIC_SPI_BASE && hwirq <= GIC_MAX_SPI) {
		fwspec->param[0] = GIC_SPI;
		fwspec->param[1] = hwirq - GIC_SPI_BASE;
	} else if (hwirq >= GIC_ESPI_BASE && hwirq <= GIC_MAX_ESPI) {
		fwspec->param[0] = GIC_ESPI;
		fwspec->param[1] = hwirq - GIC_ESPI_BASE;
	} else {
		WARN(1, "Unexpected hwirq: %d\n", hwirq);
		return -ENXIO;
	}

	fwspec->param[2] = IRQ_TYPE_EDGE_RISING;
	fwspec->param_count = 3;

	return 0;
}

/*
 * Ask firmware how many wait queues it supports. The count is carried in the
 * low 8 bits of the first result word. Returns <0 on SCM call failure.
 */
static int qcom_scm_query_waitq_count(struct qcom_scm *scm)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_WAITQ,
		.cmd = QCOM_SCM_WAITQ_GET_INFO,
		.owner = ARM_SMCCC_OWNER_SIP
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(scm->dev, &desc, &res);
	if (ret)
		return ret;

	return res.result[0] & GENMASK(7, 0);
}

/*
 * Query firmware for the wait-queue hwirq number (low 16 bits of the second
 * result word) and map it to a Linux IRQ via the parent interrupt controller.
 */
static int qcom_scm_get_waitq_irq(struct qcom_scm *scm)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_WAITQ,
		.cmd = QCOM_SCM_WAITQ_GET_INFO,
		.owner = ARM_SMCCC_OWNER_SIP
	};
	struct device_node *parent_irq_node;
	struct irq_fwspec fwspec;
	struct qcom_scm_res res;
	u32 hwirq;
	int ret;

	ret = qcom_scm_call_atomic(scm->dev, &desc, &res);
	if (ret)
		return ret;

	hwirq = res.result[1] & GENMASK(15, 0);
	ret = qcom_scm_fill_irq_fwspec_params(&fwspec, hwirq);
	if (ret)
		return ret;

	parent_irq_node = of_irq_find_parent(scm->dev->of_node);
	if (!parent_irq_node)
		return -ENODEV;

	fwspec.fwnode = of_fwnode_handle(parent_irq_node);

	return irq_create_fwspec_mapping(&fwspec);
}

/*
 * Look up the completion for a firmware wait-queue context. Returns an
 * ERR_PTR if the context index is out of the range probed at init time.
 */
static struct completion *qcom_scm_get_completion(u32 wq_ctx)
{
	struct completion *wq;

	if (WARN_ON_ONCE(wq_ctx >= __scm->wq_cnt))
		return ERR_PTR(-EINVAL);

	wq = &__scm->waitq_comps[wq_ctx];

	return wq;
}

/* Block until the firmware wakes the given wait-queue context. */
int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	struct completion *wq;

	wq = qcom_scm_get_completion(wq_ctx);
	if (IS_ERR(wq))
		return PTR_ERR(wq);

	wait_for_completion_state(wq, TASK_IDLE);

	return 0;
}

/* Wake one waiter sleeping on the given wait-queue context. */
static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	struct completion *wq;

	wq = qcom_scm_get_completion(wq_ctx);
	if (IS_ERR(wq))
		return PTR_ERR(wq);

	complete(wq);

	return 0;
}

/*
 * Threaded IRQ handler for firmware wait-queue wakeups: drain all pending
 * contexts reported by scm_get_wq_ctx() and wake their waiters. Always
 * returns IRQ_HANDLED; errors are only logged.
 */
static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		/* Only the wake-one flag is currently supported. */
		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}

/* module_param getter: print the current download mode by name. */
static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n",
			  download_mode_name[download_mode]);
}

/*
 * module_param setter: accept either a mode name from download_mode_name or
 * a boolean (off/0/N -> nodump, on/1/Y -> fulldump), then apply it
 * immediately if the SCM device has already probed.
 */
static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		/* Not a known mode name; fall back to boolean parsing. */
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values");

/*
 * Main probe: set up clocks, interconnect, reset controller, TZ memory pool,
 * firmware wait queues and IRQ, then publish the global SCM handle and
 * initialize the optional QSEECOM/QTEE child interfaces.
 */
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;
	int i;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	/* All three clocks are optional; absent clocks yield NULL (no-op). */
	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* -ENODEV just means no reserved-memory region is described in DT. */
	ret = of_reserved_mem_device_init(scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(scm->dev);
	if (ret)
		return dev_err_probe(scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	/* On-demand pool: starts empty, grows as needed up to 256K. */
	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
	if (IS_ERR(scm->mempool))
		return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/* Fall back to a single wait queue if firmware can't report a count. */
	ret = qcom_scm_query_waitq_count(scm);
	scm->wq_cnt = ret < 0 ? QCOM_SCM_DEFAULT_WAITQ_COUNT : ret;
	scm->waitq_comps = devm_kcalloc(&pdev->dev, scm->wq_cnt, sizeof(*scm->waitq_comps),
					GFP_KERNEL);
	if (!scm->waitq_comps)
		return -ENOMEM;

	for (i = 0; i < scm->wq_cnt; i++)
		init_completion(&scm->waitq_comps[i]);

	/* Prefer the firmware-reported IRQ, fall back to the DT-described one. */
	irq = qcom_scm_get_waitq_irq(scm);
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		/* -ENXIO means no wait-queue IRQ at all, which is acceptable. */
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret,
					     "Failed to request qcom-scm irq\n");
	}

	/*
	 * Paired with smp_load_acquire() in qcom_scm_is_available().
	 *
	 * This marks the SCM API as ready to accept user calls and can only
	 * be called after the TrustZone memory pool is initialized and the
	 * waitqueue interrupt requested.
	 */
	smp_store_release(&__scm, scm);

	__get_convention();

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	/* Initialize the QTEE object interface. */
	qcom_scm_qtee_init(scm);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name = "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

/* Registered at subsys_initcall time so consumers can probe against SCM. */
static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");