1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. 3 * Copyright (C) 2015 Linaro Ltd. 4 */ 5 6 #include <linux/arm-smccc.h> 7 #include <linux/bitfield.h> 8 #include <linux/bits.h> 9 #include <linux/cleanup.h> 10 #include <linux/clk.h> 11 #include <linux/completion.h> 12 #include <linux/cpumask.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/err.h> 15 #include <linux/export.h> 16 #include <linux/firmware/qcom/qcom_scm.h> 17 #include <linux/firmware/qcom/qcom_tzmem.h> 18 #include <linux/init.h> 19 #include <linux/interconnect.h> 20 #include <linux/interrupt.h> 21 #include <linux/kstrtox.h> 22 #include <linux/module.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 #include <linux/of_irq.h> 26 #include <linux/of_platform.h> 27 #include <linux/of_reserved_mem.h> 28 #include <linux/platform_device.h> 29 #include <linux/reset-controller.h> 30 #include <linux/remoteproc.h> 31 #include <linux/sizes.h> 32 #include <linux/types.h> 33 34 #include <dt-bindings/interrupt-controller/arm-gic.h> 35 36 #include "qcom_scm.h" 37 #include "qcom_tzmem.h" 38 39 static u32 download_mode; 40 41 #define GIC_SPI_BASE 32 42 #define GIC_MAX_SPI 1019 // SPIs in GICv3 spec range from 32..1019 43 #define GIC_ESPI_BASE 4096 44 #define GIC_MAX_ESPI 5119 // ESPIs in GICv3 spec range from 4096..5119 45 46 struct qcom_scm { 47 struct device *dev; 48 struct clk *core_clk; 49 struct clk *iface_clk; 50 struct clk *bus_clk; 51 struct icc_path *path; 52 struct completion *waitq_comps; 53 struct reset_controller_dev reset; 54 55 /* control access to the interconnect path */ 56 struct mutex scm_bw_lock; 57 int scm_vote_count; 58 59 u64 dload_mode_addr; 60 61 struct qcom_tzmem_pool *mempool; 62 unsigned int wq_cnt; 63 }; 64 65 struct qcom_scm_current_perm_info { 66 __le32 vmid; 67 __le32 perm; 68 __le64 ctx; 69 __le32 ctx_size; 70 __le32 unused; 71 }; 72 73 struct qcom_scm_mem_map_info { 74 __le64 mem_addr; 75 __le64 
mem_size; 76 }; 77 78 /** 79 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response. 80 * @result: Result or status of the SCM call. See &enum qcom_scm_qseecom_result. 81 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type. 82 * @data: Response data. The type of this data is given in @resp_type. 83 */ 84 struct qcom_scm_qseecom_resp { 85 u64 result; 86 u64 resp_type; 87 u64 data; 88 }; 89 90 enum qcom_scm_qseecom_result { 91 QSEECOM_RESULT_SUCCESS = 0, 92 QSEECOM_RESULT_INCOMPLETE = 1, 93 QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2, 94 QSEECOM_RESULT_FAILURE = 0xFFFFFFFF, 95 }; 96 97 enum qcom_scm_qseecom_resp_type { 98 QSEECOM_SCM_RES_APP_ID = 0xEE01, 99 QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02, 100 }; 101 102 enum qcom_scm_qseecom_tz_owner { 103 QSEECOM_TZ_OWNER_SIP = 2, 104 QSEECOM_TZ_OWNER_TZ_APPS = 48, 105 QSEECOM_TZ_OWNER_QSEE_OS = 50 106 }; 107 108 enum qcom_scm_qseecom_tz_svc { 109 QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0, 110 QSEECOM_TZ_SVC_APP_MGR = 1, 111 QSEECOM_TZ_SVC_INFO = 6, 112 }; 113 114 enum qcom_scm_qseecom_tz_cmd_app { 115 QSEECOM_TZ_CMD_APP_SEND = 1, 116 QSEECOM_TZ_CMD_APP_LOOKUP = 3, 117 }; 118 119 enum qcom_scm_qseecom_tz_cmd_info { 120 QSEECOM_TZ_CMD_INFO_VERSION = 3, 121 }; 122 123 #define RSCTABLE_BUFFER_NOT_SUFFICIENT 20 124 125 #define QSEECOM_MAX_APP_NAME_SIZE 64 126 #define SHMBRIDGE_RESULT_NOTSUPP 4 127 128 /* Each bit configures cold/warm boot address for one of the 4 CPUs */ 129 static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { 130 0, BIT(0), BIT(3), BIT(5) 131 }; 132 static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { 133 BIT(2), BIT(1), BIT(4), BIT(6) 134 }; 135 136 #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) 137 138 #define QCOM_DLOAD_MASK GENMASK(5, 4) 139 #define QCOM_DLOAD_NODUMP 0 140 #define QCOM_DLOAD_FULLDUMP 1 141 #define QCOM_DLOAD_MINIDUMP 2 142 #define QCOM_DLOAD_BOTHDUMP 3 143 144 #define QCOM_SCM_DEFAULT_WAITQ_COUNT 1 145 146 static const char * const 
qcom_scm_convention_names[] = { 147 [SMC_CONVENTION_UNKNOWN] = "unknown", 148 [SMC_CONVENTION_ARM_32] = "smc arm 32", 149 [SMC_CONVENTION_ARM_64] = "smc arm 64", 150 [SMC_CONVENTION_LEGACY] = "smc legacy", 151 }; 152 153 static const char * const download_mode_name[] = { 154 [QCOM_DLOAD_NODUMP] = "off", 155 [QCOM_DLOAD_FULLDUMP] = "full", 156 [QCOM_DLOAD_MINIDUMP] = "mini", 157 [QCOM_DLOAD_BOTHDUMP] = "full,mini", 158 }; 159 160 static struct qcom_scm *__scm; 161 162 static int qcom_scm_clk_enable(void) 163 { 164 int ret; 165 166 ret = clk_prepare_enable(__scm->core_clk); 167 if (ret) 168 goto bail; 169 170 ret = clk_prepare_enable(__scm->iface_clk); 171 if (ret) 172 goto disable_core; 173 174 ret = clk_prepare_enable(__scm->bus_clk); 175 if (ret) 176 goto disable_iface; 177 178 return 0; 179 180 disable_iface: 181 clk_disable_unprepare(__scm->iface_clk); 182 disable_core: 183 clk_disable_unprepare(__scm->core_clk); 184 bail: 185 return ret; 186 } 187 188 static void qcom_scm_clk_disable(void) 189 { 190 clk_disable_unprepare(__scm->core_clk); 191 clk_disable_unprepare(__scm->iface_clk); 192 clk_disable_unprepare(__scm->bus_clk); 193 } 194 195 static int qcom_scm_bw_enable(void) 196 { 197 int ret = 0; 198 199 if (!__scm->path) 200 return 0; 201 202 guard(mutex)(&__scm->scm_bw_lock); 203 204 if (!__scm->scm_vote_count) { 205 ret = icc_set_bw(__scm->path, 0, UINT_MAX); 206 if (ret < 0) { 207 dev_err(__scm->dev, "failed to set bandwidth request\n"); 208 return ret; 209 } 210 } 211 __scm->scm_vote_count++; 212 213 return 0; 214 } 215 216 static void qcom_scm_bw_disable(void) 217 { 218 if (!__scm->path) 219 return; 220 221 mutex_lock(&__scm->scm_bw_lock); 222 if (__scm->scm_vote_count-- == 1) 223 icc_set_bw(__scm->path, 0, 0); 224 mutex_unlock(&__scm->scm_bw_lock); 225 } 226 227 enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; 228 static DEFINE_SPINLOCK(scm_query_lock); 229 230 struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void) 231 { 232 if 
(!qcom_scm_is_available()) 233 return NULL; 234 235 return __scm->mempool; 236 } 237 238 static enum qcom_scm_convention __get_convention(void) 239 { 240 unsigned long flags; 241 struct qcom_scm_desc desc = { 242 .svc = QCOM_SCM_SVC_INFO, 243 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, 244 .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO, 245 QCOM_SCM_INFO_IS_CALL_AVAIL) | 246 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT), 247 .arginfo = QCOM_SCM_ARGS(1), 248 .owner = ARM_SMCCC_OWNER_SIP, 249 }; 250 struct qcom_scm_res res; 251 enum qcom_scm_convention probed_convention; 252 int ret; 253 bool forced = false; 254 255 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN)) 256 return qcom_scm_convention; 257 258 /* 259 * Per the "SMC calling convention specification", the 64-bit calling 260 * convention can only be used when the client is 64-bit, otherwise 261 * system will encounter the undefined behaviour. 262 */ 263 #if IS_ENABLED(CONFIG_ARM64) 264 /* 265 * Device isn't required as there is only one argument - no device 266 * needed to dma_map_single to secure world 267 */ 268 probed_convention = SMC_CONVENTION_ARM_64; 269 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); 270 if (!ret && res.result[0] == 1) 271 goto found; 272 273 /* 274 * Some SC7180 firmwares didn't implement the 275 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64 276 * calling conventions on these firmwares. Luckily we don't make any 277 * early calls into the firmware on these SoCs so the device pointer 278 * will be valid here to check if the compatible matches. 279 */ 280 if (of_device_is_compatible(__scm ? 
__scm->dev->of_node : NULL, "qcom,scm-sc7180")) { 281 forced = true; 282 goto found; 283 } 284 #endif 285 286 probed_convention = SMC_CONVENTION_ARM_32; 287 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); 288 if (!ret && res.result[0] == 1) 289 goto found; 290 291 probed_convention = SMC_CONVENTION_LEGACY; 292 found: 293 spin_lock_irqsave(&scm_query_lock, flags); 294 if (probed_convention != qcom_scm_convention) { 295 qcom_scm_convention = probed_convention; 296 pr_info("qcom_scm: convention: %s%s\n", 297 qcom_scm_convention_names[qcom_scm_convention], 298 forced ? " (forced)" : ""); 299 } 300 spin_unlock_irqrestore(&scm_query_lock, flags); 301 302 return qcom_scm_convention; 303 } 304 305 /** 306 * qcom_scm_call() - Invoke a syscall in the secure world 307 * @dev: device 308 * @desc: Descriptor structure containing arguments and return values 309 * @res: Structure containing results from SMC/HVC call 310 * 311 * Sends a command to the SCM and waits for the command to finish processing. 312 * This should *only* be called in pre-emptible context. 313 */ 314 static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc, 315 struct qcom_scm_res *res) 316 { 317 might_sleep(); 318 switch (__get_convention()) { 319 case SMC_CONVENTION_ARM_32: 320 case SMC_CONVENTION_ARM_64: 321 return scm_smc_call(dev, desc, res, false); 322 case SMC_CONVENTION_LEGACY: 323 return scm_legacy_call(dev, desc, res); 324 default: 325 pr_err("Unknown current SCM calling convention.\n"); 326 return -EINVAL; 327 } 328 } 329 330 /** 331 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() 332 * @dev: device 333 * @desc: Descriptor structure containing arguments and return values 334 * @res: Structure containing results from SMC/HVC call 335 * 336 * Sends a command to the SCM and waits for the command to finish processing. 337 * This can be called in atomic context. 
338 */ 339 static int qcom_scm_call_atomic(struct device *dev, 340 const struct qcom_scm_desc *desc, 341 struct qcom_scm_res *res) 342 { 343 switch (__get_convention()) { 344 case SMC_CONVENTION_ARM_32: 345 case SMC_CONVENTION_ARM_64: 346 return scm_smc_call(dev, desc, res, true); 347 case SMC_CONVENTION_LEGACY: 348 return scm_legacy_call_atomic(dev, desc, res); 349 default: 350 pr_err("Unknown current SCM calling convention.\n"); 351 return -EINVAL; 352 } 353 } 354 355 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, 356 u32 cmd_id) 357 { 358 int ret; 359 struct qcom_scm_desc desc = { 360 .svc = QCOM_SCM_SVC_INFO, 361 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, 362 .owner = ARM_SMCCC_OWNER_SIP, 363 }; 364 struct qcom_scm_res res; 365 366 desc.arginfo = QCOM_SCM_ARGS(1); 367 switch (__get_convention()) { 368 case SMC_CONVENTION_ARM_32: 369 case SMC_CONVENTION_ARM_64: 370 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) | 371 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); 372 break; 373 case SMC_CONVENTION_LEGACY: 374 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id); 375 break; 376 default: 377 pr_err("Unknown SMC convention being used\n"); 378 return false; 379 } 380 381 ret = qcom_scm_call(dev, &desc, &res); 382 383 return ret ? false : !!res.result[0]; 384 } 385 386 static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) 387 { 388 int cpu; 389 unsigned int flags = 0; 390 struct qcom_scm_desc desc = { 391 .svc = QCOM_SCM_SVC_BOOT, 392 .cmd = QCOM_SCM_BOOT_SET_ADDR, 393 .arginfo = QCOM_SCM_ARGS(2), 394 .owner = ARM_SMCCC_OWNER_SIP, 395 }; 396 397 for_each_present_cpu(cpu) { 398 if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) 399 return -EINVAL; 400 flags |= cpu_bits[cpu]; 401 } 402 403 desc.args[0] = flags; 404 desc.args[1] = virt_to_phys(entry); 405 406 return qcom_scm_call_atomic(__scm ? 
__scm->dev : NULL, &desc, NULL); 407 } 408 409 static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags) 410 { 411 struct qcom_scm_desc desc = { 412 .svc = QCOM_SCM_SVC_BOOT, 413 .cmd = QCOM_SCM_BOOT_SET_ADDR_MC, 414 .owner = ARM_SMCCC_OWNER_SIP, 415 .arginfo = QCOM_SCM_ARGS(6), 416 .args = { 417 virt_to_phys(entry), 418 /* Apply to all CPUs in all affinity levels */ 419 ~0ULL, ~0ULL, ~0ULL, ~0ULL, 420 flags, 421 }, 422 }; 423 424 /* Need a device for DMA of the additional arguments */ 425 if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY) 426 return -EOPNOTSUPP; 427 428 return qcom_scm_call(__scm->dev, &desc, NULL); 429 } 430 431 /** 432 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus 433 * @entry: Entry point function for the cpus 434 * 435 * Set the Linux entry point for the SCM to transfer control to when coming 436 * out of a power down. CPU power down may be executed on cpuidle or hotplug. 437 */ 438 int qcom_scm_set_warm_boot_addr(void *entry) 439 { 440 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT)) 441 /* Fallback to old SCM call */ 442 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); 443 return 0; 444 } 445 EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr); 446 447 /** 448 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus 449 * @entry: Entry point function for the cpus 450 */ 451 int qcom_scm_set_cold_boot_addr(void *entry) 452 { 453 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT)) 454 /* Fallback to old SCM call */ 455 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); 456 return 0; 457 } 458 EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr); 459 460 /** 461 * qcom_scm_cpu_power_down() - Power down the cpu 462 * @flags: Flags to flush cache 463 * 464 * This is an end point to power down cpu. 
If there was a pending interrupt, 465 * the control would return from this function, otherwise, the cpu jumps to the 466 * warm boot entry point set for this cpu upon reset. 467 */ 468 void qcom_scm_cpu_power_down(u32 flags) 469 { 470 struct qcom_scm_desc desc = { 471 .svc = QCOM_SCM_SVC_BOOT, 472 .cmd = QCOM_SCM_BOOT_TERMINATE_PC, 473 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK, 474 .arginfo = QCOM_SCM_ARGS(1), 475 .owner = ARM_SMCCC_OWNER_SIP, 476 }; 477 478 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); 479 } 480 EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down); 481 482 int qcom_scm_set_remote_state(u32 state, u32 id) 483 { 484 struct qcom_scm_desc desc = { 485 .svc = QCOM_SCM_SVC_BOOT, 486 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE, 487 .arginfo = QCOM_SCM_ARGS(2), 488 .args[0] = state, 489 .args[1] = id, 490 .owner = ARM_SMCCC_OWNER_SIP, 491 }; 492 struct qcom_scm_res res; 493 int ret; 494 495 ret = qcom_scm_call(__scm->dev, &desc, &res); 496 497 return ret ? : res.result[0]; 498 } 499 EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state); 500 501 static int qcom_scm_disable_sdi(void) 502 { 503 int ret; 504 struct qcom_scm_desc desc = { 505 .svc = QCOM_SCM_SVC_BOOT, 506 .cmd = QCOM_SCM_BOOT_SDI_CONFIG, 507 .args[0] = 1, /* Disable watchdog debug */ 508 .args[1] = 0, /* Disable SDI */ 509 .arginfo = QCOM_SCM_ARGS(2), 510 .owner = ARM_SMCCC_OWNER_SIP, 511 }; 512 struct qcom_scm_res res; 513 514 ret = qcom_scm_clk_enable(); 515 if (ret) 516 return ret; 517 ret = qcom_scm_call(__scm->dev, &desc, &res); 518 519 qcom_scm_clk_disable(); 520 521 return ret ? : res.result[0]; 522 } 523 524 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) 525 { 526 struct qcom_scm_desc desc = { 527 .svc = QCOM_SCM_SVC_BOOT, 528 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE, 529 .arginfo = QCOM_SCM_ARGS(2), 530 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE, 531 .owner = ARM_SMCCC_OWNER_SIP, 532 }; 533 534 desc.args[1] = enable ? 
QCOM_SCM_BOOT_SET_DLOAD_MODE : 0; 535 536 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 537 } 538 539 static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val) 540 { 541 unsigned int old; 542 unsigned int new; 543 int ret; 544 545 ret = qcom_scm_io_readl(addr, &old); 546 if (ret) 547 return ret; 548 549 new = (old & ~mask) | (val & mask); 550 551 return qcom_scm_io_writel(addr, new); 552 } 553 554 static void qcom_scm_set_download_mode(u32 dload_mode) 555 { 556 int ret = 0; 557 558 if (__scm->dload_mode_addr) { 559 ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK, 560 FIELD_PREP(QCOM_DLOAD_MASK, dload_mode)); 561 } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, 562 QCOM_SCM_BOOT_SET_DLOAD_MODE)) { 563 ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode); 564 } else if (dload_mode) { 565 dev_err(__scm->dev, 566 "No available mechanism for setting download mode\n"); 567 } 568 569 if (ret) 570 dev_err(__scm->dev, "failed to set download mode: %d\n", ret); 571 } 572 573 /** 574 * devm_qcom_scm_pas_context_alloc() - Allocate peripheral authentication service 575 * context for a given peripheral 576 * 577 * PAS context is device-resource managed, so the caller does not need 578 * to worry about freeing the context memory. 579 * 580 * @dev: PAS firmware device 581 * @pas_id: peripheral authentication service id 582 * @mem_phys: Subsystem reserve memory start address 583 * @mem_size: Subsystem reserve memory size 584 * 585 * Returns: The new PAS context, or ERR_PTR() on failure. 
586 */ 587 struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev, 588 u32 pas_id, 589 phys_addr_t mem_phys, 590 size_t mem_size) 591 { 592 struct qcom_scm_pas_context *ctx; 593 594 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 595 if (!ctx) 596 return ERR_PTR(-ENOMEM); 597 598 ctx->dev = dev; 599 ctx->pas_id = pas_id; 600 ctx->mem_phys = mem_phys; 601 ctx->mem_size = mem_size; 602 603 return ctx; 604 } 605 EXPORT_SYMBOL_GPL(devm_qcom_scm_pas_context_alloc); 606 607 static int __qcom_scm_pas_init_image(u32 pas_id, dma_addr_t mdata_phys, 608 struct qcom_scm_res *res) 609 { 610 struct qcom_scm_desc desc = { 611 .svc = QCOM_SCM_SVC_PIL, 612 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, 613 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), 614 .args[0] = pas_id, 615 .owner = ARM_SMCCC_OWNER_SIP, 616 }; 617 int ret; 618 619 ret = qcom_scm_clk_enable(); 620 if (ret) 621 return ret; 622 623 ret = qcom_scm_bw_enable(); 624 if (ret) 625 goto disable_clk; 626 627 desc.args[1] = mdata_phys; 628 629 ret = qcom_scm_call(__scm->dev, &desc, res); 630 qcom_scm_bw_disable(); 631 632 disable_clk: 633 qcom_scm_clk_disable(); 634 635 return ret; 636 } 637 638 static int qcom_scm_pas_prep_and_init_image(struct qcom_scm_pas_context *ctx, 639 const void *metadata, size_t size) 640 { 641 struct qcom_scm_res res; 642 phys_addr_t mdata_phys; 643 void *mdata_buf; 644 int ret; 645 646 mdata_buf = qcom_tzmem_alloc(__scm->mempool, size, GFP_KERNEL); 647 if (!mdata_buf) 648 return -ENOMEM; 649 650 memcpy(mdata_buf, metadata, size); 651 mdata_phys = qcom_tzmem_to_phys(mdata_buf); 652 653 ret = __qcom_scm_pas_init_image(ctx->pas_id, mdata_phys, &res); 654 if (ret < 0) 655 qcom_tzmem_free(mdata_buf); 656 else 657 ctx->ptr = mdata_buf; 658 659 return ret ? 
: res.result[0]; 660 } 661 662 /** 663 * qcom_scm_pas_init_image() - Initialize peripheral authentication service 664 * state machine for a given peripheral, using the 665 * metadata 666 * @pas_id: peripheral authentication service id 667 * @metadata: pointer to memory containing ELF header, program header table 668 * and optional blob of data used for authenticating the metadata 669 * and the rest of the firmware 670 * @size: size of the metadata 671 * @ctx: optional pas context 672 * 673 * Return: 0 on success. 674 * 675 * Upon successful return, the PAS metadata context (@ctx) will be used to 676 * track the metadata allocation, this needs to be released by invoking 677 * qcom_scm_pas_metadata_release() by the caller. 678 */ 679 int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size, 680 struct qcom_scm_pas_context *ctx) 681 { 682 struct qcom_scm_res res; 683 dma_addr_t mdata_phys; 684 void *mdata_buf; 685 int ret; 686 687 if (ctx && ctx->use_tzmem) 688 return qcom_scm_pas_prep_and_init_image(ctx, metadata, size); 689 690 /* 691 * During the scm call memory protection will be enabled for the meta 692 * data blob, so make sure it's physically contiguous, 4K aligned and 693 * non-cachable to avoid XPU violations. 694 * 695 * For PIL calls the hypervisor creates SHM Bridges for the blob 696 * buffers on behalf of Linux so we must not do it ourselves hence 697 * not using the TZMem allocator here. 698 * 699 * If we pass a buffer that is already part of an SHM Bridge to this 700 * call, it will fail. 701 */ 702 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, 703 GFP_KERNEL); 704 if (!mdata_buf) 705 return -ENOMEM; 706 707 memcpy(mdata_buf, metadata, size); 708 709 ret = __qcom_scm_pas_init_image(pas_id, mdata_phys, &res); 710 if (ret < 0 || !ctx) { 711 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); 712 } else if (ctx) { 713 ctx->ptr = mdata_buf; 714 ctx->phys = mdata_phys; 715 ctx->size = size; 716 } 717 718 return ret ? 
: res.result[0]; 719 } 720 EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image); 721 722 /** 723 * qcom_scm_pas_metadata_release() - release metadata context 724 * @ctx: pas context 725 */ 726 void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx) 727 { 728 if (!ctx->ptr) 729 return; 730 731 if (ctx->use_tzmem) 732 qcom_tzmem_free(ctx->ptr); 733 else 734 dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); 735 736 ctx->ptr = NULL; 737 } 738 EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release); 739 740 /** 741 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral 742 * for firmware loading 743 * @pas_id: peripheral authentication service id 744 * @addr: start address of memory area to prepare 745 * @size: size of the memory area to prepare 746 * 747 * Returns 0 on success. 748 */ 749 int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size) 750 { 751 int ret; 752 struct qcom_scm_desc desc = { 753 .svc = QCOM_SCM_SVC_PIL, 754 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP, 755 .arginfo = QCOM_SCM_ARGS(3), 756 .args[0] = pas_id, 757 .args[1] = addr, 758 .args[2] = size, 759 .owner = ARM_SMCCC_OWNER_SIP, 760 }; 761 struct qcom_scm_res res; 762 763 ret = qcom_scm_clk_enable(); 764 if (ret) 765 return ret; 766 767 ret = qcom_scm_bw_enable(); 768 if (ret) 769 goto disable_clk; 770 771 ret = qcom_scm_call(__scm->dev, &desc, &res); 772 qcom_scm_bw_disable(); 773 774 disable_clk: 775 qcom_scm_clk_disable(); 776 777 return ret ? 
: res.result[0]; 778 } 779 EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup); 780 781 static void *__qcom_scm_pas_get_rsc_table(u32 pas_id, void *input_rt_tzm, 782 size_t input_rt_size, 783 size_t *output_rt_size) 784 { 785 struct qcom_scm_desc desc = { 786 .svc = QCOM_SCM_SVC_PIL, 787 .cmd = QCOM_SCM_PIL_PAS_GET_RSCTABLE, 788 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RO, QCOM_SCM_VAL, 789 QCOM_SCM_RW, QCOM_SCM_VAL), 790 .args[0] = pas_id, 791 .owner = ARM_SMCCC_OWNER_SIP, 792 }; 793 struct qcom_scm_res res; 794 void *output_rt_tzm; 795 int ret; 796 797 output_rt_tzm = qcom_tzmem_alloc(__scm->mempool, *output_rt_size, GFP_KERNEL); 798 if (!output_rt_tzm) 799 return ERR_PTR(-ENOMEM); 800 801 desc.args[1] = qcom_tzmem_to_phys(input_rt_tzm); 802 desc.args[2] = input_rt_size; 803 desc.args[3] = qcom_tzmem_to_phys(output_rt_tzm); 804 desc.args[4] = *output_rt_size; 805 806 /* 807 * Whether SMC fail or pass, res.result[2] will hold actual resource table 808 * size. 809 * 810 * If passed 'output_rt_size' buffer size is not sufficient to hold the 811 * resource table TrustZone sends, response code in res.result[1] as 812 * RSCTABLE_BUFFER_NOT_SUFFICIENT so that caller can retry this SMC call 813 * with output_rt_tzm buffer with res.result[2] size however, It should not 814 * be of unresonable size. 815 */ 816 ret = qcom_scm_call(__scm->dev, &desc, &res); 817 if (!ret && res.result[2] > SZ_1G) { 818 ret = -E2BIG; 819 goto free_output_rt; 820 } 821 822 *output_rt_size = res.result[2]; 823 if (ret && res.result[1] == RSCTABLE_BUFFER_NOT_SUFFICIENT) 824 ret = -EOVERFLOW; 825 826 free_output_rt: 827 if (ret) 828 qcom_tzmem_free(output_rt_tzm); 829 830 return ret ? ERR_PTR(ret) : output_rt_tzm; 831 } 832 833 /** 834 * qcom_scm_pas_get_rsc_table() - Retrieve the resource table in passed output buffer 835 * for a given peripheral. 836 * 837 * Qualcomm remote processor may rely on both static and dynamic resources for 838 * its functionality. 
Static resources typically refer to memory-mapped addresses 839 * required by the subsystem and are often embedded within the firmware binary 840 * and dynamic resources, such as shared memory in DDR etc., are determined at 841 * runtime during the boot process. 842 * 843 * On Qualcomm Technologies devices, it's possible that static resources are not 844 * embedded in the firmware binary and instead are provided by TrustZone However, 845 * dynamic resources are always expected to come from TrustZone. This indicates 846 * that for Qualcomm devices, all resources (static and dynamic) will be provided 847 * by TrustZone via the SMC call. 848 * 849 * If the remote processor firmware binary does contain static resources, they 850 * should be passed in input_rt. These will be forwarded to TrustZone for 851 * authentication. TrustZone will then append the dynamic resources and return 852 * the complete resource table in output_rt_tzm. 853 * 854 * If the remote processor firmware binary does not include a resource table, 855 * the caller of this function should set input_rt as NULL and input_rt_size 856 * as zero respectively. 857 * 858 * More about documentation on resource table data structures can be found in 859 * include/linux/remoteproc.h 860 * 861 * @ctx: PAS context 862 * @pas_id: peripheral authentication service id 863 * @input_rt: resource table buffer which is present in firmware binary 864 * @input_rt_size: size of the resource table present in firmware binary 865 * @output_rt_size: TrustZone expects caller should pass worst case size for 866 * the output_rt_tzm. 867 * 868 * Return: 869 * On success, returns a pointer to the allocated buffer containing the final 870 * resource table and output_rt_size will have actual resource table size from 871 * TrustZone. The caller is responsible for freeing the buffer. On failure, 872 * returns ERR_PTR(-errno). 
873 */ 874 struct resource_table *qcom_scm_pas_get_rsc_table(struct qcom_scm_pas_context *ctx, 875 void *input_rt, 876 size_t input_rt_size, 877 size_t *output_rt_size) 878 { 879 struct resource_table empty_rsc = {}; 880 size_t size = SZ_16K; 881 void *output_rt_tzm; 882 void *input_rt_tzm; 883 void *tbl_ptr; 884 int ret; 885 886 ret = qcom_scm_clk_enable(); 887 if (ret) 888 return ERR_PTR(ret); 889 890 ret = qcom_scm_bw_enable(); 891 if (ret) 892 goto disable_clk; 893 894 /* 895 * TrustZone can not accept buffer as NULL value as argument hence, 896 * we need to pass a input buffer indicating that subsystem firmware 897 * does not have resource table by filling resource table structure. 898 */ 899 if (!input_rt) { 900 input_rt = &empty_rsc; 901 input_rt_size = sizeof(empty_rsc); 902 } 903 904 input_rt_tzm = qcom_tzmem_alloc(__scm->mempool, input_rt_size, GFP_KERNEL); 905 if (!input_rt_tzm) { 906 ret = -ENOMEM; 907 goto disable_scm_bw; 908 } 909 910 memcpy(input_rt_tzm, input_rt, input_rt_size); 911 912 output_rt_tzm = __qcom_scm_pas_get_rsc_table(ctx->pas_id, input_rt_tzm, 913 input_rt_size, &size); 914 if (PTR_ERR(output_rt_tzm) == -EOVERFLOW) 915 /* Try again with the size requested by the TZ */ 916 output_rt_tzm = __qcom_scm_pas_get_rsc_table(ctx->pas_id, 917 input_rt_tzm, 918 input_rt_size, 919 &size); 920 if (IS_ERR(output_rt_tzm)) { 921 ret = PTR_ERR(output_rt_tzm); 922 goto free_input_rt; 923 } 924 925 tbl_ptr = kmemdup(output_rt_tzm, size, GFP_KERNEL); 926 if (!tbl_ptr) { 927 qcom_tzmem_free(output_rt_tzm); 928 ret = -ENOMEM; 929 goto free_input_rt; 930 } 931 932 *output_rt_size = size; 933 qcom_tzmem_free(output_rt_tzm); 934 935 free_input_rt: 936 qcom_tzmem_free(input_rt_tzm); 937 938 disable_scm_bw: 939 qcom_scm_bw_disable(); 940 941 disable_clk: 942 qcom_scm_clk_disable(); 943 944 return ret ? 
ERR_PTR(ret) : tbl_ptr; 945 } 946 EXPORT_SYMBOL_GPL(qcom_scm_pas_get_rsc_table); 947 948 /** 949 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware 950 * and reset the remote processor 951 * @pas_id: peripheral authentication service id 952 * 953 * Return 0 on success. 954 */ 955 int qcom_scm_pas_auth_and_reset(u32 pas_id) 956 { 957 int ret; 958 struct qcom_scm_desc desc = { 959 .svc = QCOM_SCM_SVC_PIL, 960 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET, 961 .arginfo = QCOM_SCM_ARGS(1), 962 .args[0] = pas_id, 963 .owner = ARM_SMCCC_OWNER_SIP, 964 }; 965 struct qcom_scm_res res; 966 967 ret = qcom_scm_clk_enable(); 968 if (ret) 969 return ret; 970 971 ret = qcom_scm_bw_enable(); 972 if (ret) 973 goto disable_clk; 974 975 ret = qcom_scm_call(__scm->dev, &desc, &res); 976 qcom_scm_bw_disable(); 977 978 disable_clk: 979 qcom_scm_clk_disable(); 980 981 return ret ? : res.result[0]; 982 } 983 EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset); 984 985 /** 986 * qcom_scm_pas_prepare_and_auth_reset() - Prepare, authenticate, and reset the 987 * remote processor 988 * 989 * @ctx: Context saved during call to qcom_scm_pas_context_init() 990 * 991 * This function performs the necessary steps to prepare a PAS subsystem, 992 * authenticate it using the provided metadata, and initiate a reset sequence. 993 * 994 * It should be used when Linux is in control setting up the IOMMU hardware 995 * for remote subsystem during secure firmware loading processes. The preparation 996 * step sets up a shmbridge over the firmware memory before TrustZone accesses the 997 * firmware memory region for authentication. The authentication step verifies 998 * the integrity and authenticity of the firmware or configuration using secure 999 * metadata. Finally, the reset step ensures the subsystem starts in a clean and 1000 * sane state. 1001 * 1002 * Return: 0 on success, negative errno on failure. 
 */
int qcom_scm_pas_prepare_and_auth_reset(struct qcom_scm_pas_context *ctx)
{
	u64 handle;
	int ret;

	/*
	 * When Linux running @ EL1, Gunyah hypervisor running @ EL2 traps the
	 * auth_and_reset call and create an shmbridge on the remote subsystem
	 * memory region and then invokes a call to TrustZone to authenticate.
	 */
	if (!ctx->use_tzmem)
		return qcom_scm_pas_auth_and_reset(ctx->pas_id);

	/*
	 * When Linux runs @ EL2 Linux must create the shmbridge itself and then
	 * subsequently call TrustZone for authenticate and reset.
	 */
	ret = qcom_tzmem_shm_bridge_create(ctx->mem_phys, ctx->mem_size, &handle);
	if (ret)
		return ret;

	ret = qcom_scm_pas_auth_and_reset(ctx->pas_id);

	/* The bridge is only needed for the duration of the auth call */
	qcom_tzmem_shm_bridge_delete(handle);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_prepare_and_auth_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @pas_id: peripheral authentication service id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/* SCM core clocks and interconnect bandwidth must be up for the call */
	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	/* GNU ?: - propagate a non-zero transport error, else the TZ result */
	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripherial
 * @pas_id:	peripheral authentication service id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 pas_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = pas_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/* Not all firmwares implement the IS_SUPPORTED query itself */
	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

/*
 * __qcom_scm_pas_mss_reset() - assert (reset=1) or deassert (reset=0) the
 * modem subsystem reset line via the PAS service.
 */
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

/* reset_controller_dev callback: only line 0 (MSS) exists */
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

/**
 * qcom_scm_io_readl() - Read a 32-bit register through a secure IO call
 * @addr: physical address to read
 * @val: filled with the value read on success
 *
 * Atomic (fast) SCM call; safe to use from non-sleeping context.
 * Returns 0 on success, negative errno otherwise.
 */
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

/**
 * qcom_scm_io_writel() - Write a 32-bit register through a secure IO call
 * @addr: physical address to write
 * @val: value to write
 *
 * Atomic (fast) SCM call. Returns 0 on success, negative errno otherwise.
 */
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
1179 */ 1180 bool qcom_scm_restore_sec_cfg_available(void) 1181 { 1182 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 1183 QCOM_SCM_MP_RESTORE_SEC_CFG); 1184 } 1185 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available); 1186 1187 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) 1188 { 1189 struct qcom_scm_desc desc = { 1190 .svc = QCOM_SCM_SVC_MP, 1191 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, 1192 .arginfo = QCOM_SCM_ARGS(2), 1193 .args[0] = device_id, 1194 .args[1] = spare, 1195 .owner = ARM_SMCCC_OWNER_SIP, 1196 }; 1197 struct qcom_scm_res res; 1198 int ret; 1199 1200 ret = qcom_scm_call(__scm->dev, &desc, &res); 1201 1202 return ret ? : res.result[0]; 1203 } 1204 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); 1205 1206 #define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0) 1207 1208 bool qcom_scm_set_gpu_smmu_aperture_is_available(void) 1209 { 1210 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 1211 QCOM_SCM_MP_CP_SMMU_APERTURE_ID); 1212 } 1213 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available); 1214 1215 int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank) 1216 { 1217 struct qcom_scm_desc desc = { 1218 .svc = QCOM_SCM_SVC_MP, 1219 .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID, 1220 .arginfo = QCOM_SCM_ARGS(4), 1221 .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank), 1222 .args[1] = 0xffffffff, 1223 .args[2] = 0xffffffff, 1224 .args[3] = 0xffffffff, 1225 .owner = ARM_SMCCC_OWNER_SIP 1226 }; 1227 1228 return qcom_scm_call(__scm->dev, &desc, NULL); 1229 } 1230 EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture); 1231 1232 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) 1233 { 1234 struct qcom_scm_desc desc = { 1235 .svc = QCOM_SCM_SVC_MP, 1236 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE, 1237 .arginfo = QCOM_SCM_ARGS(1), 1238 .args[0] = spare, 1239 .owner = ARM_SMCCC_OWNER_SIP, 1240 }; 1241 struct qcom_scm_res res; 1242 int ret; 1243 1244 ret = qcom_scm_call(__scm->dev, 
&desc, &res); 1245 1246 if (size) 1247 *size = res.result[0]; 1248 1249 return ret ? : res.result[1]; 1250 } 1251 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size); 1252 1253 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) 1254 { 1255 struct qcom_scm_desc desc = { 1256 .svc = QCOM_SCM_SVC_MP, 1257 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT, 1258 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, 1259 QCOM_SCM_VAL), 1260 .args[0] = addr, 1261 .args[1] = size, 1262 .args[2] = spare, 1263 .owner = ARM_SMCCC_OWNER_SIP, 1264 }; 1265 int ret; 1266 1267 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1268 1269 /* the pg table has been initialized already, ignore the error */ 1270 if (ret == -EPERM) 1271 ret = 0; 1272 1273 return ret; 1274 } 1275 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init); 1276 1277 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) 1278 { 1279 struct qcom_scm_desc desc = { 1280 .svc = QCOM_SCM_SVC_MP, 1281 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, 1282 .arginfo = QCOM_SCM_ARGS(2), 1283 .args[0] = size, 1284 .args[1] = spare, 1285 .owner = ARM_SMCCC_OWNER_SIP, 1286 }; 1287 1288 return qcom_scm_call(__scm->dev, &desc, NULL); 1289 } 1290 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size); 1291 1292 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, 1293 u32 cp_nonpixel_start, 1294 u32 cp_nonpixel_size) 1295 { 1296 int ret; 1297 struct qcom_scm_desc desc = { 1298 .svc = QCOM_SCM_SVC_MP, 1299 .cmd = QCOM_SCM_MP_VIDEO_VAR, 1300 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, 1301 QCOM_SCM_VAL, QCOM_SCM_VAL), 1302 .args[0] = cp_start, 1303 .args[1] = cp_size, 1304 .args[2] = cp_nonpixel_start, 1305 .args[3] = cp_nonpixel_size, 1306 .owner = ARM_SMCCC_OWNER_SIP, 1307 }; 1308 struct qcom_scm_res res; 1309 1310 ret = qcom_scm_call(__scm->dev, &desc, &res); 1311 1312 return ret ? 
: res.result[0]; 1313 } 1314 EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var); 1315 1316 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, 1317 size_t mem_sz, phys_addr_t src, size_t src_sz, 1318 phys_addr_t dest, size_t dest_sz) 1319 { 1320 int ret; 1321 struct qcom_scm_desc desc = { 1322 .svc = QCOM_SCM_SVC_MP, 1323 .cmd = QCOM_SCM_MP_ASSIGN, 1324 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, 1325 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, 1326 QCOM_SCM_VAL, QCOM_SCM_VAL), 1327 .args[0] = mem_region, 1328 .args[1] = mem_sz, 1329 .args[2] = src, 1330 .args[3] = src_sz, 1331 .args[4] = dest, 1332 .args[5] = dest_sz, 1333 .args[6] = 0, 1334 .owner = ARM_SMCCC_OWNER_SIP, 1335 }; 1336 struct qcom_scm_res res; 1337 1338 ret = qcom_scm_call(dev, &desc, &res); 1339 1340 return ret ? : res.result[0]; 1341 } 1342 1343 /** 1344 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership 1345 * @mem_addr: mem region whose ownership need to be reassigned 1346 * @mem_sz: size of the region. 1347 * @srcvm: vmid for current set of owners, each set bit in 1348 * flag indicate a unique owner 1349 * @newvm: array having new owners and corresponding permission 1350 * flags 1351 * @dest_cnt: number of owners in next set. 1352 * 1353 * Return negative errno on failure or 0 on success with @srcvm updated. 
1354 */ 1355 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, 1356 u64 *srcvm, 1357 const struct qcom_scm_vmperm *newvm, 1358 unsigned int dest_cnt) 1359 { 1360 struct qcom_scm_current_perm_info *destvm; 1361 struct qcom_scm_mem_map_info *mem_to_map; 1362 phys_addr_t mem_to_map_phys; 1363 phys_addr_t dest_phys; 1364 phys_addr_t ptr_phys; 1365 size_t mem_to_map_sz; 1366 size_t dest_sz; 1367 size_t src_sz; 1368 size_t ptr_sz; 1369 int next_vm; 1370 __le32 *src; 1371 int ret, i, b; 1372 u64 srcvm_bits = *srcvm; 1373 1374 src_sz = hweight64(srcvm_bits) * sizeof(*src); 1375 mem_to_map_sz = sizeof(*mem_to_map); 1376 dest_sz = dest_cnt * sizeof(*destvm); 1377 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + 1378 ALIGN(dest_sz, SZ_64); 1379 1380 void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1381 ptr_sz, GFP_KERNEL); 1382 if (!ptr) 1383 return -ENOMEM; 1384 1385 ptr_phys = qcom_tzmem_to_phys(ptr); 1386 1387 /* Fill source vmid detail */ 1388 src = ptr; 1389 i = 0; 1390 for (b = 0; b < BITS_PER_TYPE(u64); b++) { 1391 if (srcvm_bits & BIT(b)) 1392 src[i++] = cpu_to_le32(b); 1393 } 1394 1395 /* Fill details of mem buff to map */ 1396 mem_to_map = ptr + ALIGN(src_sz, SZ_64); 1397 mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64); 1398 mem_to_map->mem_addr = cpu_to_le64(mem_addr); 1399 mem_to_map->mem_size = cpu_to_le64(mem_sz); 1400 1401 next_vm = 0; 1402 /* Fill details of next vmid detail */ 1403 destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); 1404 dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); 1405 for (i = 0; i < dest_cnt; i++, destvm++, newvm++) { 1406 destvm->vmid = cpu_to_le32(newvm->vmid); 1407 destvm->perm = cpu_to_le32(newvm->perm); 1408 destvm->ctx = 0; 1409 destvm->ctx_size = 0; 1410 next_vm |= BIT(newvm->vmid); 1411 } 1412 1413 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, 1414 ptr_phys, src_sz, dest_phys, dest_sz); 1415 if (ret) { 1416 
dev_err(__scm->dev, 1417 "Assign memory protection call failed %d\n", ret); 1418 return ret; 1419 } 1420 1421 *srcvm = next_vm; 1422 return 0; 1423 } 1424 EXPORT_SYMBOL_GPL(qcom_scm_assign_mem); 1425 1426 /** 1427 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available 1428 */ 1429 bool qcom_scm_ocmem_lock_available(void) 1430 { 1431 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM, 1432 QCOM_SCM_OCMEM_LOCK_CMD); 1433 } 1434 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available); 1435 1436 /** 1437 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM 1438 * region to the specified initiator 1439 * 1440 * @id: tz initiator id 1441 * @offset: OCMEM offset 1442 * @size: OCMEM size 1443 * @mode: access mode (WIDE/NARROW) 1444 */ 1445 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, 1446 u32 mode) 1447 { 1448 struct qcom_scm_desc desc = { 1449 .svc = QCOM_SCM_SVC_OCMEM, 1450 .cmd = QCOM_SCM_OCMEM_LOCK_CMD, 1451 .args[0] = id, 1452 .args[1] = offset, 1453 .args[2] = size, 1454 .args[3] = mode, 1455 .arginfo = QCOM_SCM_ARGS(4), 1456 }; 1457 1458 return qcom_scm_call(__scm->dev, &desc, NULL); 1459 } 1460 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock); 1461 1462 /** 1463 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM 1464 * region from the specified initiator 1465 * 1466 * @id: tz initiator id 1467 * @offset: OCMEM offset 1468 * @size: OCMEM size 1469 */ 1470 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) 1471 { 1472 struct qcom_scm_desc desc = { 1473 .svc = QCOM_SCM_SVC_OCMEM, 1474 .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD, 1475 .args[0] = id, 1476 .args[1] = offset, 1477 .args[2] = size, 1478 .arginfo = QCOM_SCM_ARGS(3), 1479 }; 1480 1481 return qcom_scm_call(__scm->dev, &desc, NULL); 1482 } 1483 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock); 1484 1485 /** 1486 * qcom_scm_ice_available() - Is the ICE key programming interface available? 
1487 * 1488 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and 1489 * qcom_scm_ice_set_key() are available. 1490 */ 1491 bool qcom_scm_ice_available(void) 1492 { 1493 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1494 QCOM_SCM_ES_INVALIDATE_ICE_KEY) && 1495 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1496 QCOM_SCM_ES_CONFIG_SET_ICE_KEY); 1497 } 1498 EXPORT_SYMBOL_GPL(qcom_scm_ice_available); 1499 1500 /** 1501 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key 1502 * @index: the keyslot to invalidate 1503 * 1504 * The UFSHCI and eMMC standards define a standard way to do this, but it 1505 * doesn't work on these SoCs; only this SCM call does. 1506 * 1507 * It is assumed that the SoC has only one ICE instance being used, as this SCM 1508 * call doesn't specify which ICE instance the keyslot belongs to. 1509 * 1510 * Return: 0 on success; -errno on failure. 1511 */ 1512 int qcom_scm_ice_invalidate_key(u32 index) 1513 { 1514 struct qcom_scm_desc desc = { 1515 .svc = QCOM_SCM_SVC_ES, 1516 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY, 1517 .arginfo = QCOM_SCM_ARGS(1), 1518 .args[0] = index, 1519 .owner = ARM_SMCCC_OWNER_SIP, 1520 }; 1521 1522 return qcom_scm_call(__scm->dev, &desc, NULL); 1523 } 1524 EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key); 1525 1526 /** 1527 * qcom_scm_ice_set_key() - Set an inline encryption key 1528 * @index: the keyslot into which to set the key 1529 * @key: the key to program 1530 * @key_size: the size of the key in bytes 1531 * @cipher: the encryption algorithm the key is for 1532 * @data_unit_size: the encryption data unit size, i.e. the size of each 1533 * individual plaintext and ciphertext. Given in 512-byte 1534 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc. 1535 * 1536 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it 1537 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline. 
1538 * 1539 * The UFSHCI and eMMC standards define a standard way to do this, but it 1540 * doesn't work on these SoCs; only this SCM call does. 1541 * 1542 * It is assumed that the SoC has only one ICE instance being used, as this SCM 1543 * call doesn't specify which ICE instance the keyslot belongs to. 1544 * 1545 * Return: 0 on success; -errno on failure. 1546 */ 1547 int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, 1548 enum qcom_scm_ice_cipher cipher, u32 data_unit_size) 1549 { 1550 struct qcom_scm_desc desc = { 1551 .svc = QCOM_SCM_SVC_ES, 1552 .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY, 1553 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW, 1554 QCOM_SCM_VAL, QCOM_SCM_VAL, 1555 QCOM_SCM_VAL), 1556 .args[0] = index, 1557 .args[2] = key_size, 1558 .args[3] = cipher, 1559 .args[4] = data_unit_size, 1560 .owner = ARM_SMCCC_OWNER_SIP, 1561 }; 1562 1563 int ret; 1564 1565 void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1566 key_size, 1567 GFP_KERNEL); 1568 if (!keybuf) 1569 return -ENOMEM; 1570 memcpy(keybuf, key, key_size); 1571 desc.args[1] = qcom_tzmem_to_phys(keybuf); 1572 1573 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1574 1575 memzero_explicit(keybuf, key_size); 1576 1577 return ret; 1578 } 1579 EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); 1580 1581 bool qcom_scm_has_wrapped_key_support(void) 1582 { 1583 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1584 QCOM_SCM_ES_DERIVE_SW_SECRET) && 1585 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1586 QCOM_SCM_ES_GENERATE_ICE_KEY) && 1587 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1588 QCOM_SCM_ES_PREPARE_ICE_KEY) && 1589 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 1590 QCOM_SCM_ES_IMPORT_ICE_KEY); 1591 } 1592 EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support); 1593 1594 /** 1595 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key 1596 * @eph_key: an ephemerally-wrapped key 1597 * @eph_key_size: size of 
@eph_key in bytes 1598 * @sw_secret: output buffer for the software secret 1599 * @sw_secret_size: size of the software secret to derive in bytes 1600 * 1601 * Derive a software secret from an ephemerally-wrapped key for software crypto 1602 * operations. This is done by calling into the secure execution environment, 1603 * which then calls into the hardware to unwrap and derive the secret. 1604 * 1605 * For more information on sw_secret, see the "Hardware-wrapped keys" section of 1606 * Documentation/block/inline-encryption.rst. 1607 * 1608 * Return: 0 on success; -errno on failure. 1609 */ 1610 int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size, 1611 u8 *sw_secret, size_t sw_secret_size) 1612 { 1613 struct qcom_scm_desc desc = { 1614 .svc = QCOM_SCM_SVC_ES, 1615 .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET, 1616 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL, 1617 QCOM_SCM_RW, QCOM_SCM_VAL), 1618 .owner = ARM_SMCCC_OWNER_SIP, 1619 }; 1620 int ret; 1621 1622 void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1623 eph_key_size, 1624 GFP_KERNEL); 1625 if (!eph_key_buf) 1626 return -ENOMEM; 1627 1628 void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1629 sw_secret_size, 1630 GFP_KERNEL); 1631 if (!sw_secret_buf) 1632 return -ENOMEM; 1633 1634 memcpy(eph_key_buf, eph_key, eph_key_size); 1635 desc.args[0] = qcom_tzmem_to_phys(eph_key_buf); 1636 desc.args[1] = eph_key_size; 1637 desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf); 1638 desc.args[3] = sw_secret_size; 1639 1640 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1641 if (!ret) 1642 memcpy(sw_secret, sw_secret_buf, sw_secret_size); 1643 1644 memzero_explicit(eph_key_buf, eph_key_size); 1645 memzero_explicit(sw_secret_buf, sw_secret_size); 1646 return ret; 1647 } 1648 EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret); 1649 1650 /** 1651 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption 1652 * @lt_key: output buffer for the long-term 
wrapped key 1653 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size 1654 * used by the SoC. 1655 * 1656 * Generate a key using the built-in HW module in the SoC. The resulting key is 1657 * returned wrapped with the platform-specific Key Encryption Key. 1658 * 1659 * Return: 0 on success; -errno on failure. 1660 */ 1661 int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size) 1662 { 1663 struct qcom_scm_desc desc = { 1664 .svc = QCOM_SCM_SVC_ES, 1665 .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY, 1666 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), 1667 .owner = ARM_SMCCC_OWNER_SIP, 1668 }; 1669 int ret; 1670 1671 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1672 lt_key_size, 1673 GFP_KERNEL); 1674 if (!lt_key_buf) 1675 return -ENOMEM; 1676 1677 desc.args[0] = qcom_tzmem_to_phys(lt_key_buf); 1678 desc.args[1] = lt_key_size; 1679 1680 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1681 if (!ret) 1682 memcpy(lt_key, lt_key_buf, lt_key_size); 1683 1684 memzero_explicit(lt_key_buf, lt_key_size); 1685 return ret; 1686 } 1687 EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key); 1688 1689 /** 1690 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key 1691 * @lt_key: a long-term wrapped key 1692 * @lt_key_size: size of @lt_key in bytes 1693 * @eph_key: output buffer for the ephemerally-wrapped key 1694 * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size 1695 * used by the SoC. 1696 * 1697 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for 1698 * added protection. The resulting key will only be valid for the current boot. 1699 * 1700 * Return: 0 on success; -errno on failure. 
1701 */ 1702 int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size, 1703 u8 *eph_key, size_t eph_key_size) 1704 { 1705 struct qcom_scm_desc desc = { 1706 .svc = QCOM_SCM_SVC_ES, 1707 .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY, 1708 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, 1709 QCOM_SCM_RW, QCOM_SCM_VAL), 1710 .owner = ARM_SMCCC_OWNER_SIP, 1711 }; 1712 int ret; 1713 1714 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1715 lt_key_size, 1716 GFP_KERNEL); 1717 if (!lt_key_buf) 1718 return -ENOMEM; 1719 1720 void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1721 eph_key_size, 1722 GFP_KERNEL); 1723 if (!eph_key_buf) 1724 return -ENOMEM; 1725 1726 memcpy(lt_key_buf, lt_key, lt_key_size); 1727 desc.args[0] = qcom_tzmem_to_phys(lt_key_buf); 1728 desc.args[1] = lt_key_size; 1729 desc.args[2] = qcom_tzmem_to_phys(eph_key_buf); 1730 desc.args[3] = eph_key_size; 1731 1732 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1733 if (!ret) 1734 memcpy(eph_key, eph_key_buf, eph_key_size); 1735 1736 memzero_explicit(lt_key_buf, lt_key_size); 1737 memzero_explicit(eph_key_buf, eph_key_size); 1738 return ret; 1739 } 1740 EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key); 1741 1742 /** 1743 * qcom_scm_import_ice_key() - Import key for storage encryption 1744 * @raw_key: the raw key to import 1745 * @raw_key_size: size of @raw_key in bytes 1746 * @lt_key: output buffer for the long-term wrapped key 1747 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size 1748 * used by the SoC. 1749 * 1750 * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to 1751 * wrap the raw key using the platform-specific Key Encryption Key. 1752 * 1753 * Return: 0 on success; -errno on failure. 
1754 */ 1755 int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size, 1756 u8 *lt_key, size_t lt_key_size) 1757 { 1758 struct qcom_scm_desc desc = { 1759 .svc = QCOM_SCM_SVC_ES, 1760 .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY, 1761 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, 1762 QCOM_SCM_RW, QCOM_SCM_VAL), 1763 .owner = ARM_SMCCC_OWNER_SIP, 1764 }; 1765 int ret; 1766 1767 void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1768 raw_key_size, 1769 GFP_KERNEL); 1770 if (!raw_key_buf) 1771 return -ENOMEM; 1772 1773 void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1774 lt_key_size, 1775 GFP_KERNEL); 1776 if (!lt_key_buf) 1777 return -ENOMEM; 1778 1779 memcpy(raw_key_buf, raw_key, raw_key_size); 1780 desc.args[0] = qcom_tzmem_to_phys(raw_key_buf); 1781 desc.args[1] = raw_key_size; 1782 desc.args[2] = qcom_tzmem_to_phys(lt_key_buf); 1783 desc.args[3] = lt_key_size; 1784 1785 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1786 if (!ret) 1787 memcpy(lt_key, lt_key_buf, lt_key_size); 1788 1789 memzero_explicit(raw_key_buf, raw_key_size); 1790 memzero_explicit(lt_key_buf, lt_key_size); 1791 return ret; 1792 } 1793 EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key); 1794 1795 /** 1796 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 1797 * 1798 * Return true if HDCP is supported, false if not. 1799 */ 1800 bool qcom_scm_hdcp_available(void) 1801 { 1802 bool avail; 1803 int ret = qcom_scm_clk_enable(); 1804 1805 if (ret) 1806 return ret; 1807 1808 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, 1809 QCOM_SCM_HDCP_INVOKE); 1810 1811 qcom_scm_clk_disable(); 1812 1813 return avail; 1814 } 1815 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available); 1816 1817 /** 1818 * qcom_scm_hdcp_req() - Send HDCP request. 1819 * @req: HDCP request array 1820 * @req_cnt: HDCP request array count 1821 * @resp: response buffer passed to SCM 1822 * 1823 * Write HDCP register(s) through SCM. 
1824 */ 1825 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) 1826 { 1827 int ret; 1828 struct qcom_scm_desc desc = { 1829 .svc = QCOM_SCM_SVC_HDCP, 1830 .cmd = QCOM_SCM_HDCP_INVOKE, 1831 .arginfo = QCOM_SCM_ARGS(10), 1832 .args = { 1833 req[0].addr, 1834 req[0].val, 1835 req[1].addr, 1836 req[1].val, 1837 req[2].addr, 1838 req[2].val, 1839 req[3].addr, 1840 req[3].val, 1841 req[4].addr, 1842 req[4].val 1843 }, 1844 .owner = ARM_SMCCC_OWNER_SIP, 1845 }; 1846 struct qcom_scm_res res; 1847 1848 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) 1849 return -ERANGE; 1850 1851 ret = qcom_scm_clk_enable(); 1852 if (ret) 1853 return ret; 1854 1855 ret = qcom_scm_call(__scm->dev, &desc, &res); 1856 *resp = res.result[0]; 1857 1858 qcom_scm_clk_disable(); 1859 1860 return ret; 1861 } 1862 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req); 1863 1864 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) 1865 { 1866 struct qcom_scm_desc desc = { 1867 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1868 .cmd = QCOM_SCM_SMMU_PT_FORMAT, 1869 .arginfo = QCOM_SCM_ARGS(3), 1870 .args[0] = sec_id, 1871 .args[1] = ctx_num, 1872 .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ 1873 .owner = ARM_SMCCC_OWNER_SIP, 1874 }; 1875 1876 return qcom_scm_call(__scm->dev, &desc, NULL); 1877 } 1878 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format); 1879 1880 int qcom_scm_qsmmu500_wait_safe_toggle(bool en) 1881 { 1882 struct qcom_scm_desc desc = { 1883 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1884 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1, 1885 .arginfo = QCOM_SCM_ARGS(2), 1886 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL, 1887 .args[1] = en, 1888 .owner = ARM_SMCCC_OWNER_SIP, 1889 }; 1890 1891 1892 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 1893 } 1894 EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle); 1895 1896 bool qcom_scm_lmh_dcvsh_available(void) 1897 { 1898 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); 1899 } 1900 
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); 1901 1902 /* 1903 * This is only supposed to be called once by the TZMem module. It takes the 1904 * SCM struct device as argument and uses it to pass the call as at the time 1905 * the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't 1906 * accept global user calls. Don't try to use the __scm pointer here. 1907 */ 1908 int qcom_scm_shm_bridge_enable(struct device *scm_dev) 1909 { 1910 int ret; 1911 1912 struct qcom_scm_desc desc = { 1913 .svc = QCOM_SCM_SVC_MP, 1914 .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE, 1915 .owner = ARM_SMCCC_OWNER_SIP 1916 }; 1917 1918 struct qcom_scm_res res; 1919 1920 if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP, 1921 QCOM_SCM_MP_SHM_BRIDGE_ENABLE)) 1922 return -EOPNOTSUPP; 1923 1924 ret = qcom_scm_call(scm_dev, &desc, &res); 1925 1926 if (ret) 1927 return ret; 1928 1929 if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP) 1930 return -EOPNOTSUPP; 1931 1932 return res.result[0]; 1933 } 1934 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable); 1935 1936 int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags, 1937 u64 ipfn_and_s_perm_flags, u64 size_and_flags, 1938 u64 ns_vmids, u64 *handle) 1939 { 1940 struct qcom_scm_desc desc = { 1941 .svc = QCOM_SCM_SVC_MP, 1942 .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE, 1943 .owner = ARM_SMCCC_OWNER_SIP, 1944 .args[0] = pfn_and_ns_perm_flags, 1945 .args[1] = ipfn_and_s_perm_flags, 1946 .args[2] = size_and_flags, 1947 .args[3] = ns_vmids, 1948 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, 1949 QCOM_SCM_VAL, QCOM_SCM_VAL), 1950 }; 1951 1952 struct qcom_scm_res res; 1953 int ret; 1954 1955 ret = qcom_scm_call(__scm->dev, &desc, &res); 1956 1957 if (handle && !ret) 1958 *handle = res.result[1]; 1959 1960 return ret ?: res.result[0]; 1961 } 1962 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create); 1963 1964 int qcom_scm_shm_bridge_delete(u64 handle) 1965 { 1966 struct qcom_scm_desc desc = { 1967 .svc = QCOM_SCM_SVC_MP, 1968 .cmd = 
QCOM_SCM_MP_SHM_BRIDGE_DELETE, 1969 .owner = ARM_SMCCC_OWNER_SIP, 1970 .args[0] = handle, 1971 .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), 1972 }; 1973 1974 return qcom_scm_call(__scm->dev, &desc, NULL); 1975 } 1976 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete); 1977 1978 int qcom_scm_lmh_profile_change(u32 profile_id) 1979 { 1980 struct qcom_scm_desc desc = { 1981 .svc = QCOM_SCM_SVC_LMH, 1982 .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE, 1983 .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), 1984 .args[0] = profile_id, 1985 .owner = ARM_SMCCC_OWNER_SIP, 1986 }; 1987 1988 return qcom_scm_call(__scm->dev, &desc, NULL); 1989 } 1990 EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change); 1991 1992 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, 1993 u64 limit_node, u32 node_id, u64 version) 1994 { 1995 int ret, payload_size = 5 * sizeof(u32); 1996 1997 struct qcom_scm_desc desc = { 1998 .svc = QCOM_SCM_SVC_LMH, 1999 .cmd = QCOM_SCM_LMH_LIMIT_DCVSH, 2000 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL, 2001 QCOM_SCM_VAL, QCOM_SCM_VAL), 2002 .args[1] = payload_size, 2003 .args[2] = limit_node, 2004 .args[3] = node_id, 2005 .args[4] = version, 2006 .owner = ARM_SMCCC_OWNER_SIP, 2007 }; 2008 2009 u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 2010 payload_size, 2011 GFP_KERNEL); 2012 if (!payload_buf) 2013 return -ENOMEM; 2014 2015 payload_buf[0] = payload_fn; 2016 payload_buf[1] = 0; 2017 payload_buf[2] = payload_reg; 2018 payload_buf[3] = 1; 2019 payload_buf[4] = payload_val; 2020 2021 desc.args[0] = qcom_tzmem_to_phys(payload_buf); 2022 2023 ret = qcom_scm_call(__scm->dev, &desc, NULL); 2024 2025 return ret; 2026 } 2027 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); 2028 2029 int qcom_scm_gpu_init_regs(u32 gpu_req) 2030 { 2031 struct qcom_scm_desc desc = { 2032 .svc = QCOM_SCM_SVC_GPU, 2033 .cmd = QCOM_SCM_SVC_GPU_INIT_REGS, 2034 .arginfo = QCOM_SCM_ARGS(1), 2035 .args[0] = gpu_req, 2036 .owner = ARM_SMCCC_OWNER_SIP, 2037 }; 
2038 2039 return qcom_scm_call(__scm->dev, &desc, NULL); 2040 } 2041 EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs); 2042 2043 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) 2044 { 2045 struct device_node *tcsr; 2046 struct device_node *np = dev->of_node; 2047 struct resource res; 2048 u32 offset; 2049 int ret; 2050 2051 tcsr = of_parse_phandle(np, "qcom,dload-mode", 0); 2052 if (!tcsr) 2053 return 0; 2054 2055 ret = of_address_to_resource(tcsr, 0, &res); 2056 of_node_put(tcsr); 2057 if (ret) 2058 return ret; 2059 2060 ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset); 2061 if (ret < 0) 2062 return ret; 2063 2064 *addr = res.start + offset; 2065 2066 return 0; 2067 } 2068 2069 #ifdef CONFIG_QCOM_QSEECOM 2070 2071 /* Lock for QSEECOM SCM call executions */ 2072 static DEFINE_MUTEX(qcom_scm_qseecom_call_lock); 2073 2074 static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc, 2075 struct qcom_scm_qseecom_resp *res) 2076 { 2077 struct qcom_scm_res scm_res = {}; 2078 int status; 2079 2080 /* 2081 * QSEECOM SCM calls should not be executed concurrently. Therefore, we 2082 * require the respective call lock to be held. 2083 */ 2084 lockdep_assert_held(&qcom_scm_qseecom_call_lock); 2085 2086 status = qcom_scm_call(__scm->dev, desc, &scm_res); 2087 2088 res->result = scm_res.result[0]; 2089 res->resp_type = scm_res.result[1]; 2090 res->data = scm_res.result[2]; 2091 2092 if (status) 2093 return status; 2094 2095 return 0; 2096 } 2097 2098 /** 2099 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call. 2100 * @desc: SCM call descriptor. 2101 * @res: SCM call response (output). 2102 * 2103 * Performs the QSEECOM SCM call described by @desc, returning the response in 2104 * @rsp. 2105 * 2106 * Return: Zero on success, nonzero on failure. 
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed same time,
	 * so lock things here. This needs to be extended to callback/listener
	 * handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM querying the QSEECOM version currently running in
 * the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	/* Feature ID passed to the info service for the version query
	 * (value taken from downstream — TODO confirm against vendor docs). */
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	/* The version is carried in the 64-bit result field; only the low
	 * 32 bits are reported to the caller. */
	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id: The returned app ID.
 *
 * Query and return the application ID of the SEE app identified by the given
 * name. This returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	/* TZ reads the name from TZ-addressable memory, so stage it in the
	 * SCM memory pool; the buffer is freed automatically on return. */
	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	/* Not NUL-terminated on purpose: the length is passed explicitly
	 * in args[1] below. */
	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	/* QSEECOM_RESULT_FAILURE here means "no such app loaded". */
	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id: The ID of the target app.
 * @req: Request buffer sent to the app (must be TZ memory)
 * @req_size: Size of the request buffer.
 * @rsp: Response buffer, written to by the app (must be TZ memory)
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and read back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	/* Both buffers must already be TZ memory (see kernel-doc above);
	 * only their physical addresses are handed to the firmware. */
	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);

/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
2288 */ 2289 static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { 2290 { .compatible = "asus,vivobook-s15" }, 2291 { .compatible = "asus,vivobook-s15-x1p4" }, 2292 { .compatible = "asus,zenbook-a14-ux3407qa" }, 2293 { .compatible = "asus,zenbook-a14-ux3407ra" }, 2294 { .compatible = "dell,inspiron-14-plus-7441" }, 2295 { .compatible = "dell,latitude-7455" }, 2296 { .compatible = "dell,xps13-9345" }, 2297 { .compatible = "ecs,liva-qc710" }, 2298 { .compatible = "hp,elitebook-ultra-g1q" }, 2299 { .compatible = "hp,omnibook-x14" }, 2300 { .compatible = "huawei,gaokun3" }, 2301 { .compatible = "lenovo,flex-5g" }, 2302 { .compatible = "lenovo,ideacentre-mini-01q8x10" }, 2303 { .compatible = "lenovo,thinkbook-16" }, 2304 { .compatible = "lenovo,thinkpad-t14s" }, 2305 { .compatible = "lenovo,thinkpad-x13s", }, 2306 { .compatible = "lenovo,yoga-slim7x" }, 2307 { .compatible = "medion,sprchrgd14s1" }, 2308 { .compatible = "microsoft,arcata", }, 2309 { .compatible = "microsoft,blackrock" }, 2310 { .compatible = "microsoft,denali", }, 2311 { .compatible = "microsoft,romulus13", }, 2312 { .compatible = "microsoft,romulus15", }, 2313 { .compatible = "qcom,glymur-crd" }, 2314 { .compatible = "qcom,hamoa-iot-evk" }, 2315 { .compatible = "qcom,mahua-crd" }, 2316 { .compatible = "qcom,purwa-iot-evk" }, 2317 { .compatible = "qcom,sc8180x-primus" }, 2318 { .compatible = "qcom,x1e001de-devkit" }, 2319 { .compatible = "qcom,x1e80100-crd" }, 2320 { .compatible = "qcom,x1e80100-qcp" }, 2321 { .compatible = "qcom,x1p42100-crd" }, 2322 { } 2323 }; 2324 2325 static void qcom_scm_qseecom_free(void *data) 2326 { 2327 struct platform_device *qseecom_dev = data; 2328 2329 platform_device_del(qseecom_dev); 2330 platform_device_put(qseecom_dev); 2331 } 2332 2333 static int qcom_scm_qseecom_init(struct qcom_scm *scm) 2334 { 2335 struct platform_device *qseecom_dev; 2336 u32 version; 2337 int ret; 2338 2339 /* 2340 * Note: We do two steps of validation here: First, we 
try to query the 2341 * QSEECOM version as a check to see if the interface exists on this 2342 * device. Second, we check against known good devices due to current 2343 * driver limitations (see comment in qcom_scm_qseecom_allowlist). 2344 * 2345 * Note that we deliberately do the machine check after the version 2346 * check so that we can log potentially supported devices. This should 2347 * be safe as downstream sources indicate that the version query is 2348 * neither blocking nor reentrant. 2349 */ 2350 ret = qcom_scm_qseecom_get_version(&version); 2351 if (ret) 2352 return 0; 2353 2354 dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version); 2355 2356 if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) { 2357 dev_info(scm->dev, "qseecom: untested machine, skipping\n"); 2358 return 0; 2359 } 2360 2361 /* 2362 * Set up QSEECOM interface device. All application clients will be 2363 * set up and managed by the corresponding driver for it. 2364 */ 2365 qseecom_dev = platform_device_alloc("qcom_qseecom", -1); 2366 if (!qseecom_dev) 2367 return -ENOMEM; 2368 2369 qseecom_dev->dev.parent = scm->dev; 2370 2371 ret = platform_device_add(qseecom_dev); 2372 if (ret) { 2373 platform_device_put(qseecom_dev); 2374 return ret; 2375 } 2376 2377 return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev); 2378 } 2379 2380 #else /* CONFIG_QCOM_QSEECOM */ 2381 2382 static int qcom_scm_qseecom_init(struct qcom_scm *scm) 2383 { 2384 return 0; 2385 } 2386 2387 #endif /* CONFIG_QCOM_QSEECOM */ 2388 2389 /** 2390 * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object. 2391 * @inbuf: start address of memory area used for inbound buffer. 2392 * @inbuf_size: size of the memory area used for inbound buffer. 2393 * @outbuf: start address of memory area used for outbound buffer. 2394 * @outbuf_size: size of the memory area used for outbound buffer. 2395 * @result: result of QTEE object invocation. 2396 * @response_type: response type returned by QTEE. 
2397 * 2398 * @response_type determines how the contents of @inbuf and @outbuf 2399 * should be processed. 2400 * 2401 * Return: On success, return 0 or <0 on failure. 2402 */ 2403 int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size, 2404 phys_addr_t outbuf, size_t outbuf_size, 2405 u64 *result, u64 *response_type) 2406 { 2407 struct qcom_scm_desc desc = { 2408 .svc = QCOM_SCM_SVC_SMCINVOKE, 2409 .cmd = QCOM_SCM_SMCINVOKE_INVOKE, 2410 .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2411 .args[0] = inbuf, 2412 .args[1] = inbuf_size, 2413 .args[2] = outbuf, 2414 .args[3] = outbuf_size, 2415 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL, 2416 QCOM_SCM_RW, QCOM_SCM_VAL), 2417 }; 2418 struct qcom_scm_res res; 2419 int ret; 2420 2421 ret = qcom_scm_call(__scm->dev, &desc, &res); 2422 if (ret) 2423 return ret; 2424 2425 if (response_type) 2426 *response_type = res.result[0]; 2427 2428 if (result) 2429 *result = res.result[1]; 2430 2431 return 0; 2432 } 2433 EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc); 2434 2435 /** 2436 * qcom_scm_qtee_callback_response() - Submit response for callback request. 2437 * @buf: start address of memory area used for outbound buffer. 2438 * @buf_size: size of the memory area used for outbound buffer. 2439 * @result: Result of QTEE object invocation. 2440 * @response_type: Response type returned by QTEE. 2441 * 2442 * @response_type determines how the contents of @buf should be processed. 2443 * 2444 * Return: On success, return 0 or <0 on failure. 
2445 */ 2446 int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size, 2447 u64 *result, u64 *response_type) 2448 { 2449 struct qcom_scm_desc desc = { 2450 .svc = QCOM_SCM_SVC_SMCINVOKE, 2451 .cmd = QCOM_SCM_SMCINVOKE_CB_RSP, 2452 .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2453 .args[0] = buf, 2454 .args[1] = buf_size, 2455 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), 2456 }; 2457 struct qcom_scm_res res; 2458 int ret; 2459 2460 ret = qcom_scm_call(__scm->dev, &desc, &res); 2461 if (ret) 2462 return ret; 2463 2464 if (response_type) 2465 *response_type = res.result[0]; 2466 2467 if (result) 2468 *result = res.result[1]; 2469 2470 return 0; 2471 } 2472 EXPORT_SYMBOL(qcom_scm_qtee_callback_response); 2473 2474 static void qcom_scm_gunyah_wdt_free(void *data) 2475 { 2476 struct platform_device *gunyah_wdt_dev = data; 2477 2478 platform_device_unregister(gunyah_wdt_dev); 2479 } 2480 2481 static void qcom_scm_gunyah_wdt_init(struct qcom_scm *scm) 2482 { 2483 struct platform_device *gunyah_wdt_dev; 2484 struct device_node *np; 2485 bool of_wdt_available; 2486 int i; 2487 static const uuid_t gunyah_uuid = UUID_INIT(0xc1d58fcd, 0xa453, 0x5fdb, 2488 0x92, 0x65, 0xce, 0x36, 2489 0x67, 0x3d, 0x5f, 0x14); 2490 static const char * const of_wdt_compatible[] = { 2491 "qcom,kpss-wdt", 2492 "arm,sbsa-gwdt", 2493 }; 2494 2495 /* Bail out if we are not running under Gunyah */ 2496 if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) || 2497 !arm_smccc_hypervisor_has_uuid(&gunyah_uuid)) 2498 return; 2499 2500 /* 2501 * Gunyah emulates either of Qualcomm watchdog or ARM SBSA watchdog on 2502 * newer platforms. Bail out if we find them in the devicetree. 
2503 */ 2504 for (i = 0; i < ARRAY_SIZE(of_wdt_compatible); i++) { 2505 np = of_find_compatible_node(NULL, NULL, of_wdt_compatible[i]); 2506 of_wdt_available = of_device_is_available(np); 2507 of_node_put(np); 2508 if (of_wdt_available) 2509 return; 2510 } 2511 2512 gunyah_wdt_dev = platform_device_register_simple("gunyah-wdt", -1, 2513 NULL, 0); 2514 if (IS_ERR(gunyah_wdt_dev)) { 2515 dev_err(scm->dev, "Failed to register Gunyah watchdog device: %ld\n", 2516 PTR_ERR(gunyah_wdt_dev)); 2517 return; 2518 } 2519 2520 devm_add_action_or_reset(scm->dev, qcom_scm_gunyah_wdt_free, 2521 gunyah_wdt_dev); 2522 } 2523 2524 static void qcom_scm_qtee_free(void *data) 2525 { 2526 struct platform_device *qtee_dev = data; 2527 2528 platform_device_unregister(qtee_dev); 2529 } 2530 2531 static void qcom_scm_qtee_init(struct qcom_scm *scm) 2532 { 2533 struct platform_device *qtee_dev; 2534 u64 result, response_type; 2535 int ret; 2536 2537 /* 2538 * Probe for smcinvoke support. This will fail due to invalid buffers, 2539 * but first, it checks whether the call is supported in QTEE syscall 2540 * handler. If it is not supported, -EIO is returned. 2541 */ 2542 ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type); 2543 if (ret == -EIO) 2544 return; 2545 2546 /* Setup QTEE interface device. 
*/ 2547 qtee_dev = platform_device_register_data(scm->dev, "qcomtee", 2548 PLATFORM_DEVID_NONE, NULL, 0); 2549 if (IS_ERR(qtee_dev)) 2550 return; 2551 2552 devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev); 2553 } 2554 2555 /** 2556 * qcom_scm_is_available() - Checks if SCM is available 2557 */ 2558 bool qcom_scm_is_available(void) 2559 { 2560 /* Paired with smp_store_release() in qcom_scm_probe */ 2561 return !!smp_load_acquire(&__scm); 2562 } 2563 EXPORT_SYMBOL_GPL(qcom_scm_is_available); 2564 2565 static int qcom_scm_fill_irq_fwspec_params(struct irq_fwspec *fwspec, u32 hwirq) 2566 { 2567 if (hwirq >= GIC_SPI_BASE && hwirq <= GIC_MAX_SPI) { 2568 fwspec->param[0] = GIC_SPI; 2569 fwspec->param[1] = hwirq - GIC_SPI_BASE; 2570 } else if (hwirq >= GIC_ESPI_BASE && hwirq <= GIC_MAX_ESPI) { 2571 fwspec->param[0] = GIC_ESPI; 2572 fwspec->param[1] = hwirq - GIC_ESPI_BASE; 2573 } else { 2574 WARN(1, "Unexpected hwirq: %d\n", hwirq); 2575 return -ENXIO; 2576 } 2577 2578 fwspec->param[2] = IRQ_TYPE_EDGE_RISING; 2579 fwspec->param_count = 3; 2580 2581 return 0; 2582 } 2583 2584 static int qcom_scm_query_waitq_count(struct qcom_scm *scm) 2585 { 2586 struct qcom_scm_desc desc = { 2587 .svc = QCOM_SCM_SVC_WAITQ, 2588 .cmd = QCOM_SCM_WAITQ_GET_INFO, 2589 .owner = ARM_SMCCC_OWNER_SIP 2590 }; 2591 struct qcom_scm_res res; 2592 int ret; 2593 2594 ret = qcom_scm_call_atomic(scm->dev, &desc, &res); 2595 if (ret) 2596 return ret; 2597 2598 return res.result[0] & GENMASK(7, 0); 2599 } 2600 2601 static int qcom_scm_get_waitq_irq(struct qcom_scm *scm) 2602 { 2603 struct qcom_scm_desc desc = { 2604 .svc = QCOM_SCM_SVC_WAITQ, 2605 .cmd = QCOM_SCM_WAITQ_GET_INFO, 2606 .owner = ARM_SMCCC_OWNER_SIP 2607 }; 2608 struct device_node *parent_irq_node; 2609 struct irq_fwspec fwspec; 2610 struct qcom_scm_res res; 2611 u32 hwirq; 2612 int ret; 2613 2614 ret = qcom_scm_call_atomic(scm->dev, &desc, &res); 2615 if (ret) 2616 return ret; 2617 2618 hwirq = res.result[1] & GENMASK(15, 0); 
2619 ret = qcom_scm_fill_irq_fwspec_params(&fwspec, hwirq); 2620 if (ret) 2621 return ret; 2622 2623 parent_irq_node = of_irq_find_parent(scm->dev->of_node); 2624 if (!parent_irq_node) 2625 return -ENODEV; 2626 2627 fwspec.fwnode = of_fwnode_handle(parent_irq_node); 2628 2629 return irq_create_fwspec_mapping(&fwspec); 2630 } 2631 2632 static struct completion *qcom_scm_get_completion(u32 wq_ctx) 2633 { 2634 struct completion *wq; 2635 2636 if (WARN_ON_ONCE(wq_ctx >= __scm->wq_cnt)) 2637 return ERR_PTR(-EINVAL); 2638 2639 wq = &__scm->waitq_comps[wq_ctx]; 2640 2641 return wq; 2642 } 2643 2644 int qcom_scm_wait_for_wq_completion(u32 wq_ctx) 2645 { 2646 struct completion *wq; 2647 2648 wq = qcom_scm_get_completion(wq_ctx); 2649 if (IS_ERR(wq)) 2650 return PTR_ERR(wq); 2651 2652 wait_for_completion_state(wq, TASK_IDLE); 2653 2654 return 0; 2655 } 2656 2657 static int qcom_scm_waitq_wakeup(unsigned int wq_ctx) 2658 { 2659 struct completion *wq; 2660 2661 wq = qcom_scm_get_completion(wq_ctx); 2662 if (IS_ERR(wq)) 2663 return PTR_ERR(wq); 2664 2665 complete(wq); 2666 2667 return 0; 2668 } 2669 2670 static irqreturn_t qcom_scm_irq_handler(int irq, void *data) 2671 { 2672 int ret; 2673 struct qcom_scm *scm = data; 2674 u32 wq_ctx, flags, more_pending = 0; 2675 2676 do { 2677 ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending); 2678 if (ret) { 2679 dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret); 2680 goto out; 2681 } 2682 2683 if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) { 2684 dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags); 2685 goto out; 2686 } 2687 2688 ret = qcom_scm_waitq_wakeup(wq_ctx); 2689 if (ret) 2690 goto out; 2691 } while (more_pending); 2692 2693 out: 2694 return IRQ_HANDLED; 2695 } 2696 2697 static int get_download_mode(char *buffer, const struct kernel_param *kp) 2698 { 2699 if (download_mode >= ARRAY_SIZE(download_mode_name)) 2700 return sysfs_emit(buffer, "unknown mode\n"); 2701 2702 return sysfs_emit(buffer, "%s\n", 
			  download_mode_name[download_mode]);
}

/*
 * module_param set hook: accept either a named mode from download_mode_name
 * or a boolean (true -> mode index 1, false -> 0), then apply it immediately
 * if the SCM device has already probed.
 */
static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		/* Not a named mode — fall back to boolean parsing. */
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values");

/*
 * Probe: set up clocks, interconnect, reset controller, TZ memory pool and
 * waitqueue completions, then publish the SCM instance and register the
 * optional QSEECOM/QTEE/Gunyah sub-devices.
 */
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;
	int i;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	/* All three clocks are optional — absent on many platforms. */
	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* -ENODEV just means no reserved region in DT — not fatal. */
	ret = of_reserved_mem_device_init(scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(scm->dev);
	if (ret)
		return dev_err_probe(scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
	if (IS_ERR(scm->mempool))
		return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/* Fall back to a single waitqueue if firmware can't report a count. */
	ret = qcom_scm_query_waitq_count(scm);
	scm->wq_cnt = ret < 0 ? QCOM_SCM_DEFAULT_WAITQ_COUNT : ret;
	scm->waitq_comps = devm_kcalloc(&pdev->dev, scm->wq_cnt, sizeof(*scm->waitq_comps),
					GFP_KERNEL);
	if (!scm->waitq_comps)
		return -ENOMEM;

	for (i = 0; i < scm->wq_cnt; i++)
		init_completion(&scm->waitq_comps[i]);

	/* Prefer the firmware-reported waitqueue IRQ, then the DT one. */
	irq = qcom_scm_get_waitq_irq(scm);
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		/* -ENXIO means no IRQ at all, which is acceptable. */
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret,
					     "Failed to request qcom-scm irq\n");
	}

	/*
	 * Paired with smp_load_acquire() in qcom_scm_is_available().
	 *
	 * This marks the SCM API as ready to accept user calls and can only
	 * be called after the TrustZone memory pool is initialized and the
	 * waitqueue interrupt requested.
	 */
	smp_store_release(&__scm, scm);

	__get_convention();

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	/* Initialize the QTEE object interface. */
	qcom_scm_qtee_init(scm);

	/* Initialize the Gunyah watchdog platform device. */
	qcom_scm_gunyah_wdt_init(scm);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");