// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Procedures for interfacing to the RTAS on CHRP machines.
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 */

#define pr_fmt(fmt)	"rtas: " fmt

#include <linux/bsearch.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stdarg.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>

#include <asm/delay.h>
#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/rtas-work-area.h>
#include <asm/rtas.h>
#include <asm/time.h>
#include <asm/trace.h>
#include <asm/udbg.h>

struct rtas_filter {
	/* Indexes into the args buffer, -1 if not used */
	const int buf_idx1;
	const int size_idx1;
	const int buf_idx2;
	const int size_idx2;
	/*
	 * Assumed buffer size per the spec if the function does not
	 * have a size parameter, e.g. ibm,errinjct. 0 if unused.
	 */
	const int fixed_size;
};

/**
 * struct rtas_function - Descriptor for RTAS functions.
 *
 * @token: Value of @name if it exists under the /rtas node.
 * @name: Function name.
 * @filter: If non-NULL, invoking this function via the rtas syscall is
 *          generally allowed, and @filter describes constraints on the
 *          arguments. See also @banned_for_syscall_on_le.
 * @banned_for_syscall_on_le: Set when call via sys_rtas is generally allowed
 *                            but specifically restricted on ppc64le. Such
 *                            functions are believed to have no users on
 *                            ppc64le, and we want to keep it that way. It does
 *                            not make sense for this to be set when @filter
 *                            is NULL.
 * @lock: Pointer to an optional dedicated per-function mutex. This
 *        should be set for functions that require multiple calls in
 *        sequence to complete a single operation, and such sequences
 *        will disrupt each other if allowed to interleave. Users of
 *        this function are required to hold the associated lock for
 *        the duration of the call sequence. Add an explanatory
 *        comment to the function table entry if setting this member.
 */
struct rtas_function {
	s32 token;
	const bool banned_for_syscall_on_le:1;
	const char * const name;
	const struct rtas_filter *filter;
	struct mutex *lock;
};

/*
 * Per-function locks for sequence-based RTAS functions.
 */
static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock);
static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock);
static DEFINE_MUTEX(rtas_ibm_get_indices_lock);
static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock);
static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock);
static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock);
DEFINE_MUTEX(rtas_ibm_get_vpd_lock);

static struct rtas_function rtas_function_table[] __ro_after_init = {
	[RTAS_FNIDX__CHECK_EXCEPTION] = {
		.name = "check-exception",
	},
	[RTAS_FNIDX__DISPLAY_CHARACTER] = {
		.name = "display-character",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__EVENT_SCAN] = {
		.name = "event-scan",
	},
	[RTAS_FNIDX__FREEZE_TIME_BASE] = {
		.name = "freeze-time-base",
	},
	[RTAS_FNIDX__GET_POWER_LEVEL] = {
		.name = "get-power-level",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__GET_SENSOR_STATE] = {
		.name = "get-sensor-state",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__GET_TERM_CHAR] = {
		.name = "get-term-char",
	},
	[RTAS_FNIDX__GET_TIME_OF_DAY] = {
		.name = "get-time-of-day",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_ACTIVATE_FIRMWARE] = {
		.name = "ibm,activate-firmware",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
		/*
		 * PAPR+ as of v2.13 doesn't explicitly impose any
		 * restriction, but this typically requires multiple
		 * calls before success, and there's no reason to
		 * allow sequences to interleave.
		 */
		.lock = &rtas_ibm_activate_firmware_lock,
	},
	[RTAS_FNIDX__IBM_CBE_START_PTCAL] = {
		.name = "ibm,cbe-start-ptcal",
	},
	[RTAS_FNIDX__IBM_CBE_STOP_PTCAL] = {
		.name = "ibm,cbe-stop-ptcal",
	},
	[RTAS_FNIDX__IBM_CHANGE_MSI] = {
		.name = "ibm,change-msi",
	},
	[RTAS_FNIDX__IBM_CLOSE_ERRINJCT] = {
		.name = "ibm,close-errinjct",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_CONFIGURE_BRIDGE] = {
		.name = "ibm,configure-bridge",
	},
	[RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR] = {
		.name = "ibm,configure-connector",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 0, .size_idx1 = -1,
			.buf_idx2 = 1, .size_idx2 = -1,
			.fixed_size = 4096,
		},
	},
	[RTAS_FNIDX__IBM_CONFIGURE_KERNEL_DUMP] = {
		.name = "ibm,configure-kernel-dump",
	},
	[RTAS_FNIDX__IBM_CONFIGURE_PE] = {
		.name = "ibm,configure-pe",
	},
	[RTAS_FNIDX__IBM_CREATE_PE_DMA_WINDOW] = {
		.name = "ibm,create-pe-dma-window",
	},
	[RTAS_FNIDX__IBM_DISPLAY_MESSAGE] = {
		.name = "ibm,display-message",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 0, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_ERRINJCT] = {
		.name = "ibm,errinjct",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 2, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
			.fixed_size = 1024,
		},
	},
	[RTAS_FNIDX__IBM_EXTI2C] = {
		.name = "ibm,exti2c",
	},
	[RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO] = {
		.name = "ibm,get-config-addr-info",
	},
	[RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO2] = {
		.name = "ibm,get-config-addr-info2",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_GET_DYNAMIC_SENSOR_STATE] = {
		.name = "ibm,get-dynamic-sensor-state",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
		/*
		 * PAPR+ v2.13 R1–7.3.19–3 is explicit that the OS
		 * must not call ibm,get-dynamic-sensor-state with
		 * different inputs until a non-retry status has been
		 * returned.
		 */
		.lock = &rtas_ibm_get_dynamic_sensor_state_lock,
	},
	[RTAS_FNIDX__IBM_GET_INDICES] = {
		.name = "ibm,get-indices",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 2, .size_idx1 = 3,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
		/*
		 * PAPR+ v2.13 R1–7.3.17–2 says that the OS must not
		 * interleave ibm,get-indices call sequences with
		 * different inputs.
		 */
		.lock = &rtas_ibm_get_indices_lock,
	},
	[RTAS_FNIDX__IBM_GET_RIO_TOPOLOGY] = {
		.name = "ibm,get-rio-topology",
	},
	[RTAS_FNIDX__IBM_GET_SYSTEM_PARAMETER] = {
		.name = "ibm,get-system-parameter",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 1, .size_idx1 = 2,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_GET_VPD] = {
		.name = "ibm,get-vpd",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 0, .size_idx1 = -1,
			.buf_idx2 = 1, .size_idx2 = 2,
		},
		/*
		 * PAPR+ v2.13 R1–7.3.20–4 indicates that sequences
		 * should not be allowed to interleave.
		 */
		.lock = &rtas_ibm_get_vpd_lock,
	},
	[RTAS_FNIDX__IBM_GET_XIVE] = {
		.name = "ibm,get-xive",
	},
	[RTAS_FNIDX__IBM_INT_OFF] = {
		.name = "ibm,int-off",
	},
	[RTAS_FNIDX__IBM_INT_ON] = {
		.name = "ibm,int-on",
	},
	[RTAS_FNIDX__IBM_IO_QUIESCE_ACK] = {
		.name = "ibm,io-quiesce-ack",
	},
	[RTAS_FNIDX__IBM_LPAR_PERFTOOLS] = {
		.name = "ibm,lpar-perftools",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 2, .size_idx1 = 3,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
		/*
		 * PAPR+ v2.13 R1–7.3.26–6 says the OS should allow
		 * only one call sequence in progress at a time.
		 */
		.lock = &rtas_ibm_lpar_perftools_lock,
	},
	[RTAS_FNIDX__IBM_MANAGE_FLASH_IMAGE] = {
		.name = "ibm,manage-flash-image",
	},
	[RTAS_FNIDX__IBM_MANAGE_STORAGE_PRESERVATION] = {
		.name = "ibm,manage-storage-preservation",
	},
	[RTAS_FNIDX__IBM_NMI_INTERLOCK] = {
		.name = "ibm,nmi-interlock",
	},
	[RTAS_FNIDX__IBM_NMI_REGISTER] = {
		.name = "ibm,nmi-register",
	},
	[RTAS_FNIDX__IBM_OPEN_ERRINJCT] = {
		.name = "ibm,open-errinjct",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_OPEN_SRIOV_ALLOW_UNFREEZE] = {
		.name = "ibm,open-sriov-allow-unfreeze",
	},
	[RTAS_FNIDX__IBM_OPEN_SRIOV_MAP_PE_NUMBER] = {
		.name = "ibm,open-sriov-map-pe-number",
	},
	[RTAS_FNIDX__IBM_OS_TERM] = {
		.name = "ibm,os-term",
	},
	[RTAS_FNIDX__IBM_PARTNER_CONTROL] = {
		.name = "ibm,partner-control",
	},
	[RTAS_FNIDX__IBM_PHYSICAL_ATTESTATION] = {
		.name = "ibm,physical-attestation",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 0, .size_idx1 = 1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
		/*
		 * This follows a sequence-based pattern similar to
		 * ibm,get-vpd et al. Since PAPR+ restricts
		 * interleaving call sequences for other functions of
		 * this style, assume the restriction applies here,
		 * even though it's not explicit in the spec.
		 */
		.lock = &rtas_ibm_physical_attestation_lock,
	},
	[RTAS_FNIDX__IBM_PLATFORM_DUMP] = {
		.name = "ibm,platform-dump",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 4, .size_idx1 = 5,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
		/*
		 * PAPR+ v2.13 7.3.3.4.1 indicates that concurrent
		 * sequences of ibm,platform-dump are allowed if they
		 * are operating on different dump tags. So leave the
		 * lock pointer unset for now. This may need
		 * reconsideration if kernel-internal users appear.
		 */
	},
	[RTAS_FNIDX__IBM_POWER_OFF_UPS] = {
		.name = "ibm,power-off-ups",
	},
	[RTAS_FNIDX__IBM_QUERY_INTERRUPT_SOURCE_NUMBER] = {
		.name = "ibm,query-interrupt-source-number",
	},
	[RTAS_FNIDX__IBM_QUERY_PE_DMA_WINDOW] = {
		.name = "ibm,query-pe-dma-window",
	},
	[RTAS_FNIDX__IBM_READ_PCI_CONFIG] = {
		.name = "ibm,read-pci-config",
	},
	[RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE] = {
		.name = "ibm,read-slot-reset-state",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2] = {
		.name = "ibm,read-slot-reset-state2",
	},
	[RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
		.name = "ibm,remove-pe-dma-window",
	},
	[RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW] = {
		/*
		 * Note: PAPR+ v2.13 7.3.31.4.1 spells this as
		 * "ibm,reset-pe-dma-windows" (plural), but RTAS
		 * implementations use the singular form in practice.
		 */
		.name = "ibm,reset-pe-dma-window",
	},
	[RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
		.name = "ibm,scan-log-dump",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 0, .size_idx1 = 1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR] = {
		.name = "ibm,set-dynamic-indicator",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 2, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
		/*
		 * PAPR+ v2.13 R1–7.3.18–3 says the OS must not call
		 * this function with different inputs until a
		 * non-retry status has been returned.
		 */
		.lock = &rtas_ibm_set_dynamic_indicator_lock,
	},
	[RTAS_FNIDX__IBM_SET_EEH_OPTION] = {
		.name = "ibm,set-eeh-option",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_SET_SLOT_RESET] = {
		.name = "ibm,set-slot-reset",
	},
	[RTAS_FNIDX__IBM_SET_SYSTEM_PARAMETER] = {
		.name = "ibm,set-system-parameter",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_SET_XIVE] = {
		.name = "ibm,set-xive",
	},
	[RTAS_FNIDX__IBM_SLOT_ERROR_DETAIL] = {
		.name = "ibm,slot-error-detail",
	},
	[RTAS_FNIDX__IBM_SUSPEND_ME] = {
		.name = "ibm,suspend-me",
		.banned_for_syscall_on_le = true,
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__IBM_TUNE_DMA_PARMS] = {
		.name = "ibm,tune-dma-parms",
	},
	[RTAS_FNIDX__IBM_UPDATE_FLASH_64_AND_REBOOT] = {
		.name = "ibm,update-flash-64-and-reboot",
	},
	[RTAS_FNIDX__IBM_UPDATE_NODES] = {
		.name = "ibm,update-nodes",
		.banned_for_syscall_on_le = true,
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 0, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
			.fixed_size = 4096,
		},
	},
	[RTAS_FNIDX__IBM_UPDATE_PROPERTIES] = {
		.name = "ibm,update-properties",
		.banned_for_syscall_on_le = true,
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = 0, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
			.fixed_size = 4096,
		},
	},
	[RTAS_FNIDX__IBM_VALIDATE_FLASH_IMAGE] = {
		.name = "ibm,validate-flash-image",
	},
	[RTAS_FNIDX__IBM_WRITE_PCI_CONFIG] = {
		.name = "ibm,write-pci-config",
	},
	[RTAS_FNIDX__NVRAM_FETCH] = {
		.name = "nvram-fetch",
	},
	[RTAS_FNIDX__NVRAM_STORE] = {
		.name = "nvram-store",
	},
	[RTAS_FNIDX__POWER_OFF] = {
		.name = "power-off",
	},
	[RTAS_FNIDX__PUT_TERM_CHAR] = {
		.name = "put-term-char",
	},
	[RTAS_FNIDX__QUERY_CPU_STOPPED_STATE] = {
		.name = "query-cpu-stopped-state",
	},
	[RTAS_FNIDX__READ_PCI_CONFIG] = {
		.name = "read-pci-config",
	},
	[RTAS_FNIDX__RTAS_LAST_ERROR] = {
		.name = "rtas-last-error",
	},
	[RTAS_FNIDX__SET_INDICATOR] = {
		.name = "set-indicator",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__SET_POWER_LEVEL] = {
		.name = "set-power-level",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__SET_TIME_FOR_POWER_ON] = {
		.name = "set-time-for-power-on",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__SET_TIME_OF_DAY] = {
		.name = "set-time-of-day",
		.filter = &(const struct rtas_filter) {
			.buf_idx1 = -1, .size_idx1 = -1,
			.buf_idx2 = -1, .size_idx2 = -1,
		},
	},
	[RTAS_FNIDX__START_CPU] = {
		.name = "start-cpu",
	},
	[RTAS_FNIDX__STOP_SELF] = {
		.name = "stop-self",
	},
	[RTAS_FNIDX__SYSTEM_REBOOT] = {
		.name = "system-reboot",
	},
	[RTAS_FNIDX__THAW_TIME_BASE] = {
		.name = "thaw-time-base",
	},
	[RTAS_FNIDX__WRITE_PCI_CONFIG] = {
		.name = "write-pci-config",
	},
};

#define for_each_rtas_function(funcp)                                       \
	for (funcp = &rtas_function_table[0];                               \
	     funcp < &rtas_function_table[ARRAY_SIZE(rtas_function_table)]; \
	     ++funcp)

/*
 * Nearly all RTAS calls need to be serialized. All uses of the
 * default rtas_args block must hold rtas_lock.
 *
 * Exceptions to the RTAS serialization requirement (e.g. stop-self)
 * must use a separate rtas_args structure.
 */
static DEFINE_RAW_SPINLOCK(rtas_lock);
static struct rtas_args rtas_args;

/**
 * rtas_function_token() - RTAS function token lookup.
 * @handle: Function handle, e.g. RTAS_FN_EVENT_SCAN.
 *
 * Context: Any context.
 * Return: the token value for the function if implemented by this platform,
 *         otherwise RTAS_UNKNOWN_SERVICE.
 */
s32 rtas_function_token(const rtas_fn_handle_t handle)
{
	const size_t index = handle.index;
	const bool out_of_bounds = index >= ARRAY_SIZE(rtas_function_table);

	if (WARN_ONCE(out_of_bounds, "invalid function index %zu", index))
		return RTAS_UNKNOWN_SERVICE;
	/*
	 * Various drivers attempt token lookups on non-RTAS
	 * platforms.
	 */
	if (!rtas.dev)
		return RTAS_UNKNOWN_SERVICE;

	return rtas_function_table[index].token;
}
EXPORT_SYMBOL_GPL(rtas_function_token);
static int rtas_function_cmp(const void *a, const void *b)
{
	const struct rtas_function *f1 = a;
	const struct rtas_function *f2 = b;

	return strcmp(f1->name, f2->name);
}

/*
 * Boot-time initialization of the function table needs the lookup to
 * return a non-const-qualified object. Use rtas_name_to_function()
 * in all other contexts.
 */
static struct rtas_function *__rtas_name_to_function(const char *name)
{
	const struct rtas_function key = {
		.name = name,
	};
	struct rtas_function *found;

	found = bsearch(&key, rtas_function_table, ARRAY_SIZE(rtas_function_table),
			sizeof(rtas_function_table[0]), rtas_function_cmp);

	return found;
}

static const struct rtas_function *rtas_name_to_function(const char *name)
{
	return __rtas_name_to_function(name);
}

static DEFINE_XARRAY(rtas_token_to_function_xarray);

static int __init rtas_token_to_function_xarray_init(void)
{
	const struct rtas_function *func;
	int err = 0;

	for_each_rtas_function(func) {
		const s32 token = func->token;

		if (token == RTAS_UNKNOWN_SERVICE)
			continue;

		err = xa_err(xa_store(&rtas_token_to_function_xarray,
				      token, (void *)func, GFP_KERNEL));
		if (err)
			break;
	}

	return err;
}
arch_initcall(rtas_token_to_function_xarray_init);

/*
 * For use by sys_rtas(), where the token value is provided by user
 * space and we don't want to warn on failed lookups.
 */
static const struct rtas_function *rtas_token_to_function_untrusted(s32 token)
{
	return xa_load(&rtas_token_to_function_xarray, token);
}

/*
 * Reverse lookup for deriving the function descriptor from a
 * known-good token value in contexts where the former is not already
 * available. @token must be valid, e.g. derived from the result of a
 * prior lookup against the function table.
 */
static const struct rtas_function *rtas_token_to_function(s32 token)
{
	const struct rtas_function *func;

	if (WARN_ONCE(token < 0, "invalid token %d", token))
		return NULL;

	func = rtas_token_to_function_untrusted(token);
	if (func)
		return func;
	/*
	 * Fall back to linear scan in case the reverse mapping hasn't
	 * been initialized yet.
	 */
	if (xa_empty(&rtas_token_to_function_xarray)) {
		for_each_rtas_function(func) {
			if (func->token == token)
				return func;
		}
	}

	WARN_ONCE(true, "unexpected failed lookup for token %d", token);
	return NULL;
}

/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);

static void __do_enter_rtas(struct rtas_args *args)
{
	enter_rtas(__pa(args));
	srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
}

static void __do_enter_rtas_trace(struct rtas_args *args)
{
	const struct rtas_function *func = rtas_token_to_function(be32_to_cpu(args->token));

	/*
	 * If there is a per-function lock, it must be held by the
	 * caller.
	 */
	if (func->lock)
		lockdep_assert_held(func->lock);

	if (args == &rtas_args)
		lockdep_assert_held(&rtas_lock);

	trace_rtas_input(args, func->name);
	trace_rtas_ll_entry(args);

	__do_enter_rtas(args);

	trace_rtas_ll_exit(args);
	trace_rtas_output(args, func->name);
}

static void do_enter_rtas(struct rtas_args *args)
{
	const unsigned long msr = mfmsr();
	/*
	 * Situations where we want to skip any active tracepoints for
	 * safety reasons:
	 *
	 * 1. The last code executed on an offline CPU as it stops,
	 *    i.e. we're about to call stop-self. The tracepoints'
	 *    function name lookup uses xarray, which uses RCU, which
	 *    isn't valid to call on an offline CPU.
	 *    Any events emitted on an offline CPU will be discarded
	 *    anyway.
	 *
	 * 2. In real mode, as when invoking ibm,nmi-interlock from
	 *    the pseries MCE handler. We cannot count on trace
	 *    buffers or the entries in rtas_token_to_function_xarray
	 *    to be contained in the RMO.
	 */
	const unsigned long mask = MSR_IR | MSR_DR;
	const bool can_trace = likely(cpu_online(raw_smp_processor_id()) &&
				      (msr & mask) == mask);
	/*
	 * Make sure MSR[RI] is currently enabled as it will be forced later
	 * in enter_rtas.
	 */
	BUG_ON(!(msr & MSR_RI));

	BUG_ON(!irqs_disabled());

	hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */

	if (can_trace)
		__do_enter_rtas_trace(args);
	else
		__do_enter_rtas(args);
}

struct rtas_t rtas;

DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL_GPL(rtas_data_buf_lock);

char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K);
EXPORT_SYMBOL_GPL(rtas_data_buf);

unsigned long rtas_rmo_buf;

/*
 * If non-NULL, this gets called when the kernel terminates.
 * This is done like this so rtas_flash can be a module.
 */
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL_GPL(rtas_flash_term_hook);

/*
 * call_rtas_display_status and call_rtas_display_status_delay
 * are designed only for very early low-level debugging, which
 * is why the token is hard-coded to 10.
 */
static void call_rtas_display_status(unsigned char c)
{
	unsigned long flags;

	if (!rtas.base)
		return;

	raw_spin_lock_irqsave(&rtas_lock, flags);
	rtas_call_unlocked(&rtas_args, 10, 1, 1, NULL, c);
	raw_spin_unlock_irqrestore(&rtas_lock, flags);
}

static void call_rtas_display_status_delay(char c)
{
	static int pending_newline = 0;  /* did last write end with unprinted newline? */
	static int width = 16;

	if (c == '\n') {
		while (width-- > 0)
			call_rtas_display_status(' ');
		width = 16;
		mdelay(500);
		pending_newline = 1;
	} else {
		if (pending_newline) {
			call_rtas_display_status('\r');
			call_rtas_display_status('\n');
		}
		pending_newline = 0;
		if (width--) {
			call_rtas_display_status(c);
			udelay(10000);
		}
	}
}

void __init udbg_init_rtas_panel(void)
{
	udbg_putc = call_rtas_display_status_delay;
}

#ifdef CONFIG_UDBG_RTAS_CONSOLE

/* If you think you're dying before early_init_dt_scan_rtas() does its
 * work, you can hard code the token values for your firmware here and
 * hardcode rtas.base/entry etc.
 */
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;

static void udbg_rtascon_putc(char c)
{
	int tries;

	if (!rtas.base)
		return;

	/* Add CRs before LFs */
	if (c == '\n')
		udbg_rtascon_putc('\r');

	/* if there is more than one character to be displayed, wait a bit */
	for (tries = 0; tries < 16; tries++) {
		if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
			break;
		udelay(1000);
	}
}

static int udbg_rtascon_getc_poll(void)
{
	int c;

	if (!rtas.base)
		return -1;

	if (rtas_call(rtas_getchar_token, 0, 2, &c))
		return -1;

	return c;
}

static int udbg_rtascon_getc(void)
{
	int c;

	while ((c = udbg_rtascon_getc_poll()) == -1)
		;

	return c;
}


void __init udbg_init_rtas_console(void)
{
	udbg_putc = udbg_rtascon_putc;
	udbg_getc = udbg_rtascon_getc;
	udbg_getc_poll = udbg_rtascon_getc_poll;
}
#endif /* CONFIG_UDBG_RTAS_CONSOLE */

void rtas_progress(char *s, unsigned short hex)
{
	struct device_node *root;
	int width;
	const __be32 *p;
	char *os;
	static int display_character, set_indicator;
	static int display_width, display_lines, form_feed;
	static const int *row_width;
	static DEFINE_SPINLOCK(progress_lock);
	static int current_line;
	static int pending_newline = 0;	/* did last write end with unprinted newline? */

	if (!rtas.base)
		return;

	if (display_width == 0) {
		display_width = 0x10;
		if ((root = of_find_node_by_path("/rtas"))) {
			if ((p = of_get_property(root,
					"ibm,display-line-length", NULL)))
				display_width = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,form-feed", NULL)))
				form_feed = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,display-number-of-lines", NULL)))
				display_lines = be32_to_cpu(*p);
			row_width = of_get_property(root,
					"ibm,display-truncation-length", NULL);
			of_node_put(root);
		}
		display_character = rtas_function_token(RTAS_FN_DISPLAY_CHARACTER);
		set_indicator = rtas_function_token(RTAS_FN_SET_INDICATOR);
	}

	if (display_character == RTAS_UNKNOWN_SERVICE) {
		/* use hex display if available */
		if (set_indicator != RTAS_UNKNOWN_SERVICE)
			rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
		return;
	}

	spin_lock(&progress_lock);

	/*
	 * Last write ended with newline, but we didn't print it since
	 * it would just clear the bottom line of output. Print it now
	 * instead.
	 *
	 * If no newline is pending and form feed is supported, clear the
	 * display with a form feed; otherwise, print a CR to start output
	 * at the beginning of the line.
	 */
	if (pending_newline) {
		rtas_call(display_character, 1, 1, NULL, '\r');
		rtas_call(display_character, 1, 1, NULL, '\n');
		pending_newline = 0;
	} else {
		current_line = 0;
		if (form_feed)
			rtas_call(display_character, 1, 1, NULL,
				  (char)form_feed);
		else
			rtas_call(display_character, 1, 1, NULL, '\r');
	}

	if (row_width)
		width = row_width[current_line];
	else
		width = display_width;
	os = s;
	while (*os) {
		if (*os == '\n' || *os == '\r') {
			/* If newline is the last character, save it
			 * until next call to avoid bumping up the
			 * display output.
			 */
			if (*os == '\n' && !os[1]) {
				pending_newline = 1;
				current_line++;
				if (current_line > display_lines-1)
					current_line = display_lines-1;
				spin_unlock(&progress_lock);
				return;
			}

			/* RTAS wants CR-LF, not just LF */

			if (*os == '\n') {
				rtas_call(display_character, 1, 1, NULL, '\r');
				rtas_call(display_character, 1, 1, NULL, '\n');
			} else {
				/* CR might be used to re-draw a line, so we'll
				 * leave it alone and not add LF.
				 */
				rtas_call(display_character, 1, 1, NULL, *os);
			}

			if (row_width)
				width = row_width[current_line];
			else
				width = display_width;
		} else {
			width--;
			rtas_call(display_character, 1, 1, NULL, *os);
		}

		os++;

		/* if we overwrite the screen length */
		if (width <= 0)
			while ((*os != 0) && (*os != '\n') && (*os != '\r'))
				os++;
	}

	spin_unlock(&progress_lock);
}
EXPORT_SYMBOL_GPL(rtas_progress);		/* needed by rtas_flash module */

int rtas_token(const char *service)
{
	const struct rtas_function *func;
	const __be32 *tokp;

	if (rtas.dev == NULL)
		return RTAS_UNKNOWN_SERVICE;

	func = rtas_name_to_function(service);
	if (func)
		return func->token;
	/*
	 * The caller is looking up a name that is not known to be an
	 * RTAS function. Either it's a function that needs to be
	 * added to the table, or they're misusing rtas_token() to
	 * access non-function properties of the /rtas node. Warn and
	 * fall back to the legacy behavior.
	 */
	WARN_ONCE(1, "unknown function `%s`, should it be added to rtas_function_table?\n",
		  service);

	tokp = of_get_property(rtas.dev, service, NULL);
	return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL_GPL(rtas_token);

#ifdef CONFIG_RTAS_ERROR_LOGGING

static u32 rtas_error_log_max __ro_after_init = RTAS_ERROR_LOG_MAX;

/*
 * Return the firmware-specified size of the error log buffer
 * for all rtas calls that require an error buffer argument.
 * This includes 'check-exception' and 'rtas-last-error'.
 */
int rtas_get_error_log_max(void)
{
	return rtas_error_log_max;
}

static void __init init_error_log_max(void)
{
	static const char propname[] __initconst = "rtas-error-log-max";
	u32 max;

	if (of_property_read_u32(rtas.dev, propname, &max)) {
		pr_warn("%s not found, using default of %u\n",
			propname, RTAS_ERROR_LOG_MAX);
		max = RTAS_ERROR_LOG_MAX;
	}

	if (max > RTAS_ERROR_LOG_MAX) {
		pr_warn("%s = %u, clamping max error log size to %u\n",
			propname, max, RTAS_ERROR_LOG_MAX);
		max = RTAS_ERROR_LOG_MAX;
	}

	rtas_error_log_max = max;
}


static char rtas_err_buf[RTAS_ERROR_LOG_MAX];

/** Return a copy of the detailed error text associated with the
 * most recent failed call to rtas. Because the error text
 * might go stale if there are any other intervening rtas calls,
 * this routine must be called atomically with whatever produced
 * the error (i.e. with rtas_lock still held from the previous call).
 */
static char *__fetch_rtas_last_error(char *altbuf)
{
	const s32 token = rtas_function_token(RTAS_FN_RTAS_LAST_ERROR);
	struct rtas_args err_args, save_args;
	u32 bufsz;
	char *buf = NULL;

	lockdep_assert_held(&rtas_lock);

	if (token == -1)
		return NULL;

	bufsz = rtas_get_error_log_max();

	err_args.token = cpu_to_be32(token);
	err_args.nargs = cpu_to_be32(2);
	err_args.nret = cpu_to_be32(1);
	err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
	err_args.args[1] = cpu_to_be32(bufsz);
	err_args.args[2] = 0;

	save_args = rtas_args;
	rtas_args = err_args;

	do_enter_rtas(&rtas_args);

	err_args = rtas_args;
	rtas_args = save_args;

	/* Log the error in the unlikely case that there was one. */
	if (unlikely(err_args.args[2] == 0)) {
		if (altbuf) {
			buf = altbuf;
		} else {
			buf = rtas_err_buf;
			if (slab_is_available())
				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
		}
		if (buf)
			memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
	}

	return buf;
}

#define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)

#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x)	NULL
#define get_errorlog_buffer()		NULL
static void __init init_error_log_max(void) {}
#endif


static void
va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
		      va_list list)
{
	int i;

	args->token = cpu_to_be32(token);
	args->nargs = cpu_to_be32(nargs);
	args->nret = cpu_to_be32(nret);
	args->rets = &(args->args[nargs]);

	for (i = 0; i < nargs; ++i)
		args->args[i] = cpu_to_be32(va_arg(list, __u32));

	for (i = 0; i < nret; ++i)
		args->rets[i] = 0;

	do_enter_rtas(args);
}

/**
 * rtas_call_unlocked() - Invoke an RTAS firmware function without synchronization.
 * @args: RTAS parameter block to be used for the call, must obey RTAS addressing
 *        constraints.
 * @token: Identifies the function being invoked.
 * @nargs: Number of input parameters. Does not include token.
 * @nret: Number of output parameters, including the call status.
 * @....: List of @nargs input parameters.
 *
 * Invokes the RTAS function indicated by @token, which the caller
 * should obtain via rtas_function_token().
 *
 * This function is similar to rtas_call(), but must be used with a
 * limited set of RTAS calls specifically exempted from the general
 * requirement that only one RTAS call may be in progress at any
 * time. Examples include stop-self and ibm,nmi-interlock.
 */
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
{
	va_list list;

	va_start(list, nret);
	va_rtas_call_unlocked(args, token, nargs, nret, list);
	va_end(list);
}

static bool token_is_restricted_errinjct(s32 token)
{
	return token == rtas_function_token(RTAS_FN_IBM_OPEN_ERRINJCT) ||
	       token == rtas_function_token(RTAS_FN_IBM_ERRINJCT);
}

/**
 * rtas_call() - Invoke an RTAS firmware function.
 * @token: Identifies the function being invoked.
 * @nargs: Number of input parameters. Does not include token.
 * @nret: Number of output parameters, including the call status.
 * @outputs: Array of @nret output words.
 * @....: List of @nargs input parameters.
 *
 * Invokes the RTAS function indicated by @token, which the caller
 * should obtain via rtas_function_token().
 *
 * The @nargs and @nret arguments must match the number of input and
 * output parameters specified for the RTAS function.
 *
 * rtas_call() returns RTAS status codes, not conventional Linux errno
 * values. Callers must translate any failure to an appropriate errno
 * in syscall context. Most callers of RTAS functions that can return
 * -2 or 990x should use rtas_busy_delay() to correctly handle those
 * statuses before calling again.
 *
 * The return value descriptions are adapted from 7.2.8 [RTAS] Return
 * Codes of the PAPR and CHRP specifications.
 *
 * Context: Process context preferably, interrupt context if
 *          necessary. Acquires an internal spinlock and may perform
 *          GFP_ATOMIC slab allocation in error path. Unsafe for NMI
 *          context.
 * Return:
 * * 0             - RTAS function call succeeded.
 * * -1            - RTAS function encountered a hardware or
 *                   platform error, or the token is invalid,
 *                   or the function is restricted by kernel policy.
 * * -2            - Specs say "A necessary hardware device was busy,
 *                   and the requested function could not be
 *                   performed. The operation should be retried at
 *                   a later time." This is misleading, at least with
 *                   respect to current RTAS implementations. What it
 *                   usually means in practice is that the function
 *                   could not be completed while meeting RTAS's
 *                   deadline for returning control to the OS (250us
 *                   for PAPR/PowerVM, typically), but the call may be
 *                   immediately reattempted to resume work on it.
 * * -3            - Parameter error.
 * * -7            - Unexpected state change.
 * * 9000...9899   - Vendor-specific success codes.
 * * 9900...9905   - Advisory extended delay. Caller should try
 *                   again after ~10^x ms has elapsed, where x is
 *                   the last digit of the status [0-5]. Again going
 *                   beyond the PAPR text, 990x on PowerVM indicates
 *                   contention for RTAS-internal resources. Other
 *                   RTAS call sequences in progress should be
 *                   allowed to complete before reattempting the
 *                   call.
 * * -9000         - Multi-level isolation error.
 * * -9999...-9004 - Vendor-specific error codes.
 * * Additional negative values - Function-specific error.
 * * Additional positive values - Function-specific success.
 */
int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
	struct pin_cookie cookie;
	va_list list;
	int i;
	unsigned long flags;
	struct rtas_args *args;
	char *buff_copy = NULL;
	int ret;

	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
		return -1;

	if (token_is_restricted_errinjct(token)) {
		/*
		 * It would be nicer to not discard the error value
		 * from security_locked_down(), but callers expect an
		 * RTAS status, not an errno.
		 */
		if (security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION))
			return -1;
	}

	if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) {
		WARN_ON_ONCE(1);
		return -1;
	}

	raw_spin_lock_irqsave(&rtas_lock, flags);
	cookie = lockdep_pin_lock(&rtas_lock);

	/* We use the global rtas args buffer */
	args = &rtas_args;

	va_start(list, outputs);
	va_rtas_call_unlocked(args, token, nargs, nret, list);
	va_end(list);

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(args->rets[0]) == -1)
		buff_copy = __fetch_rtas_last_error(NULL);

	if (nret > 1 && outputs != NULL)
		for (i = 0; i < nret-1; ++i)
			outputs[i] = be32_to_cpu(args->rets[i + 1]);
	ret = (nret > 0) ? be32_to_cpu(args->rets[0]) : 0;

	lockdep_unpin_lock(&rtas_lock, cookie);
	raw_spin_unlock_irqrestore(&rtas_lock, flags);

	if (buff_copy) {
		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
		if (slab_is_available())
			kfree(buff_copy);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(rtas_call);

/**
 * rtas_busy_delay_time() - From an RTAS status value, calculate the
 *                          suggested delay time in milliseconds.
 *
 * @status: a value returned from rtas_call() or similar APIs which return
 *          the status of a RTAS function call.
 *
 * Context: Any context.
 *
 * Return:
 * * 100000 - If @status is 9905.
 * * 10000  - If @status is 9904.
 * * 1000   - If @status is 9903.
 * * 100    - If @status is 9902.
 * * 10     - If @status is 9901.
 * * 1      - If @status is either 9900 or -2. This is "wrong" for -2, but
 *            some callers depend on this behavior, and the worst outcome
 *            is that they will delay for longer than necessary.
 * * 0      - If @status is not a busy or extended delay value.
 */
unsigned int rtas_busy_delay_time(int status)
{
	int order;
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		order = status - RTAS_EXTENDED_DELAY_MIN;
		for (ms = 1; order > 0; order--)
			ms *= 10;
	}

	return ms;
}

/*
 * Early boot fallback for rtas_busy_delay().
 */
static bool __init rtas_busy_delay_early(int status)
{
	static size_t successive_ext_delays __initdata;
	bool retry;

	switch (status) {
	case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
		/*
		 * In the unlikely case that we receive an extended
		 * delay status in early boot, the OS is probably not
		 * the cause, and there's nothing we can do to clear
		 * the condition. Best we can do is delay for a bit
		 * and hope it's transient. Lie to the caller if it
		 * seems like we're stuck in a retry loop.
		 */
		mdelay(1);
		retry = true;
		successive_ext_delays += 1;
		if (successive_ext_delays > 1000) {
			pr_err("too many extended delays, giving up\n");
			dump_stack();
			retry = false;
			successive_ext_delays = 0;
		}
		break;
	case RTAS_BUSY:
		retry = true;
		successive_ext_delays = 0;
		break;
	default:
		retry = false;
		successive_ext_delays = 0;
		break;
	}

	return retry;
}

/**
 * rtas_busy_delay() - helper for RTAS busy and extended delay statuses
 *
 * @status: a value returned from rtas_call() or similar APIs which return
 *          the status of a RTAS function call.
 *
 * Context: Process context. May sleep or schedule.
 *
 * Return:
 * * true  - @status is RTAS_BUSY or an extended delay hint. The
 *           caller may assume that the CPU has been yielded if necessary,
 *           and that an appropriate delay for @status has elapsed.
 *           Generally the caller should reattempt the RTAS call which
 *           yielded @status.
 *
 * * false - @status is not @RTAS_BUSY nor an extended delay hint. The
 *           caller is responsible for handling @status.
 */
bool __ref rtas_busy_delay(int status)
{
	unsigned int ms;
	bool ret;

	/*
	 * Can't do timed sleeps before timekeeping is up.
	 */
	if (system_state < SYSTEM_SCHEDULING)
		return rtas_busy_delay_early(status);

	switch (status) {
	case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
		ret = true;
		ms = rtas_busy_delay_time(status);
		/*
		 * The extended delay hint can be as high as 100 seconds.
		 * Surely any function returning such a status is either
		 * buggy or isn't going to be significantly slowed by us
		 * polling at 1HZ. Clamp the sleep time to one second.
		 */
		ms = clamp(ms, 1U, 1000U);
		/*
		 * The delay hint is an order-of-magnitude suggestion, not a
		 * minimum. It is fine, possibly even advantageous, for us to
		 * pause for less time than hinted. To make sure pause time will
		 * not be way longer than requested independent of HZ
		 * configuration, use fsleep(). See fsleep() for details of
		 * used sleeping functions.
		 */
		fsleep(ms * 1000);
		break;
	case RTAS_BUSY:
		ret = true;
		/*
		 * We should call again immediately if there's no other
		 * work to do.
		 */
		cond_resched();
		break;
	default:
		ret = false;
		/*
		 * Not a busy or extended delay status; the caller should
		 * handle @status itself. Ensure we warn on misuses in
		 * atomic context regardless.
		 */
		might_sleep();
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rtas_busy_delay);
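/*
 * A minimal sketch of the usual pattern for callers that may see -2 or
 * 990x statuses: pair rtas_call() with rtas_busy_delay(), as
 * rtas_set_power_level() and friends below do:
 *
 *	do {
 *		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
 *	} while (rtas_busy_delay(rc));
 *
 * rtas_busy_delay() may sleep, so this pattern is valid only in process
 * context; atomic-context callers must handle busy statuses themselves.
 */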
int rtas_error_rc(int rtas_rc)
{
	int rc;

	switch (rtas_rc) {
	case RTAS_HARDWARE_ERROR:	/* Hardware Error */
		rc = -EIO;
		break;
	case RTAS_INVALID_PARAMETER:	/* Bad indicator/domain/etc */
		rc = -EINVAL;
		break;
	case -9000:			/* Isolation error */
		rc = -EFAULT;
		break;
	case -9001:			/* Outstanding TCE/PTE */
		rc = -EEXIST;
		break;
	case -9002:			/* No usable slot */
		rc = -ENODEV;
		break;
	default:
		pr_err("%s: unexpected error %d\n", __func__, rtas_rc);
		rc = -ERANGE;
		break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(rtas_error_rc);

int rtas_get_power_level(int powerdomain, int *level)
{
	int token = rtas_function_token(RTAS_FN_GET_POWER_LEVEL);
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
		udelay(1);

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL_GPL(rtas_get_power_level);

int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
	int token = rtas_function_token(RTAS_FN_SET_POWER_LEVEL);
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL_GPL(rtas_set_power_level);

int rtas_get_sensor(int sensor, int index, int *state)
{
	int token = rtas_function_token(RTAS_FN_GET_SENSOR_STATE);
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, state, sensor, index);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL_GPL(rtas_get_sensor);

int rtas_get_sensor_fast(int sensor, int index, int *state)
{
	int token = rtas_function_token(RTAS_FN_GET_SENSOR_STATE);
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 2, 2, state, sensor, index);
	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}

bool rtas_indicator_present(int token, int *maxindex)
{
	int proplen, count, i;
	const struct indicator_elem {
		__be32 token;
		__be32 maxindex;
	} *indicators;

	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
	if (!indicators)
		return false;

	count = proplen / sizeof(struct indicator_elem);

	for (i = 0; i < count; i++) {
		if (__be32_to_cpu(indicators[i].token) != token)
			continue;
		if (maxindex)
			*maxindex = __be32_to_cpu(indicators[i].maxindex);
		return true;
	}

	return false;
}

int rtas_set_indicator(int indicator, int index, int new_value)
{
	int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL_GPL(rtas_set_indicator);

/*
 * Ignoring RTAS extended delay
 */
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
	int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);

	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);

	return rc;
}

/**
 * rtas_ibm_suspend_me() - Call ibm,suspend-me to suspend the LPAR.
 *
 * @fw_status: RTAS call status will be placed here if not NULL.
 *
 * rtas_ibm_suspend_me() should be called only on a CPU which has
 * received H_CONTINUE from the H_JOIN hcall. All other active CPUs
 * should be waiting to return from H_JOIN.
 *
 * rtas_ibm_suspend_me() may suspend execution of the OS
 * indefinitely. Callers should take appropriate measures upon return, such as
 * resetting watchdog facilities.
 *
 * Callers may choose to retry this call if @fw_status is
 * %RTAS_THREADS_ACTIVE.
 *
 * Return:
 * 0          - The partition has resumed from suspend, possibly after
 *              migration to a different host.
 * -ECANCELED - The operation was aborted.
 * -EAGAIN    - There were other CPUs not in H_JOIN at the time of the call.
 * -EBUSY     - Some other condition prevented the suspend from succeeding.
 * -EIO       - Hardware/platform error.
 */
int rtas_ibm_suspend_me(int *fw_status)
{
	int token = rtas_function_token(RTAS_FN_IBM_SUSPEND_ME);
	int fwrc;
	int ret;

	fwrc = rtas_call(token, 0, 1, NULL);

	switch (fwrc) {
	case 0:
		ret = 0;
		break;
	case RTAS_SUSPEND_ABORTED:
		ret = -ECANCELED;
		break;
	case RTAS_THREADS_ACTIVE:
		ret = -EAGAIN;
		break;
	case RTAS_NOT_SUSPENDABLE:
	case RTAS_OUTSTANDING_COPROC:
		ret = -EBUSY;
		break;
	case -1:
	default:
		ret = -EIO;
		break;
	}

	if (fw_status)
		*fw_status = fwrc;

	return ret;
}

void __noreturn rtas_restart(char *cmd)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_RESTART);
	pr_emerg("system-reboot returned %d\n",
		 rtas_call(rtas_function_token(RTAS_FN_SYSTEM_REBOOT), 0, 1, NULL));
	for (;;);
}

void rtas_power_off(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);
	/* allow power on only with power button press */
	pr_emerg("power-off returned %d\n",
		 rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));
	for (;;);
}

void __noreturn rtas_halt(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_HALT);
	/* allow power on only with power button press */
	pr_emerg("power-off returned %d\n",
		 rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));
	for (;;);
}

/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];
static bool ibm_extended_os_term;

void rtas_os_term(char *str)
{
	s32 token = rtas_function_token(RTAS_FN_IBM_OS_TERM);
	static struct rtas_args args;
	int status;

	/*
	 * Firmware with the ibm,extended-os-term property is guaranteed
	 * to always return from an ibm,os-term call. Earlier versions without
	 * this property may terminate the partition which we want to avoid
	 * since it interferes with panic_timeout.
	 */

	if (token == RTAS_UNKNOWN_SERVICE || !ibm_extended_os_term)
		return;

	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

	/*
	 * Keep calling as long as RTAS returns a "try again" status,
	 * but don't use rtas_busy_delay(), which potentially
	 * schedules.
	 */
	do {
		rtas_call_unlocked(&args, token, 1, 1, NULL, __pa(rtas_os_term_buf));
		status = be32_to_cpu(args.rets[0]);
	} while (rtas_busy_delay_time(status));

	if (status != 0)
		pr_emerg("ibm,os-term call failed %d\n", status);
}

/**
 * rtas_activate_firmware() - Activate a new version of firmware.
 *
 * Context: This function may sleep.
 *
 * Activate a new version of partition firmware. The OS must call this
 * after resuming from a partition hibernation or migration in order
 * to maintain the ability to perform live firmware updates. It's not
 * catastrophic for this method to be absent or to fail; just log the
 * condition in that case.
 */
void rtas_activate_firmware(void)
{
	int token = rtas_function_token(RTAS_FN_IBM_ACTIVATE_FIRMWARE);
	int fwrc;

	if (token == RTAS_UNKNOWN_SERVICE) {
		pr_notice("ibm,activate-firmware method unavailable\n");
		return;
	}

	mutex_lock(&rtas_ibm_activate_firmware_lock);

	do {
		fwrc = rtas_call(token, 0, 1, NULL);
	} while (rtas_busy_delay(fwrc));

	mutex_unlock(&rtas_ibm_activate_firmware_lock);

	if (fwrc)
		pr_err("ibm,activate-firmware failed (%i)\n", fwrc);
}

/**
 * get_pseries_errorlog() - Find a specific pseries error log in an RTAS
 *                          extended event log.
 * @log: RTAS error/event log
 * @section_id: two character section identifier
 *
 * Return: A pointer to the specified errorlog or NULL if not found.
 */
noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
						      uint16_t section_id)
{
	struct rtas_ext_event_log_v6 *ext_log =
		(struct rtas_ext_event_log_v6 *)log->buffer;
	struct pseries_errorlog *sect;
	unsigned char *p, *log_end;
	uint32_t ext_log_length = rtas_error_extended_log_length(log);
	uint8_t log_format = rtas_ext_event_log_format(ext_log);
	uint32_t company_id = rtas_ext_event_company_id(ext_log);

	/* Check that we understand the format */
	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	log_end = log->buffer + ext_log_length;
	p = ext_log->vendor_log;

	while (p < log_end) {
		sect = (struct pseries_errorlog *)p;
		if (pseries_errorlog_id(sect) == section_id)
			return sect;
		p += pseries_errorlog_length(sect);
	}

	return NULL;
}

/*
 * The sys_rtas syscall, as originally designed, allows root to pass
 * arbitrary physical addresses to RTAS calls. A number of RTAS calls
 * can be abused to write to arbitrary memory and do other things that
 * are potentially harmful to system integrity, and thus should only
 * be used inside the kernel and not exposed to userspace.
 *
 * All known legitimate users of the sys_rtas syscall will only ever
 * pass addresses that fall within the RMO buffer, and use a known
 * subset of RTAS calls.
 *
 * Accordingly, we filter RTAS requests to check that the call is
 * permitted, and that provided pointers fall within the RMO buffer.
 * If a function is allowed to be invoked via the syscall, then its
 * entry in the rtas_functions table points to a rtas_filter that
 * describes its constraints, with the indexes of the parameters which
 * are expected to contain addresses and sizes of buffers allocated
 * inside the RMO buffer.
 */

static bool in_rmo_buf(u32 base, u32 end)
{
	return base >= rtas_rmo_buf &&
		base < (rtas_rmo_buf + RTAS_USER_REGION_SIZE) &&
		base <= end &&
		end >= rtas_rmo_buf &&
		end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE);
}

static bool block_rtas_call(const struct rtas_function *func, int nargs,
			    struct rtas_args *args)
{
	const struct rtas_filter *f;
	const bool is_platform_dump =
		func == &rtas_function_table[RTAS_FNIDX__IBM_PLATFORM_DUMP];
	const bool is_config_conn =
		func == &rtas_function_table[RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR];
	u32 base, size, end;

	/*
	 * Only functions with filters attached are allowed.
	 */
	f = func->filter;
	if (!f)
		goto err;
	/*
	 * And some functions aren't allowed on LE.
	 */
	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) && func->banned_for_syscall_on_le)
		goto err;

	if (f->buf_idx1 != -1) {
		base = be32_to_cpu(args->args[f->buf_idx1]);
		if (f->size_idx1 != -1)
			size = be32_to_cpu(args->args[f->size_idx1]);
		else if (f->fixed_size)
			size = f->fixed_size;
		else
			size = 1;

		end = base + size - 1;

		/*
		 * Special case for ibm,platform-dump - NULL buffer
		 * address is used to indicate end of dump processing
		 */
		if (is_platform_dump && base == 0)
			return false;

		if (!in_rmo_buf(base, end))
			goto err;
	}

	if (f->buf_idx2 != -1) {
		base = be32_to_cpu(args->args[f->buf_idx2]);
		if (f->size_idx2 != -1)
			size = be32_to_cpu(args->args[f->size_idx2]);
		else if (f->fixed_size)
			size = f->fixed_size;
		else
			size = 1;
		end = base + size - 1;

		/*
		 * Special case for ibm,configure-connector where the
		 * address can be 0
		 */
		if (is_config_conn && base == 0)
			return false;

		if (!in_rmo_buf(base, end))
			goto err;
	}

	return false;
err:
	pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
	pr_err_ratelimited("sys_rtas: %s nargs=%d (called by %s)\n",
			   func->name, nargs, current->comm);
	return true;
}
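/*
 * A minimal sketch of what a well-formed sys_rtas request looks like from
 * the filter's point of view: every field of struct rtas_args is
 * big-endian, the token must map to a filtered function, and any
 * buffer-bearing argument must point inside the RTAS_USER_REGION_SIZE
 * window starting at rtas_rmo_buf:
 *
 *	args.token = cpu_to_be32(token);
 *	args.nargs = cpu_to_be32(nargs);
 *	args.nret  = cpu_to_be32(nret);
 *	args.args[buf_idx] = cpu_to_be32(addr);	// addr within the RMO user region
 *
 * Requests that violate any of these constraints are rejected by
 * block_rtas_call() above, and the syscall returns -EINVAL.
 */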
1885 /* We assume the caller passes big-endian arguments */
1886 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
1887 {
1888 	const struct rtas_function *func;
1889 	struct pin_cookie cookie;
1890 	struct rtas_args args;
1891 	unsigned long flags;
1892 	char *buff_copy, *errbuf = NULL;
1893 	int nargs, nret, token;
1894
1895 	if (!capable(CAP_SYS_ADMIN))
1896 		return -EPERM;
1897
1898 	if (!rtas.entry)
1899 		return -EINVAL;
1900
1901 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
1902 		return -EFAULT;
1903
1904 	nargs = be32_to_cpu(args.nargs);
1905 	nret = be32_to_cpu(args.nret);
1906 	token = be32_to_cpu(args.token);
1907
1908 	if (nargs >= ARRAY_SIZE(args.args)
1909 	    || nret > ARRAY_SIZE(args.args)
1910 	    || nargs + nret > ARRAY_SIZE(args.args))
1911 		return -EINVAL;
1912
1913 	nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
1914 	nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);
1915
1916 	/* Copy in args. */
1917 	if (copy_from_user(args.args, uargs->args,
1918 			   nargs * sizeof(rtas_arg_t)) != 0)
1919 		return -EFAULT;
1920
1921 	/*
1922 	 * If this token doesn't correspond to a function the kernel
1923 	 * understands, you're not allowed to call it.
1924 	 */
1925 	func = rtas_token_to_function_untrusted(token);
1926 	if (!func)
1927 		return -EINVAL;
1928
1929 	args.rets = &args.args[nargs];
1930 	memset(args.rets, 0, nret * sizeof(rtas_arg_t));
1931
1932 	if (block_rtas_call(func, nargs, &args))
1933 		return -EINVAL;
1934
1935 	if (token_is_restricted_errinjct(token)) {
1936 		int err;
1937
1938 		err = security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION);
1939 		if (err)
1940 			return err;
1941 	}
1942
1943 	/* Need to handle ibm,suspend_me call specially */
1944 	if (token == rtas_function_token(RTAS_FN_IBM_SUSPEND_ME)) {
1945
1946 		/*
1947 		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
1948 		 * endian, or at least the hcall within it requires it.
1949 		 */
1950 		int rc = 0;
1951 		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
1952 			      | be32_to_cpu(args.args[1]);
1953 		rc = rtas_syscall_dispatch_ibm_suspend_me(handle);
1954 		if (rc == -EAGAIN)
1955 			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
1956 		else if (rc == -EIO)
1957 			args.rets[0] = cpu_to_be32(-1);
1958 		else if (rc)
1959 			return rc;
1960 		goto copy_return;
1961 	}
1962
1963 	buff_copy = get_errorlog_buffer();
1964
1965 	/*
1966 	 * If this function has a mutex assigned to it, we must
1967 	 * acquire it to avoid interleaving with any kernel-based uses
1968 	 * of the same function. Kernel-based sequences acquire the
1969 	 * appropriate mutex explicitly.
1970 	 */
1971 	if (func->lock)
1972 		mutex_lock(func->lock);
1973
1974 	raw_spin_lock_irqsave(&rtas_lock, flags);
1975 	cookie = lockdep_pin_lock(&rtas_lock);
1976
1977 	rtas_args = args;
1978 	do_enter_rtas(&rtas_args);
1979 	args = rtas_args;
1980
1981 	/* A -1 return code indicates that the last command couldn't
1982 	   be completed due to a hardware error. */
1983 	if (be32_to_cpu(args.rets[0]) == -1)
1984 		errbuf = __fetch_rtas_last_error(buff_copy);
1985
1986 	lockdep_unpin_lock(&rtas_lock, cookie);
1987 	raw_spin_unlock_irqrestore(&rtas_lock, flags);
1988
1989 	if (func->lock)
1990 		mutex_unlock(func->lock);
1991
1992 	if (buff_copy) {
1993 		if (errbuf)
1994 			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
1995 		kfree(buff_copy);
1996 	}
1997
1998 copy_return:
1999 	/* Copy out args. */
2000 	if (copy_to_user(uargs->args + nargs,
2001 			 args.args + nargs,
2002 			 nret * sizeof(rtas_arg_t)) != 0)
2003 		return -EFAULT;
2004
2005 	return 0;
2006 }
2007
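/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the rets pointer set up in sys_rtas() above simply aliases the tail of
 * the fixed args[] array, which is why the copy-out starts at
 * uargs->args + nargs. The helper below restates that layout and assumes
 * nargs has already been validated; its name is hypothetical.
 */
static __be32 __maybe_unused example_primary_status(const struct rtas_args *a)
{
	u32 nargs = be32_to_cpu(a->nargs);

	/* rets[0], the primary status word, occupies args[nargs]. */
	return a->args[nargs];
}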
2008 static void __init rtas_function_table_init(void)
2009 {
2010 	struct property *prop;
2011
2012 	for (size_t i = 0; i < ARRAY_SIZE(rtas_function_table); ++i) {
2013 		struct rtas_function *curr = &rtas_function_table[i];
2014 		struct rtas_function *prior;
2015 		int cmp;
2016
2017 		curr->token = RTAS_UNKNOWN_SERVICE;
2018
2019 		if (i == 0)
2020 			continue;
2021 		/*
2022 		 * Ensure table is sorted correctly for binary search
2023 		 * on function names.
2024 		 */
2025 		prior = &rtas_function_table[i - 1];
2026
2027 		cmp = strcmp(prior->name, curr->name);
2028 		if (cmp < 0)
2029 			continue;
2030
2031 		if (cmp == 0) {
2032 			pr_err("'%s' has duplicate function table entries\n",
2033 			       curr->name);
2034 		} else {
2035 			pr_err("function table unsorted: '%s' wrongly precedes '%s'\n",
2036 			       prior->name, curr->name);
2037 		}
2038 	}
2039
2040 	for_each_property_of_node(rtas.dev, prop) {
2041 		struct rtas_function *func;
2042
2043 		if (prop->length != sizeof(u32))
2044 			continue;
2045
2046 		func = __rtas_name_to_function(prop->name);
2047 		if (!func)
2048 			continue;
2049
2050 		func->token = be32_to_cpup((__be32 *)prop->value);
2051
2052 		pr_debug("function %s has token %u\n", func->name, func->token);
2053 	}
2054 }
2055
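/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * once the table has been populated with tokens, typical in-kernel callers
 * follow the pattern below: resolve the token, bail out if the platform
 * does not provide the function, then retry while firmware reports a busy
 * status. rtas_function_token(), rtas_call() and rtas_busy_delay() are the
 * real interfaces; the wrapper itself and its use of get-time-of-day are
 * only an example.
 */
static int __maybe_unused example_read_tod_year(unsigned int *year)
{
	int token = rtas_function_token(RTAS_FN_GET_TIME_OF_DAY);
	int ret[8];
	int fwrc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	do {
		fwrc = rtas_call(token, 0, 8, ret);
	} while (rtas_busy_delay(fwrc));

	if (fwrc)
		return -EIO;

	*year = ret[0];	/* ret[1..6] hold month, day, hour, min, sec, nsec */
	return 0;
}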
2056 /*
2057  * Call early during boot, before mem init, to retrieve the RTAS
2058  * information from the device-tree and allocate the RMO buffer for userland
2059  * accesses.
2060  */
2061 void __init rtas_initialize(void)
2062 {
2063 	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
2064 	u32 base, size, entry;
2065 	int no_base, no_size, no_entry;
2066
2067 	/* Get the RTAS device node and fill in our "rtas" structure with
2068 	 * info about it.
2069 	 */
2070 	rtas.dev = of_find_node_by_name(NULL, "rtas");
2071 	if (!rtas.dev)
2072 		return;
2073
2074 	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
2075 	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
2076 	if (no_base || no_size) {
2077 		of_node_put(rtas.dev);
2078 		rtas.dev = NULL;
2079 		return;
2080 	}
2081
2082 	rtas.base = base;
2083 	rtas.size = size;
2084 	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
2085 	rtas.entry = no_entry ? rtas.base : entry;
2086
2087 	init_error_log_max();
2088
2089 	/* Must be called before any function token lookups */
2090 	rtas_function_table_init();
2091
2092 	/*
2093 	 * Discover this now to avoid a device tree lookup in the
2094 	 * panic path.
2095 	 */
2096 	ibm_extended_os_term = of_property_read_bool(rtas.dev, "ibm,extended-os-term");
2097
2098 	/* If RTAS was found, allocate the RMO buffer for it and reserve
2099 	 * the work area arena.
2100 	 */
2101 #ifdef CONFIG_PPC64
2102 	if (firmware_has_feature(FW_FEATURE_LPAR))
2103 		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
2104 #endif
2105 	rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE,
2106 						 0, rtas_region);
2107 	if (!rtas_rmo_buf)
2108 		panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
2109 		      PAGE_SIZE, &rtas_region);
2110
2111 	rtas_work_area_reserve_arena(rtas_region);
2112 }
2113
2114 int __init early_init_dt_scan_rtas(unsigned long node,
2115 				   const char *uname, int depth, void *data)
2116 {
2117 	const u32 *basep, *entryp, *sizep;
2118
2119 	if (depth != 1 || strcmp(uname, "rtas") != 0)
2120 		return 0;
2121
2122 	basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
2123 	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
2124 	sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);
2125
2126 #ifdef CONFIG_PPC64
2127 	/* need this feature to decide the crashkernel offset */
2128 	if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
2129 		powerpc_firmware_features |= FW_FEATURE_LPAR;
2130 #endif
2131
2132 	if (basep && entryp && sizep) {
2133 		rtas.base = *basep;
2134 		rtas.entry = *entryp;
2135 		rtas.size = *sizep;
2136 	}
2137
2138 #ifdef CONFIG_UDBG_RTAS_CONSOLE
2139 	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
2140 	if (basep)
2141 		rtas_putchar_token = *basep;
2142
2143 	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
2144 	if (basep)
2145 		rtas_getchar_token = *basep;
2146
2147 	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
2148 	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
2149 		udbg_init_rtas_console();
2150
2151 #endif
2152
2153 	/* break now */
2154 	return 1;
2155 }
2156
2157 static DEFINE_RAW_SPINLOCK(timebase_lock);
2158 static u64 timebase = 0;
2159
2160 void rtas_give_timebase(void)
2161 {
2162 	unsigned long flags;
2163
2164 	raw_spin_lock_irqsave(&timebase_lock, flags);
2165 	hard_irq_disable();
2166 	rtas_call(rtas_function_token(RTAS_FN_FREEZE_TIME_BASE), 0, 1, NULL);
2167 	timebase = get_tb();
2168 	raw_spin_unlock(&timebase_lock);
2169
2170 	while (timebase)
2171 		barrier();
2172 	rtas_call(rtas_function_token(RTAS_FN_THAW_TIME_BASE), 0, 1, NULL);
2173 	local_irq_restore(flags);
2174 }
2175
2176 void rtas_take_timebase(void)
2177 {
2178 	while (!timebase)
2179 		barrier();
2180 	raw_spin_lock(&timebase_lock);
2181 	set_tb(timebase >> 32, timebase & 0xffffffff);
2182 	timebase = 0;
2183 	raw_spin_unlock(&timebase_lock);
2184 }
2185
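/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * platforms whose CPUs do not share a synchronized timebase hook the two
 * helpers above into their SMP callbacks, so the boot CPU freezes the
 * timebase and hands its value to the CPU coming online. This mirrors how
 * the pseries platform wires them up; it assumes <asm/smp.h> (for
 * struct smp_ops_t) is available, and the variable name is hypothetical.
 */
static struct smp_ops_t example_timebase_sync_ops __maybe_unused = {
	.give_timebase	= rtas_give_timebase,	/* runs on a CPU already online */
	.take_timebase	= rtas_take_timebase,	/* runs on the CPU being brought up */
	/* .probe, .kick_cpu and the other callbacks are omitted here. */
};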