// SPDX-License-Identifier: GPL-2.0-only
/*
 * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * The Armada 370 and Armada XP SoCs have a power management service
 * unit which is responsible for powering down and waking up CPUs and
 * other SoC units.
 */

#define pr_fmt(fmt) "mvebu-pmsu: " fmt

#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/mvebu-pmsu.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include "common.h"
#include "pmsu.h"

#define PMSU_BASE_OFFSET	0x100
#define PMSU_REG_SIZE		0x1000

/* PMSU MP registers */
#define PMSU_CONTROL_AND_CONFIG(cpu)		(((cpu) * 0x100) + 0x104)
#define PMSU_CONTROL_AND_CONFIG_DFS_REQ		BIT(18)
#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ	BIT(16)
#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN	BIT(20)

#define PMSU_CPU_POWER_DOWN_CONTROL(cpu)	(((cpu) * 0x100) + 0x108)

#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP	BIT(0)

#define PMSU_STATUS_AND_MASK(cpu)		(((cpu) * 0x100) + 0x10c)
#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT	BIT(16)
#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT	BIT(17)
#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP		BIT(20)
#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP		BIT(21)
#define PMSU_STATUS_AND_MASK_DBG_WAKEUP		BIT(22)
#define PMSU_STATUS_AND_MASK_IRQ_MASK		BIT(24)
#define PMSU_STATUS_AND_MASK_FIQ_MASK		BIT(25)

#define PMSU_EVENT_STATUS_AND_MASK(cpu)		(((cpu) * 0x100) + 0x120)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE		BIT(1)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK	BIT(17)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu)	(((cpu) * 0x100) + 0x124)

/* PMSU fabric registers */
#define L2C_NFABRIC_PM_CTL		0x4
#define L2C_NFABRIC_PM_CTL_PWR_DOWN	BIT(20)

/* PMSU delay registers */
#define PMSU_POWERDOWN_DELAY		0xF04
#define PMSU_POWERDOWN_DELAY_PMU	BIT(1)
#define PMSU_POWERDOWN_DELAY_MASK	0xFFFE
#define PMSU_DFLT_ARMADA38X_DELAY	0x64

/* CA9 MPcore SoC Control registers */

#define MPCORE_RESET_CTL		0x64
#define MPCORE_RESET_CTL_L2		BIT(0)
#define MPCORE_RESET_CTL_DEBUG		BIT(16)

#define SRAM_PHYS_BASE	0xFFFF0000
#define BOOTROM_BASE	0xFFF00000
#define BOOTROM_SIZE	0x100000

#define ARMADA_370_CRYPT0_ENG_TARGET	0x9
#define ARMADA_370_CRYPT0_ENG_ATTR	0x1

extern void ll_disable_coherency(void);
extern void ll_enable_coherency(void);

extern void armada_370_xp_cpu_resume(void);
extern void armada_38x_cpu_resume(void);

static phys_addr_t pmsu_mp_phys_base;
static void __iomem *pmsu_mp_base;

static void *mvebu_cpu_resume;

static const struct of_device_id of_pmsu_table[] = {
	{ .compatible = "marvell,armada-370-pmsu", },
	{ .compatible = "marvell,armada-370-xp-pmsu", },
	{ .compatible = "marvell,armada-380-pmsu", },
	{ /* end of list */ },
};
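
/*
 * For reference, the PMSU is described in the device tree by a node
 * matching one of the compatibles above. A minimal sketch of such a
 * node (the unit address and register window below are illustrative,
 * not copied from a particular dtsi):
 *
 *	pmsu: pmsu@22000 {
 *		compatible = "marvell,armada-370-pmsu";
 *		reg = <0x22000 0x1000>;
 *	};
 */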

void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
{
	writel(__pa_symbol(boot_addr), pmsu_mp_base +
		PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}

extern unsigned char mvebu_boot_wa_start[];
extern unsigned char mvebu_boot_wa_end[];

/*
 * This function sets up the boot address workaround needed for SMP
 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the
 * BootROM MBus window, and instead remaps a crypto SRAM into which a
 * custom piece of code is copied to replace the problematic BootROM.
 */
int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
			     unsigned int crypto_eng_attribute,
			     phys_addr_t resume_addr_reg)
{
	void __iomem *sram_virt_base;
	u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;

	mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
	mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
				    SRAM_PHYS_BASE, SZ_64K);

	sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K);
	if (!sram_virt_base) {
		pr_err("Unable to map SRAM to setup the boot address WA\n");
		return -ENOMEM;
	}

	memcpy(sram_virt_base, mvebu_boot_wa_start, code_len);

	/*
	 * The last word of the code copied in SRAM must contain the
	 * physical base address of the PMSU register. We
	 * intentionally store this address in the native endianness
	 * of the system.
	 */
	__raw_writel((unsigned long)resume_addr_reg,
		     sram_virt_base + code_len - 4);

	iounmap(sram_virt_base);

	return 0;
}
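
/*
 * Resulting SRAM layout after mvebu_setup_boot_addr_wa(), as a sketch
 * (offsets relative to SRAM_PHYS_BASE, which now shadows part of the
 * region previously covered by the BootROM window):
 *
 *	0 .. code_len - 5	workaround code copied from
 *				mvebu_boot_wa_start
 *	code_len - 4		physical address of the PMSU boot
 *				address redirect register; the
 *				workaround code loads the real resume
 *				address from there and jumps to it
 */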

static int __init mvebu_v7_pmsu_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret = 0;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;

	pr_info("Initializing Power Management Service Unit\n");

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("unable to get resource\n");
		ret = -ENOENT;
		goto out;
	}

	if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
		pr_warn(FW_WARN "deprecated pmsu binding\n");
		res.start = res.start - PMSU_BASE_OFFSET;
		res.end = res.start + PMSU_REG_SIZE - 1;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				np->full_name)) {
		pr_err("unable to request region\n");
		ret = -EBUSY;
		goto out;
	}

	pmsu_mp_phys_base = res.start;

	pmsu_mp_base = ioremap(res.start, resource_size(&res));
	if (!pmsu_mp_base) {
		pr_err("unable to map registers\n");
		release_mem_region(res.start, resource_size(&res));
		ret = -ENOMEM;
		goto out;
	}

out:
	of_node_put(np);
	return ret;
}

static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void)
{
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;

	/* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
	reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
	reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
	writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
}

enum pmsu_idle_prepare_flags {
	PMSU_PREPARE_NORMAL = 0,
	PMSU_PREPARE_DEEP_IDLE = BIT(0),
	PMSU_PREPARE_SNOOP_DISABLE = BIT(1),
};

/* No locking is needed because we only access per-CPU registers */
static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return -EINVAL;

	/*
	 * Adjust the PMSU configuration to wait for WFI signal, enable
	 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
	 * indication and mask IRQ and FIQ from CPU
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT    |
	       PMSU_STATUS_AND_MASK_IRQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_FIQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK         |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));

	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	/* ask HW to power down the L2 Cache if needed */
	if (flags & PMSU_PREPARE_DEEP_IDLE)
		reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;

	/* request power down */
	reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
		/* Disable the HW-driven snoop disable; SW takes care of it */
		reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
		reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
		writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
	}

	return 0;
}

int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
{
	unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
	int ret;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	ret = mvebu_v7_pmsu_idle_prepare(flags);
	if (ret)
		return ret;

	v7_exit_coherency_flush(all);

	ll_disable_coherency();

	dsb();

	wfi();

	/*
	 * If we are here, wfi failed. As processors run out of
	 * coherency for some time, TLBs might be stale, so flush them.
	 */
	local_flush_tlb_all();

	ll_enable_coherency();

	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
	".arch	armv7-a\n\t"
	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
	"tst	r0, %0 \n\t"
	"orreq	r0, r0, #(1 << 2) \n\t"
	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
	"isb	"
	: : "Ir" (CR_C) : "r0");

	pr_debug("Failed to suspend the system\n");

	return 0;
}

static int armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
}

int armada_38x_do_cpu_suspend(unsigned long deepidle)
{
	unsigned long flags = 0;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	mvebu_v7_pmsu_idle_prepare(flags);
	/*
	 * Already flushed cache, but do it again as the outer cache
	 * functions dirty the cache with spinlocks
	 */
	v7_exit_coherency_flush(louis);

	scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF);

	cpu_do_idle();

	return 1;
}

static int armada_38x_cpu_suspend(unsigned long deepidle)
{
	/*
	 * Note: the deepidle argument is not forwarded, so
	 * armada_38x_do_cpu_suspend() always runs with flags == 0
	 * (no deep idle) here.
	 */
	return cpu_suspend(false, armada_38x_do_cpu_suspend);
}
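
/*
 * Neither suspend callback above is invoked from this file: each is
 * handed to the cpuidle driver through the platform data of
 * mvebu_v7_cpuidle_device (set up in the *_cpuidle_init() functions
 * below). A rough sketch of the consuming side, assuming an ->enter()
 * hook along the lines of the cpuidle-mvebu-v7 driver (names and
 * details illustrative):
 *
 *	static int (*mvebu_v7_cpu_suspend)(unsigned long);
 *
 *	static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
 *				       struct cpuidle_driver *drv,
 *				       int index)
 *	{
 *		bool deepidle = ...;	// derived from drv->states[index]
 *
 *		if (mvebu_v7_cpu_suspend(deepidle))
 *			return -1;
 *		return index;
 *	}
 *
 * with mvebu_v7_cpu_suspend fetched from pdev->dev.platform_data at
 * probe time.
 */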

/* No locking is needed because we only access per-CPU registers */
void mvebu_v7_pmsu_idle_exit(void)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;

	/* Cancel the request to power down the L2 cache */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	/* Cancel the wakeup-event enables and the interrupt masking */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
		 PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK |
		 PMSU_STATUS_AND_MASK_FIQ_MASK);
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
}

static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		unsigned int hw_cpu = cpu_logical_map(smp_processor_id());

		mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume);
	} else if (action == CPU_PM_EXIT) {
		mvebu_v7_pmsu_idle_exit();
	}

	return NOTIFY_OK;
}

static struct notifier_block mvebu_v7_cpu_pm_notifier = {
	.notifier_call = mvebu_v7_cpu_pm_notify,
};

static struct platform_device mvebu_v7_cpuidle_device;

static int broken_idle(struct device_node *np)
{
	if (of_property_read_bool(np, "broken-idle")) {
		pr_warn("CPU idle is currently broken: disabling\n");
		return 1;
	}

	return 0;
}
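
/*
 * "broken-idle" is a boolean device tree property that a board can set
 * on its coherency fabric node to opt out of CPU idle entirely, e.g.
 * (sketch, node label illustrative):
 *
 *	&coherencyfab {
 *		broken-idle;
 *	};
 */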

static __init int armada_370_cpuidle_init(void)
{
	struct device_node *np;
	phys_addr_t redirect_reg;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;

	if (broken_idle(np))
		goto end;

	/*
	 * On Armada 370, there is "a slow exit process from the deep
	 * idle state due to heavy L1/L2 cache cleanup operations
	 * performed by the BootROM software". To avoid this, we
	 * replace the restart code of the BootROM by a simple jump
	 * to the boot address. Then the code located at this boot
	 * address will take care of the initialization.
	 */
	redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0);
	mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET,
				 ARMADA_370_CRYPT0_ENG_ATTR,
				 redirect_reg);

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-370";

end:
	of_node_put(np);
	return 0;
}

static __init int armada_38x_cpuidle_init(void)
{
	struct device_node *np;
	void __iomem *mpsoc_base;
	u32 reg;

	/*
	 * CPU idle support is currently broken on Armada 38x, so bail
	 * out early; the code below is intentionally unreachable and
	 * kept for when the idle support gets fixed.
	 */
	pr_warn("CPU idle is currently broken on Armada 38x: disabling\n");
	return 0;

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-coherency-fabric");
	if (!np)
		return -ENODEV;

	if (broken_idle(np))
		goto end;

	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-mpcore-soc-ctrl");
	if (!np)
		return -ENODEV;
	mpsoc_base = of_iomap(np, 0);
	BUG_ON(!mpsoc_base);

	/* Set up reset mask when powering down the cpus */
	reg = readl(mpsoc_base + MPCORE_RESET_CTL);
	reg |= MPCORE_RESET_CTL_L2;
	reg |= MPCORE_RESET_CTL_DEBUG;
	writel(reg, mpsoc_base + MPCORE_RESET_CTL);
	iounmap(mpsoc_base);

	/* Set up delay */
	reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY);
	reg &= ~PMSU_POWERDOWN_DELAY_MASK;
	reg |= PMSU_DFLT_ARMADA38X_DELAY;
	reg |= PMSU_POWERDOWN_DELAY_PMU;
	writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY);

	mvebu_cpu_resume = armada_38x_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x";

end:
	of_node_put(np);
	return 0;
}

static __init int armada_xp_cpuidle_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;

	if (broken_idle(np))
		goto end;

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp";

end:
	of_node_put(np);
	return 0;
}

static int __init mvebu_v7_cpu_pm_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;
	of_node_put(np);

	/*
	 * CPU idle support is currently broken on Armada 38x, and
	 * since CPU hotplug uses some of the CPU idle functions it is
	 * broken too, so disable it as well.
	 */
	if (of_machine_is_compatible("marvell,armada380")) {
		cpu_hotplug_disable();
		pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n");
	}

	if (of_machine_is_compatible("marvell,armadaxp"))
		ret = armada_xp_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada370"))
		ret = armada_370_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada380"))
		ret = armada_38x_cpuidle_init();
	else
		return 0;

	if (ret)
		return ret;

	mvebu_v7_pmsu_enable_l2_powerdown_onidle();
	if (mvebu_v7_cpuidle_device.name)
		platform_device_register(&mvebu_v7_cpuidle_device);
	cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);

	return 0;
}

arch_initcall(mvebu_v7_cpu_pm_init);
early_initcall(mvebu_v7_pmsu_init);
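
/*
 * Example consumer of mvebu_pmsu_dfs_request() (declared in
 * <linux/mvebu-pmsu.h>): a typical caller is CPU frequency scaling
 * code, which first programs the new CPU clock divider and then asks
 * the PMSU to carry out the dynamic frequency scaling transition. The
 * helper below is an illustrative sketch only and is not used in this
 * file.
 */
static int __maybe_unused example_cpu_dfs_transition(int cpu)
{
	/* ... program the new CPU clock divider for 'cpu' here ... */

	/* The PMSU performs the transition while the target CPU idles */
	return mvebu_pmsu_dfs_request(cpu);
}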

static void mvebu_pmsu_dfs_request_local(void *data)
{
	u32 reg;
	u32 cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/* Prepare to enter idle */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK     |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	/* Request the DFS transition */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
	reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));

	/* The fact of entering idle will trigger the DFS transition */
	wfi();

	/*
	 * We're back from idle, the DFS transition has completed,
	 * clear the idle wait indication.
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	local_irq_restore(flags);
}

int mvebu_pmsu_dfs_request(int cpu)
{
	unsigned long timeout;
	int hwcpu = cpu_logical_map(cpu);
	u32 reg;

	/* Clear any previous DFS DONE event */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Mask the DFS done interrupt, since we are going to poll */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Trigger the DFS on the appropriate CPU */
	smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
				 NULL, false);

	/* Poll until the DFS done event is generated */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
		if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
			break;
		udelay(10);
	}

	if (time_after(jiffies, timeout))
		return -ETIME;

	/* Restore the DFS mask to its original state */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	return 0;
}